From 50104cc32a498f7517a51c8dc93106c51c7a54b4 Mon Sep 17 00:00:00 2001
From: Ondřej Surý <ondrej@sury.org>
Date: Wed, 20 Apr 2011 15:44:41 +0200
Subject: Imported Upstream version 2011.03.07.1

---
 src/pkg/html/token_test.go | 86 ++++++++++++++++++++++++++++++----------------
 1 file changed, 56 insertions(+), 30 deletions(-)

(limited to 'src/pkg/html/token_test.go')

diff --git a/src/pkg/html/token_test.go b/src/pkg/html/token_test.go
index e07999ca5..5cf1f6dac 100644
--- a/src/pkg/html/token_test.go
+++ b/src/pkg/html/token_test.go
@@ -7,6 +7,7 @@ package html
 import (
 	"bytes"
 	"os"
+	"strings"
 	"testing"
 )
 
@@ -15,8 +16,8 @@ type tokenTest struct {
 	desc string
 	// The HTML to parse.
 	html string
-	// The string representations of the expected tokens.
-	tokens []string
+	// The string representations of the expected tokens, joined by '$'.
+	golden string
 }
 
 var tokenTests = []tokenTest{
@@ -25,61 +26,86 @@ var tokenTests = []tokenTest{
 	{
 		"text",
 		"foo bar",
-		[]string{
-			"foo bar",
-		},
+		"foo bar",
 	},
 	// An entity.
 	{
 		"entity",
 		"one &lt; two",
-		[]string{
-			"one &lt; two",
-		},
+		"one &lt; two",
 	},
 	// A start, self-closing and end tag. The tokenizer does not care if the start
 	// and end tokens don't match; that is the job of the parser.
 	{
 		"tags",
 		"<a>b<c/>d</a>",
-		[]string{
-			"<a>",
-			"b",
-			"<c/>",
-			"d",
-			"</a>",
-		},
+		"<a>$b$<c/>$d$</a>",
 	},
+	// Comments.
+	{
+		"comment0",
+		"abc<b><!-- skipme --></b>def",
+		"abc$<b>$</b>$def",
+	},
+	{
+		"comment1",
+		"a<!-->z",
+		"a$z",
+	},
+	{
+		"comment2",
+		"a<!--->z",
+		"a$z",
+	},
+	{
+		"comment3",
+		"a<!--x>-->z",
+		"a$z",
+	},
+	{
+		"comment4",
+		"a<!--x->-->z",
+		"a$z",
+	},
+	{
+		"comment5",
+		"a<!>z",
+		"a$&lt;!&gt;z",
+	},
+	{
+		"comment6",
+		"a<!->z",
+		"a$&lt;!-&gt;z",
+	},
+	{
+		"comment7",
+		"a