path: root/src/pkg/go/scanner/scanner_test.go
author    Robert Griesemer <gri@golang.org>  2009-12-15 15:35:38 -0800
committer Robert Griesemer <gri@golang.org>  2009-12-15 15:35:38 -0800
commit    e4bd81f903362d998f7bfc02095935408aff0bc5 (patch)
tree      05f75a90e239d33be427da4f9c5596d2fcb3dc96 /src/pkg/go/scanner/scanner_test.go
parent    d9527dd16f72598b54a64550607bf892efa12384 (diff)
download  golang-e4bd81f903362d998f7bfc02095935408aff0bc5.tar.gz
1) Change gofmt's default settings for parsing and printing to the new syntax. Use -oldparser to parse the old syntax and -oldprinter to print it.
2) Change gofmt's default formatting settings to use tabs for indentation only and spaces for alignment. This makes code alignment insensitive to an editor's tab width. Use -spaces=false to use tabs for alignment.
3) Manually changed src/exp/parser/parser_test.go so that it doesn't try to parse the parser's source files using the old syntax (they have the new syntax now).
4) gofmt -w src misc test/bench

3rd set of files.

R=rsc
CC=golang-dev
http://codereview.appspot.com/180048
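To illustrate points 1) and 2), here is a minimal sketch (not part of the commit; the package name "example" is chosen for illustration) contrasting the old and new default syntax on the const block that this diff rewrites. Indentation uses tabs, while any in-line alignment would use spaces, so the layout no longer depends on the editor's tab width.

package example

// Old default syntax: every element of the declaration ends in an
// explicit semicolon. The new default parser rejects this form, so it
// is shown here only inside a comment:
//
//	const (
//		special = iota;
//		literal;
//		operator;
//		keyword;
//	)

// New default syntax: semicolons are inserted implicitly at line ends.
const (
	special = iota // token classes, mirroring the test file below
	literal
	operator
	keyword
)

The diff below is then simply the result of running gofmt with the new defaults over the scanner tests (per item 4): trailing semicolons disappear, and trailing-comment alignment is redone with spaces.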
Diffstat (limited to 'src/pkg/go/scanner/scanner_test.go')
-rw-r--r--  src/pkg/go/scanner/scanner_test.go  154
1 file changed, 77 insertions(+), 77 deletions(-)
diff --git a/src/pkg/go/scanner/scanner_test.go b/src/pkg/go/scanner/scanner_test.go
index b6d7e99ca..6ea4b2d58 100644
--- a/src/pkg/go/scanner/scanner_test.go
+++ b/src/pkg/go/scanner/scanner_test.go
@@ -5,18 +5,18 @@
package scanner
import (
- "go/token";
- "os";
- "strings";
- "testing";
+ "go/token"
+ "os"
+ "strings"
+ "testing"
)
const /* class */ (
- special = iota;
- literal;
- operator;
- keyword;
+ special = iota
+ literal
+ operator
+ keyword
)
@@ -29,14 +29,14 @@ func tokenclass(tok token.Token) int {
case tok.IsKeyword():
return keyword
}
- return special;
+ return special
}
type elt struct {
- tok token.Token;
- lit string;
- class int;
+ tok token.Token
+ lit string
+ class int
}
@@ -162,10 +162,10 @@ var tokens = [...]elt{
}
-const whitespace = " \t \n\n\n" // to separate tokens
+const whitespace = " \t \n\n\n" // to separate tokens
type TestErrorHandler struct {
- t *testing.T;
+ t *testing.T
}
func (h *TestErrorHandler) Error(pos token.Position, msg string) {
@@ -174,13 +174,13 @@ func (h *TestErrorHandler) Error(pos token.Position, msg string) {
func NewlineCount(s string) int {
- n := 0;
+ n := 0
for i := 0; i < len(s); i++ {
if s[i] == '\n' {
n++
}
}
- return n;
+ return n
}
@@ -203,27 +203,27 @@ func checkPos(t *testing.T, lit string, pos, expected token.Position) {
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
// make source
- var src string;
+ var src string
for _, e := range tokens {
src += e.lit + whitespace
}
- whitespace_linecount := NewlineCount(whitespace);
+ whitespace_linecount := NewlineCount(whitespace)
// verify scan
- index := 0;
- epos := token.Position{"", 0, 1, 1};
+ index := 0
+ epos := token.Position{"", 0, 1, 1}
nerrors := Tokenize("", strings.Bytes(src), &TestErrorHandler{t}, ScanComments,
func(pos token.Position, tok token.Token, litb []byte) bool {
- e := elt{token.EOF, "", special};
+ e := elt{token.EOF, "", special}
if index < len(tokens) {
e = tokens[index]
}
- lit := string(litb);
+ lit := string(litb)
if tok == token.EOF {
- lit = "<EOF>";
- epos.Column = 0;
+ lit = "<EOF>"
+ epos.Column = 0
}
- checkPos(t, lit, pos, epos);
+ checkPos(t, lit, pos, epos)
if tok != e.tok {
t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
}
@@ -233,16 +233,16 @@ func TestScan(t *testing.T) {
if tokenclass(tok) != e.class {
t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
}
- epos.Offset += len(lit) + len(whitespace);
- epos.Line += NewlineCount(lit) + whitespace_linecount;
+ epos.Offset += len(lit) + len(whitespace)
+ epos.Line += NewlineCount(lit) + whitespace_linecount
if tok == token.COMMENT && litb[1] == '/' {
// correct for unaccounted '\n' in //-style comment
- epos.Offset++;
- epos.Line++;
+ epos.Offset++
+ epos.Line++
}
- index++;
- return tok != token.EOF;
- });
+ index++
+ return tok != token.EOF
+ })
if nerrors != 0 {
t.Errorf("found %d errors", nerrors)
}
@@ -255,14 +255,14 @@ func getTok(_ token.Position, tok token.Token, _ []byte) token.Token {
func checkSemi(t *testing.T, line string, mode uint) {
- var S Scanner;
- S.Init("TestSemis", strings.Bytes(line), nil, mode);
- pos, tok, lit := S.Scan();
+ var S Scanner
+ S.Init("TestSemis", strings.Bytes(line), nil, mode)
+ pos, tok, lit := S.Scan()
for tok != token.EOF {
if tok == token.ILLEGAL {
// next token must be a semicolon
- offs := pos.Offset + 1;
- pos, tok, lit = S.Scan();
+ offs := pos.Offset + 1
+ pos, tok, lit = S.Scan()
if tok == token.SEMICOLON {
if pos.Offset != offs {
t.Errorf("bad offset for %q: got %d, expected %d", line, pos.Offset, offs)
@@ -276,7 +276,7 @@ func checkSemi(t *testing.T, line string, mode uint) {
} else if tok == token.SEMICOLON {
t.Errorf("bad token for %q: got ;, expected no ;", line)
}
- pos, tok, lit = S.Scan();
+ pos, tok, lit = S.Scan()
}
}
@@ -406,9 +406,9 @@ func TestSemis(t *testing.T) {
type seg struct {
- srcline string; // a line of source text
- filename string; // filename for current token
- line int; // line number for current token
+ srcline string // a line of source text
+ filename string // filename for current token
+ line int // line number for current token
}
@@ -416,15 +416,15 @@ var segments = []seg{
// exactly one token per line since the test consumes one token per segment
seg{" line1", "TestLineComments", 1},
seg{"\nline2", "TestLineComments", 2},
- seg{"\nline3 //line File1.go:100", "TestLineComments", 3}, // bad line comment, ignored
+ seg{"\nline3 //line File1.go:100", "TestLineComments", 3}, // bad line comment, ignored
seg{"\nline4", "TestLineComments", 4},
seg{"\n//line File1.go:100\n line100", "File1.go", 100},
seg{"\n//line File2.go:200\n line200", "File2.go", 200},
seg{"\n//line :1\n line1", "", 1},
seg{"\n//line foo:42\n line42", "foo", 42},
- seg{"\n //line foo:42\n line44", "foo", 44}, // bad line comment, ignored
- seg{"\n//line foo 42\n line46", "foo", 46}, // bad line comment, ignored
- seg{"\n//line foo:42 extra text\n line48", "foo", 48}, // bad line comment, ignored
+ seg{"\n //line foo:42\n line44", "foo", 44}, // bad line comment, ignored
+ seg{"\n//line foo 42\n line46", "foo", 46}, // bad line comment, ignored
+ seg{"\n//line foo:42 extra text\n line48", "foo", 48}, // bad line comment, ignored
seg{"\n//line foo:42\n line42", "foo", 42},
seg{"\n//line foo:42\n line42", "foo", 42},
seg{"\n//line File1.go:100\n line100", "File1.go", 100},
@@ -434,17 +434,17 @@ var segments = []seg{
// Verify that comments of the form "//line filename:line" are interpreted correctly.
func TestLineComments(t *testing.T) {
// make source
- var src string;
+ var src string
for _, e := range segments {
src += e.srcline
}
// verify scan
- var S Scanner;
- S.Init("TestLineComments", strings.Bytes(src), nil, 0);
+ var S Scanner
+ S.Init("TestLineComments", strings.Bytes(src), nil, 0)
for _, s := range segments {
- pos, _, lit := S.Scan();
- checkPos(t, string(lit), pos, token.Position{s.filename, pos.Offset, s.line, pos.Column});
+ pos, _, lit := S.Scan()
+ checkPos(t, string(lit), pos, token.Position{s.filename, pos.Offset, s.line, pos.Column})
}
if S.ErrorCount != 0 {
@@ -455,20 +455,20 @@ func TestLineComments(t *testing.T) {
// Verify that initializing the same scanner more than once works correctly.
func TestInit(t *testing.T) {
- var s Scanner;
+ var s Scanner
// 1st init
- s.Init("", strings.Bytes("if true { }"), nil, 0);
- s.Scan(); // if
- s.Scan(); // true
- _, tok, _ := s.Scan(); // {
+ s.Init("", strings.Bytes("if true { }"), nil, 0)
+ s.Scan() // if
+ s.Scan() // true
+ _, tok, _ := s.Scan() // {
if tok != token.LBRACE {
t.Errorf("bad token: got %s, expected %s", tok.String(), token.LBRACE)
}
// 2nd init
- s.Init("", strings.Bytes("go true { ]"), nil, 0);
- _, tok, _ = s.Scan(); // go
+ s.Init("", strings.Bytes("go true { ]"), nil, 0)
+ _, tok, _ = s.Scan() // go
if tok != token.GO {
t.Errorf("bad token: got %s, expected %s", tok.String(), token.GO)
}
@@ -480,12 +480,12 @@ func TestInit(t *testing.T) {
func TestIllegalChars(t *testing.T) {
- var s Scanner;
+ var s Scanner
- const src = "*?*$*@*";
- s.Init("", strings.Bytes(src), &TestErrorHandler{t}, AllowIllegalChars);
+ const src = "*?*$*@*"
+ s.Init("", strings.Bytes(src), &TestErrorHandler{t}, AllowIllegalChars)
for offs, ch := range src {
- pos, tok, lit := s.Scan();
+ pos, tok, lit := s.Scan()
if pos.Offset != offs {
t.Errorf("bad position for %s: got %d, expected %d", string(lit), pos.Offset, offs)
}
@@ -501,37 +501,37 @@ func TestIllegalChars(t *testing.T) {
func TestStdErrorHander(t *testing.T) {
- const src = "@\n" + // illegal character, cause an error
- "@ @\n" + // two errors on the same line
+ const src = "@\n" + // illegal character, cause an error
+ "@ @\n" + // two errors on the same line
"//line File2:20\n" +
- "@\n" + // different file, but same line
+ "@\n" + // different file, but same line
"//line File2:1\n" +
- "@ @\n" + // same file, decreasing line number
+ "@ @\n" + // same file, decreasing line number
"//line File1:1\n" +
- "@ @ @"; // original file, line 1 again
+ "@ @ @" // original file, line 1 again
- v := new(ErrorVector);
+ v := new(ErrorVector)
nerrors := Tokenize("File1", strings.Bytes(src), v, 0,
func(pos token.Position, tok token.Token, litb []byte) bool {
return tok != token.EOF
- });
+ })
- list := v.GetErrorList(Raw);
+ list := v.GetErrorList(Raw)
if len(list) != 9 {
- t.Errorf("found %d raw errors, expected 9", len(list));
- PrintError(os.Stderr, list);
+ t.Errorf("found %d raw errors, expected 9", len(list))
+ PrintError(os.Stderr, list)
}
- list = v.GetErrorList(Sorted);
+ list = v.GetErrorList(Sorted)
if len(list) != 9 {
- t.Errorf("found %d sorted errors, expected 9", len(list));
- PrintError(os.Stderr, list);
+ t.Errorf("found %d sorted errors, expected 9", len(list))
+ PrintError(os.Stderr, list)
}
- list = v.GetErrorList(NoMultiples);
+ list = v.GetErrorList(NoMultiples)
if len(list) != 4 {
- t.Errorf("found %d one-per-line errors, expected 4", len(list));
- PrintError(os.Stderr, list);
+ t.Errorf("found %d one-per-line errors, expected 4", len(list))
+ PrintError(os.Stderr, list)
}
if v.ErrorCount() != nerrors {