author | Ondřej Surý <ondrej@sury.org> | 2011-02-14 13:23:51 +0100
committer | Ondřej Surý <ondrej@sury.org> | 2011-02-14 13:23:51 +0100
commit | 758ff64c69e34965f8af5b2d6ffd65e8d7ab2150 (patch)
tree | 6d6b34f8c678862fe9b56c945a7b63f68502c245 /src/pkg/go
parent | 3e45412327a2654a77944249962b3652e6142299 (diff)
download | golang-758ff64c69e34965f8af5b2d6ffd65e8d7ab2150.tar.gz
Imported Upstream version 2011-02-01.1 (upstream/2011-02-01.1)
Diffstat (limited to 'src/pkg/go')
-rw-r--r-- | src/pkg/go/ast/ast.go | 19
-rw-r--r-- | src/pkg/go/ast/walk.go | 11
-rw-r--r-- | src/pkg/go/doc/doc.go | 2
-rw-r--r-- | src/pkg/go/parser/parser.go | 93
-rw-r--r-- | src/pkg/go/parser/parser_test.go | 1
-rw-r--r-- | src/pkg/go/printer/nodes.go | 90
-rw-r--r-- | src/pkg/go/printer/printer.go | 42
-rw-r--r-- | src/pkg/go/printer/testdata/comments.golden | 6
-rw-r--r-- | src/pkg/go/printer/testdata/comments.input | 6
-rw-r--r-- | src/pkg/go/scanner/scanner.go | 28
-rw-r--r-- | src/pkg/go/scanner/scanner_test.go | 25
-rw-r--r-- | src/pkg/go/token/position.go | 38
-rw-r--r-- | src/pkg/go/token/position_test.go | 47
-rw-r--r-- | src/pkg/go/token/token.go | 12
-rw-r--r-- | src/pkg/go/typechecker/typechecker_test.go | 3
15 files changed, 279 insertions, 144 deletions
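
The change that cuts across most of the files below is the new go/scanner initialization: Scanner.Init no longer adds the file to the token.FileSet itself; the caller registers the file with FileSet.AddFile and passes the resulting *token.File to Init. A minimal sketch of the new calling convention follows; the file name and source text are invented for illustration, but the calls mirror the ones used in the diff (token.NewFileSet, FileSet.AddFile, Scanner.Init, Scanner.Scan):

```go
package main

import (
	"fmt"
	"go/scanner"
	"go/token"
)

func main() {
	src := []byte("package main\n\nfunc main() { println(42) }\n")

	// The caller now owns file registration; Init panics if the
	// registered size does not match len(src).
	fset := token.NewFileSet()
	file := fset.AddFile("example.go", fset.Base(), len(src))

	var s scanner.Scanner
	s.Init(file, src, nil /* no error handler */, scanner.ScanComments)

	// Scan until EOF, printing each token's position and kind.
	for {
		pos, tok, _ := s.Scan()
		if tok == token.EOF {
			break
		}
		fmt.Printf("%s\t%s\n", file.Position(pos), tok)
	}
}
```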
diff --git a/src/pkg/go/ast/ast.go b/src/pkg/go/ast/ast.go
index cf2ce36df..2e8f0973f 100644
--- a/src/pkg/go/ast/ast.go
+++ b/src/pkg/go/ast/ast.go
@@ -535,6 +535,13 @@ type (
 		X Expr // expression
 	}
 
+	// A SendStmt node represents a send statement.
+	SendStmt struct {
+		Chan  Expr
+		Arrow token.Pos // position of "<-"
+		Value Expr
+	}
+
 	// An IncDecStmt node represents an increment or decrement statement.
 	IncDecStmt struct {
 		X Expr
@@ -629,11 +636,10 @@ type (
 
 	// A CommClause node represents a case of a select statement.
 	CommClause struct {
-		Case     token.Pos   // position of "case" or "default" keyword
-		Tok      token.Token // ASSIGN or DEFINE (valid only if Lhs != nil)
-		Lhs, Rhs Expr        // Rhs == nil means default case
-		Colon    token.Pos   // position of ":"
-		Body     []Stmt      // statement list; or nil
+		Case  token.Pos // position of "case" or "default" keyword
+		Comm  Stmt      // send or receive statement; nil means default case
+		Colon token.Pos // position of ":"
+		Body  []Stmt    // statement list; or nil
 	}
 
 	// An SelectStmt node represents a select statement.
@@ -670,6 +676,7 @@ func (s *DeclStmt) Pos() token.Pos { return s.Decl.Pos() }
 func (s *EmptyStmt) Pos() token.Pos   { return s.Semicolon }
 func (s *LabeledStmt) Pos() token.Pos { return s.Label.Pos() }
 func (s *ExprStmt) Pos() token.Pos    { return s.X.Pos() }
+func (s *SendStmt) Pos() token.Pos    { return s.Chan.Pos() }
 func (s *IncDecStmt) Pos() token.Pos  { return s.X.Pos() }
 func (s *AssignStmt) Pos() token.Pos  { return s.Lhs[0].Pos() }
 func (s *GoStmt) Pos() token.Pos      { return s.Go }
@@ -695,6 +702,7 @@ func (s *EmptyStmt) End() token.Pos {
 }
 func (s *LabeledStmt) End() token.Pos { return s.Stmt.End() }
 func (s *ExprStmt) End() token.Pos    { return s.X.End() }
+func (s *SendStmt) End() token.Pos    { return s.Value.End() }
 func (s *IncDecStmt) End() token.Pos {
 	return s.TokPos + 2 /* len("++") */
 }
@@ -753,6 +761,7 @@ func (s *DeclStmt) stmtNode() {}
 func (s *EmptyStmt) stmtNode()   {}
 func (s *LabeledStmt) stmtNode() {}
 func (s *ExprStmt) stmtNode()    {}
+func (s *SendStmt) stmtNode()    {}
 func (s *IncDecStmt) stmtNode()  {}
 func (s *AssignStmt) stmtNode()  {}
 func (s *GoStmt) stmtNode()      {}
diff --git a/src/pkg/go/ast/walk.go b/src/pkg/go/ast/walk.go
index 875a92f3f..a77f8ee5e 100644
--- a/src/pkg/go/ast/walk.go
+++ b/src/pkg/go/ast/walk.go
@@ -195,6 +195,10 @@ func Walk(v Visitor, node Node) {
 	case *ExprStmt:
 		Walk(v, n.X)
 
+	case *SendStmt:
+		Walk(v, n.Chan)
+		Walk(v, n.Value)
+
 	case *IncDecStmt:
 		Walk(v, n.X)
 
@@ -258,11 +262,8 @@ func Walk(v Visitor, node Node) {
 		Walk(v, n.Body)
 
 	case *CommClause:
-		if n.Lhs != nil {
-			Walk(v, n.Lhs)
-		}
-		if n.Rhs != nil {
-			Walk(v, n.Rhs)
+		if n.Comm != nil {
+			Walk(v, n.Comm)
 		}
 		walkStmtList(v, n.Body)
diff --git a/src/pkg/go/doc/doc.go b/src/pkg/go/doc/doc.go
index fb1c4e03d..e46857cb8 100644
--- a/src/pkg/go/doc/doc.go
+++ b/src/pkg/go/doc/doc.go
@@ -154,7 +154,7 @@ func (doc *docReader) addValue(decl *ast.GenDecl) {
 	// determine values list
 	const threshold = 0.75
 	values := &doc.values
-	if domName != "" && domFreq >= int(float(len(decl.Specs))*threshold) {
+	if domName != "" && domFreq >= int(float64(len(decl.Specs))*threshold) {
 		// typed entries are sufficiently frequent
 		typ := doc.lookupTypeDoc(domName)
 		if typ != nil {
diff --git a/src/pkg/go/parser/parser.go b/src/pkg/go/parser/parser.go
index 3b2fe4577..2395b8158 100644
--- a/src/pkg/go/parser/parser.go
+++ b/src/pkg/go/parser/parser.go
@@ -70,7 +70,8 @@ func scannerMode(mode uint) uint {
 
 func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
-	p.file = p.scanner.Init(fset, filename, src, p, scannerMode(mode))
+	p.file = fset.AddFile(filename, fset.Base(), len(src))
+	p.scanner.Init(p.file, src, p, scannerMode(mode))
 	p.mode = mode
 	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
 	p.next()
@@ -1192,18 +1193,6 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
 	x := p.parseExprList()
 
 	switch p.tok {
-	case token.COLON:
-		// labeled statement
-		colon := p.pos
-		p.next()
-		if labelOk && len(x) == 1 {
-			if label, isIdent := x[0].(*ast.Ident); isIdent {
-				return &ast.LabeledStmt{label, colon, p.parseStmt()}
-			}
-		}
-		p.error(x[0].Pos(), "illegal label declaration")
-		return &ast.BadStmt{x[0].Pos(), colon + 1}
-
 	case token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
 		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
@@ -1217,11 +1206,29 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
 	}
 
 	if len(x) > 1 {
-		p.error(x[0].Pos(), "only one expression allowed")
+		p.errorExpected(x[0].Pos(), "1 expression")
 		// continue with first expression
 	}
 
-	if p.tok == token.INC || p.tok == token.DEC {
+	switch p.tok {
+	case token.COLON:
+		// labeled statement
+		colon := p.pos
+		p.next()
+		if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent {
+			return &ast.LabeledStmt{label, colon, p.parseStmt()}
+		}
+		p.error(x[0].Pos(), "illegal label declaration")
+		return &ast.BadStmt{x[0].Pos(), colon + 1}
+
+	case token.ARROW:
+		// send statement
+		arrow := p.pos
+		p.next() // consume "<-"
+		y := p.parseExpr()
+		return &ast.SendStmt{x[0], arrow, y}
+
+	case token.INC, token.DEC:
 		// increment or decrement
 		s := &ast.IncDecStmt{x[0], p.pos, p.tok}
 		p.next() // consume "++" or "--"
@@ -1485,28 +1492,52 @@ func (p *parser) parseCommClause() *ast.CommClause {
 
 	// CommCase
 	pos := p.pos
-	var tok token.Token
-	var lhs, rhs ast.Expr
+	var comm ast.Stmt
 	if p.tok == token.CASE {
 		p.next()
+		lhs := p.parseExprList()
 		if p.tok == token.ARROW {
-			// RecvExpr without assignment
-			rhs = p.parseExpr()
+			// SendStmt
+			if len(lhs) > 1 {
+				p.errorExpected(lhs[0].Pos(), "1 expression")
+				// continue with first expression
+			}
+			arrow := p.pos
+			p.next()
+			rhs := p.parseExpr()
+			comm = &ast.SendStmt{lhs[0], arrow, rhs}
 		} else {
-			// SendExpr or RecvExpr
-			rhs = p.parseExpr()
+			// RecvStmt
+			pos := p.pos
+			tok := p.tok
+			var rhs ast.Expr
 			if p.tok == token.ASSIGN || p.tok == token.DEFINE {
-				// RecvExpr with assignment
-				tok = p.tok
+				// RecvStmt with assignment
+				if len(lhs) > 2 {
+					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
+					// continue with first two expressions
+					lhs = lhs[0:2]
+				}
 				p.next()
-				lhs = rhs
-				if p.tok == token.ARROW {
-					rhs = p.parseExpr()
-				} else {
-					p.expect(token.ARROW) // use expect() error handling
+				rhs = p.parseExpr()
+			} else {
+				// rhs must be single receive operation
+				if len(lhs) > 1 {
+					p.errorExpected(lhs[0].Pos(), "1 expression")
+					// continue with first expression
 				}
+				rhs = lhs[0]
+				lhs = nil // there is no lhs
+			}
+			if x, isUnary := rhs.(*ast.UnaryExpr); !isUnary || x.Op != token.ARROW {
+				p.errorExpected(rhs.Pos(), "send or receive operation")
+				rhs = &ast.BadExpr{rhs.Pos(), rhs.End()}
+			}
+			if lhs != nil {
+				comm = &ast.AssignStmt{lhs, pos, tok, []ast.Expr{rhs}}
+			} else {
+				comm = &ast.ExprStmt{rhs}
 			}
-			// else SendExpr
 		}
 	} else {
 		p.expect(token.DEFAULT)
@@ -1515,7 +1546,7 @@ func (p *parser) parseCommClause() *ast.CommClause {
 	colon := p.expect(token.COLON)
 	body := p.parseStmtList()
 
-	return &ast.CommClause{pos, tok, lhs, rhs, colon, body}
+	return &ast.CommClause{pos, comm, colon, body}
 }
 
@@ -1567,7 +1598,7 @@ func (p *parser) parseForStmt() ast.Stmt {
 		}
 		// check rhs
 		if len(as.Rhs) != 1 {
-			p.errorExpected(as.Rhs[0].Pos(), "1 expressions")
+			p.errorExpected(as.Rhs[0].Pos(), "1 expression")
 			return &ast.BadStmt{pos, body.End()}
 		}
 		if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
diff --git a/src/pkg/go/parser/parser_test.go b/src/pkg/go/parser/parser_test.go
index 56bd80ef1..5a7f05ca8 100644
--- a/src/pkg/go/parser/parser_test.go
+++ b/src/pkg/go/parser/parser_test.go
@@ -46,6 +46,7 @@ var validPrograms = []interface{}{
 	`package main; type T []int; func g(int) bool { return true }; func f() { if g(T{42}[0]) {} };`,
 	`package main; type T []int; func f() { for _ = range []int{T{42}[0]} {} };`,
 	`package main; var a = T{{1, 2}, {3, 4}}`,
+	`package main; func f() { select { case <- c: case c <- d: case c <- <- d: case <-c <- d: } };`,
 }
diff --git a/src/pkg/go/printer/nodes.go b/src/pkg/go/printer/nodes.go
index 1ee0846f6..7933c2f18 100644
--- a/src/pkg/go/printer/nodes.go
+++ b/src/pkg/go/printer/nodes.go
@@ -228,7 +228,7 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
 				useFF = false
 			} else {
 				const r = 4 // threshold
-				ratio := float(size) / float(prevSize)
+				ratio := float64(size) / float64(prevSize)
 				useFF = ratio <= 1/r || r <= ratio
 			}
 		}
@@ -506,12 +506,12 @@ const (
 )
 
-func walkBinary(e *ast.BinaryExpr) (has5, has6 bool, maxProblem int) {
+func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) {
 	switch e.Op.Precedence() {
+	case 4:
+		has4 = true
 	case 5:
 		has5 = true
-	case 6:
-		has6 = true
 	}
 
 	switch l := e.X.(type) {
@@ -521,9 +521,9 @@ func walkBinary(e *ast.BinaryExpr) (has5, has6 bool, maxProblem int) {
 			// pretend this is an *ast.ParenExpr and do nothing.
 			break
 		}
-		h5, h6, mp := walkBinary(l)
+		h4, h5, mp := walkBinary(l)
+		has4 = has4 || h4
 		has5 = has5 || h5
-		has6 = has6 || h6
 		if maxProblem < mp {
 			maxProblem = mp
 		}
@@ -536,25 +536,25 @@ func walkBinary(e *ast.BinaryExpr) (has5, has6 bool, maxProblem int) {
 			// pretend this is an *ast.ParenExpr and do nothing.
 			break
 		}
-		h5, h6, mp := walkBinary(r)
+		h4, h5, mp := walkBinary(r)
+		has4 = has4 || h4
 		has5 = has5 || h5
-		has6 = has6 || h6
 		if maxProblem < mp {
 			maxProblem = mp
 		}
 
 	case *ast.StarExpr:
 		if e.Op.String() == "/" {
-			maxProblem = 6
+			maxProblem = 5
 		}
 
 	case *ast.UnaryExpr:
 		switch e.Op.String() + r.Op.String() {
 		case "/*", "&&", "&^":
-			maxProblem = 6
+			maxProblem = 5
 		case "++", "--":
-			if maxProblem < 5 {
-				maxProblem = 5
+			if maxProblem < 4 {
+				maxProblem = 4
 			}
 		}
 	}
@@ -563,20 +563,20 @@ func walkBinary(e *ast.BinaryExpr) (has5, has6 bool, maxProblem int) {
 
 func cutoff(e *ast.BinaryExpr, depth int) int {
-	has5, has6, maxProblem := walkBinary(e)
+	has4, has5, maxProblem := walkBinary(e)
 	if maxProblem > 0 {
 		return maxProblem + 1
 	}
-	if has5 && has6 {
+	if has4 && has5 {
 		if depth == 1 {
-			return 6
+			return 5
 		}
-		return 5
+		return 4
 	}
 	if depth == 1 {
-		return 7
+		return 6
 	}
-	return 5
+	return 4
 }
 
@@ -603,15 +603,14 @@ func reduceDepth(depth int) int {
 // (Algorithm suggestion by Russ Cox.)
 //
 // The precedences are:
-//	6	* / % << >> & &^
-//	5	+ - | ^
-//	4	== != < <= > >=
-//	3	<-
+//	5	* / % << >> & &^
+//	4	+ - | ^
+//	3	== != < <= > >=
 //	2	&&
 //	1	||
 //
-// The only decision is whether there will be spaces around levels 5 and 6.
-// There are never spaces at level 7 (unary), and always spaces at levels 4 and below.
+// The only decision is whether there will be spaces around levels 4 and 5.
+// There are never spaces at level 6 (unary), and always spaces at levels 3 and below.
 //
 // To choose the cutoff, look at the whole expression but excluding primary
 // expressions (function calls, parenthesized exprs), and apply these rules:
@@ -619,21 +618,21 @@ func reduceDepth(depth int) int {
 // 1) If there is a binary operator with a right side unary operand
 //    that would clash without a space, the cutoff must be (in order):
 //
-//	/*	7
-//	&&	7
-//	&^	7
-//	++	6
-//	--	6
+//	/*	6
+//	&&	6
+//	&^	6
+//	++	5
+//	--	5
 //
 // (Comparison operators always have spaces around them.)
 //
-// 2) If there is a mix of level 6 and level 5 operators, then the cutoff
-//    is 6 (use spaces to distinguish precedence) in Normal mode
-//    and 5 (never use spaces) in Compact mode.
+// 2) If there is a mix of level 5 and level 4 operators, then the cutoff
+//    is 5 (use spaces to distinguish precedence) in Normal mode
+//    and 4 (never use spaces) in Compact mode.
 //
-// 3) If there are no level 5 operators or no level 6 operators, then the
-//    cutoff is 7 (always use spaces) in Normal mode
-//    and 5 (never use spaces) in Compact mode.
+// 3) If there are no level 4 operators or no level 5 operators, then the
+//    cutoff is 6 (always use spaces) in Normal mode
+//    and 4 (never use spaces) in Compact mode.
 //
 // Sets multiLine to true if the binary expression spans multiple lines.
 func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiLine *bool) {
@@ -872,7 +871,10 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
 		}
 		p.print(x.Lbrace, token.LBRACE)
 		p.exprList(x.Lbrace, x.Elts, 1, commaSep|commaTerm, multiLine, x.Rbrace)
-		p.print(x.Rbrace, token.RBRACE)
+		// do not insert extra line breaks because of comments before
+		// the closing '}' as it might break the code if there is no
+		// trailing ','
+		p.print(noExtraLinebreak, x.Rbrace, token.RBRACE, noExtraLinebreak)
 
 	case *ast.Ellipsis:
 		p.print(token.ELLIPSIS)
@@ -1080,6 +1082,12 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
 		const depth = 1
 		p.expr0(s.X, depth, multiLine)
 
+	case *ast.SendStmt:
+		const depth = 1
+		p.expr0(s.Chan, depth, multiLine)
+		p.print(blank, s.Arrow, token.ARROW, blank)
+		p.expr0(s.Value, depth, multiLine)
+
 	case *ast.IncDecStmt:
 		const depth = 1
 		p.expr0(s.X, depth+1, multiLine)
@@ -1176,13 +1184,9 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
 		*multiLine = true
 
 	case *ast.CommClause:
-		if s.Rhs != nil {
+		if s.Comm != nil {
 			p.print(token.CASE, blank)
-			if s.Lhs != nil {
-				p.expr(s.Lhs, multiLine)
-				p.print(blank, s.Tok, blank)
-			}
-			p.expr(s.Rhs, multiLine)
+			p.stmt(s.Comm, false, ignoreMultiLine)
 		} else {
 			p.print(token.DEFAULT)
 		}
@@ -1388,7 +1392,7 @@ func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLi
 		if isLit {
 			sep = blank
 		}
-		p.print(sep, b.Pos(), token.LBRACE)
+		p.print(sep, b.Lbrace, token.LBRACE)
 		if len(b.List) > 0 {
 			p.print(blank)
 			for i, s := range b.List {
diff --git a/src/pkg/go/printer/printer.go b/src/pkg/go/printer/printer.go
index a4ddad50e..34b0c4e2d 100644
--- a/src/pkg/go/printer/printer.go
+++ b/src/pkg/go/printer/printer.go
@@ -58,6 +58,15 @@ var infinity = 1 << 30
 var ignoreMultiLine = new(bool)
 
+// A pmode value represents the current printer mode.
+type pmode int
+
+const (
+	inLiteral pmode = 1 << iota
+	noExtraLinebreak
+)
+
+
 type printer struct {
 	// Configuration (does not change after initialization)
 	output io.Writer
@@ -69,7 +78,7 @@ type printer struct {
 	nesting int   // nesting level (0: top-level (package scope), >0: functions/decls.)
 	written int   // number of bytes written
 	indent  int   // current indentation
-	escape  bool  // true if in escape sequence
+	mode    pmode // current printer mode
 	lastTok token.Token // the last token printed (token.ILLEGAL if it's whitespace)
 
 	// Buffered whitespace
@@ -162,7 +171,7 @@ func (p *printer) write(data []byte) {
 			p.pos.Line++
 			p.pos.Column = 1
 
-			if !p.escape {
+			if p.mode&inLiteral == 0 {
 				// write indentation
 				// use "hard" htabs - indentation columns
 				// must not be discarded by the tabwriter
@@ -211,7 +220,7 @@ func (p *printer) write(data []byte) {
 			}
 
 		case tabwriter.Escape:
-			p.escape = !p.escape
+			p.mode ^= inLiteral
 
 			// ignore escape chars introduced by printer - they are
 			// invisible and must not affect p.pos (was issue #1089)
@@ -272,7 +281,7 @@ func (p *printer) writeItem(pos token.Position, data []byte, tag HTMLTag) {
 		// (used when printing merged ASTs of different files
 		// e.g., the result of ast.MergePackageFiles)
 		p.indent = 0
-		p.escape = false
+		p.mode = 0
 		p.buffer = p.buffer[0:0]
 		fileChanged = true
 	}
@@ -683,9 +692,13 @@ func (p *printer) intersperseComments(next token.Position, tok token.Token) (dro
 			// follows on the same line: separate with an extra blank
 			p.write([]byte{' '})
 		}
-		// ensure that there is a newline after a //-style comment
-		// or if we are before a closing '}' or at the end of a file
-		return p.writeCommentSuffix(last.Text[1] == '/' || tok == token.RBRACE || tok == token.EOF)
+		// ensure that there is a line break after a //-style comment,
+		// before a closing '}' unless explicitly disabled, or at eof
+		needsLinebreak :=
+			last.Text[1] == '/' ||
+				tok == token.RBRACE && p.mode&noExtraLinebreak == 0 ||
+				tok == token.EOF
+		return p.writeCommentSuffix(needsLinebreak)
 	}
 
 	// no comment was written - we should never reach here since
@@ -787,6 +800,9 @@ func (p *printer) print(args ...interface{}) {
 		var tok token.Token
 
 		switch x := f.(type) {
+		case pmode:
+			// toggle printer mode
+			p.mode ^= x
 		case whiteSpace:
 			if x == ignore {
 				// don't add ignore's to the buffer; they
@@ -818,10 +834,14 @@ func (p *printer) print(args ...interface{}) {
 				data = x.Value
 			}
 			// escape all literals so they pass through unchanged
-			// (note that valid Go programs cannot contain esc ('\xff')
-			// bytes since they do not appear in legal UTF-8 sequences)
-			// TODO(gri): do this more efficiently.
-			data = []byte("\xff" + string(data) + "\xff")
+			// (note that valid Go programs cannot contain
+			// tabwriter.Escape bytes since they do not appear in
+			// legal UTF-8 sequences)
+			escData := make([]byte, 0, len(data)+2)
+			escData = append(escData, tabwriter.Escape)
+			escData = append(escData, data...)
+			escData = append(escData, tabwriter.Escape)
+			data = escData
 			tok = x.Kind
 
 		case token.Token:
 			s := x.String()
diff --git a/src/pkg/go/printer/testdata/comments.golden b/src/pkg/go/printer/testdata/comments.golden
index 200ea332f..a86d66174 100644
--- a/src/pkg/go/printer/testdata/comments.golden
+++ b/src/pkg/go/printer/testdata/comments.golden
@@ -422,7 +422,7 @@ func _() {
 
 func ( /* comment1 */ T /* comment2 */ ) _() {}
 
-func _() { /* one-liner */
+func _() { /* one-line functions with comments are formatted as multi-line functions */
 }
 
 func _() {
@@ -430,6 +430,10 @@ func _() {
 	/* closing curly brace should be on new line */
 }
 
+func _() {
+	_ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */ }
+}
+
 // Comments immediately adjacent to punctuation (for which the go/printer
 // may obly have estimated position information) must remain after the punctuation.
 
diff --git a/src/pkg/go/printer/testdata/comments.input b/src/pkg/go/printer/testdata/comments.input
index 4a9ea4742..14cd4cf7a 100644
--- a/src/pkg/go/printer/testdata/comments.input
+++ b/src/pkg/go/printer/testdata/comments.input
@@ -422,12 +422,16 @@ func _() {
 
 func (/* comment1 */ T /* comment2 */) _() {}
 
-func _() { /* one-liner */ }
+func _() { /* one-line functions with comments are formatted as multi-line functions */ }
 
 func _() {
 	_ = 0 /* closing curly brace should be on new line */
 }
 
+func _() {
+	_ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
+}
+
 // Comments immediately adjacent to punctuation (for which the go/printer
 // may obly have estimated position information) must remain after the punctuation.
 
diff --git a/src/pkg/go/scanner/scanner.go b/src/pkg/go/scanner/scanner.go
index 6ce846cd8..8c3205230 100644
--- a/src/pkg/go/scanner/scanner.go
+++ b/src/pkg/go/scanner/scanner.go
@@ -96,24 +96,28 @@ const (
 	InsertSemis // automatically insert semicolons
 )
 
-// TODO(gri) Would it be better to simply provide *token.File to Init
-// instead of fset, and filename, and then return the file?
-// It could cause an error/panic if the provided file.Size()
-// doesn't match len(src).
-
-// Init prepares the scanner S to tokenize the text src. It sets the
-// scanner at the beginning of the source text, adds a new file with
-// the given filename to the file set fset, and returns that file.
+// Init prepares the scanner S to tokenize the text src by setting the
+// scanner at the beginning of src. The scanner uses the file set file
+// for position information and it adds line information for each line.
+// It is ok to re-use the same file when re-scanning the same file as
+// line information which is already present is ignored. Init causes a
+// panic if the file size does not match the src size.
 //
 // Calls to Scan will use the error handler err if they encounter a
 // syntax error and err is not nil. Also, for each error encountered,
 // the Scanner field ErrorCount is incremented by one. The mode parameter
 // determines how comments, illegal characters, and semicolons are handled.
 //
-func (S *Scanner) Init(fset *token.FileSet, filename string, src []byte, err ErrorHandler, mode uint) *token.File {
+// Note that Init may call err if there is an error in the first character
+// of the file.
+//
+func (S *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode uint) {
 	// Explicitly initialize all fields since a scanner may be reused.
-	S.file = fset.AddFile(filename, fset.Base(), len(src))
-	S.dir, _ = path.Split(filename)
+	if file.Size() != len(src) {
+		panic("file size does not match src len")
+	}
+	S.file = file
+	S.dir, _ = path.Split(file.Name())
 	S.src = src
 	S.err = err
 	S.mode = mode
@@ -126,8 +130,6 @@ func (S *Scanner) Init(fset *token.FileSet, filename string, src []byte, err Err
 	S.ErrorCount = 0
 
 	S.next()
-
-	return S.file
 }
 
diff --git a/src/pkg/go/scanner/scanner_test.go b/src/pkg/go/scanner/scanner_test.go
index b1004f89d..c622ff482 100644
--- a/src/pkg/go/scanner/scanner_test.go
+++ b/src/pkg/go/scanner/scanner_test.go
@@ -223,12 +223,12 @@ func TestScan(t *testing.T) {
 	for _, e := range tokens {
 		src += e.lit + whitespace
 	}
-	src_linecount := newlineCount(src) + 1
+	src_linecount := newlineCount(src)
 	whitespace_linecount := newlineCount(whitespace)
 
 	// verify scan
 	var s Scanner
-	s.Init(fset, "", []byte(src), &testErrorHandler{t}, ScanComments)
+	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), &testErrorHandler{t}, ScanComments)
 	index := 0
 	epos := token.Position{"", 0, 1, 1} // expected position
 	for {
@@ -241,7 +241,7 @@ func TestScan(t *testing.T) {
 		if tok == token.EOF {
 			lit = "<EOF>"
 			epos.Line = src_linecount
-			epos.Column = 1
+			epos.Column = 2
 		}
 		checkPos(t, lit, pos, epos)
 		if tok != e.tok {
@@ -273,7 +273,8 @@ func TestScan(t *testing.T) {
 
 func checkSemi(t *testing.T, line string, mode uint) {
 	var S Scanner
-	file := S.Init(fset, "TestSemis", []byte(line), nil, mode)
+	file := fset.AddFile("TestSemis", fset.Base(), len(line))
+	S.Init(file, []byte(line), nil, mode)
 	pos, tok, lit := S.Scan()
 	for tok != token.EOF {
 		if tok == token.ILLEGAL {
@@ -476,7 +477,8 @@ func TestLineComments(t *testing.T) {
 
 	// verify scan
 	var S Scanner
-	file := S.Init(fset, "dir/TestLineComments", []byte(src), nil, 0)
+	file := fset.AddFile("dir/TestLineComments", fset.Base(), len(src))
+	S.Init(file, []byte(src), nil, 0)
 	for _, s := range segments {
 		p, _, lit := S.Scan()
 		pos := file.Position(p)
@@ -495,7 +497,8 @@ func TestInit(t *testing.T) {
 
 	// 1st init
 	src1 := "if true { }"
-	f1 := s.Init(fset, "", []byte(src1), nil, 0)
+	f1 := fset.AddFile("src1", fset.Base(), len(src1))
+	s.Init(f1, []byte(src1), nil, 0)
 	if f1.Size() != len(src1) {
 		t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
 	}
@@ -508,7 +511,8 @@ func TestInit(t *testing.T) {
 
 	// 2nd init
 	src2 := "go true { ]"
-	f2 := s.Init(fset, "", []byte(src2), nil, 0)
+	f2 := fset.AddFile("src2", fset.Base(), len(src2))
+	s.Init(f2, []byte(src2), nil, 0)
 	if f2.Size() != len(src2) {
 		t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
 	}
@@ -527,7 +531,8 @@ func TestIllegalChars(t *testing.T) {
 
 	var s Scanner
 	const src = "*?*$*@*"
-	file := s.Init(fset, "", []byte(src), &testErrorHandler{t}, AllowIllegalChars)
+	file := fset.AddFile("", fset.Base(), len(src))
+	s.Init(file, []byte(src), &testErrorHandler{t}, AllowIllegalChars)
 	for offs, ch := range src {
 		pos, tok, lit := s.Scan()
 		if poffs := file.Offset(pos); poffs != offs {
@@ -556,7 +561,7 @@ func TestStdErrorHander(t *testing.T) {
 	v := new(ErrorVector)
 
 	var s Scanner
-	s.Init(fset, "File1", []byte(src), v, 0)
+	s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), v, 0)
 	for {
 		if _, tok, _ := s.Scan(); tok == token.EOF {
 			break
@@ -604,7 +609,7 @@ func (h *errorCollector) Error(pos token.Position, msg string) {
 func checkError(t *testing.T, src string, tok token.Token, pos int, err string) {
 	var s Scanner
 	var h errorCollector
-	s.Init(fset, "", []byte(src), &h, ScanComments)
+	s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), &h, ScanComments)
 	_, tok0, _ := s.Scan()
 	_, tok1, _ := s.Scan()
 	if tok0 != tok {
diff --git a/src/pkg/go/token/position.go b/src/pkg/go/token/position.go
index 0044a0ed7..809e53f0a 100644
--- a/src/pkg/go/token/position.go
+++ b/src/pkg/go/token/position.go
@@ -153,7 +153,7 @@ type lineInfo struct {
 // AddLineInfo adds alternative file and line number information for
 // a given file offset. The offset must be larger than the offset for
-// the previously added alternative line info and not larger than the
+// the previously added alternative line info and smaller than the
 // file size; otherwise the information is ignored.
 //
 // AddLineInfo is typically used to register alternative position
@@ -161,7 +161,7 @@ type lineInfo struct {
 //
 func (f *File) AddLineInfo(offset int, filename string, line int) {
 	f.set.mutex.Lock()
-	if i := len(f.infos); i == 0 || f.infos[i-1].offset < offset && offset <= f.size {
+	if i := len(f.infos); i == 0 || f.infos[i-1].offset < offset && offset < f.size {
 		f.infos = append(f.infos, lineInfo{offset, filename, line})
 	}
 	f.set.mutex.Unlock()
@@ -212,27 +212,30 @@ func (f *File) LineCount() int {
 
 // AddLine adds the line offset for a new line.
 // The line offset must be larger than the offset for the previous line
-// and not larger than the file size; otherwise the line offset is ignored.
+// and smaller than the file size; otherwise the line offset is ignored.
 //
 func (f *File) AddLine(offset int) {
 	f.set.mutex.Lock()
-	if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset <= f.size {
+	if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
 		f.lines = append(f.lines, offset)
 	}
 	f.set.mutex.Unlock()
 }
 
-// SetLines sets all line offsets for a file and returns true if successful.
+// SetLines sets the line offsets for a file and returns true if successful.
+// The line offsets are the offsets of the first character of each line;
+// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
+// An empty file has an empty line offset table.
 // Each line offset must be larger than the offset for the previous line
-// and not larger than the file size; otherwise the SetLines fails and returns
+// and smaller than the file size; otherwise SetLines fails and returns
 // false.
 //
 func (f *File) SetLines(lines []int) bool {
 	// verify validity of lines table
 	size := f.size
 	for i, offset := range lines {
-		if i > 0 && offset <= lines[i-1] || size < offset {
+		if i > 0 && offset <= lines[i-1] || size <= offset {
 			return false
 		}
 	}
@@ -245,6 +248,27 @@ func (f *File) SetLines(lines []int) bool {
 }
 
+// SetLinesForContent sets the line offsets for the given file content.
+func (f *File) SetLinesForContent(content []byte) {
+	var lines []int
+	line := 0
+	for offset, b := range content {
+		if line >= 0 {
+			lines = append(lines, line)
+		}
+		line = -1
+		if b == '\n' {
+			line = offset + 1
+		}
+	}
+
+	// set lines table
+	f.set.mutex.Lock()
+	f.lines = lines
+	f.set.mutex.Unlock()
+}
+
+
 // Pos returns the Pos value for the given file offset;
 // the offset must be <= f.Size().
 // f.Pos(f.Offset(p)) == p.
diff --git a/src/pkg/go/token/position_test.go b/src/pkg/go/token/position_test.go
index 1cffcc3c2..979c9b1e8 100644
--- a/src/pkg/go/token/position_test.go
+++ b/src/pkg/go/token/position_test.go
@@ -39,14 +39,18 @@ func TestNoPos(t *testing.T) {
 
 var tests = []struct {
 	filename string
+	source   []byte // may be nil
 	size     int
 	lines    []int
 }{
-	{"a", 0, []int{}},
-	{"b", 5, []int{0}},
-	{"c", 10, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
-	{"d", 100, []int{0, 5, 10, 20, 30, 70, 71, 72, 80, 85, 90, 99}},
-	{"e", 777, []int{0, 80, 100, 120, 130, 180, 267, 455, 500, 567, 620}},
+	{"a", []byte{}, 0, []int{}},
+	{"b", []byte("01234"), 5, []int{0}},
+	{"c", []byte("\n\n\n\n\n\n\n\n\n"), 9, []int{0, 1, 2, 3, 4, 5, 6, 7, 8}},
+	{"d", nil, 100, []int{0, 5, 10, 20, 30, 70, 71, 72, 80, 85, 90, 99}},
+	{"e", nil, 777, []int{0, 80, 100, 120, 130, 180, 267, 455, 500, 567, 620}},
+	{"f", []byte("package p\n\nimport \"fmt\""), 23, []int{0, 10, 11}},
+	{"g", []byte("package p\n\nimport \"fmt\"\n"), 24, []int{0, 10, 11}},
+	{"h", []byte("package p\n\nimport \"fmt\"\n "), 25, []int{0, 10, 11, 24}},
 }
 
@@ -77,10 +81,26 @@ func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) {
 }
 
+func makeTestSource(size int, lines []int) []byte {
+	src := make([]byte, size)
+	for _, offs := range lines {
+		if offs > 0 {
+			src[offs-1] = '\n'
+		}
+	}
+	return src
+}
+
+
 func TestPositions(t *testing.T) {
 	const delta = 7 // a non-zero base offset increment
 	fset := NewFileSet()
 	for _, test := range tests {
+		// verify consistency of test case
+		if test.source != nil && len(test.source) != test.size {
+			t.Errorf("%s: inconsistent test case: expected file size %d; got %d", test.filename, test.size, len(test.source))
+		}
+
 		// add file and verify name and size
 		f := fset.AddFile(test.filename, fset.Base()+delta, test.size)
 		if f.Name() != test.filename {
@@ -107,15 +127,26 @@ func TestPositions(t *testing.T) {
 			verifyPositions(t, fset, f, test.lines[0:i+1])
 		}
 
-		// add lines at once and verify all positions
-		ok := f.SetLines(test.lines)
-		if !ok {
+		// add lines with SetLines and verify all positions
+		if ok := f.SetLines(test.lines); !ok {
 			t.Errorf("%s: SetLines failed", f.Name())
 		}
 		if f.LineCount() != len(test.lines) {
 			t.Errorf("%s, SetLines: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount())
 		}
 		verifyPositions(t, fset, f, test.lines)
+
+		// add lines with SetLinesForContent and verify all positions
+		src := test.source
+		if src == nil {
+			// no test source available - create one from scratch
+			src = makeTestSource(test.size, test.lines)
+		}
+		f.SetLinesForContent(src)
+		if f.LineCount() != len(test.lines) {
+			t.Errorf("%s, SetLinesForContent: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount())
+		}
+		verifyPositions(t, fset, f, test.lines)
 	}
 }
diff --git a/src/pkg/go/token/token.go b/src/pkg/go/token/token.go
index 1bd81c1b1..2a2d3ecc4 100644
--- a/src/pkg/go/token/token.go
+++ b/src/pkg/go/token/token.go
@@ -252,8 +252,8 @@ func (tok Token) String() string {
 //
 const (
 	LowestPrec  = 0 // non-operators
-	UnaryPrec   = 7
-	HighestPrec = 8
+	UnaryPrec   = 6
+	HighestPrec = 7
 )
 
@@ -267,14 +267,12 @@ func (op Token) Precedence() int {
 		return 1
 	case LAND:
 		return 2
-	case ARROW:
-		return 3
 	case EQL, NEQ, LSS, LEQ, GTR, GEQ:
-		return 4
+		return 3
 	case ADD, SUB, OR, XOR:
-		return 5
+		return 4
 	case MUL, QUO, REM, SHL, SHR, AND, AND_NOT:
-		return 6
+		return 5
 	}
 	return LowestPrec
 }
diff --git a/src/pkg/go/typechecker/typechecker_test.go b/src/pkg/go/typechecker/typechecker_test.go
index 9c5b52e41..33f4a6223 100644
--- a/src/pkg/go/typechecker/typechecker_test.go
+++ b/src/pkg/go/typechecker/typechecker_test.go
@@ -68,7 +68,8 @@ func expectedErrors(t *testing.T, pkg *ast.Package) (list scanner.ErrorList) {
 		}
 
 		var s scanner.Scanner
-		s.Init(fset, filename, src, nil, scanner.ScanComments)
+		file := fset.AddFile(filename, fset.Base(), len(src))
+		s.Init(file, src, nil, scanner.ScanComments)
 		var prev token.Pos // position of last non-comment token
 	loop:
 		for {
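
Also new in this import is token.File.SetLinesForContent (see the position.go hunk above), which derives a file's line offset table directly from its contents; the SetLines doc comment uses "ab\nc\n" with offsets {0, 3} as its example. A small usage sketch follows; the file name is invented, and the calls assume the go/token API as it appears in this tree (NewFileSet, AddFile, SetLinesForContent, LineCount, Pos, Position):

```go
package main

import (
	"fmt"
	"go/token"
)

func main() {
	content := []byte("ab\nc\n") // line offsets {0, 3}, as in the SetLines doc comment

	fset := token.NewFileSet()
	f := fset.AddFile("content.txt", fset.Base(), len(content))

	// Compute the line table from the content instead of calling
	// AddLine for each newline by hand.
	f.SetLinesForContent(content)

	fmt.Println(f.LineCount())        // 2
	fmt.Println(f.Position(f.Pos(3))) // content.txt:2:1 - the 'c' on line 2
}
```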