author    Ondřej Surý <ondrej@sury.org>  2012-04-06 15:14:11 +0200
committer Ondřej Surý <ondrej@sury.org>  2012-04-06 15:14:11 +0200
commit    505c19580e0f43fe5224431459cacb7c21edd93d (patch)
tree      79e2634c253d60afc0cc0b2f510dc7dcbb48497b /src/pkg/go
parent    1336a7c91e596c423a49d1194ea42d98bca0d958 (diff)
download  golang-505c19580e0f43fe5224431459cacb7c21edd93d.tar.gz

Imported Upstream version 1 (tag: upstream/1)
Diffstat (limited to 'src/pkg/go')
-rw-r--r--  src/pkg/go/ast/Makefile | 16
-rw-r--r--  src/pkg/go/ast/ast.go | 180
-rw-r--r--  src/pkg/go/ast/example_test.go | 136
-rw-r--r--  src/pkg/go/ast/filter.go | 348
-rw-r--r--  src/pkg/go/ast/import.go | 134
-rw-r--r--  src/pkg/go/ast/print.go | 38
-rw-r--r--  src/pkg/go/ast/print_test.go | 17
-rw-r--r--  src/pkg/go/ast/resolve.go | 16
-rw-r--r--  src/pkg/go/ast/scope.go | 10
-rw-r--r--  src/pkg/go/build/Makefile | 22
-rw-r--r--  src/pkg/go/build/build.go | 1254
-rw-r--r--  src/pkg/go/build/build_test.go | 100
-rw-r--r--  src/pkg/go/build/cgotest/cgotest.go | 19
-rw-r--r--  src/pkg/go/build/deps_test.go | 424
-rw-r--r--  src/pkg/go/build/dir.go | 172
-rw-r--r--  src/pkg/go/build/doc.go | 109
-rw-r--r--  src/pkg/go/build/path.go | 182
-rw-r--r--  src/pkg/go/build/pkgtest/pkgtest.go | 9
-rw-r--r--  src/pkg/go/build/pkgtest/sqrt_386.s | 10
-rw-r--r--  src/pkg/go/build/pkgtest/sqrt_amd64.s | 9
-rw-r--r--  src/pkg/go/build/pkgtest/sqrt_arm.s | 10
-rw-r--r--  src/pkg/go/build/syslist.go (renamed from src/pkg/go/build/cmdtest/main.go) | 10
-rw-r--r--  src/pkg/go/build/syslist_test.go | 4
-rw-r--r--  src/pkg/go/build/testdata/other/file/file.go | 5
-rw-r--r--  src/pkg/go/build/testdata/other/main.go | 11
-rw-r--r--  src/pkg/go/doc/Makefile | 11
-rw-r--r--  src/pkg/go/doc/comment.go | 376
-rw-r--r--  src/pkg/go/doc/comment_test.go | 109
-rw-r--r--  src/pkg/go/doc/doc.go | 668
-rw-r--r--  src/pkg/go/doc/doc_test.go | 136
-rw-r--r--  src/pkg/go/doc/example.go | 117
-rw-r--r--  src/pkg/go/doc/exports.go | 199
-rw-r--r--  src/pkg/go/doc/filter.go | 105
-rw-r--r--  src/pkg/go/doc/headscan.go | 113
-rw-r--r--  src/pkg/go/doc/reader.go | 774
-rw-r--r--  src/pkg/go/doc/synopsis.go | 52
-rw-r--r--  src/pkg/go/doc/synopsis_test.go | 44
-rw-r--r--  src/pkg/go/doc/testdata/a.0.golden | 13
-rw-r--r--  src/pkg/go/doc/testdata/a.1.golden | 13
-rw-r--r--  src/pkg/go/doc/testdata/a.2.golden | 13
-rw-r--r--  src/pkg/go/doc/testdata/a0.go (renamed from src/pkg/go/build/cgotest/cgotest.h) | 7
-rw-r--r--  src/pkg/go/doc/testdata/a1.go (renamed from src/pkg/go/build/cgotest/cgotest.c) | 11
-rw-r--r--  src/pkg/go/doc/testdata/b.0.golden | 71
-rw-r--r--  src/pkg/go/doc/testdata/b.1.golden | 83
-rw-r--r--  src/pkg/go/doc/testdata/b.2.golden | 71
-rw-r--r--  src/pkg/go/doc/testdata/b.go | 58
-rw-r--r--  src/pkg/go/doc/testdata/benchmark.go | 293
-rw-r--r--  src/pkg/go/doc/testdata/c.0.golden | 48
-rw-r--r--  src/pkg/go/doc/testdata/c.1.golden | 48
-rw-r--r--  src/pkg/go/doc/testdata/c.2.golden | 48
-rw-r--r--  src/pkg/go/doc/testdata/c.go | 62
-rw-r--r--  src/pkg/go/doc/testdata/d.0.golden | 104
-rw-r--r--  src/pkg/go/doc/testdata/d.1.golden | 104
-rw-r--r--  src/pkg/go/doc/testdata/d.2.golden | 104
-rw-r--r--  src/pkg/go/doc/testdata/d1.go | 57
-rw-r--r--  src/pkg/go/doc/testdata/d2.go | 45
-rw-r--r--  src/pkg/go/doc/testdata/e.0.golden | 109
-rw-r--r--  src/pkg/go/doc/testdata/e.1.golden | 144
-rw-r--r--  src/pkg/go/doc/testdata/e.2.golden | 130
-rw-r--r--  src/pkg/go/doc/testdata/e.go | 147
-rw-r--r--  src/pkg/go/doc/testdata/error1.0.golden | 30
-rw-r--r--  src/pkg/go/doc/testdata/error1.1.golden | 32
-rw-r--r--  src/pkg/go/doc/testdata/error1.2.golden | 30
-rw-r--r--  src/pkg/go/doc/testdata/error1.go | 24
-rw-r--r--  src/pkg/go/doc/testdata/error2.0.golden | 27
-rw-r--r--  src/pkg/go/doc/testdata/error2.1.golden | 37
-rw-r--r--  src/pkg/go/doc/testdata/error2.2.golden | 27
-rw-r--r--  src/pkg/go/doc/testdata/error2.go | 29
-rw-r--r--  src/pkg/go/doc/testdata/example.go | 81
-rw-r--r--  src/pkg/go/doc/testdata/f.0.golden | 13
-rw-r--r--  src/pkg/go/doc/testdata/f.1.golden | 16
-rw-r--r--  src/pkg/go/doc/testdata/f.2.golden | 13
-rw-r--r--  src/pkg/go/doc/testdata/f.go | 14
-rw-r--r--  src/pkg/go/doc/testdata/template.txt | 65
-rw-r--r--  src/pkg/go/doc/testdata/testing.0.golden | 156
-rw-r--r--  src/pkg/go/doc/testdata/testing.1.golden | 298
-rw-r--r--  src/pkg/go/doc/testdata/testing.2.golden | 156
-rw-r--r--  src/pkg/go/doc/testdata/testing.go | 404
-rw-r--r--  src/pkg/go/parser/Makefile | 12
-rw-r--r--  src/pkg/go/parser/error_test.go | 166
-rw-r--r--  src/pkg/go/parser/example_test.go | 34
-rw-r--r--  src/pkg/go/parser/interface.go | 183
-rw-r--r--  src/pkg/go/parser/parser.go | 628
-rw-r--r--  src/pkg/go/parser/parser_test.go | 193
-rw-r--r--  src/pkg/go/parser/short_test.go | 75
-rw-r--r--  src/pkg/go/parser/testdata/commas.src | 19
-rw-r--r--  src/pkg/go/parser/testdata/issue3106.src | 46
-rw-r--r--  src/pkg/go/printer/Makefile | 12
-rw-r--r--  src/pkg/go/printer/example_test.go | 67
-rw-r--r--  src/pkg/go/printer/nodes.go | 600
-rw-r--r--  src/pkg/go/printer/performance_test.go | 4
-rw-r--r--  src/pkg/go/printer/printer.go | 926
-rw-r--r--  src/pkg/go/printer/printer_test.go | 242
-rw-r--r--  src/pkg/go/printer/testdata/comments.golden | 174
-rw-r--r--  src/pkg/go/printer/testdata/comments.input | 163
-rw-r--r--  src/pkg/go/printer/testdata/declarations.golden | 167
-rw-r--r--  src/pkg/go/printer/testdata/declarations.input | 118
-rw-r--r--  src/pkg/go/printer/testdata/expressions.golden | 28
-rw-r--r--  src/pkg/go/printer/testdata/expressions.input | 22
-rw-r--r--  src/pkg/go/printer/testdata/expressions.raw | 28
-rw-r--r--  src/pkg/go/printer/testdata/linebreaks.golden | 52
-rw-r--r--  src/pkg/go/printer/testdata/linebreaks.input | 48
-rw-r--r--  src/pkg/go/printer/testdata/parser.go | 8
-rw-r--r--  src/pkg/go/printer/testdata/statements.golden | 121
-rw-r--r--  src/pkg/go/printer/testdata/statements.input | 119
-rw-r--r--  src/pkg/go/scanner/Makefile | 12
-rw-r--r--  src/pkg/go/scanner/errors.go | 142
-rw-r--r--  src/pkg/go/scanner/example_test.go | 46
-rw-r--r--  src/pkg/go/scanner/scanner.go | 578
-rw-r--r--  src/pkg/go/scanner/scanner_test.go | 155
-rw-r--r--  src/pkg/go/token/Makefile | 12
-rw-r--r--  src/pkg/go/token/position.go | 277
-rw-r--r--  src/pkg/go/token/position_test.go | 9
-rw-r--r--  src/pkg/go/token/serialize.go | 56
-rw-r--r--  src/pkg/go/token/serialize_test.go | 111
-rw-r--r--  src/pkg/go/token/token.go | 12
-rw-r--r--  src/pkg/go/typechecker/Makefile | 14
-rw-r--r--  src/pkg/go/typechecker/scope.go | 69
-rw-r--r--  src/pkg/go/typechecker/testdata/test0.src | 94
-rw-r--r--  src/pkg/go/typechecker/testdata/test1.src | 13
-rw-r--r--  src/pkg/go/typechecker/testdata/test3.src | 41
-rw-r--r--  src/pkg/go/typechecker/testdata/test4.src | 11
-rw-r--r--  src/pkg/go/typechecker/type.go | 118
-rw-r--r--  src/pkg/go/typechecker/typechecker.go | 468
-rw-r--r--  src/pkg/go/typechecker/typechecker_test.go | 163
-rw-r--r--  src/pkg/go/typechecker/universe.go | 36
-rw-r--r--  src/pkg/go/types/Makefile | 16
-rw-r--r--  src/pkg/go/types/check.go | 226
-rw-r--r--  src/pkg/go/types/check_test.go | 215
-rw-r--r--  src/pkg/go/types/const.go | 332
-rw-r--r--  src/pkg/go/types/exportdata.go | 132
-rw-r--r--  src/pkg/go/types/gcimporter.go | 799
-rw-r--r--  src/pkg/go/types/gcimporter_test.go | 101
-rw-r--r--  src/pkg/go/types/testdata/exports.go | 84
-rw-r--r--  src/pkg/go/types/testdata/test0.src | 154
-rw-r--r--  src/pkg/go/types/types.go | 255
-rw-r--r--  src/pkg/go/types/universe.go | 108
137 files changed, 11201 insertions, 7097 deletions
diff --git a/src/pkg/go/ast/Makefile b/src/pkg/go/ast/Makefile
deleted file mode 100644
index 40be10208..000000000
--- a/src/pkg/go/ast/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../../Make.inc
-
-TARG=go/ast
-GOFILES=\
- ast.go\
- filter.go\
- print.go\
- resolve.go\
- scope.go\
- walk.go\
-
-include ../../../Make.pkg
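
(With Go 1 the per-package Makefiles are gone: the standard packages are built by the go tool instead, for example "go install go/ast", so the hand-maintained GOFILES list above is no longer needed.)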
diff --git a/src/pkg/go/ast/ast.go b/src/pkg/go/ast/ast.go
index 22bd5ee22..7123fe58f 100644
--- a/src/pkg/go/ast/ast.go
+++ b/src/pkg/go/ast/ast.go
@@ -9,8 +9,9 @@ package ast
import (
"go/token"
+ "strings"
"unicode"
- "utf8"
+ "unicode/utf8"
)
// ----------------------------------------------------------------------------
@@ -76,6 +77,74 @@ type CommentGroup struct {
func (g *CommentGroup) Pos() token.Pos { return g.List[0].Pos() }
func (g *CommentGroup) End() token.Pos { return g.List[len(g.List)-1].End() }
+func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' }
+
+func stripTrailingWhitespace(s string) string {
+ i := len(s)
+ for i > 0 && isWhitespace(s[i-1]) {
+ i--
+ }
+ return s[0:i]
+}
+
+// Text returns the text of the comment,
+// with the comment markers - //, /*, and */ - removed.
+func (g *CommentGroup) Text() string {
+ if g == nil {
+ return ""
+ }
+ comments := make([]string, len(g.List))
+ for i, c := range g.List {
+ comments[i] = string(c.Text)
+ }
+
+ lines := make([]string, 0, 10) // most comments are less than 10 lines
+ for _, c := range comments {
+ // Remove comment markers.
+ // The parser has given us exactly the comment text.
+ switch c[1] {
+ case '/':
+ //-style comment
+ c = c[2:]
+ // Remove leading space after //, if there is one.
+ // TODO(gri) This appears to be necessary in isolated
+ // cases (bignum.RatFromString) - why?
+ if len(c) > 0 && c[0] == ' ' {
+ c = c[1:]
+ }
+ case '*':
+ /*-style comment */
+ c = c[2 : len(c)-2]
+ }
+
+ // Split on newlines.
+ cl := strings.Split(c, "\n")
+
+ // Walk lines, stripping trailing white space and adding to list.
+ for _, l := range cl {
+ lines = append(lines, stripTrailingWhitespace(l))
+ }
+ }
+
+ // Remove leading blank lines; convert runs of
+ // interior blank lines to a single blank line.
+ n := 0
+ for _, line := range lines {
+ if line != "" || n > 0 && lines[n-1] != "" {
+ lines[n] = line
+ n++
+ }
+ }
+ lines = lines[0:n]
+
+ // Add final "" entry to get trailing newline from Join.
+ if n > 0 && lines[n-1] != "" {
+ lines = append(lines, "")
+ }
+
+ return strings.Join(lines, "\n")
+}
+
// ----------------------------------------------------------------------------
// Expressions and types
@@ -412,29 +481,29 @@ func (x *ChanType) End() token.Pos { return x.Value.End() }
// exprNode() ensures that only expression/type nodes can be
// assigned to an ExprNode.
//
-func (x *BadExpr) exprNode() {}
-func (x *Ident) exprNode() {}
-func (x *Ellipsis) exprNode() {}
-func (x *BasicLit) exprNode() {}
-func (x *FuncLit) exprNode() {}
-func (x *CompositeLit) exprNode() {}
-func (x *ParenExpr) exprNode() {}
-func (x *SelectorExpr) exprNode() {}
-func (x *IndexExpr) exprNode() {}
-func (x *SliceExpr) exprNode() {}
-func (x *TypeAssertExpr) exprNode() {}
-func (x *CallExpr) exprNode() {}
-func (x *StarExpr) exprNode() {}
-func (x *UnaryExpr) exprNode() {}
-func (x *BinaryExpr) exprNode() {}
-func (x *KeyValueExpr) exprNode() {}
-
-func (x *ArrayType) exprNode() {}
-func (x *StructType) exprNode() {}
-func (x *FuncType) exprNode() {}
-func (x *InterfaceType) exprNode() {}
-func (x *MapType) exprNode() {}
-func (x *ChanType) exprNode() {}
+func (*BadExpr) exprNode() {}
+func (*Ident) exprNode() {}
+func (*Ellipsis) exprNode() {}
+func (*BasicLit) exprNode() {}
+func (*FuncLit) exprNode() {}
+func (*CompositeLit) exprNode() {}
+func (*ParenExpr) exprNode() {}
+func (*SelectorExpr) exprNode() {}
+func (*IndexExpr) exprNode() {}
+func (*SliceExpr) exprNode() {}
+func (*TypeAssertExpr) exprNode() {}
+func (*CallExpr) exprNode() {}
+func (*StarExpr) exprNode() {}
+func (*UnaryExpr) exprNode() {}
+func (*BinaryExpr) exprNode() {}
+func (*KeyValueExpr) exprNode() {}
+
+func (*ArrayType) exprNode() {}
+func (*StructType) exprNode() {}
+func (*FuncType) exprNode() {}
+func (*InterfaceType) exprNode() {}
+func (*MapType) exprNode() {}
+func (*ChanType) exprNode() {}
// ----------------------------------------------------------------------------
// Convenience functions for Idents
@@ -711,27 +780,27 @@ func (s *RangeStmt) End() token.Pos { return s.Body.End() }
// stmtNode() ensures that only statement nodes can be
// assigned to a StmtNode.
//
-func (s *BadStmt) stmtNode() {}
-func (s *DeclStmt) stmtNode() {}
-func (s *EmptyStmt) stmtNode() {}
-func (s *LabeledStmt) stmtNode() {}
-func (s *ExprStmt) stmtNode() {}
-func (s *SendStmt) stmtNode() {}
-func (s *IncDecStmt) stmtNode() {}
-func (s *AssignStmt) stmtNode() {}
-func (s *GoStmt) stmtNode() {}
-func (s *DeferStmt) stmtNode() {}
-func (s *ReturnStmt) stmtNode() {}
-func (s *BranchStmt) stmtNode() {}
-func (s *BlockStmt) stmtNode() {}
-func (s *IfStmt) stmtNode() {}
-func (s *CaseClause) stmtNode() {}
-func (s *SwitchStmt) stmtNode() {}
-func (s *TypeSwitchStmt) stmtNode() {}
-func (s *CommClause) stmtNode() {}
-func (s *SelectStmt) stmtNode() {}
-func (s *ForStmt) stmtNode() {}
-func (s *RangeStmt) stmtNode() {}
+func (*BadStmt) stmtNode() {}
+func (*DeclStmt) stmtNode() {}
+func (*EmptyStmt) stmtNode() {}
+func (*LabeledStmt) stmtNode() {}
+func (*ExprStmt) stmtNode() {}
+func (*SendStmt) stmtNode() {}
+func (*IncDecStmt) stmtNode() {}
+func (*AssignStmt) stmtNode() {}
+func (*GoStmt) stmtNode() {}
+func (*DeferStmt) stmtNode() {}
+func (*ReturnStmt) stmtNode() {}
+func (*BranchStmt) stmtNode() {}
+func (*BlockStmt) stmtNode() {}
+func (*IfStmt) stmtNode() {}
+func (*CaseClause) stmtNode() {}
+func (*SwitchStmt) stmtNode() {}
+func (*TypeSwitchStmt) stmtNode() {}
+func (*CommClause) stmtNode() {}
+func (*SelectStmt) stmtNode() {}
+func (*ForStmt) stmtNode() {}
+func (*RangeStmt) stmtNode() {}
// ----------------------------------------------------------------------------
// Declarations
@@ -752,6 +821,7 @@ type (
Name *Ident // local package name (including "."); or nil
Path *BasicLit // import path
Comment *CommentGroup // line comments; or nil
+ EndPos token.Pos // end of spec (overrides Path.Pos if nonzero)
}
// A ValueSpec node represents a constant or variable declaration
@@ -785,7 +855,13 @@ func (s *ImportSpec) Pos() token.Pos {
func (s *ValueSpec) Pos() token.Pos { return s.Names[0].Pos() }
func (s *TypeSpec) Pos() token.Pos { return s.Name.Pos() }
-func (s *ImportSpec) End() token.Pos { return s.Path.End() }
+func (s *ImportSpec) End() token.Pos {
+ if s.EndPos != 0 {
+ return s.EndPos
+ }
+ return s.Path.End()
+}
+
func (s *ValueSpec) End() token.Pos {
if n := len(s.Values); n > 0 {
return s.Values[n-1].End()
@@ -800,9 +876,9 @@ func (s *TypeSpec) End() token.Pos { return s.Type.End() }
// specNode() ensures that only spec nodes can be
// assigned to a Spec.
//
-func (s *ImportSpec) specNode() {}
-func (s *ValueSpec) specNode() {}
-func (s *TypeSpec) specNode() {}
+func (*ImportSpec) specNode() {}
+func (*ValueSpec) specNode() {}
+func (*TypeSpec) specNode() {}
// A declaration is represented by one of the following declaration nodes.
//
@@ -868,9 +944,9 @@ func (d *FuncDecl) End() token.Pos {
// declNode() ensures that only declaration nodes can be
// assigned to a DeclNode.
//
-func (d *BadDecl) declNode() {}
-func (d *GenDecl) declNode() {}
-func (d *FuncDecl) declNode() {}
+func (*BadDecl) declNode() {}
+func (*GenDecl) declNode() {}
+func (*FuncDecl) declNode() {}
// ----------------------------------------------------------------------------
// Files and packages
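
To see the new CommentGroup.Text method above in action, here is a small self-contained sketch (not part of the commit; the file name and source string are invented):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	src := "// Package p frobs widgets.\n// It exists for this example.\npackage p\n"
	fset := token.NewFileSet()
	// ParseComments is required so the doc comment is attached to the File.
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Text strips the comment markers, drops trailing whitespace, and
	// normalizes blank lines; the result ends in a newline.
	fmt.Print(f.Doc.Text())
}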
diff --git a/src/pkg/go/ast/example_test.go b/src/pkg/go/ast/example_test.go
new file mode 100644
index 000000000..632bfcfd0
--- /dev/null
+++ b/src/pkg/go/ast/example_test.go
@@ -0,0 +1,136 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast_test
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+)
+
+// This example demonstrates how to inspect the AST of a Go program.
+func ExampleInspect() {
+ // src is the input for which we want to inspect the AST.
+ src := `
+package p
+const c = 1.0
+var X = f(3.14)*2 + c
+`
+
+ // Create the AST by parsing src.
+ fset := token.NewFileSet() // positions are relative to fset
+ f, err := parser.ParseFile(fset, "src.go", src, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ // Inspect the AST and print all identifiers and literals.
+ ast.Inspect(f, func(n ast.Node) bool {
+ var s string
+ switch x := n.(type) {
+ case *ast.BasicLit:
+ s = x.Value
+ case *ast.Ident:
+ s = x.Name
+ }
+ if s != "" {
+ fmt.Printf("%s:\t%s\n", fset.Position(n.Pos()), s)
+ }
+ return true
+ })
+
+ // output:
+ // src.go:2:9: p
+ // src.go:3:7: c
+ // src.go:3:11: 1.0
+ // src.go:4:5: X
+ // src.go:4:9: f
+ // src.go:4:11: 3.14
+ // src.go:4:17: 2
+ // src.go:4:21: c
+}
+
+// This example shows what an AST looks like when printed for debugging.
+func ExamplePrint() {
+ // src is the input for which we want to print the AST.
+ src := `
+package main
+func main() {
+ println("Hello, World!")
+}
+`
+
+ // Create the AST by parsing src.
+ fset := token.NewFileSet() // positions are relative to fset
+ f, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ // Print the AST.
+ ast.Print(fset, f)
+
+ // output:
+ // 0 *ast.File {
+ // 1 . Package: 2:1
+ // 2 . Name: *ast.Ident {
+ // 3 . . NamePos: 2:9
+ // 4 . . Name: "main"
+ // 5 . }
+ // 6 . Decls: []ast.Decl (len = 1) {
+ // 7 . . 0: *ast.FuncDecl {
+ // 8 . . . Name: *ast.Ident {
+ // 9 . . . . NamePos: 3:6
+ // 10 . . . . Name: "main"
+ // 11 . . . . Obj: *ast.Object {
+ // 12 . . . . . Kind: func
+ // 13 . . . . . Name: "main"
+ // 14 . . . . . Decl: *(obj @ 7)
+ // 15 . . . . }
+ // 16 . . . }
+ // 17 . . . Type: *ast.FuncType {
+ // 18 . . . . Func: 3:1
+ // 19 . . . . Params: *ast.FieldList {
+ // 20 . . . . . Opening: 3:10
+ // 21 . . . . . Closing: 3:11
+ // 22 . . . . }
+ // 23 . . . }
+ // 24 . . . Body: *ast.BlockStmt {
+ // 25 . . . . Lbrace: 3:13
+ // 26 . . . . List: []ast.Stmt (len = 1) {
+ // 27 . . . . . 0: *ast.ExprStmt {
+ // 28 . . . . . . X: *ast.CallExpr {
+ // 29 . . . . . . . Fun: *ast.Ident {
+ // 30 . . . . . . . . NamePos: 4:2
+ // 31 . . . . . . . . Name: "println"
+ // 32 . . . . . . . }
+ // 33 . . . . . . . Lparen: 4:9
+ // 34 . . . . . . . Args: []ast.Expr (len = 1) {
+ // 35 . . . . . . . . 0: *ast.BasicLit {
+ // 36 . . . . . . . . . ValuePos: 4:10
+ // 37 . . . . . . . . . Kind: STRING
+ // 38 . . . . . . . . . Value: "\"Hello, World!\""
+ // 39 . . . . . . . . }
+ // 40 . . . . . . . }
+ // 41 . . . . . . . Ellipsis: -
+ // 42 . . . . . . . Rparen: 4:25
+ // 43 . . . . . . }
+ // 44 . . . . . }
+ // 45 . . . . }
+ // 46 . . . . Rbrace: 5:1
+ // 47 . . . }
+ // 48 . . }
+ // 49 . }
+ // 50 . Scope: *ast.Scope {
+ // 51 . . Objects: map[string]*ast.Object (len = 1) {
+ // 52 . . . "main": *(obj @ 11)
+ // 53 . . }
+ // 54 . }
+ // 55 . Unresolved: []*ast.Ident (len = 1) {
+ // 56 . . 0: *(obj @ 29)
+ // 57 . }
+ // 58 }
+}
diff --git a/src/pkg/go/ast/filter.go b/src/pkg/go/ast/filter.go
index 26733430d..4a89b8909 100644
--- a/src/pkg/go/ast/filter.go
+++ b/src/pkg/go/ast/filter.go
@@ -4,15 +4,52 @@
package ast
-import "go/token"
+import (
+ "go/token"
+ "sort"
+)
// ----------------------------------------------------------------------------
// Export filtering
-func identListExports(list []*Ident) []*Ident {
+// exportFilter is a special filter function to extract exported nodes.
+func exportFilter(name string) bool {
+ return IsExported(name)
+}
+
+// FileExports trims the AST for a Go source file in place such that
+// only exported nodes remain: all top-level identifiers which are not exported
+// and their associated information (such as type, initial value, or function
+// body) are removed. Non-exported fields and methods of exported types are
+// stripped. The File.Comments list is not changed.
+//
+// FileExports returns true if there are exported declarations;
+// it returns false otherwise.
+//
+func FileExports(src *File) bool {
+ return filterFile(src, exportFilter, true)
+}
+
+// PackageExports trims the AST for a Go package in place such that
+// only exported nodes remain. The pkg.Files list is not changed, so that
+// file names and top-level package comments don't get lost.
+//
+// PackageExports returns true if there are exported declarations;
+// it returns false otherwise.
+//
+func PackageExports(pkg *Package) bool {
+ return filterPackage(pkg, exportFilter, true)
+}
+
+// ----------------------------------------------------------------------------
+// General filtering
+
+type Filter func(string) bool
+
+func filterIdentList(list []*Ident, f Filter) []*Ident {
j := 0
for _, x := range list {
- if x.IsExported() {
+ if f(x.Name) {
list[j] = x
j++
}
@@ -38,33 +75,30 @@ func fieldName(x Expr) *Ident {
return nil
}
-func fieldListExports(fields *FieldList) (removedFields bool) {
+func filterFieldList(fields *FieldList, filter Filter, export bool) (removedFields bool) {
if fields == nil {
- return
+ return false
}
list := fields.List
j := 0
for _, f := range list {
- exported := false
+ keepField := false
if len(f.Names) == 0 {
// anonymous field
- // (Note that a non-exported anonymous field
- // may still refer to a type with exported
- // fields, so this is not absolutely correct.
- // However, this cannot be done w/o complete
- // type information.)
name := fieldName(f.Type)
- exported = name != nil && name.IsExported()
+ keepField = name != nil && filter(name.Name)
} else {
n := len(f.Names)
- f.Names = identListExports(f.Names)
+ f.Names = filterIdentList(f.Names, filter)
if len(f.Names) < n {
removedFields = true
}
- exported = len(f.Names) > 0
+ keepField = len(f.Names) > 0
}
- if exported {
- typeExports(f.Type)
+ if keepField {
+ if export {
+ filterType(f.Type, filter, export)
+ }
list[j] = f
j++
}
@@ -76,194 +110,84 @@ func fieldListExports(fields *FieldList) (removedFields bool) {
return
}
-func paramListExports(fields *FieldList) {
+func filterParamList(fields *FieldList, filter Filter, export bool) bool {
if fields == nil {
- return
+ return false
}
+ var b bool
for _, f := range fields.List {
- typeExports(f.Type)
+ if filterType(f.Type, filter, export) {
+ b = true
+ }
}
+ return b
}
-func typeExports(typ Expr) {
+func filterType(typ Expr, f Filter, export bool) bool {
switch t := typ.(type) {
+ case *Ident:
+ return f(t.Name)
+ case *ParenExpr:
+ return filterType(t.X, f, export)
case *ArrayType:
- typeExports(t.Elt)
+ return filterType(t.Elt, f, export)
case *StructType:
- if fieldListExports(t.Fields) {
+ if filterFieldList(t.Fields, f, export) {
t.Incomplete = true
}
+ return len(t.Fields.List) > 0
case *FuncType:
- paramListExports(t.Params)
- paramListExports(t.Results)
+ b1 := filterParamList(t.Params, f, export)
+ b2 := filterParamList(t.Results, f, export)
+ return b1 || b2
case *InterfaceType:
- if fieldListExports(t.Methods) {
+ if filterFieldList(t.Methods, f, export) {
t.Incomplete = true
}
+ return len(t.Methods.List) > 0
case *MapType:
- typeExports(t.Key)
- typeExports(t.Value)
+ b1 := filterType(t.Key, f, export)
+ b2 := filterType(t.Value, f, export)
+ return b1 || b2
case *ChanType:
- typeExports(t.Value)
+ return filterType(t.Value, f, export)
}
+ return false
}
-func specExports(spec Spec) bool {
+func filterSpec(spec Spec, f Filter, export bool) bool {
switch s := spec.(type) {
case *ValueSpec:
- s.Names = identListExports(s.Names)
+ s.Names = filterIdentList(s.Names, f)
if len(s.Names) > 0 {
- typeExports(s.Type)
- return true
- }
- case *TypeSpec:
- if s.Name.IsExported() {
- typeExports(s.Type)
- return true
- }
- }
- return false
-}
-
-func specListExports(list []Spec) []Spec {
- j := 0
- for _, s := range list {
- if specExports(s) {
- list[j] = s
- j++
- }
- }
- return list[0:j]
-}
-
-func declExports(decl Decl) bool {
- switch d := decl.(type) {
- case *GenDecl:
- d.Specs = specListExports(d.Specs)
- return len(d.Specs) > 0
- case *FuncDecl:
- d.Body = nil // strip body
- return d.Name.IsExported()
- }
- return false
-}
-
-// FileExports trims the AST for a Go source file in place such that only
-// exported nodes remain: all top-level identifiers which are not exported
-// and their associated information (such as type, initial value, or function
-// body) are removed. Non-exported fields and methods of exported types are
-// stripped, and the function bodies of exported functions are set to nil.
-// The File.comments list is not changed.
-//
-// FileExports returns true if there is an exported declaration; it returns
-// false otherwise.
-//
-func FileExports(src *File) bool {
- j := 0
- for _, d := range src.Decls {
- if declExports(d) {
- src.Decls[j] = d
- j++
- }
- }
- src.Decls = src.Decls[0:j]
- return j > 0
-}
-
-// PackageExports trims the AST for a Go package in place such that only
-// exported nodes remain. The pkg.Files list is not changed, so that file
-// names and top-level package comments don't get lost.
-//
-// PackageExports returns true if there is an exported declaration; it
-// returns false otherwise.
-//
-func PackageExports(pkg *Package) bool {
- hasExports := false
- for _, f := range pkg.Files {
- if FileExports(f) {
- hasExports = true
- }
- }
- return hasExports
-}
-
-// ----------------------------------------------------------------------------
-// General filtering
-
-type Filter func(string) bool
-
-func filterIdentList(list []*Ident, f Filter) []*Ident {
- j := 0
- for _, x := range list {
- if f(x.Name) {
- list[j] = x
- j++
- }
- }
- return list[0:j]
-}
-
-func filterFieldList(fields *FieldList, filter Filter) (removedFields bool) {
- if fields == nil {
- return false
- }
- list := fields.List
- j := 0
- for _, f := range list {
- keepField := false
- if len(f.Names) == 0 {
- // anonymous field
- name := fieldName(f.Type)
- keepField = name != nil && filter(name.Name)
- } else {
- n := len(f.Names)
- f.Names = filterIdentList(f.Names, filter)
- if len(f.Names) < n {
- removedFields = true
+ if export {
+ filterType(s.Type, f, export)
}
- keepField = len(f.Names) > 0
- }
- if keepField {
- list[j] = f
- j++
+ return true
}
- }
- if j < len(list) {
- removedFields = true
- }
- fields.List = list[0:j]
- return
-}
-
-func filterSpec(spec Spec, f Filter) bool {
- switch s := spec.(type) {
- case *ValueSpec:
- s.Names = filterIdentList(s.Names, f)
- return len(s.Names) > 0
case *TypeSpec:
if f(s.Name.Name) {
+ if export {
+ filterType(s.Type, f, export)
+ }
return true
}
- switch t := s.Type.(type) {
- case *StructType:
- if filterFieldList(t.Fields, f) {
- t.Incomplete = true
- }
- return len(t.Fields.List) > 0
- case *InterfaceType:
- if filterFieldList(t.Methods, f) {
- t.Incomplete = true
- }
- return len(t.Methods.List) > 0
+ if !export {
+ // For general filtering (not just exports),
+ // filter type even if name is not filtered
+ // out.
+ // If the type contains filtered elements,
+ // keep the declaration.
+ return filterType(s.Type, f, export)
}
}
return false
}
-func filterSpecList(list []Spec, f Filter) []Spec {
+func filterSpecList(list []Spec, f Filter, export bool) []Spec {
j := 0
for _, s := range list {
- if filterSpec(s, f) {
+ if filterSpec(s, f, export) {
list[j] = s
j++
}
@@ -279,9 +203,13 @@ func filterSpecList(list []Spec, f Filter) []Spec {
// filtering; it returns false otherwise.
//
func FilterDecl(decl Decl, f Filter) bool {
+ return filterDecl(decl, f, false)
+}
+
+func filterDecl(decl Decl, f Filter, export bool) bool {
switch d := decl.(type) {
case *GenDecl:
- d.Specs = filterSpecList(d.Specs, f)
+ d.Specs = filterSpecList(d.Specs, f, export)
return len(d.Specs) > 0
case *FuncDecl:
return f(d.Name.Name)
@@ -293,16 +221,20 @@ func FilterDecl(decl Decl, f Filter) bool {
// names from top-level declarations (including struct field and
// interface method names, but not from parameter lists) that don't
// pass through the filter f. If the declaration is empty afterwards,
-// the declaration is removed from the AST.
-// The File.comments list is not changed.
+// the declaration is removed from the AST. The File.Comments list
+// is not changed.
//
// FilterFile returns true if there are any top-level declarations
// left after filtering; it returns false otherwise.
//
func FilterFile(src *File, f Filter) bool {
+ return filterFile(src, f, false)
+}
+
+func filterFile(src *File, f Filter, export bool) bool {
j := 0
for _, d := range src.Decls {
- if FilterDecl(d, f) {
+ if filterDecl(d, f, export) {
src.Decls[j] = d
j++
}
@@ -311,21 +243,25 @@ func FilterFile(src *File, f Filter) bool {
return j > 0
}
-// FilterPackage trims the AST for a Go package in place by removing all
-// names from top-level declarations (including struct field and
+// FilterPackage trims the AST for a Go package in place by removing
+// all names from top-level declarations (including struct field and
// interface method names, but not from parameter lists) that don't
// pass through the filter f. If the declaration is empty afterwards,
-// the declaration is removed from the AST.
-// The pkg.Files list is not changed, so that file names and top-level
-// package comments don't get lost.
+// the declaration is removed from the AST. The pkg.Files list is not
+// changed, so that file names and top-level package comments don't get
+// lost.
//
// FilterPackage returns true if there are any top-level declarations
// left after filtering; it returns false otherwise.
//
func FilterPackage(pkg *Package, f Filter) bool {
+ return filterPackage(pkg, f, false)
+}
+
+func filterPackage(pkg *Package, f Filter, export bool) bool {
hasDecls := false
for _, src := range pkg.Files {
- if FilterFile(src, f) {
+ if filterFile(src, f, export) {
hasDecls = true
}
}
@@ -344,6 +280,8 @@ const (
// If set, comments that are not associated with a specific
// AST node (as Doc or Comment) are excluded.
FilterUnassociatedComments
+ // If set, duplicate import declarations are excluded.
+ FilterImportDuplicates
)
// separator is an empty //-style comment that is interspersed between
@@ -356,29 +294,35 @@ var separator = &Comment{noPos, "//"}
//
func MergePackageFiles(pkg *Package, mode MergeMode) *File {
// Count the number of package docs, comments and declarations across
- // all package files.
+ // all package files. Also, compute sorted list of filenames, so that
+ // subsequent iterations can always iterate in the same order.
ndocs := 0
ncomments := 0
ndecls := 0
- for _, f := range pkg.Files {
+ filenames := make([]string, len(pkg.Files))
+ i := 0
+ for filename, f := range pkg.Files {
+ filenames[i] = filename
+ i++
if f.Doc != nil {
ndocs += len(f.Doc.List) + 1 // +1 for separator
}
ncomments += len(f.Comments)
ndecls += len(f.Decls)
}
+ sort.Strings(filenames)
// Collect package comments from all package files into a single
- // CommentGroup - the collected package documentation. The order
- // is unspecified. In general there should be only one file with
- // a package comment; but it's better to collect extra comments
- // than drop them on the floor.
+ // CommentGroup - the collected package documentation. In general
+ // there should be only one file with a package comment; but it's
+ // better to collect extra comments than drop them on the floor.
var doc *CommentGroup
var pos token.Pos
if ndocs > 0 {
list := make([]*Comment, ndocs-1) // -1: no separator before first group
i := 0
- for _, f := range pkg.Files {
+ for _, filename := range filenames {
+ f := pkg.Files[filename]
if f.Doc != nil {
if i > 0 {
// not the first group - add separator
@@ -407,7 +351,8 @@ func MergePackageFiles(pkg *Package, mode MergeMode) *File {
funcs := make(map[string]int) // map of global function name -> decls index
i := 0 // current index
n := 0 // number of filtered entries
- for _, f := range pkg.Files {
+ for _, filename := range filenames {
+ f := pkg.Files[filename]
for _, d := range f.Decls {
if mode&FilterFuncDuplicates != 0 {
// A language entity may be declared multiple
@@ -459,6 +404,32 @@ func MergePackageFiles(pkg *Package, mode MergeMode) *File {
}
}
+ // Collect import specs from all package files.
+ var imports []*ImportSpec
+ if mode&FilterImportDuplicates != 0 {
+ seen := make(map[string]bool)
+ for _, filename := range filenames {
+ f := pkg.Files[filename]
+ for _, imp := range f.Imports {
+ if path := imp.Path.Value; !seen[path] {
+ // TODO: consider handling cases where:
+ // - 2 imports exist with the same import path but
+ // have different local names (one should probably
+ // keep both of them)
+ // - 2 imports exist but only one has a comment
+ // - 2 imports exist and they both have (possibly
+ // different) comments
+ imports = append(imports, imp)
+ seen[path] = true
+ }
+ }
+ }
+ } else {
+ for _, f := range pkg.Files {
+ imports = append(imports, f.Imports...)
+ }
+ }
+
// Collect comments from all package files.
var comments []*CommentGroup
if mode&FilterUnassociatedComments == 0 {
@@ -469,7 +440,6 @@ func MergePackageFiles(pkg *Package, mode MergeMode) *File {
}
}
- // TODO(gri) need to compute pkgScope and unresolved identifiers!
- // TODO(gri) need to compute imports!
- return &File{doc, pos, NewIdent(pkg.Name), decls, nil, nil, nil, comments}
+ // TODO(gri) need to compute unresolved identifiers!
+ return &File{doc, pos, NewIdent(pkg.Name), decls, pkg.Scope, imports, nil, comments}
}
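
The rework above funnels both export filtering and general filtering through one implementation, with FilterFile and FileExports as the public entry points. A minimal sketch of FilterFile with a custom Filter (names and source invented):

package main

import (
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"os"
	"strings"
)

func main() {
	src := "package p\n\nconst Kept = 1\nconst alsoKept = 2\n\nfunc KeptFunc() {}\nfunc dropped() {}\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Keep only top-level names containing "Kept"; FilterFile reports
	// whether any declarations survived the filter.
	keep := func(name string) bool { return strings.Contains(name, "Kept") }
	if ast.FilterFile(f, keep) {
		printer.Fprint(os.Stdout, fset, f)
	}
}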
diff --git a/src/pkg/go/ast/import.go b/src/pkg/go/ast/import.go
new file mode 100644
index 000000000..2d4f69aae
--- /dev/null
+++ b/src/pkg/go/ast/import.go
@@ -0,0 +1,134 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "go/token"
+ "sort"
+ "strconv"
+)
+
+// SortImports sorts runs of consecutive import lines in import blocks in f.
+func SortImports(fset *token.FileSet, f *File) {
+ for _, d := range f.Decls {
+ d, ok := d.(*GenDecl)
+ if !ok || d.Tok != token.IMPORT {
+ // Not an import declaration, so we're done.
+ // Imports are always first.
+ break
+ }
+
+ if d.Lparen == token.NoPos {
+ // Not a block: sorted by default.
+ continue
+ }
+
+ // Identify and sort runs of specs on successive lines.
+ i := 0
+ for j, s := range d.Specs {
+ if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
+ // j begins a new run. End this one.
+ sortSpecs(fset, f, d.Specs[i:j])
+ i = j
+ }
+ }
+ sortSpecs(fset, f, d.Specs[i:])
+ }
+}
+
+func importPath(s Spec) string {
+ t, err := strconv.Unquote(s.(*ImportSpec).Path.Value)
+ if err == nil {
+ return t
+ }
+ return ""
+}
+
+type posSpan struct {
+ Start token.Pos
+ End token.Pos
+}
+
+func sortSpecs(fset *token.FileSet, f *File, specs []Spec) {
+ // Avoid work if already sorted (also catches < 2 entries).
+ sorted := true
+ for i, s := range specs {
+ if i > 0 && importPath(specs[i-1]) > importPath(s) {
+ sorted = false
+ break
+ }
+ }
+ if sorted {
+ return
+ }
+
+ // Record positions for specs.
+ pos := make([]posSpan, len(specs))
+ for i, s := range specs {
+ pos[i] = posSpan{s.Pos(), s.End()}
+ }
+
+ // Identify comments in this range.
+ // Any comment from pos[0].Start to the final line counts.
+ lastLine := fset.Position(pos[len(pos)-1].End).Line
+ cstart := len(f.Comments)
+ cend := len(f.Comments)
+ for i, g := range f.Comments {
+ if g.Pos() < pos[0].Start {
+ continue
+ }
+ if i < cstart {
+ cstart = i
+ }
+ if fset.Position(g.End()).Line > lastLine {
+ cend = i
+ break
+ }
+ }
+ comments := f.Comments[cstart:cend]
+
+ // Assign each comment to the import spec preceding it.
+ importComment := map[*ImportSpec][]*CommentGroup{}
+ specIndex := 0
+ for _, g := range comments {
+ for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
+ specIndex++
+ }
+ s := specs[specIndex].(*ImportSpec)
+ importComment[s] = append(importComment[s], g)
+ }
+
+ // Sort the import specs by import path.
+ // Reassign the import paths to have the same position sequence.
+ // Reassign each comment to abut the end of its spec.
+ // Sort the comments by new position.
+ sort.Sort(byImportPath(specs))
+ for i, s := range specs {
+ s := s.(*ImportSpec)
+ if s.Name != nil {
+ s.Name.NamePos = pos[i].Start
+ }
+ s.Path.ValuePos = pos[i].Start
+ s.EndPos = pos[i].End
+ for _, g := range importComment[s] {
+ for _, c := range g.List {
+ c.Slash = pos[i].End
+ }
+ }
+ }
+ sort.Sort(byCommentPos(comments))
+}
+
+type byImportPath []Spec // slice of *ImportSpec
+
+func (x byImportPath) Len() int { return len(x) }
+func (x byImportPath) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byImportPath) Less(i, j int) bool { return importPath(x[i]) < importPath(x[j]) }
+
+type byCommentPos []*CommentGroup
+
+func (x byCommentPos) Len() int { return len(x) }
+func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }
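
The new SortImports is the machinery behind import sorting in tools like gofmt. A sketch of how it is driven (input invented; note the unsorted block):

package main

import (
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"os"
)

func main() {
	src := "package p\n\nimport (\n\t\"strings\"\n\t\"bytes\"\n\t\"fmt\"\n)\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// Sorts each run of consecutive import lines; the new ImportSpec.EndPos
	// field keeps the printer from merging specs that swapped positions.
	ast.SortImports(fset, f)
	printer.Fprint(os.Stdout, fset, f) // prints bytes, fmt, strings in order
}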
diff --git a/src/pkg/go/ast/print.go b/src/pkg/go/ast/print.go
index 62a30481d..02cf9e022 100644
--- a/src/pkg/go/ast/print.go
+++ b/src/pkg/go/ast/print.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file contains printing suppport for ASTs.
+// This file contains printing support for ASTs.
package ast
@@ -36,7 +36,7 @@ func NotNilFilter(_ string, v reflect.Value) bool {
// struct fields for which f(fieldname, fieldvalue) is true are
// are printed; all others are filtered from the output.
//
-func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (n int, err os.Error) {
+func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (err error) {
// setup printer
p := printer{
output: w,
@@ -48,7 +48,6 @@ func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (n i
// install error handler
defer func() {
- n = p.written
if e := recover(); e != nil {
err = e.(localError).err // re-panics if it's not a localError
}
@@ -67,24 +66,23 @@ func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (n i
// Print prints x to standard output, skipping nil fields.
// Print(fset, x) is the same as Fprint(os.Stdout, fset, x, NotNilFilter).
-func Print(fset *token.FileSet, x interface{}) (int, os.Error) {
+func Print(fset *token.FileSet, x interface{}) error {
return Fprint(os.Stdout, fset, x, NotNilFilter)
}
type printer struct {
- output io.Writer
- fset *token.FileSet
- filter FieldFilter
- ptrmap map[interface{}]int // *T -> line number
- written int // number of bytes written to output
- indent int // current indentation level
- last byte // the last byte processed by Write
- line int // current line number
+ output io.Writer
+ fset *token.FileSet
+ filter FieldFilter
+ ptrmap map[interface{}]int // *T -> line number
+ indent int // current indentation level
+ last byte // the last byte processed by Write
+ line int // current line number
}
var indent = []byte(". ")
-func (p *printer) Write(data []byte) (n int, err os.Error) {
+func (p *printer) Write(data []byte) (n int, err error) {
var m int
for i, b := range data {
// invariant: data[0:n] has been written
@@ -114,17 +112,15 @@ func (p *printer) Write(data []byte) (n int, err os.Error) {
return
}
-// localError wraps locally caught os.Errors so we can distinguish
+// localError wraps locally caught errors so we can distinguish
// them from genuine panics which we don't want to return as errors.
type localError struct {
- err os.Error
+ err error
}
// printf is a convenience wrapper that takes care of print errors.
func (p *printer) printf(format string, args ...interface{}) {
- n, err := fmt.Fprintf(p, format, args...)
- p.written += n
- if err != nil {
+ if _, err := fmt.Fprintf(p, format, args...); err != nil {
panic(localError{err})
}
}
@@ -149,7 +145,7 @@ func (p *printer) print(x reflect.Value) {
p.print(x.Elem())
case reflect.Map:
- p.printf("%s (len = %d) {\n", x.Type().String(), x.Len())
+ p.printf("%s (len = %d) {\n", x.Type(), x.Len())
p.indent++
for _, key := range x.MapKeys() {
p.print(key)
@@ -178,7 +174,7 @@ func (p *printer) print(x reflect.Value) {
p.printf("%#q", s)
return
}
- p.printf("%s (len = %d) {\n", x.Type().String(), x.Len())
+ p.printf("%s (len = %d) {\n", x.Type(), x.Len())
p.indent++
for i, n := 0, x.Len(); i < n; i++ {
p.printf("%d: ", i)
@@ -189,7 +185,7 @@ func (p *printer) print(x reflect.Value) {
p.printf("}")
case reflect.Struct:
- p.printf("%s {\n", x.Type().String())
+ p.printf("%s {\n", x.Type())
p.indent++
t := x.Type()
for i, n := 0, t.NumField(); i < n; i++ {
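
With this change Fprint and Print return only an error; the byte count (and the printer's written field) are gone. A sketch of the new call shape (source string invented):

package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"log"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p; var X = 1", 0)
	if err != nil {
		log.Fatal(err)
	}
	// Fprint now returns only an error; there is no count to discard.
	var buf bytes.Buffer
	if err := ast.Fprint(&buf, fset, f.Decls[0], ast.NotNilFilter); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
}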
diff --git a/src/pkg/go/ast/print_test.go b/src/pkg/go/ast/print_test.go
index f4e8f7a78..71c028e75 100644
--- a/src/pkg/go/ast/print_test.go
+++ b/src/pkg/go/ast/print_test.go
@@ -23,11 +23,10 @@ var tests = []struct {
{"foobar", "0 \"foobar\""},
// maps
- {map[string]int{"a": 1, "b": 2},
- `0 map[string] int (len = 2) {
+ {map[string]int{"a": 1},
+ `0 map[string]int (len = 1) {
1 . "a": 1
- 2 . "b": 2
- 3 }`},
+ 2 }`},
// pointers
{new(int), "0 *0"},
@@ -41,10 +40,10 @@ var tests = []struct {
4 }`},
// structs
- {struct{ x, y int }{42, 991},
- `0 struct { x int; y int } {
- 1 . x: 42
- 2 . y: 991
+ {struct{ X, Y int }{42, 991},
+ `0 struct { X int; Y int } {
+ 1 . X: 42
+ 2 . Y: 991
3 }`},
}
@@ -67,7 +66,7 @@ func TestPrint(t *testing.T) {
var buf bytes.Buffer
for _, test := range tests {
buf.Reset()
- if _, err := Fprint(&buf, nil, test.x, nil); err != nil {
+ if err := Fprint(&buf, nil, test.x, nil); err != nil {
t.Errorf("Fprint failed: %s", err)
}
if s, ts := trim(buf.String()), trim(test.s); s != ts {
diff --git a/src/pkg/go/ast/resolve.go b/src/pkg/go/ast/resolve.go
index 3927a799e..908e61c5d 100644
--- a/src/pkg/go/ast/resolve.go
+++ b/src/pkg/go/ast/resolve.go
@@ -10,17 +10,16 @@ import (
"fmt"
"go/scanner"
"go/token"
- "os"
"strconv"
)
type pkgBuilder struct {
- scanner.ErrorVector
- fset *token.FileSet
+ fset *token.FileSet
+ errors scanner.ErrorList
}
func (p *pkgBuilder) error(pos token.Pos, msg string) {
- p.Error(p.fset.Position(pos), msg)
+ p.errors.Add(p.fset.Position(pos), msg)
}
func (p *pkgBuilder) errorf(pos token.Pos, format string, args ...interface{}) {
@@ -61,7 +60,7 @@ func resolve(scope *Scope, ident *Ident) bool {
// Importer should load the package data for the given path into
// a new *Object (pkg), record pkg in the imports map, and then
// return pkg.
-type Importer func(imports map[string]*Object, path string) (pkg *Object, err os.Error)
+type Importer func(imports map[string]*Object, path string) (pkg *Object, err error)
// NewPackage creates a new Package node from a set of File nodes. It resolves
// unresolved identifiers across files and updates each file's Unresolved list
@@ -72,7 +71,7 @@ type Importer func(imports map[string]*Object, path string) (pkg *Object, err os
// different package names are reported and then ignored.
// The result is a package node and a scanner.ErrorList if there were errors.
//
-func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, os.Error) {
+func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error) {
var p pkgBuilder
p.fset = fset
@@ -114,7 +113,7 @@ func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer,
importErrors = true
continue
}
- path, _ := strconv.Unquote(string(spec.Path.Value))
+ path, _ := strconv.Unquote(spec.Path.Value)
pkg, err := importer(imports, path)
if err != nil {
p.errorf(spec.Path.Pos(), "could not import %s (%s)", path, err)
@@ -170,5 +169,6 @@ func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer,
pkgScope.Outer = universe // reset universe scope
}
- return &Package{pkgName, pkgScope, imports, files}, p.GetError(scanner.Sorted)
+ p.errors.Sort()
+ return &Package{pkgName, pkgScope, imports, files}, p.errors.Err()
}
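
The switch from scanner.ErrorVector to scanner.ErrorList changes how callers accumulate and surface errors. A sketch of the new pattern (positions and messages invented):

package main

import (
	"fmt"
	"go/scanner"
	"go/token"
)

func main() {
	var errs scanner.ErrorList
	errs.Add(token.Position{Filename: "a.go", Line: 3, Column: 5}, "undeclared name: x")
	errs.Add(token.Position{Filename: "a.go", Line: 1, Column: 1}, "bad package clause")
	errs.Sort() // order by position, as NewPackage now does before returning
	// Err returns nil for an empty list, so it can be returned directly.
	if err := errs.Err(); err != nil {
		fmt.Println(err) // first error, plus a count of how many more follow
	}
}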
diff --git a/src/pkg/go/ast/scope.go b/src/pkg/go/ast/scope.go
index 92e366980..11e6b13f1 100644
--- a/src/pkg/go/ast/scope.go
+++ b/src/pkg/go/ast/scope.go
@@ -80,7 +80,7 @@ func (s *Scope) String() string {
type Object struct {
Kind ObjKind
Name string // declared name
- Decl interface{} // corresponding Field, XxxSpec, FuncDecl, or LabeledStmt; or nil
+ Decl interface{} // corresponding Field, XxxSpec, FuncDecl, LabeledStmt, AssignStmt, Scope; or nil
Data interface{} // object-specific data; or nil
Type interface{} // place holder for type information; may be nil
}
@@ -125,6 +125,14 @@ func (obj *Object) Pos() token.Pos {
if d.Label.Name == name {
return d.Label.Pos()
}
+ case *AssignStmt:
+ for _, x := range d.Lhs {
+ if ident, isIdent := x.(*Ident); isIdent && ident.Name == name {
+ return ident.Pos()
+ }
+ }
+ case *Scope:
+ // predeclared object - nothing to do for now
}
return token.NoPos
}
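
With the new *ast.AssignStmt case, Object.Pos can report where a variable declared by := was introduced. A sketch (source invented):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p\nfunc f() { x := 1; _ = x }", 0)
	if err != nil {
		panic(err)
	}
	// The parser resolves x to an Object whose Decl is the := statement;
	// Object.Pos now scans the statement's left-hand side to find it.
	var obj *ast.Object
	ast.Inspect(f, func(n ast.Node) bool {
		if id, ok := n.(*ast.Ident); ok && obj == nil && id.Name == "x" && id.Obj != nil {
			obj = id.Obj
		}
		return true
	})
	fmt.Println("x declared at", fset.Position(obj.Pos()))
}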
diff --git a/src/pkg/go/build/Makefile b/src/pkg/go/build/Makefile
deleted file mode 100644
index 349e00e80..000000000
--- a/src/pkg/go/build/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../../Make.inc
-
-TARG=go/build
-GOFILES=\
- build.go\
- dir.go\
- path.go\
- syslist.go\
-
-CLEANFILES+=syslist.go pkgtest/_obj cmdtest/_obj cgotest/_obj
-
-include ../../../Make.pkg
-
-syslist.go: ../../../Make.inc Makefile
- echo '// Generated automatically by make.' >$@
- echo 'package build' >>$@
- echo 'const goosList = "$(GOOS_LIST)"' >>$@
- echo 'const goarchList = "$(GOARCH_LIST)"' >>$@
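
The deleted rules above generated syslist.go from Makefile variables; per the diffstat, syslist.go is now an ordinary checked-in source file. Its shape follows directly from the echo lines, roughly as below (the concrete OS/arch values here are assumptions, not quoted from the commit):

package build

// Lists of supported targets, now maintained in the source tree rather
// than generated by make (values below are illustrative).
const goosList = "darwin freebsd linux netbsd openbsd plan9 windows "
const goarchList = "386 amd64 arm "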
diff --git a/src/pkg/go/build/build.go b/src/pkg/go/build/build.go
index 97f92bfb6..d749aef15 100644
--- a/src/pkg/go/build/build.go
+++ b/src/pkg/go/build/build.go
@@ -2,250 +2,978 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package build provides tools for building Go packages.
package build
import (
"bytes"
- "exec"
+ "errors"
"fmt"
+ "go/ast"
+ "go/doc"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
"os"
+ pathpkg "path"
"path/filepath"
- "regexp"
"runtime"
+ "sort"
+ "strconv"
"strings"
+ "unicode"
)
-// Build produces a build Script for the given package.
-func Build(tree *Tree, pkg string, info *DirInfo) (*Script, os.Error) {
- s := &Script{}
- b := &build{
- script: s,
- path: filepath.Join(tree.SrcDir(), pkg),
+// A Context specifies the supporting context for a build.
+type Context struct {
+ GOARCH string // target architecture
+ GOOS string // target operating system
+ GOROOT string // Go root
+ GOPATH string // Go path
+ CgoEnabled bool // whether cgo can be used
+ BuildTags []string // additional tags to recognize in +build lines
+ UseAllFiles bool // use files regardless of +build lines, file names
+ Compiler string // compiler to assume when computing target paths
+
+ // By default, Import uses the operating system's file system calls
+ // to read directories and files. To read from other sources,
+ // callers can set the following functions. They all have default
+ // behaviors that use the local file system, so clients need only set
+ // the functions whose behaviors they wish to change.
+
+ // JoinPath joins the sequence of path fragments into a single path.
+ // If JoinPath is nil, Import uses filepath.Join.
+ JoinPath func(elem ...string) string
+
+ // SplitPathList splits the path list into a slice of individual paths.
+ // If SplitPathList is nil, Import uses filepath.SplitList.
+ SplitPathList func(list string) []string
+
+ // IsAbsPath reports whether path is an absolute path.
+ // If IsAbsPath is nil, Import uses filepath.IsAbs.
+ IsAbsPath func(path string) bool
+
+ // IsDir reports whether the path names a directory.
+ // If IsDir is nil, Import calls os.Stat and uses the result's IsDir method.
+ IsDir func(path string) bool
+
+ // HasSubdir reports whether dir is a subdirectory of
+ // (perhaps multiple levels below) root.
+ // If so, HasSubdir sets rel to a slash-separated path that
+ // can be joined to root to produce a path equivalent to dir.
+ // If HasSubdir is nil, Import uses an implementation built on
+ // filepath.EvalSymlinks.
+ HasSubdir func(root, dir string) (rel string, ok bool)
+
+ // ReadDir returns a slice of os.FileInfo, sorted by Name,
+ // describing the content of the named directory.
+ // If ReadDir is nil, Import uses ioutil.ReadDir.
+ ReadDir func(dir string) (fi []os.FileInfo, err error)
+
+ // OpenFile opens a file (not a directory) for reading.
+ // If OpenFile is nil, Import uses os.Open.
+ OpenFile func(path string) (r io.ReadCloser, err error)
+}
+
+// joinPath calls ctxt.JoinPath (if not nil) or else filepath.Join.
+func (ctxt *Context) joinPath(elem ...string) string {
+ if f := ctxt.JoinPath; f != nil {
+ return f(elem...)
}
- b.obj = b.abs("_obj") + string(filepath.Separator)
+ return filepath.Join(elem...)
+}
- b.goarch = runtime.GOARCH
- if g := os.Getenv("GOARCH"); g != "" {
- b.goarch = g
+// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList.
+func (ctxt *Context) splitPathList(s string) []string {
+ if f := ctxt.SplitPathList; f != nil {
+ return f(s)
}
- var err os.Error
- b.arch, err = ArchChar(b.goarch)
- if err != nil {
- return nil, err
+ return filepath.SplitList(s)
+}
+
+// isAbsPath calls ctxt.IsAbsPath (if not nil) or else filepath.IsAbs.
+func (ctxt *Context) isAbsPath(path string) bool {
+ if f := ctxt.IsAbsPath; f != nil {
+ return f(path)
}
+ return filepath.IsAbs(path)
+}
- // add import object files to list of Inputs
- for _, pkg := range info.Imports {
- t, p, err := FindTree(pkg)
- if err != nil && err != ErrNotFound {
- // FindTree should always be able to suggest an import
- // path and tree. The path must be malformed
- // (for example, an absolute or relative path).
- return nil, os.NewError("build: invalid import: " + pkg)
- }
- s.addInput(filepath.Join(t.PkgDir(), p+".a"))
+// isDir calls ctxt.IsDir (if not nil) or else uses os.Stat.
+func (ctxt *Context) isDir(path string) bool {
+ if f := ctxt.IsDir; f != nil {
+ return f(path)
}
+ fi, err := os.Stat(path)
+ return err == nil && fi.IsDir()
+}
- // .go files to be built with gc
- gofiles := b.abss(info.GoFiles...)
- s.addInput(gofiles...)
+// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
+// the local file system to answer the question.
+func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
+ if f := ctxt.HasSubdir; f != nil {
+ return f(root, dir)
+ }
- var ofiles []string // object files to be linked or packed
+ if p, err := filepath.EvalSymlinks(root); err == nil {
+ root = p
+ }
+ if p, err := filepath.EvalSymlinks(dir); err == nil {
+ dir = p
+ }
+ const sep = string(filepath.Separator)
+ root = filepath.Clean(root)
+ if !strings.HasSuffix(root, sep) {
+ root += sep
+ }
+ dir = filepath.Clean(dir)
+ if !strings.HasPrefix(dir, root) {
+ return "", false
+ }
+ return filepath.ToSlash(dir[len(root):]), true
+}
- // make build directory
- b.mkdir(b.obj)
- s.addIntermediate(b.obj)
+// readDir calls ctxt.ReadDir (if not nil) or else ioutil.ReadDir.
+func (ctxt *Context) readDir(path string) ([]os.FileInfo, error) {
+ if f := ctxt.ReadDir; f != nil {
+ return f(path)
+ }
+ return ioutil.ReadDir(path)
+}
- // cgo
- if len(info.CgoFiles) > 0 {
- cgoFiles := b.abss(info.CgoFiles...)
- s.addInput(cgoFiles...)
- cgoCFiles := b.abss(info.CFiles...)
- s.addInput(cgoCFiles...)
- outGo, outObj := b.cgo(cgoFiles, cgoCFiles)
- gofiles = append(gofiles, outGo...)
- ofiles = append(ofiles, outObj...)
- s.addIntermediate(outGo...)
- s.addIntermediate(outObj...)
+// openFile calls ctxt.OpenFile (if not nil) or else os.Open.
+func (ctxt *Context) openFile(path string) (io.ReadCloser, error) {
+ if fn := ctxt.OpenFile; fn != nil {
+ return fn(path)
}
- // compile
- if len(gofiles) > 0 {
- ofile := b.obj + "_go_." + b.arch
- b.gc(ofile, gofiles...)
- ofiles = append(ofiles, ofile)
- s.addIntermediate(ofile)
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err // nil interface
}
+ return f, nil
+}
- // assemble
- for _, sfile := range info.SFiles {
- ofile := b.obj + sfile[:len(sfile)-1] + b.arch
- sfile = b.abs(sfile)
- s.addInput(sfile)
- b.asm(ofile, sfile)
- ofiles = append(ofiles, ofile)
- s.addIntermediate(ofile)
+// isFile determines whether path is a file by trying to open it.
+// It reuses openFile instead of adding another function to the
+// list in Context.
+func (ctxt *Context) isFile(path string) bool {
+ f, err := ctxt.openFile(path)
+ if err != nil {
+ return false
}
+ f.Close()
+ return true
+}
- if len(ofiles) == 0 {
- return nil, os.NewError("make: no object files to build")
+// gopath returns the list of Go path directories.
+func (ctxt *Context) gopath() []string {
+ var all []string
+ for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
+ if p == "" || p == ctxt.GOROOT {
+ // Empty paths are uninteresting.
+ // If the path is the GOROOT, ignore it.
+ // People sometimes set GOPATH=$GOROOT, which is useless
+ // but would cause us to find packages with import paths
+ // like "pkg/math".
+ // Do not get confused by this common mistake.
+ continue
+ }
+ all = append(all, p)
}
+ return all
+}
- // choose target file
- var targ string
- if info.IsCommand() {
- // use the last part of the import path as binary name
- _, bin := filepath.Split(pkg)
- if runtime.GOOS == "windows" {
- bin += ".exe"
+// SrcDirs returns a list of package source root directories.
+// It draws from the current Go root and Go path but omits directories
+// that do not exist.
+func (ctxt *Context) SrcDirs() []string {
+ var all []string
+ if ctxt.GOROOT != "" {
+ dir := ctxt.joinPath(ctxt.GOROOT, "src", "pkg")
+ if ctxt.isDir(dir) {
+ all = append(all, dir)
}
- targ = filepath.Join(tree.BinDir(), bin)
- } else {
- targ = filepath.Join(tree.PkgDir(), pkg+".a")
}
+ for _, p := range ctxt.gopath() {
+ dir := ctxt.joinPath(p, "src")
+ if ctxt.isDir(dir) {
+ all = append(all, dir)
+ }
+ }
+ return all
+}
- // make target directory
- targDir, _ := filepath.Split(targ)
- b.mkdir(targDir)
+// Default is the default Context for builds.
+// It uses the GOARCH, GOOS, GOROOT, and GOPATH environment variables
+// if set, or else the compiled code's GOARCH, GOOS, and GOROOT.
+var Default Context = defaultContext()
+
+var cgoEnabled = map[string]bool{
+ "darwin/386": true,
+ "darwin/amd64": true,
+ "linux/386": true,
+ "linux/amd64": true,
+ "freebsd/386": true,
+ "freebsd/amd64": true,
+ "windows/386": true,
+ "windows/amd64": true,
+}
- // link binary or pack object
- if info.IsCommand() {
- b.ld(targ, ofiles...)
- } else {
- b.gopack(targ, ofiles...)
+func defaultContext() Context {
+ var c Context
+
+ c.GOARCH = envOr("GOARCH", runtime.GOARCH)
+ c.GOOS = envOr("GOOS", runtime.GOOS)
+ c.GOROOT = runtime.GOROOT()
+ c.GOPATH = envOr("GOPATH", "")
+ c.Compiler = runtime.Compiler
+
+ switch os.Getenv("CGO_ENABLED") {
+ case "1":
+ c.CgoEnabled = true
+ case "0":
+ c.CgoEnabled = false
+ default:
+ c.CgoEnabled = cgoEnabled[c.GOOS+"/"+c.GOARCH]
}
- s.Output = append(s.Output, targ)
- return b.script, nil
+ return c
}
-// A Script describes the build process for a Go package.
-// The Input, Intermediate, and Output fields are lists of absolute paths.
-type Script struct {
- Cmd []*Cmd
- Input []string
- Intermediate []string
- Output []string
+func envOr(name, def string) string {
+ s := os.Getenv(name)
+ if s == "" {
+ return def
+ }
+ return s
}
-func (s *Script) addInput(file ...string) {
- s.Input = append(s.Input, file...)
+// An ImportMode controls the behavior of the Import method.
+type ImportMode uint
+
+const (
+ // If FindOnly is set, Import stops after locating the directory
+ // that should contain the sources for a package. It does not
+ // read any files in the directory.
+ FindOnly ImportMode = 1 << iota
+
+ // If AllowBinary is set, Import can be satisfied by a compiled
+ // package object without corresponding sources.
+ AllowBinary
+)
+
+// A Package describes the Go package found in a directory.
+type Package struct {
+ Dir string // directory containing package sources
+ Name string // package name
+ Doc string // documentation synopsis
+ ImportPath string // import path of package ("" if unknown)
+ Root string // root of Go tree where this package lives
+ SrcRoot string // package source root directory ("" if unknown)
+ PkgRoot string // package install root directory ("" if unknown)
+ BinDir string // command install directory ("" if unknown)
+ Goroot bool // package found in Go root
+ PkgObj string // installed .a file
+
+ // Source files
+ GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
+ CgoFiles []string // .go source files that import "C"
+ CFiles []string // .c source files
+ HFiles []string // .h source files
+ SFiles []string // .s source files
+ SysoFiles []string // .syso system object files to add to archive
+
+ // Cgo directives
+ CgoPkgConfig []string // Cgo pkg-config directives
+ CgoCFLAGS []string // Cgo CFLAGS directives
+ CgoLDFLAGS []string // Cgo LDFLAGS directives
+
+ // Dependency information
+ Imports []string // imports from GoFiles, CgoFiles
+ ImportPos map[string][]token.Position // line information for Imports
+
+ // Test information
+ TestGoFiles []string // _test.go files in package
+ TestImports []string // imports from TestGoFiles
+ TestImportPos map[string][]token.Position // line information for TestImports
+ XTestGoFiles []string // _test.go files outside package
+ XTestImports []string // imports from XTestGoFiles
+ XTestImportPos map[string][]token.Position // line information for XTestImports
}
-func (s *Script) addIntermediate(file ...string) {
- s.Intermediate = append(s.Intermediate, file...)
+// IsCommand reports whether the package is considered a
+// command to be installed (not just a library).
+// Packages named "main" are treated as commands.
+func (p *Package) IsCommand() bool {
+ return p.Name == "main"
}
-// Run runs the Script's Cmds in order.
-func (s *Script) Run() os.Error {
- for _, c := range s.Cmd {
- if err := c.Run(); err != nil {
- return err
+// ImportDir is like Import but processes the Go package found in
+// the named directory.
+func (ctxt *Context) ImportDir(dir string, mode ImportMode) (*Package, error) {
+ return ctxt.Import(".", dir, mode)
+}
+
+// NoGoError is the error used by Import to describe a directory
+// containing no Go source files.
+type NoGoError struct {
+ Dir string
+}
+
+func (e *NoGoError) Error() string {
+ return "no Go source files in " + e.Dir
+}
+
+// Import returns details about the Go package named by the import path,
+// interpreting local import paths relative to the srcDir directory.
+// If the path is a local import path naming a package that can be imported
+// using a standard import path, the returned package will set p.ImportPath
+// to that path.
+//
+// In the directory containing the package, .go, .c, .h, and .s files are
+// considered part of the package except for:
+//
+// - .go files in package documentation
+// - files starting with _ or . (likely editor temporary files)
+// - files with build constraints not satisfied by the context
+//
+// If an error occurs, Import returns a non-nil error and a non-nil
+// *Package containing partial information.
+//
+func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Package, error) {
+ p := &Package{
+ ImportPath: path,
+ }
+
+ var pkga string
+ var pkgerr error
+ switch ctxt.Compiler {
+ case "gccgo":
+ dir, elem := pathpkg.Split(p.ImportPath)
+ pkga = "pkg/gccgo/" + dir + "lib" + elem + ".a"
+ case "gc":
+ pkga = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + "/" + p.ImportPath + ".a"
+ default:
+ // Save error for end of function.
+ pkgerr = fmt.Errorf("import %q: unknown compiler %q", path, ctxt.Compiler)
+ }
+
+ binaryOnly := false
+ if IsLocalImport(path) {
+ pkga = "" // local imports have no installed path
+ if srcDir == "" {
+ return p, fmt.Errorf("import %q: import relative to unknown directory", path)
+ }
+ if !ctxt.isAbsPath(path) {
+ p.Dir = ctxt.joinPath(srcDir, path)
+ }
+ // Determine canonical import path, if any.
+ if ctxt.GOROOT != "" {
+ root := ctxt.joinPath(ctxt.GOROOT, "src", "pkg")
+ if sub, ok := ctxt.hasSubdir(root, p.Dir); ok {
+ p.Goroot = true
+ p.ImportPath = sub
+ p.Root = ctxt.GOROOT
+ goto Found
+ }
+ }
+ all := ctxt.gopath()
+ for i, root := range all {
+ rootsrc := ctxt.joinPath(root, "src")
+ if sub, ok := ctxt.hasSubdir(rootsrc, p.Dir); ok {
+ // We found a potential import path for dir,
+ // but check that using it wouldn't find something
+ // else first.
+ if ctxt.GOROOT != "" {
+ if dir := ctxt.joinPath(ctxt.GOROOT, "src", "pkg", sub); ctxt.isDir(dir) {
+ goto Found
+ }
+ }
+ for _, earlyRoot := range all[:i] {
+ if dir := ctxt.joinPath(earlyRoot, "src", sub); ctxt.isDir(dir) {
+ goto Found
+ }
+ }
+
+ // sub would not name some other directory instead of this one.
+ // Record it.
+ p.ImportPath = sub
+ p.Root = root
+ goto Found
+ }
+ }
+ // It's okay that we didn't find a root containing dir.
+ // Keep going with the information we have.
+ } else {
+ if strings.HasPrefix(path, "/") {
+ return p, fmt.Errorf("import %q: cannot import absolute path", path)
+ }
+ // Determine directory from import path.
+ if ctxt.GOROOT != "" {
+ dir := ctxt.joinPath(ctxt.GOROOT, "src", "pkg", path)
+ isDir := ctxt.isDir(dir)
+ binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga))
+ if isDir || binaryOnly {
+ p.Dir = dir
+ p.Goroot = true
+ p.Root = ctxt.GOROOT
+ goto Found
+ }
+ }
+ for _, root := range ctxt.gopath() {
+ dir := ctxt.joinPath(root, "src", path)
+ isDir := ctxt.isDir(dir)
+ binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(root, pkga))
+ if isDir || binaryOnly {
+ p.Dir = dir
+ p.Root = root
+ goto Found
+ }
+ }
+ return p, fmt.Errorf("import %q: cannot find package", path)
+ }
+
+Found:
+ if p.Root != "" {
+ if p.Goroot {
+ p.SrcRoot = ctxt.joinPath(p.Root, "src", "pkg")
+ } else {
+ p.SrcRoot = ctxt.joinPath(p.Root, "src")
+ }
+ p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
+ p.BinDir = ctxt.joinPath(p.Root, "bin")
+ if pkga != "" {
+ p.PkgObj = ctxt.joinPath(p.Root, pkga)
}
}
- return nil
-}
-// Stale returns true if the build's inputs are newer than its outputs.
-func (s *Script) Stale() bool {
- var latest int64
- // get latest mtime of outputs
- for _, file := range s.Output {
- fi, err := os.Stat(file)
+ if mode&FindOnly != 0 {
+ return p, pkgerr
+ }
+ if binaryOnly && (mode&AllowBinary) != 0 {
+ return p, pkgerr
+ }
+
+ dirs, err := ctxt.readDir(p.Dir)
+ if err != nil {
+ return p, err
+ }
+
+ var Sfiles []string // files with ".S" (capital S)
+ var firstFile string
+ imported := make(map[string][]token.Position)
+ testImported := make(map[string][]token.Position)
+ xTestImported := make(map[string][]token.Position)
+ fset := token.NewFileSet()
+ for _, d := range dirs {
+ if d.IsDir() {
+ continue
+ }
+ name := d.Name()
+ if strings.HasPrefix(name, "_") ||
+ strings.HasPrefix(name, ".") {
+ continue
+ }
+ if !ctxt.UseAllFiles && !ctxt.goodOSArchFile(name) {
+ continue
+ }
+
+ i := strings.LastIndex(name, ".")
+ if i < 0 {
+ i = len(name)
+ }
+ ext := name[i:]
+ switch ext {
+ case ".go", ".c", ".s", ".h", ".S":
+ // tentatively okay - read to make sure
+ case ".syso":
+ // binary objects to add to package archive
+ // Likely of the form foo_windows.syso, but
+ // the name was vetted above with goodOSArchFile.
+ p.SysoFiles = append(p.SysoFiles, name)
+ continue
+ default:
+ // skip
+ continue
+ }
+
+ filename := ctxt.joinPath(p.Dir, name)
+ f, err := ctxt.openFile(filename)
if err != nil {
- // any error reading output files means stale
- return true
+ return p, err
}
- if m := fi.Mtime_ns; m > latest {
- latest = m
+ data, err := ioutil.ReadAll(f)
+ f.Close()
+ if err != nil {
+ return p, fmt.Errorf("read %s: %v", filename, err)
}
- }
- for _, file := range s.Input {
- fi, err := os.Stat(file)
- if err != nil || fi.Mtime_ns > latest {
- // any error reading input files means stale
- // (attempt to rebuild to figure out why)
- return true
+
+ // Look for +build comments to accept or reject the file.
+ if !ctxt.UseAllFiles && !ctxt.shouldBuild(data) {
+ continue
+ }
+
+ // Going to save the file. For non-Go files, can stop here.
+ switch ext {
+ case ".c":
+ p.CFiles = append(p.CFiles, name)
+ continue
+ case ".h":
+ p.HFiles = append(p.HFiles, name)
+ continue
+ case ".s":
+ p.SFiles = append(p.SFiles, name)
+ continue
+ case ".S":
+ Sfiles = append(Sfiles, name)
+ continue
+ }
+
+ pf, err := parser.ParseFile(fset, filename, data, parser.ImportsOnly|parser.ParseComments)
+ if err != nil {
+ return p, err
+ }
+
+ pkg := string(pf.Name.Name)
+ if pkg == "documentation" {
+ continue
+ }
+
+ isTest := strings.HasSuffix(name, "_test.go")
+ isXTest := false
+ if isTest && strings.HasSuffix(pkg, "_test") {
+ isXTest = true
+ pkg = pkg[:len(pkg)-len("_test")]
+ }
+
+ if p.Name == "" {
+ p.Name = pkg
+ firstFile = name
+ } else if pkg != p.Name {
+ return p, fmt.Errorf("found packages %s (%s) and %s (%s) in %s", p.Name, firstFile, pkg, name, p.Dir)
+ }
+ if pf.Doc != nil && p.Doc == "" {
+ p.Doc = doc.Synopsis(pf.Doc.Text())
+ }
+
+ // Record imports and information about cgo.
+ isCgo := false
+ for _, decl := range pf.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ for _, dspec := range d.Specs {
+ spec, ok := dspec.(*ast.ImportSpec)
+ if !ok {
+ continue
+ }
+ quoted := string(spec.Path.Value)
+ path, err := strconv.Unquote(quoted)
+ if err != nil {
+ log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
+ }
+ if isXTest {
+ xTestImported[path] = append(xTestImported[path], fset.Position(spec.Pos()))
+ } else if isTest {
+ testImported[path] = append(testImported[path], fset.Position(spec.Pos()))
+ } else {
+ imported[path] = append(imported[path], fset.Position(spec.Pos()))
+ }
+ if path == "C" {
+ if isTest {
+ return p, fmt.Errorf("use of cgo in test %s not supported", filename)
+ }
+ cg := spec.Doc
+ if cg == nil && len(d.Specs) == 1 {
+ cg = d.Doc
+ }
+ if cg != nil {
+ if err := ctxt.saveCgo(filename, p, cg); err != nil {
+ return p, err
+ }
+ }
+ isCgo = true
+ }
+ }
+ }
+ if isCgo {
+ if ctxt.CgoEnabled {
+ p.CgoFiles = append(p.CgoFiles, name)
+ }
+ } else if isXTest {
+ p.XTestGoFiles = append(p.XTestGoFiles, name)
+ } else if isTest {
+ p.TestGoFiles = append(p.TestGoFiles, name)
+ } else {
+ p.GoFiles = append(p.GoFiles, name)
}
}
- return false
+ if p.Name == "" {
+ return p, &NoGoError{p.Dir}
+ }
+
+ p.Imports, p.ImportPos = cleanImports(imported)
+ p.TestImports, p.TestImportPos = cleanImports(testImported)
+ p.XTestImports, p.XTestImportPos = cleanImports(xTestImported)
+
+ // add the .S files only if we are using cgo
+ // (which means gcc will compile them).
+ // The standard assemblers expect .s files.
+ if len(p.CgoFiles) > 0 {
+ p.SFiles = append(p.SFiles, Sfiles...)
+ sort.Strings(p.SFiles)
+ }
+
+ return p, pkgerr
}
-// Clean removes the Script's Intermediate files.
-// It tries to remove every file and returns the first error it encounters.
-func (s *Script) Clean() (err os.Error) {
- // Reverse order so that directories get removed after the files they contain.
- for i := len(s.Intermediate) - 1; i >= 0; i-- {
- if e := os.Remove(s.Intermediate[i]); err == nil {
- err = e
- }
+func cleanImports(m map[string][]token.Position) ([]string, map[string][]token.Position) {
+ all := make([]string, 0, len(m))
+ for path := range m {
+ all = append(all, path)
}
- return
+ sort.Strings(all)
+ return all, m
+}
+
+// Import is shorthand for Default.Import.
+func Import(path, srcDir string, mode ImportMode) (*Package, error) {
+ return Default.Import(path, srcDir, mode)
}
-// Nuke removes the Script's Intermediate and Output files.
-// It tries to remove every file and returns the first error it encounters.
-func (s *Script) Nuke() (err os.Error) {
- // Reverse order so that directories get removed after the files they contain.
- for i := len(s.Output) - 1; i >= 0; i-- {
- if e := os.Remove(s.Output[i]); err == nil {
- err = e
+// ImportDir is shorthand for Default.ImportDir.
+func ImportDir(dir string, mode ImportMode) (*Package, error) {
+ return Default.ImportDir(dir, mode)
+}
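+
+// Typical use of the shorthands (a sketch; error handling elided):
+//
+//	p, _ := build.ImportDir(".", 0)
+//	fmt.Println(p.Name, p.GoFiles, p.Imports)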
+
+var slashslash = []byte("//")
+
+// shouldBuild reports whether it is okay to use this file.
+// The rule is that in the file's leading run of // comments
+// and blank lines, which must be followed by a blank line
+// (to avoid including a Go package clause doc comment),
+// lines beginning with '// +build' are taken as build directives.
+//
+// The file is accepted only if each such line lists something
+// matching the file. For example:
+//
+// // +build windows linux
+//
+// marks the file as applicable only on Windows and Linux.
+//
+func (ctxt *Context) shouldBuild(content []byte) bool {
+ // Pass 1. Identify leading run of // comments and blank lines,
+ // which must be followed by a blank line.
+ end := 0
+ p := content
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if len(line) == 0 { // Blank line
+ end = cap(content) - cap(line) // &line[0] - &content[0]
+ continue
+ }
+ if !bytes.HasPrefix(line, slashslash) { // Not comment line
+ break
}
}
- if e := s.Clean(); err == nil {
- err = e
+ content = content[:end]
+
+ // Pass 2. Process each line in the run.
+ p = content
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if bytes.HasPrefix(line, slashslash) {
+ line = bytes.TrimSpace(line[len(slashslash):])
+ if len(line) > 0 && line[0] == '+' {
+ // Looks like a comment +line.
+ f := strings.Fields(string(line))
+ if f[0] == "+build" {
+ ok := false
+ for _, tok := range f[1:] {
+ if ctxt.match(tok) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return false // this one doesn't match
+ }
+ }
+ }
+ }
}
- return
+ return true // everything matches
}
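+
+// As an illustration, shouldBuild accepts a file that begins
+//
+//	// Copyright notice.
+//	// +build linux darwin
+//
+//	package foo
+//
+// only when the context matches "linux" or "darwin": the +build line
+// lies in the leading comment run and is followed by a blank line.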
-// A Cmd describes an individual build command.
-type Cmd struct {
- Args []string // command-line
- Stdout string // write standard output to this file, "" is passthrough
- Dir string // working directory
- Env []string // environment
- Input []string // file paths (dependencies)
- Output []string // file paths
+// saveCgo saves the information from the #cgo lines in the import "C" comment.
+// These lines set CFLAGS and LDFLAGS and pkg-config directives that affect
+// the way cgo's C code is built.
+//
+// TODO(rsc): This duplicates code in cgo.
+// Once the dust settles, remove this code from cgo.
+func (ctxt *Context) saveCgo(filename string, di *Package, cg *ast.CommentGroup) error {
+ text := cg.Text()
+ for _, line := range strings.Split(text, "\n") {
+ orig := line
+
+ // Line is
+ // #cgo [GOOS/GOARCH...] LDFLAGS: stuff
+ //
+ line = strings.TrimSpace(line)
+ if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
+ continue
+ }
+
+ // Split at colon.
+ line = strings.TrimSpace(line[4:])
+ i := strings.Index(line, ":")
+ if i < 0 {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+ line, argstr := line[:i], line[i+1:]
+
+ // Parse GOOS/GOARCH stuff.
+ f := strings.Fields(line)
+ if len(f) < 1 {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+
+ cond, verb := f[:len(f)-1], f[len(f)-1]
+ if len(cond) > 0 {
+ ok := false
+ for _, c := range cond {
+ if ctxt.match(c) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ continue
+ }
+ }
+
+ args, err := splitQuoted(argstr)
+ if err != nil {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+ for _, arg := range args {
+ if !safeName(arg) {
+ return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg)
+ }
+ }
+
+ switch verb {
+ case "CFLAGS":
+ di.CgoCFLAGS = append(di.CgoCFLAGS, args...)
+ case "LDFLAGS":
+ di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...)
+ case "pkg-config":
+ di.CgoPkgConfig = append(di.CgoPkgConfig, args...)
+ default:
+ return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig)
+ }
+ }
+ return nil
}
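+
+// For example, the cgo comment line
+//
+//	#cgo linux LDFLAGS: -lm
+//
+// appends "-lm" to di.CgoLDFLAGS only when the context matches "linux".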
-func (c *Cmd) String() string {
- return strings.Join(c.Args, " ")
+var safeBytes = []byte("+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:")
+
+func safeName(s string) bool {
+ if s == "" {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; c < 0x80 && bytes.IndexByte(safeBytes, c) < 0 {
+ return false
+ }
+ }
+ return true
}
-// Run executes the Cmd.
-func (c *Cmd) Run() os.Error {
- if c.Args[0] == "mkdir" {
- for _, p := range c.Output {
- if err := os.MkdirAll(p, 0777); err != nil {
- return fmt.Errorf("command %q: %v", c, err)
+// splitQuoted splits the string s around each instance of one or more consecutive
+// white space characters while taking into account quotes and escaping, and
+// returns an array of substrings of s or an empty list if s contains only white space.
+// Single quotes and double quotes are recognized to prevent splitting within the
+// quoted region, and are removed from the resulting substrings. If a quote in s
+// isn't closed, err will be set and r will have the unclosed argument as the
+// last element. The backslash is used for escaping.
+//
+// For example, the following string:
+//
+// a b:"c d" 'e''f' "g\""
+//
+// Would be parsed as:
+//
+// []string{"a", "b:c d", "ef", `g"`}
+//
+func splitQuoted(s string) (r []string, err error) {
+ var args []string
+ arg := make([]rune, len(s))
+ escaped := false
+ quoted := false
+ quote := '\x00'
+ i := 0
+ for _, rune := range s {
+ switch {
+ case escaped:
+ escaped = false
+ case rune == '\\':
+ escaped = true
+ continue
+ case quote != '\x00':
+ if rune == quote {
+ quote = '\x00'
+ continue
+ }
+ case rune == '"' || rune == '\'':
+ quoted = true
+ quote = rune
+ continue
+ case unicode.IsSpace(rune):
+ if quoted || i > 0 {
+ quoted = false
+ args = append(args, string(arg[:i]))
+ i = 0
}
+ continue
}
- return nil
+ arg[i] = rune
+ i++
}
- out := new(bytes.Buffer)
- cmd := exec.Command(c.Args[0], c.Args[1:]...)
- cmd.Dir = c.Dir
- cmd.Env = c.Env
- cmd.Stdout = out
- cmd.Stderr = out
- if c.Stdout != "" {
- f, err := os.Create(c.Stdout)
- if err != nil {
- return err
+ if quoted || i > 0 {
+ args = append(args, string(arg[:i]))
+ }
+ if quote != 0 {
+ err = errors.New("unclosed quote")
+ } else if escaped {
+ err = errors.New("unfinished escaping")
+ }
+ return args, err
+}
+
+// match returns true if the name is one of:
+//
+// $GOOS
+// $GOARCH
+// cgo (if cgo is enabled)
+// !cgo (if cgo is disabled)
+// tag (if tag is listed in ctxt.BuildTags)
+// !tag (if tag is not listed in ctxt.BuildTags)
+// a comma-separated list of any of these
+//
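+// For example, with GOOS=linux, GOARCH=amd64, and cgo enabled:
+//
+//	ctxt.match("linux,amd64,cgo") == true
+//	ctxt.match("!windows")        == true
+//	ctxt.match("darwin")          == false
+//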
+func (ctxt *Context) match(name string) bool {
+ if name == "" {
+ return false
+ }
+ if i := strings.Index(name, ","); i >= 0 {
+ // comma-separated list
+ return ctxt.match(name[:i]) && ctxt.match(name[i+1:])
+ }
+ if strings.HasPrefix(name, "!!") { // bad syntax, reject always
+ return false
+ }
+ if strings.HasPrefix(name, "!") { // negation
+ return len(name) > 1 && !ctxt.match(name[1:])
+ }
+
+ // Tags must be letters, digits, underscores.
+ // Unlike in Go identifiers, all digits are fine (e.g., "386").
+ for _, c := range name {
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' {
+ return false
}
- defer f.Close()
- cmd.Stdout = f
}
- if err := cmd.Run(); err != nil {
- return fmt.Errorf("command %q: %v\n%v", c, err, out)
+
+ // special tags
+ if ctxt.CgoEnabled && name == "cgo" {
+ return true
}
- return nil
+ if name == ctxt.GOOS || name == ctxt.GOARCH {
+ return true
+ }
+
+ // other tags
+ for _, tag := range ctxt.BuildTags {
+ if tag == name {
+ return true
+ }
+ }
+
+ return false
+}
+
+// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH
+// suffix which does not match the current system.
+// The recognized name formats are:
+//
+// name_$(GOOS).*
+// name_$(GOARCH).*
+// name_$(GOOS)_$(GOARCH).*
+// name_$(GOOS)_test.*
+// name_$(GOARCH)_test.*
+// name_$(GOOS)_$(GOARCH)_test.*
+//
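+// For example, with GOOS=linux and GOARCH=amd64, the names
+// "x_linux.go", "x_amd64.s", and "x_linux_amd64_test.go" are good,
+// while "x_windows.go" and "x_arm.s" are not.
+//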
+func (ctxt *Context) goodOSArchFile(name string) bool {
+ if dot := strings.Index(name, "."); dot != -1 {
+ name = name[:dot]
+ }
+ l := strings.Split(name, "_")
+ if n := len(l); n > 0 && l[n-1] == "test" {
+ l = l[:n-1]
+ }
+ n := len(l)
+ if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] {
+ return l[n-2] == ctxt.GOOS && l[n-1] == ctxt.GOARCH
+ }
+ if n >= 1 && knownOS[l[n-1]] {
+ return l[n-1] == ctxt.GOOS
+ }
+ if n >= 1 && knownArch[l[n-1]] {
+ return l[n-1] == ctxt.GOARCH
+ }
+ return true
+}
+
+var knownOS = make(map[string]bool)
+var knownArch = make(map[string]bool)
+
+func init() {
+ for _, v := range strings.Fields(goosList) {
+ knownOS[v] = true
+ }
+ for _, v := range strings.Fields(goarchList) {
+ knownArch[v] = true
+ }
+}
+
+// ToolDir is the directory containing build tools.
+var ToolDir = filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH)
+
+// IsLocalImport reports whether the import path is
+// a local import path, like ".", "..", "./foo", or "../foo".
+func IsLocalImport(path string) bool {
+ return path == "." || path == ".." ||
+ strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../")
}
// ArchChar returns the architecture character for the given goarch.
// For example, ArchChar("amd64") returns "6".
-func ArchChar(goarch string) (string, os.Error) {
+func ArchChar(goarch string) (string, error) {
switch goarch {
case "386":
return "8", nil
@@ -254,191 +982,5 @@ func ArchChar(goarch string) (string, os.Error) {
case "arm":
return "5", nil
}
- return "", os.NewError("unsupported GOARCH " + goarch)
-}
-
-type build struct {
- script *Script
- path string
- obj string
- goarch string
- arch string
-}
-
-func (b *build) abs(file string) string {
- if filepath.IsAbs(file) {
- return file
- }
- return filepath.Join(b.path, file)
-}
-
-func (b *build) abss(file ...string) []string {
- s := make([]string, len(file))
- for i, f := range file {
- s[i] = b.abs(f)
- }
- return s
-}
-
-func (b *build) add(c Cmd) {
- b.script.Cmd = append(b.script.Cmd, &c)
-}
-
-func (b *build) mkdir(name string) {
- b.add(Cmd{
- Args: []string{"mkdir", "-p", name},
- Output: []string{name},
- })
-}
-
-func (b *build) gc(ofile string, gofiles ...string) {
- gc := b.arch + "g"
- args := append([]string{gc, "-o", ofile}, gcImportArgs...)
- args = append(args, gofiles...)
- b.add(Cmd{
- Args: args,
- Input: gofiles,
- Output: []string{ofile},
- })
-}
-
-func (b *build) asm(ofile string, sfile string) {
- asm := b.arch + "a"
- b.add(Cmd{
- Args: []string{asm, "-o", ofile, sfile},
- Input: []string{sfile},
- Output: []string{ofile},
- })
-}
-
-func (b *build) ld(targ string, ofiles ...string) {
- ld := b.arch + "l"
- args := append([]string{ld, "-o", targ}, ldImportArgs...)
- args = append(args, ofiles...)
- b.add(Cmd{
- Args: args,
- Input: ofiles,
- Output: []string{targ},
- })
-}
-
-func (b *build) gopack(targ string, ofiles ...string) {
- b.add(Cmd{
- Args: append([]string{"gopack", "grc", targ}, ofiles...),
- Input: ofiles,
- Output: []string{targ},
- })
-}
-
-func (b *build) cc(ofile string, cfiles ...string) {
- cc := b.arch + "c"
- dir := fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)
- inc := filepath.Join(runtime.GOROOT(), "pkg", dir)
- args := []string{cc, "-FVw", "-I", inc, "-o", ofile}
- b.add(Cmd{
- Args: append(args, cfiles...),
- Input: cfiles,
- Output: []string{ofile},
- })
-}
-
-func (b *build) gccCompile(ofile, cfile string) {
- b.add(Cmd{
- Args: b.gccArgs("-o", ofile, "-c", cfile),
- Input: []string{cfile},
- Output: []string{ofile},
- })
-}
-
-func (b *build) gccLink(ofile string, ofiles ...string) {
- b.add(Cmd{
- Args: append(b.gccArgs("-o", ofile), ofiles...),
- Input: ofiles,
- Output: []string{ofile},
- })
-}
-
-func (b *build) gccArgs(args ...string) []string {
- // TODO(adg): HOST_CC
- a := []string{"gcc", "-I", b.path, "-g", "-fPIC", "-O2"}
- switch b.arch {
- case "8":
- a = append(a, "-m32")
- case "6":
- a = append(a, "-m64")
- }
- return append(a, args...)
-}
-
-var cgoRe = regexp.MustCompile(`[/\\:]`)
-
-func (b *build) cgo(cgofiles, cgocfiles []string) (outGo, outObj []string) {
- // cgo
- // TODO(adg): CGOPKGPATH
- // TODO(adg): CGO_FLAGS
- gofiles := []string{b.obj + "_cgo_gotypes.go"}
- cfiles := []string{b.obj + "_cgo_main.c", b.obj + "_cgo_export.c"}
- for _, fn := range cgofiles {
- f := b.obj + cgoRe.ReplaceAllString(fn[:len(fn)-2], "_")
- gofiles = append(gofiles, f+"cgo1.go")
- cfiles = append(cfiles, f+"cgo2.c")
- }
- defunC := b.obj + "_cgo_defun.c"
- output := append([]string{defunC}, cfiles...)
- output = append(output, gofiles...)
- b.add(Cmd{
- Args: append([]string{"cgo", "--"}, cgofiles...),
- Dir: b.path,
- Env: append(os.Environ(), "GOARCH="+b.goarch),
- Input: cgofiles,
- Output: output,
- })
- outGo = append(outGo, gofiles...)
- exportH := filepath.Join(b.path, "_cgo_export.h")
- b.script.addIntermediate(defunC, exportH, b.obj+"_cgo_flags")
- b.script.addIntermediate(cfiles...)
-
- // cc _cgo_defun.c
- defunObj := b.obj + "_cgo_defun." + b.arch
- b.cc(defunObj, defunC)
- outObj = append(outObj, defunObj)
-
- // gcc
- linkobj := make([]string, 0, len(cfiles))
- for _, cfile := range cfiles {
- ofile := cfile[:len(cfile)-1] + "o"
- b.gccCompile(ofile, cfile)
- linkobj = append(linkobj, ofile)
- if !strings.HasSuffix(ofile, "_cgo_main.o") {
- outObj = append(outObj, ofile)
- } else {
- b.script.addIntermediate(ofile)
- }
- }
- for _, cfile := range cgocfiles {
- ofile := b.obj + cgoRe.ReplaceAllString(cfile[:len(cfile)-1], "_") + "o"
- b.gccCompile(ofile, cfile)
- linkobj = append(linkobj, ofile)
- outObj = append(outObj, ofile)
- }
- dynObj := b.obj + "_cgo_.o"
- b.gccLink(dynObj, linkobj...)
- b.script.addIntermediate(dynObj)
-
- // cgo -dynimport
- importC := b.obj + "_cgo_import.c"
- b.add(Cmd{
- Args: []string{"cgo", "-dynimport", dynObj},
- Stdout: importC,
- Input: []string{dynObj},
- Output: []string{importC},
- })
- b.script.addIntermediate(importC)
-
- // cc _cgo_import.ARCH
- importObj := b.obj + "_cgo_import." + b.arch
- b.cc(importObj, importC)
- outObj = append(outObj, importObj)
-
- return
+ return "", errors.New("unsupported GOARCH " + goarch)
}
diff --git a/src/pkg/go/build/build_test.go b/src/pkg/go/build/build_test.go
index e59d87672..560ebad5c 100644
--- a/src/pkg/go/build/build_test.go
+++ b/src/pkg/go/build/build_test.go
@@ -5,57 +5,73 @@
package build
import (
- "exec"
+ "os"
"path/filepath"
+ "runtime"
"testing"
)
-var buildPkgs = []string{
- "go/build/pkgtest",
- "go/build/cmdtest",
- "go/build/cgotest",
-}
-
-const cmdtestOutput = "3"
+func TestMatch(t *testing.T) {
+ ctxt := Default
+ what := "default"
+ match := func(tag string) {
+ if !ctxt.match(tag) {
+ t.Errorf("%s context should match %s, does not", what, tag)
+ }
+ }
+ nomatch := func(tag string) {
+ if ctxt.match(tag) {
+ t.Errorf("%s context should NOT match %s, does", what, tag)
+ }
+ }
-func TestBuild(t *testing.T) {
- for _, pkg := range buildPkgs {
- tree := Path[0] // Goroot
- dir := filepath.Join(tree.SrcDir(), pkg)
+ match(runtime.GOOS + "," + runtime.GOARCH)
+ match(runtime.GOOS + "," + runtime.GOARCH + ",!foo")
+ nomatch(runtime.GOOS + "," + runtime.GOARCH + ",foo")
- info, err := ScanDir(dir, true)
- if err != nil {
- t.Error("ScanDir:", err)
- continue
- }
+ what = "modified"
+ ctxt.BuildTags = []string{"foo"}
+ match(runtime.GOOS + "," + runtime.GOARCH)
+ match(runtime.GOOS + "," + runtime.GOARCH + ",foo")
+ nomatch(runtime.GOOS + "," + runtime.GOARCH + ",!foo")
+ match(runtime.GOOS + "," + runtime.GOARCH + ",!bar")
+ nomatch(runtime.GOOS + "," + runtime.GOARCH + ",bar")
+ nomatch("!")
+}
- s, err := Build(tree, pkg, info)
- if err != nil {
- t.Error("Build:", err)
- continue
- }
+func TestDotSlashImport(t *testing.T) {
+ p, err := ImportDir("testdata/other", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(p.Imports) != 1 || p.Imports[0] != "./file" {
+ t.Fatalf("testdata/other: Imports=%v, want [./file]", p.Imports)
+ }
- if err := s.Run(); err != nil {
- t.Error("Run:", err)
- continue
- }
+ p1, err := Import("./file", "testdata/other", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p1.Name != "file" {
+ t.Fatalf("./file: Name=%q, want %q", p1.Name, "file")
+ }
+ dir := filepath.Clean("testdata/other/file") // Clean to use \ on Windows
+ if p1.Dir != dir {
+ t.Fatalf("./file: Dir=%q, want %q", p1.Name, dir)
+ }
+}
- if pkg == "go/build/cmdtest" {
- bin := s.Output[0]
- b, err := exec.Command(bin).CombinedOutput()
- if err != nil {
- t.Errorf("exec: %s: %v", bin, err)
- continue
- }
- if string(b) != cmdtestOutput {
- t.Errorf("cmdtest output: %s want: %s", b, cmdtestOutput)
- }
- }
+func TestLocalDirectory(t *testing.T) {
+ cwd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
- defer func(s *Script) {
- if err := s.Nuke(); err != nil {
- t.Errorf("nuking: %v", err)
- }
- }(s)
+ p, err := ImportDir(cwd, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p.ImportPath != "go/build" {
+ t.Fatalf("ImportPath=%q, want %q", p.ImportPath, "go/build")
}
}
diff --git a/src/pkg/go/build/cgotest/cgotest.go b/src/pkg/go/build/cgotest/cgotest.go
deleted file mode 100644
index 93bbf0688..000000000
--- a/src/pkg/go/build/cgotest/cgotest.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cgotest
-
-/*
-char* greeting = "hello, world";
-*/
-// #include "cgotest.h"
-import "C"
-import "unsafe"
-
-var Greeting = C.GoString(C.greeting)
-
-func DoAdd(x, y int) (sum int) {
- C.Add(C.int(x), C.int(y), (*C.int)(unsafe.Pointer(&sum)))
- return
-}
diff --git a/src/pkg/go/build/deps_test.go b/src/pkg/go/build/deps_test.go
new file mode 100644
index 000000000..4e9f32a03
--- /dev/null
+++ b/src/pkg/go/build/deps_test.go
@@ -0,0 +1,424 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file exercises the import parser but also checks that
+// some low-level packages do not have new dependencies added.
+
+package build_test
+
+import (
+ "go/build"
+ "sort"
+ "testing"
+)
+
+// pkgDeps defines the expected dependencies between packages in
+// the Go source tree. It is a statement of policy.
+// Changes should not be made to this map without prior discussion.
+//
+// The map contains two kinds of entries:
+// 1) Lower-case keys are standard import paths and list the
+// allowed imports in that package.
+// 2) Upper-case keys define aliases for package sets, which can then
+// be used as dependencies by other rules.
+//
+// DO NOT CHANGE THIS DATA TO FIX BUILDS.
+//
+var pkgDeps = map[string][]string{
+ // L0 is the lowest level, core, nearly unavoidable packages.
+ "errors": {},
+ "io": {"errors", "sync"},
+ "runtime": {"unsafe"},
+ "sync": {"sync/atomic"},
+ "sync/atomic": {"unsafe"},
+ "unsafe": {},
+
+ "L0": {
+ "errors",
+ "io",
+ "runtime",
+ "sync",
+ "sync/atomic",
+ "unsafe",
+ },
+
+ // L1 adds simple functions and strings processing,
+ // but not Unicode tables.
+ "math": {"unsafe"},
+ "math/cmplx": {"math"},
+ "math/rand": {"L0", "math"},
+ "sort": {"math"},
+ "strconv": {"L0", "unicode/utf8", "math"},
+ "unicode/utf16": {},
+ "unicode/utf8": {},
+
+ "L1": {
+ "L0",
+ "math",
+ "math/cmplx",
+ "math/rand",
+ "sort",
+ "strconv",
+ "unicode/utf16",
+ "unicode/utf8",
+ },
+
+ // L2 adds Unicode and strings processing.
+ "bufio": {"L0", "unicode/utf8", "bytes"},
+ "bytes": {"L0", "unicode", "unicode/utf8"},
+ "path": {"L0", "unicode/utf8", "strings"},
+ "strings": {"L0", "unicode", "unicode/utf8"},
+ "unicode": {},
+
+ "L2": {
+ "L1",
+ "bufio",
+ "bytes",
+ "path",
+ "strings",
+ "unicode",
+ },
+
+ // L3 adds reflection and some basic utility packages
+ // and interface definitions, but nothing that makes
+ // system calls.
+ "crypto": {"L2", "hash"}, // interfaces
+ "crypto/cipher": {"L2"}, // interfaces
+ "encoding/base32": {"L2"},
+ "encoding/base64": {"L2"},
+ "encoding/binary": {"L2", "reflect"},
+ "hash": {"L2"}, // interfaces
+ "hash/adler32": {"L2", "hash"},
+ "hash/crc32": {"L2", "hash"},
+ "hash/crc64": {"L2", "hash"},
+ "hash/fnv": {"L2", "hash"},
+ "image": {"L2", "image/color"}, // interfaces
+ "image/color": {"L2"}, // interfaces
+ "reflect": {"L2"},
+
+ "L3": {
+ "L2",
+ "crypto",
+ "crypto/cipher",
+ "encoding/base32",
+ "encoding/base64",
+ "encoding/binary",
+ "hash",
+ "hash/adler32",
+ "hash/crc32",
+ "hash/crc64",
+ "hash/fnv",
+ "image",
+ "image/color",
+ "reflect",
+ },
+
+ // End of linear dependency definitions.
+
+ // Operating system access.
+ "syscall": {"L0", "unicode/utf16"},
+ "time": {"L0", "syscall"},
+ "os": {"L1", "os", "syscall", "time"},
+ "path/filepath": {"L2", "os", "syscall"},
+ "io/ioutil": {"L2", "os", "path/filepath", "time"},
+ "os/exec": {"L2", "os", "syscall"},
+ "os/signal": {"L2", "os", "syscall"},
+
+ // OS enables basic operating system functionality,
+ // but not direct use of package syscall, nor os/signal.
+ "OS": {
+ "io/ioutil",
+ "os",
+ "os/exec",
+ "path/filepath",
+ "time",
+ },
+
+ // Formatted I/O: few dependencies (L1) but we must add reflect.
+ "fmt": {"L1", "os", "reflect"},
+ "log": {"L1", "os", "fmt", "time"},
+
+ // Packages used by testing must be low-level (L2+fmt).
+ "regexp": {"L2", "regexp/syntax"},
+ "regexp/syntax": {"L2"},
+ "runtime/debug": {"L2", "fmt", "io/ioutil", "os"},
+ "runtime/pprof": {"L2", "fmt", "text/tabwriter"},
+ "text/tabwriter": {"L2"},
+
+ "testing": {"L2", "flag", "fmt", "os", "runtime/pprof", "time"},
+ "testing/iotest": {"L2", "log"},
+ "testing/quick": {"L2", "flag", "fmt", "reflect"},
+
+ // L4 is defined as L3+fmt+log+time, because in general once
+ // you're using L3 packages, use of fmt, log, or time is not a big deal.
+ "L4": {
+ "L3",
+ "fmt",
+ "log",
+ "time",
+ },
+
+ // Go parser.
+ "go/ast": {"L4", "OS", "go/scanner", "go/token"},
+ "go/doc": {"L4", "go/ast", "go/token", "regexp", "text/template"},
+ "go/parser": {"L4", "OS", "go/ast", "go/scanner", "go/token"},
+ "go/printer": {"L4", "OS", "go/ast", "go/scanner", "go/token", "text/tabwriter"},
+ "go/scanner": {"L4", "OS", "go/token"},
+ "go/token": {"L4"},
+
+ "GOPARSER": {
+ "go/ast",
+ "go/doc",
+ "go/parser",
+ "go/printer",
+ "go/scanner",
+ "go/token",
+ },
+
+ // One of a kind.
+ "archive/tar": {"L4", "OS"},
+ "archive/zip": {"L4", "OS", "compress/flate"},
+ "compress/bzip2": {"L4"},
+ "compress/flate": {"L4"},
+ "compress/gzip": {"L4", "compress/flate"},
+ "compress/lzw": {"L4"},
+ "compress/zlib": {"L4", "compress/flate"},
+ "database/sql": {"L4", "database/sql/driver"},
+ "database/sql/driver": {"L4", "time"},
+ "debug/dwarf": {"L4"},
+ "debug/elf": {"L4", "OS", "debug/dwarf"},
+ "debug/gosym": {"L4"},
+ "debug/macho": {"L4", "OS", "debug/dwarf"},
+ "debug/pe": {"L4", "OS", "debug/dwarf"},
+ "encoding/ascii85": {"L4"},
+ "encoding/asn1": {"L4", "math/big"},
+ "encoding/csv": {"L4"},
+ "encoding/gob": {"L4", "OS"},
+ "encoding/hex": {"L4"},
+ "encoding/json": {"L4"},
+ "encoding/pem": {"L4"},
+ "encoding/xml": {"L4"},
+ "flag": {"L4", "OS"},
+ "go/build": {"L4", "OS", "GOPARSER"},
+ "html": {"L4"},
+ "image/draw": {"L4"},
+ "image/gif": {"L4", "compress/lzw"},
+ "image/jpeg": {"L4"},
+ "image/png": {"L4", "compress/zlib"},
+ "index/suffixarray": {"L4", "regexp"},
+ "math/big": {"L4"},
+ "mime": {"L4", "OS", "syscall"},
+ "net/url": {"L4"},
+ "text/scanner": {"L4", "OS"},
+ "text/template/parse": {"L4"},
+
+ "html/template": {
+ "L4", "OS", "encoding/json", "html", "text/template",
+ "text/template/parse",
+ },
+ "text/template": {
+ "L4", "OS", "net/url", "text/template/parse",
+ },
+
+ // Cgo.
+ "runtime/cgo": {"L0", "C"},
+ "CGO": {"C", "runtime/cgo"},
+
+ // Fake entry to satisfy the pseudo-import "C"
+ // that shows up in programs that use cgo.
+ "C": {},
+
+ "os/user": {"L4", "CGO", "syscall"},
+
+ // Basic networking.
+ // Because net must be used by any package that wants to
+ // do networking portably, it must have a small dependency set: just L1+basic os.
+ "net": {"L1", "CGO", "os", "syscall", "time"},
+
+ // NET enables use of basic network-related packages.
+ "NET": {
+ "net",
+ "mime",
+ "net/textproto",
+ "net/url",
+ },
+
+ // Uses of networking.
+ "log/syslog": {"L4", "OS", "net"},
+ "net/mail": {"L4", "NET", "OS"},
+ "net/textproto": {"L4", "OS", "net"},
+
+ // Core crypto.
+ "crypto/aes": {"L3"},
+ "crypto/des": {"L3"},
+ "crypto/hmac": {"L3"},
+ "crypto/md5": {"L3"},
+ "crypto/rc4": {"L3"},
+ "crypto/sha1": {"L3"},
+ "crypto/sha256": {"L3"},
+ "crypto/sha512": {"L3"},
+ "crypto/subtle": {"L3"},
+
+ "CRYPTO": {
+ "crypto/aes",
+ "crypto/des",
+ "crypto/hmac",
+ "crypto/md5",
+ "crypto/rc4",
+ "crypto/sha1",
+ "crypto/sha256",
+ "crypto/sha512",
+ "crypto/subtle",
+ },
+
+ // Random byte, number generation.
+ // This would be part of core crypto except that it imports
+ // math/big, which imports fmt.
+ "crypto/rand": {"L4", "CRYPTO", "OS", "math/big", "syscall"},
+
+ // Mathematical crypto: dependencies on fmt (L4) and math/big.
+ // We could avoid some of the fmt, but math/big imports fmt anyway.
+ "crypto/dsa": {"L4", "CRYPTO", "math/big"},
+ "crypto/ecdsa": {"L4", "CRYPTO", "crypto/elliptic", "math/big"},
+ "crypto/elliptic": {"L4", "CRYPTO", "math/big"},
+ "crypto/rsa": {"L4", "CRYPTO", "crypto/rand", "math/big"},
+
+ "CRYPTO-MATH": {
+ "CRYPTO",
+ "crypto/dsa",
+ "crypto/ecdsa",
+ "crypto/elliptic",
+ "crypto/rand",
+ "crypto/rsa",
+ "encoding/asn1",
+ "math/big",
+ },
+
+ // SSL/TLS.
+ "crypto/tls": {
+ "L4", "CRYPTO-MATH", "CGO", "OS",
+ "crypto/x509", "encoding/pem", "net", "syscall",
+ },
+ "crypto/x509": {"L4", "CRYPTO-MATH", "OS", "CGO", "crypto/x509/pkix", "encoding/pem", "syscall"},
+ "crypto/x509/pkix": {"L4", "CRYPTO-MATH"},
+
+ // Simple net+crypto-aware packages.
+ "mime/multipart": {"L4", "OS", "mime", "crypto/rand", "net/textproto"},
+ "net/smtp": {"L4", "CRYPTO", "NET", "crypto/tls"},
+
+ // HTTP, kingpin of dependencies.
+ "net/http": {
+ "L4", "NET", "OS",
+ "compress/gzip", "crypto/tls", "mime/multipart", "runtime/debug",
+ },
+
+ // HTTP-using packages.
+ "expvar": {"L4", "OS", "encoding/json", "net/http"},
+ "net/http/cgi": {"L4", "NET", "OS", "crypto/tls", "net/http", "regexp"},
+ "net/http/fcgi": {"L4", "NET", "OS", "net/http", "net/http/cgi"},
+ "net/http/httptest": {"L4", "NET", "OS", "crypto/tls", "flag", "net/http"},
+ "net/http/httputil": {"L4", "NET", "OS", "net/http"},
+ "net/http/pprof": {"L4", "OS", "html/template", "net/http", "runtime/pprof"},
+ "net/rpc": {"L4", "NET", "encoding/gob", "net/http", "text/template"},
+ "net/rpc/jsonrpc": {"L4", "NET", "encoding/json", "net/rpc"},
+}
+
+// isMacro reports whether p is a package dependency macro
+// (uppercase name).
+func isMacro(p string) bool {
+ return 'A' <= p[0] && p[0] <= 'Z'
+}
+
+func allowed(pkg string) map[string]bool {
+ m := map[string]bool{}
+ var allow func(string)
+ allow = func(p string) {
+ if m[p] {
+ return
+ }
+ m[p] = true // set even for macros, to avoid loop on cycle
+
+ // Upper-case names are macro-expanded.
+ if isMacro(p) {
+ for _, pp := range pkgDeps[p] {
+ allow(pp)
+ }
+ }
+ }
+ for _, pp := range pkgDeps[pkg] {
+ allow(pp)
+ }
+ return m
+}
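+
+// For example, allowed("go/build") expands the "L4", "OS", and
+// "GOPARSER" macros from pkgDeps into the full set of import paths
+// that go/build is permitted to use.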
+
+var bools = []bool{false, true}
+var geese = []string{"darwin", "freebsd", "linux", "netbsd", "openbsd", "plan9", "windows"}
+var goarches = []string{"386", "amd64", "arm"}
+
+type osPkg struct {
+ goos, pkg string
+}
+
+// allowedErrors are the operating systems and packages known to contain errors
+// (currently just "no Go source files")
+var allowedErrors = map[osPkg]bool{
+ osPkg{"windows", "log/syslog"}: true,
+ osPkg{"plan9", "log/syslog"}: true,
+}
+
+func TestDependencies(t *testing.T) {
+ var all []string
+
+ for k := range pkgDeps {
+ all = append(all, k)
+ }
+ sort.Strings(all)
+
+ ctxt := build.Default
+ test := func(mustImport bool) {
+ for _, pkg := range all {
+ if isMacro(pkg) {
+ continue
+ }
+ p, err := ctxt.Import(pkg, "", 0)
+ if err != nil {
+ if allowedErrors[osPkg{ctxt.GOOS, pkg}] {
+ continue
+ }
+ // Some of the combinations we try might not
+ // be reasonable (like arm,plan9,cgo), so ignore
+ // errors for the auto-generated combinations.
+ if !mustImport {
+ continue
+ }
+ t.Errorf("%s/%s/cgo=%v %v", ctxt.GOOS, ctxt.GOARCH, ctxt.CgoEnabled, err)
+ continue
+ }
+ ok := allowed(pkg)
+ var bad []string
+ for _, imp := range p.Imports {
+ if !ok[imp] {
+ bad = append(bad, imp)
+ }
+ }
+ if bad != nil {
+ t.Errorf("%s/%s/cgo=%v unexpected dependency: %s imports %v", ctxt.GOOS, ctxt.GOARCH, ctxt.CgoEnabled, pkg, bad)
+ }
+ }
+ }
+ test(true)
+
+ if testing.Short() {
+ t.Logf("skipping other systems")
+ return
+ }
+
+ for _, ctxt.GOOS = range geese {
+ for _, ctxt.GOARCH = range goarches {
+ for _, ctxt.CgoEnabled = range bools {
+ test(false)
+ }
+ }
+ }
+}
diff --git a/src/pkg/go/build/dir.go b/src/pkg/go/build/dir.go
deleted file mode 100644
index e0000b534..000000000
--- a/src/pkg/go/build/dir.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package build
-
-import (
- "go/parser"
- "go/token"
- "log"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "runtime"
-)
-
-type DirInfo struct {
- GoFiles []string // .go files in dir (excluding CgoFiles)
- CgoFiles []string // .go files that import "C"
- CFiles []string // .c files in dir
- SFiles []string // .s files in dir
- Imports []string // All packages imported by goFiles
- PkgName string // Name of package in dir
-}
-
-func (d *DirInfo) IsCommand() bool {
- return d.PkgName == "main"
-}
-
-// ScanDir returns a structure with details about the Go content found
-// in the given directory. The file lists exclude:
-//
-// - files in package main (unless allowMain is true)
-// - files in package documentation
-// - files ending in _test.go
-// - files starting with _ or .
-//
-// Only files that satisfy the goodOSArch function are included.
-func ScanDir(dir string, allowMain bool) (info *DirInfo, err os.Error) {
- f, err := os.Open(dir)
- if err != nil {
- return nil, err
- }
- dirs, err := f.Readdir(-1)
- f.Close()
- if err != nil {
- return nil, err
- }
-
- var di DirInfo
- imported := make(map[string]bool)
- fset := token.NewFileSet()
- for i := range dirs {
- d := &dirs[i]
- if strings.HasPrefix(d.Name, "_") ||
- strings.HasPrefix(d.Name, ".") {
- continue
- }
- if !goodOSArch(d.Name) {
- continue
- }
-
- switch filepath.Ext(d.Name) {
- case ".go":
- if strings.HasSuffix(d.Name, "_test.go") {
- continue
- }
- case ".c":
- di.CFiles = append(di.CFiles, d.Name)
- continue
- case ".s":
- di.SFiles = append(di.SFiles, d.Name)
- continue
- default:
- continue
- }
-
- filename := filepath.Join(dir, d.Name)
- pf, err := parser.ParseFile(fset, filename, nil, parser.ImportsOnly)
- if err != nil {
- return nil, err
- }
- s := string(pf.Name.Name)
- if s == "main" && !allowMain {
- continue
- }
- if s == "documentation" {
- continue
- }
- if di.PkgName == "" {
- di.PkgName = s
- } else if di.PkgName != s {
- // Only if all files in the directory are in package main
- // do we return PkgName=="main".
- // A mix of main and another package reverts
- // to the original (allowMain=false) behaviour.
- if s == "main" || di.PkgName == "main" {
- return ScanDir(dir, false)
- }
- return nil, os.NewError("multiple package names in " + dir)
- }
- isCgo := false
- for _, spec := range pf.Imports {
- quoted := string(spec.Path.Value)
- path, err := strconv.Unquote(quoted)
- if err != nil {
- log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
- }
- imported[path] = true
- if path == "C" {
- isCgo = true
- }
- }
- if isCgo {
- di.CgoFiles = append(di.CgoFiles, d.Name)
- } else {
- di.GoFiles = append(di.GoFiles, d.Name)
- }
- }
- di.Imports = make([]string, len(imported))
- i := 0
- for p := range imported {
- di.Imports[i] = p
- i++
- }
- return &di, nil
-}
-
-// goodOSArch returns false if the filename contains a $GOOS or $GOARCH
-// suffix which does not match the current system.
-// The recognized filename formats are:
-//
-// name_$(GOOS).*
-// name_$(GOARCH).*
-// name_$(GOOS)_$(GOARCH).*
-//
-func goodOSArch(filename string) bool {
- if dot := strings.Index(filename, "."); dot != -1 {
- filename = filename[:dot]
- }
- l := strings.Split(filename, "_")
- n := len(l)
- if n == 0 {
- return true
- }
- if good, known := goodOS[l[n-1]]; known {
- return good
- }
- if good, known := goodArch[l[n-1]]; known {
- if !good || n < 2 {
- return false
- }
- good, known = goodOS[l[n-2]]
- return good || !known
- }
- return true
-}
-
-var goodOS = make(map[string]bool)
-var goodArch = make(map[string]bool)
-
-func init() {
- goodOS = make(map[string]bool)
- goodArch = make(map[string]bool)
- for _, v := range strings.Fields(goosList) {
- goodOS[v] = v == runtime.GOOS
- }
- for _, v := range strings.Fields(goarchList) {
- goodArch[v] = v == runtime.GOARCH
- }
-}
diff --git a/src/pkg/go/build/doc.go b/src/pkg/go/build/doc.go
new file mode 100644
index 000000000..67c26ac7f
--- /dev/null
+++ b/src/pkg/go/build/doc.go
@@ -0,0 +1,109 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package build gathers information about Go packages.
+//
+// Go Path
+//
+// The Go path is a list of directory trees containing Go source code.
+// It is consulted to resolve imports that cannot be found in the standard
+// Go tree. The default path is the value of the GOPATH environment
+// variable, interpreted as a path list appropriate to the operating system
+// (on Unix, the variable is a colon-separated string;
+// on Windows, a semicolon-separated string;
+// on Plan 9, a list).
+//
+// Each directory listed in the Go path must have a prescribed structure:
+//
+// The src/ directory holds source code. The path below 'src' determines
+// the import path or executable name.
+//
+// The pkg/ directory holds installed package objects.
+// As in the Go tree, each target operating system and
+// architecture pair has its own subdirectory of pkg
+// (pkg/GOOS_GOARCH).
+//
+// If DIR is a directory listed in the Go path, a package with
+// source in DIR/src/foo/bar can be imported as "foo/bar" and
+// has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a"
+// (or, for gccgo, "DIR/pkg/gccgo/foo/libbar.a").
+//
+// The bin/ directory holds compiled commands.
+// Each command is named for its source directory, but only
+// using the final element, not the entire path. That is, the
+// command with source in DIR/src/foo/quux is installed into
+// DIR/bin/quux, not DIR/bin/foo/quux. The foo/ is stripped
+// so that you can add DIR/bin to your PATH to get at the
+// installed commands.
+//
+// Here's an example directory layout:
+//
+// GOPATH=/home/user/gocode
+//
+// /home/user/gocode/
+// src/
+// foo/
+// bar/ (go code in package bar)
+// x.go
+// quux/ (go code in package main)
+// y.go
+// bin/
+// quux (installed command)
+// pkg/
+// linux_amd64/
+// foo/
+// bar.a (installed package object)
+//
+// Build Constraints
+//
+// A build constraint is a line comment beginning with the directive +build
+// that lists the conditions under which a file should be included in the package.
+// Constraints may appear in any kind of source file (not just Go), but
+// they must appear near the top of the file, preceded
+// only by blank lines and other line comments.
+//
+// A build constraint is evaluated as the OR of space-separated options;
+// each option evaluates as the AND of its comma-separated terms;
+// and each term is an alphanumeric word or, preceded by !, its negation.
+// That is, the build constraint:
+//
+// // +build linux,386 darwin,!cgo
+//
+// corresponds to the boolean formula:
+//
+// (linux AND 386) OR (darwin AND (NOT cgo))
+//
+// During a particular build, the following words are satisfied:
+//
+// - the target operating system, as spelled by runtime.GOOS
+// - the target architecture, as spelled by runtime.GOARCH
+// - "cgo", if ctxt.CgoEnabled is true
+// - any additional words listed in ctxt.BuildTags
+//
+// If a file's name, after stripping the extension and a possible _test suffix,
+// matches *_GOOS, *_GOARCH, or *_GOOS_GOARCH for any known operating
+// system and architecture values, then the file is considered to have an implicit
+// build constraint requiring those terms.
+//
+// To keep a file from being considered for the build:
+//
+// // +build ignore
+//
+// (any other unsatisfied word will work as well, but ``ignore'' is conventional.)
+//
+// To build a file only when using cgo, and only on Linux and OS X:
+//
+// // +build linux,cgo darwin,cgo
+//
+// Such a file is usually paired with another file implementing the
+// default functionality for other systems, which in this case would
+// carry the constraint:
+//
+// // +build !linux !darwin !cgo
+//
+// Naming a file dns_windows.go will cause it to be included only when
+// building the package for Windows; similarly, math_386.s will be included
+// only when building the package for 32-bit x86.
+//
+package build
diff --git a/src/pkg/go/build/path.go b/src/pkg/go/build/path.go
deleted file mode 100644
index e39b5f8fa..000000000
--- a/src/pkg/go/build/path.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package build
-
-import (
- "fmt"
- "log"
- "os"
- "path/filepath"
- "runtime"
-)
-
-// Path is a validated list of Trees derived from $GOROOT and $GOPATH at init.
-var Path []*Tree
-
-// Tree describes a Go source tree, either $GOROOT or one from $GOPATH.
-type Tree struct {
- Path string
- Goroot bool
-}
-
-func newTree(p string) (*Tree, os.Error) {
- if !filepath.IsAbs(p) {
- return nil, os.NewError("must be absolute")
- }
- ep, err := filepath.EvalSymlinks(p)
- if err != nil {
- return nil, err
- }
- return &Tree{Path: ep}, nil
-}
-
-// SrcDir returns the tree's package source directory.
-func (t *Tree) SrcDir() string {
- if t.Goroot {
- return filepath.Join(t.Path, "src", "pkg")
- }
- return filepath.Join(t.Path, "src")
-}
-
-// PkgDir returns the tree's package object directory.
-func (t *Tree) PkgDir() string {
- goos, goarch := runtime.GOOS, runtime.GOARCH
- if e := os.Getenv("GOOS"); e != "" {
- goos = e
- }
- if e := os.Getenv("GOARCH"); e != "" {
- goarch = e
- }
- return filepath.Join(t.Path, "pkg", goos+"_"+goarch)
-}
-
-// BinDir returns the tree's binary executable directory.
-func (t *Tree) BinDir() string {
- if t.Goroot {
- if gobin := os.Getenv("GOBIN"); gobin != "" {
- return gobin
- }
- }
- return filepath.Join(t.Path, "bin")
-}
-
-// HasSrc returns whether the given package's
-// source can be found inside this Tree.
-func (t *Tree) HasSrc(pkg string) bool {
- fi, err := os.Stat(filepath.Join(t.SrcDir(), pkg))
- if err != nil {
- return false
- }
- return fi.IsDirectory()
-}
-
-// HasPkg returns whether the given package's
-// object file can be found inside this Tree.
-func (t *Tree) HasPkg(pkg string) bool {
- fi, err := os.Stat(filepath.Join(t.PkgDir(), pkg+".a"))
- if err != nil {
- return false
- }
- return fi.IsRegular()
- // TODO(adg): check object version is consistent
-}
-
-var (
- ErrNotFound = os.NewError("go/build: package could not be found locally")
- ErrTreeNotFound = os.NewError("go/build: no valid GOROOT or GOPATH could be found")
-)
-
-// FindTree takes an import or filesystem path and returns the
-// tree where the package source should be and the package import path.
-func FindTree(path string) (tree *Tree, pkg string, err os.Error) {
- if isLocalPath(path) {
- if path, err = filepath.Abs(path); err != nil {
- return
- }
- if path, err = filepath.EvalSymlinks(path); err != nil {
- return
- }
- for _, t := range Path {
- tpath := t.SrcDir() + string(filepath.Separator)
- if !filepath.HasPrefix(path, tpath) {
- continue
- }
- tree = t
- pkg = path[len(tpath):]
- return
- }
- err = fmt.Errorf("path %q not inside a GOPATH", path)
- return
- }
- tree = defaultTree
- pkg = path
- for _, t := range Path {
- if t.HasSrc(pkg) {
- tree = t
- return
- }
- }
- if tree == nil {
- err = ErrTreeNotFound
- } else {
- err = ErrNotFound
- }
- return
-}
-
-// isLocalPath returns whether the given path is local (/foo ./foo ../foo . ..)
-// Windows paths that starts with drive letter (c:\foo c:foo) are considered local.
-func isLocalPath(s string) bool {
- const sep = string(filepath.Separator)
- return s == "." || s == ".." ||
- filepath.HasPrefix(s, sep) ||
- filepath.HasPrefix(s, "."+sep) || filepath.HasPrefix(s, ".."+sep) ||
- filepath.VolumeName(s) != ""
-}
-
-var (
- // argument lists used by the build's gc and ld methods
- gcImportArgs []string
- ldImportArgs []string
-
- // default tree for remote packages
- defaultTree *Tree
-)
-
-// set up Path: parse and validate GOROOT and GOPATH variables
-func init() {
- root := runtime.GOROOT()
- t, err := newTree(root)
- if err != nil {
- log.Printf("go/build: invalid GOROOT %q: %v", root, err)
- } else {
- t.Goroot = true
- Path = []*Tree{t}
- }
-
- for _, p := range filepath.SplitList(os.Getenv("GOPATH")) {
- if p == "" {
- continue
- }
- t, err := newTree(p)
- if err != nil {
- log.Printf("go/build: invalid GOPATH %q: %v", p, err)
- continue
- }
- Path = append(Path, t)
- gcImportArgs = append(gcImportArgs, "-I", t.PkgDir())
- ldImportArgs = append(ldImportArgs, "-L", t.PkgDir())
-
- // select first GOPATH entry as default
- if defaultTree == nil {
- defaultTree = t
- }
- }
-
- // use GOROOT if no valid GOPATH specified
- if defaultTree == nil && len(Path) > 0 {
- defaultTree = Path[0]
- }
-}
diff --git a/src/pkg/go/build/pkgtest/pkgtest.go b/src/pkg/go/build/pkgtest/pkgtest.go
deleted file mode 100644
index 9322f5ebd..000000000
--- a/src/pkg/go/build/pkgtest/pkgtest.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package pkgtest
-
-func Foo() {}
-
-func Sqrt(x float64) float64
diff --git a/src/pkg/go/build/pkgtest/sqrt_386.s b/src/pkg/go/build/pkgtest/sqrt_386.s
deleted file mode 100644
index d0a428d52..000000000
--- a/src/pkg/go/build/pkgtest/sqrt_386.s
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// func Sqrt(x float64) float64
-TEXT ·Sqrt(SB),7,$0
- FMOVD x+0(FP),F0
- FSQRT
- FMOVDP F0,r+8(FP)
- RET
diff --git a/src/pkg/go/build/pkgtest/sqrt_amd64.s b/src/pkg/go/build/pkgtest/sqrt_amd64.s
deleted file mode 100644
index f5b329e70..000000000
--- a/src/pkg/go/build/pkgtest/sqrt_amd64.s
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// func Sqrt(x float64) float64
-TEXT ·Sqrt(SB),7,$0
- SQRTSD x+0(FP), X0
- MOVSD X0, r+8(FP)
- RET
diff --git a/src/pkg/go/build/pkgtest/sqrt_arm.s b/src/pkg/go/build/pkgtest/sqrt_arm.s
deleted file mode 100644
index befbb8a89..000000000
--- a/src/pkg/go/build/pkgtest/sqrt_arm.s
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// func Sqrt(x float64) float64
-TEXT ·Sqrt(SB),7,$0
- MOVD x+0(FP),F0
- SQRTD F0,F0
- MOVD F0,r+8(FP)
- RET
diff --git a/src/pkg/go/build/cmdtest/main.go b/src/pkg/go/build/syslist.go
index bed4f485a..ea21f3c74 100644
--- a/src/pkg/go/build/cmdtest/main.go
+++ b/src/pkg/go/build/syslist.go
@@ -2,11 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package main
+package build
-import "go/build/pkgtest"
-
-func main() {
- pkgtest.Foo()
- print(int(pkgtest.Sqrt(9)))
-}
+const goosList = "darwin freebsd linux netbsd openbsd plan9 windows "
+const goarchList = "386 amd64 arm "
diff --git a/src/pkg/go/build/syslist_test.go b/src/pkg/go/build/syslist_test.go
index eb0e5dcb6..9157faf8c 100644
--- a/src/pkg/go/build/syslist_test.go
+++ b/src/pkg/go/build/syslist_test.go
@@ -55,8 +55,8 @@ var tests = []GoodFileTest{
func TestGoodOSArch(t *testing.T) {
for _, test := range tests {
- if goodOSArch(test.name) != test.result {
- t.Fatalf("goodOSArch(%q) != %v", test.name, test.result)
+ if Default.goodOSArchFile(test.name) != test.result {
+ t.Fatalf("goodOSArchFile(%q) != %v", test.name, test.result)
}
}
}
diff --git a/src/pkg/go/build/testdata/other/file/file.go b/src/pkg/go/build/testdata/other/file/file.go
new file mode 100644
index 000000000..bbfd3e9e5
--- /dev/null
+++ b/src/pkg/go/build/testdata/other/file/file.go
@@ -0,0 +1,5 @@
+// Test data - not compiled.
+
+package file
+
+func F() {}
diff --git a/src/pkg/go/build/testdata/other/main.go b/src/pkg/go/build/testdata/other/main.go
new file mode 100644
index 000000000..e0904357c
--- /dev/null
+++ b/src/pkg/go/build/testdata/other/main.go
@@ -0,0 +1,11 @@
+// Test data - not compiled.
+
+package main
+
+import (
+ "./file"
+)
+
+func main() {
+ file.F()
+}
diff --git a/src/pkg/go/doc/Makefile b/src/pkg/go/doc/Makefile
index a5152c793..ca4948f91 100644
--- a/src/pkg/go/doc/Makefile
+++ b/src/pkg/go/doc/Makefile
@@ -2,11 +2,6 @@
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
-include ../../../Make.inc
-
-TARG=go/doc
-GOFILES=\
- comment.go\
- doc.go\
-
-include ../../../Make.pkg
+# Script to test heading detection heuristic
+headscan: headscan.go
+ go build headscan.go
diff --git a/src/pkg/go/doc/comment.go b/src/pkg/go/doc/comment.go
index e1989226b..6f0edd4ba 100644
--- a/src/pkg/go/doc/comment.go
+++ b/src/pkg/go/doc/comment.go
@@ -7,114 +7,14 @@
package doc
import (
- "go/ast"
"io"
"regexp"
"strings"
- "template" // for HTMLEscape
+ "text/template" // for HTMLEscape
+ "unicode"
+ "unicode/utf8"
)
-func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' }
-
-func stripTrailingWhitespace(s string) string {
- i := len(s)
- for i > 0 && isWhitespace(s[i-1]) {
- i--
- }
- return s[0:i]
-}
-
-// CommentText returns the text of comment,
-// with the comment markers - //, /*, and */ - removed.
-func CommentText(comment *ast.CommentGroup) string {
- if comment == nil {
- return ""
- }
- comments := make([]string, len(comment.List))
- for i, c := range comment.List {
- comments[i] = string(c.Text)
- }
-
- lines := make([]string, 0, 10) // most comments are less than 10 lines
- for _, c := range comments {
- // Remove comment markers.
- // The parser has given us exactly the comment text.
- switch c[1] {
- case '/':
- //-style comment
- c = c[2:]
- // Remove leading space after //, if there is one.
- // TODO(gri) This appears to be necessary in isolated
- // cases (bignum.RatFromString) - why?
- if len(c) > 0 && c[0] == ' ' {
- c = c[1:]
- }
- case '*':
- /*-style comment */
- c = c[2 : len(c)-2]
- }
-
- // Split on newlines.
- cl := strings.Split(c, "\n")
-
- // Walk lines, stripping trailing white space and adding to list.
- for _, l := range cl {
- lines = append(lines, stripTrailingWhitespace(l))
- }
- }
-
- // Remove leading blank lines; convert runs of
- // interior blank lines to a single blank line.
- n := 0
- for _, line := range lines {
- if line != "" || n > 0 && lines[n-1] != "" {
- lines[n] = line
- n++
- }
- }
- lines = lines[0:n]
-
- // Add final "" entry to get trailing newline from Join.
- if n > 0 && lines[n-1] != "" {
- lines = append(lines, "")
- }
-
- return strings.Join(lines, "\n")
-}
-
-// Split bytes into lines.
-func split(text []byte) [][]byte {
- // count lines
- n := 0
- last := 0
- for i, c := range text {
- if c == '\n' {
- last = i + 1
- n++
- }
- }
- if last < len(text) {
- n++
- }
-
- // split
- out := make([][]byte, n)
- last = 0
- n = 0
- for i, c := range text {
- if c == '\n' {
- out[n] = text[last : i+1]
- last = i + 1
- n++
- }
- }
- if last < len(text) {
- out[n] = text[last:]
- }
-
- return out
-}
-
var (
ldquo = []byte("&ldquo;")
rdquo = []byte("&rdquo;")
@@ -122,13 +22,13 @@ var (
// Escape comment text for HTML. If nice is set,
// also turn `` into &ldquo; and '' into &rdquo;.
-func commentEscape(w io.Writer, s []byte, nice bool) {
+func commentEscape(w io.Writer, text string, nice bool) {
last := 0
if nice {
- for i := 0; i < len(s)-1; i++ {
- ch := s[i]
- if ch == s[i+1] && (ch == '`' || ch == '\'') {
- template.HTMLEscape(w, s[last:i])
+ for i := 0; i < len(text)-1; i++ {
+ ch := text[i]
+ if ch == text[i+1] && (ch == '`' || ch == '\'') {
+ template.HTMLEscape(w, []byte(text[last:i]))
last = i + 2
switch ch {
case '`':
@@ -140,7 +40,7 @@ func commentEscape(w io.Writer, s []byte, nice bool) {
}
}
}
- template.HTMLEscape(w, s[last:])
+ template.HTMLEscape(w, []byte(text[last:]))
}
const (
@@ -156,7 +56,7 @@ const (
filePart + `([:.,]` + filePart + `)*`
)
-var matchRx = regexp.MustCompile(`(` + identRx + `)|(` + urlRx + `)`)
+var matchRx = regexp.MustCompile(`(` + urlRx + `)|(` + identRx + `)`)
var (
html_a = []byte(`<a href="`)
@@ -168,6 +68,9 @@ var (
html_endp = []byte("</p>\n")
html_pre = []byte("<pre>")
html_endpre = []byte("</pre>\n")
+ html_h = []byte(`<h3 id="`)
+ html_hq = []byte(`">`)
+ html_endh = []byte("</h3>\n")
)
// Emphasize and escape a line of text for HTML. URLs are converted into links;
@@ -178,13 +81,13 @@ var (
// and the word is converted into a link. If nice is set, the remaining text's
// appearance is improved where it makes sense (e.g., `` is turned into &ldquo;
// and '' into &rdquo;).
-func emphasize(w io.Writer, line []byte, words map[string]string, nice bool) {
+func emphasize(w io.Writer, line string, words map[string]string, nice bool) {
for {
- m := matchRx.FindSubmatchIndex(line)
+ m := matchRx.FindStringSubmatchIndex(line)
if m == nil {
break
}
- // m >= 6 (two parenthesized sub-regexps in matchRx, 1st one is identRx)
+ // m >= 6 (two parenthesized sub-regexps in matchRx, 1st one is urlRx)
// write text before match
commentEscape(w, line[0:m[0]], nice)
@@ -196,8 +99,8 @@ func emphasize(w io.Writer, line []byte, words map[string]string, nice bool) {
if words != nil {
url, italics = words[string(match)]
}
- if m[2] < 0 {
- // didn't match against first parenthesized sub-regexp; must be match against urlRx
+ if m[2] >= 0 {
+ // match against first parenthesized sub-regexp; must be match against urlRx
if !italics {
// no alternative URL in words list, use match instead
url = string(match)
@@ -228,7 +131,7 @@ func emphasize(w io.Writer, line []byte, words map[string]string, nice bool) {
commentEscape(w, line, nice)
}
-func indentLen(s []byte) int {
+func indentLen(s string) int {
i := 0
for i < len(s) && (s[i] == ' ' || s[i] == '\t') {
i++
@@ -236,9 +139,11 @@ func indentLen(s []byte) int {
return i
}
-func isBlank(s []byte) bool { return len(s) == 0 || (len(s) == 1 && s[0] == '\n') }
+func isBlank(s string) bool {
+ return len(s) == 0 || (len(s) == 1 && s[0] == '\n')
+}
-func commonPrefix(a, b []byte) []byte {
+func commonPrefix(a, b string) string {
i := 0
for i < len(a) && i < len(b) && a[i] == b[i] {
i++
@@ -246,7 +151,7 @@ func commonPrefix(a, b []byte) []byte {
return a[0:i]
}
-func unindent(block [][]byte) {
+func unindent(block []string) {
if len(block) == 0 {
return
}
@@ -268,7 +173,66 @@ func unindent(block [][]byte) {
}
}
-// Convert comment text to formatted HTML.
+// heading returns the trimmed line if it passes as a section heading;
+// otherwise it returns the empty string.
+func heading(line string) string {
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ return ""
+ }
+
+ // a heading must start with an uppercase letter
+ r, _ := utf8.DecodeRuneInString(line)
+ if !unicode.IsLetter(r) || !unicode.IsUpper(r) {
+ return ""
+ }
+
+ // it must end in a letter or digit:
+ r, _ = utf8.DecodeLastRuneInString(line)
+ if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
+ return ""
+ }
+
+ // exclude lines with illegal characters
+ if strings.IndexAny(line, ",.;:!?+*/=()[]{}_^°&§~%#@<\">\\") >= 0 {
+ return ""
+ }
+
+ // allow "'" for possessive "'s" only
+ for b := line; ; {
+ i := strings.IndexRune(b, '\'')
+ if i < 0 {
+ break
+ }
+ if i+1 >= len(b) || b[i+1] != 's' || (i+2 < len(b) && b[i+2] != ' ') {
+ return "" // not followed by "s "
+ }
+ b = b[i+2:]
+ }
+
+ return line
+}
+
+type op int
+
+const (
+ opPara op = iota
+ opHead
+ opPre
+)
+
+type block struct {
+ op op
+ lines []string
+}
+
+var nonAlphaNumRx = regexp.MustCompile(`[^a-zA-Z0-9]`)
+
+func anchorID(line string) string {
+ return nonAlphaNumRx.ReplaceAllString(line, "_")
+}
+
+// ToHTML converts comment text to formatted HTML.
// The comment was prepared by DocReader,
// so it is known not to have leading, trailing blank lines
// nor to have trailing spaces at the end of lines.
@@ -276,6 +240,7 @@ func unindent(block [][]byte) {
//
// Turn each run of multiple \n into </p><p>.
// Turn each run of indented lines into a <pre> block without indent.
+// Enclose headings with header tags.
//
// URLs in the comment text are converted into links; if the URL also appears
// in the words map, the link is taken from the map (if the corresponding map
@@ -284,23 +249,57 @@ func unindent(block [][]byte) {
// Go identifiers that appear in the words map are italicized; if the corresponding
// map value is not the empty string, it is considered a URL and the word is converted
// into a link.
-func ToHTML(w io.Writer, s []byte, words map[string]string) {
- inpara := false
-
- close := func() {
- if inpara {
+func ToHTML(w io.Writer, text string, words map[string]string) {
+ for _, b := range blocks(text) {
+ switch b.op {
+ case opPara:
+ w.Write(html_p)
+ for _, line := range b.lines {
+ emphasize(w, line, words, true)
+ }
w.Write(html_endp)
- inpara = false
+ case opHead:
+ w.Write(html_h)
+ id := ""
+ for _, line := range b.lines {
+ if id == "" {
+ id = anchorID(line)
+ w.Write([]byte(id))
+ w.Write(html_hq)
+ }
+ commentEscape(w, line, true)
+ }
+ if id == "" {
+ w.Write(html_hq)
+ }
+ w.Write(html_endh)
+ case opPre:
+ w.Write(html_pre)
+ for _, line := range b.lines {
+ emphasize(w, line, nil, false)
+ }
+ w.Write(html_endpre)
}
}
- open := func() {
- if !inpara {
- w.Write(html_p)
- inpara = true
+}
+
+func blocks(text string) []block {
+ var (
+ out []block
+ para []string
+
+ lastWasBlank = false
+ lastWasHeading = false
+ )
+
+ close := func() {
+ if para != nil {
+ out = append(out, block{opPara, para})
+ para = nil
}
}
- lines := split(s)
+ lines := strings.SplitAfter(text, "\n")
unindent(lines)
for i := 0; i < len(lines); {
line := lines[i]
@@ -308,6 +307,7 @@ func ToHTML(w io.Writer, s []byte, words map[string]string) {
// close paragraph
close()
i++
+ lastWasBlank = true
continue
}
if indentLen(line) > 0 {
@@ -323,23 +323,119 @@ func ToHTML(w io.Writer, s []byte, words map[string]string) {
for j > i && isBlank(lines[j-1]) {
j--
}
- block := lines[i:j]
+ pre := lines[i:j]
i = j
- unindent(block)
+ unindent(pre)
// put those lines in a pre block
- w.Write(html_pre)
- for _, line := range block {
- emphasize(w, line, nil, false) // no nice text formatting
- }
- w.Write(html_endpre)
+ out = append(out, block{opPre, pre})
+ lastWasHeading = false
continue
}
+
+ if lastWasBlank && !lastWasHeading && i+2 < len(lines) &&
+ isBlank(lines[i+1]) && !isBlank(lines[i+2]) && indentLen(lines[i+2]) == 0 {
+ // current line is non-blank, surrounded by blank lines
+ // and the next non-blank line is not indented: this
+ // might be a heading.
+ if head := heading(line); head != "" {
+ close()
+ out = append(out, block{opHead, []string{head}})
+ i += 2
+ lastWasHeading = true
+ continue
+ }
+ }
+
// open paragraph
- open()
- emphasize(w, lines[i], words, true) // nice text formatting
+ lastWasBlank = false
+ lastWasHeading = false
+ para = append(para, lines[i])
i++
}
close()
+
+ return out
+}
+
+// ToText prepares comment text for presentation in textual output.
+// It wraps paragraphs of text to width or fewer Unicode code points
+// and then prefixes each line with the indent. In preformatted sections
+// (such as program text), it prefixes each non-blank line with preIndent.
+func ToText(w io.Writer, text string, indent, preIndent string, width int) {
+ l := lineWrapper{
+ out: w,
+ width: width,
+ indent: indent,
+ }
+ for _, b := range blocks(text) {
+ switch b.op {
+ case opPara:
+ // l.write will add leading newline if required
+ for _, line := range b.lines {
+ l.write(line)
+ }
+ l.flush()
+ case opHead:
+ w.Write(nl)
+ for _, line := range b.lines {
+ l.write(line + "\n")
+ }
+ l.flush()
+ case opPre:
+ w.Write(nl)
+ for _, line := range b.lines {
+ if !isBlank(line) {
+ w.Write([]byte(preIndent))
+ w.Write([]byte(line))
+ }
+ }
+ }
+ }
+}
+
+type lineWrapper struct {
+ out io.Writer
+ printed bool
+ width int
+ indent string
+ n int
+ pendSpace int
+}
+
+var nl = []byte("\n")
+var space = []byte(" ")
+
+func (l *lineWrapper) write(text string) {
+ if l.n == 0 && l.printed {
+ l.out.Write(nl) // blank line before new paragraph
+ }
+ l.printed = true
+
+ for _, f := range strings.Fields(text) {
+ w := utf8.RuneCountInString(f)
+ // wrap if line is too long
+ if l.n > 0 && l.n+l.pendSpace+w > l.width {
+ l.out.Write(nl)
+ l.n = 0
+ l.pendSpace = 0
+ }
+ if l.n == 0 {
+ l.out.Write([]byte(l.indent))
+ }
+ l.out.Write(space[:l.pendSpace])
+ l.out.Write([]byte(f))
+ l.n += l.pendSpace + w
+ l.pendSpace = 1
+ }
+}
+
+func (l *lineWrapper) flush() {
+ if l.n == 0 {
+ return
+ }
+ l.out.Write(nl)
+ l.pendSpace = 0
+ l.n = 0
}
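A small usage sketch for the string-based API introduced above; the comment text is invented and output goes to stdout. Given the rules in blocks and heading, ToHTML should render "Usage" as an h3 heading (blank lines around it, next non-blank line unindented), the indented line as a pre block, and the URL as a link; ToText wraps the same paragraphs to the given width instead.

	package main

	import (
		"go/doc"
		"os"
	)

	func main() {
		const comment = `Package demo does something.

	Usage

	Call Run with a configuration:

		demo.Run(cfg)

	See http://golang.org/ for details.
	`
		// HTML: paragraphs, an <h3 id=...> heading for "Usage",
		// a <pre> block for the indented line, a link for the URL.
		doc.ToHTML(os.Stdout, comment, nil)

		// Plain text: paragraphs wrapped to 40 columns, indented
		// with a tab; preformatted lines prefixed with two tabs.
		doc.ToText(os.Stdout, comment, "\t", "\t\t", 40)
	}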
diff --git a/src/pkg/go/doc/comment_test.go b/src/pkg/go/doc/comment_test.go
new file mode 100644
index 000000000..aa21b8d1b
--- /dev/null
+++ b/src/pkg/go/doc/comment_test.go
@@ -0,0 +1,109 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+)
+
+var headingTests = []struct {
+ line string
+ ok bool
+}{
+ {"Section", true},
+ {"A typical usage", true},
+ {"ΔΛΞ is Greek", true},
+ {"Foo 42", true},
+ {"", false},
+ {"section", false},
+ {"A typical usage:", false},
+ {"This code:", false},
+ {"δ is Greek", false},
+ {"Foo §", false},
+ {"Fermat's Last Sentence", true},
+ {"Fermat's", true},
+ {"'sX", false},
+ {"Ted 'Too' Bar", false},
+ {"Use n+m", false},
+ {"Scanning:", false},
+ {"N:M", false},
+}
+
+func TestIsHeading(t *testing.T) {
+ for _, tt := range headingTests {
+ if h := heading(tt.line); (len(h) > 0) != tt.ok {
+ t.Errorf("isHeading(%q) = %v, want %v", tt.line, h, tt.ok)
+ }
+ }
+}
+
+var blocksTests = []struct {
+ in string
+ out []block
+}{
+ {
+ in: `Para 1.
+Para 1 line 2.
+
+Para 2.
+
+Section
+
+Para 3.
+
+ pre
+ pre1
+
+Para 4.
+ pre
+ pre2
+`,
+ out: []block{
+ {opPara, []string{"Para 1.\n", "Para 1 line 2.\n"}},
+ {opPara, []string{"Para 2.\n"}},
+ {opHead, []string{"Section"}},
+ {opPara, []string{"Para 3.\n"}},
+ {opPre, []string{"pre\n", "pre1\n"}},
+ {opPara, []string{"Para 4.\n"}},
+ {opPre, []string{"pre\n", "pre2\n"}},
+ },
+ },
+}
+
+func TestBlocks(t *testing.T) {
+ for i, tt := range blocksTests {
+ b := blocks(tt.in)
+ if !reflect.DeepEqual(b, tt.out) {
+ t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, b, tt.out)
+ }
+ }
+}
+
+var emphasizeTests = []struct {
+ in string
+ out string
+}{
+ {"http://www.google.com/", `<a href="http://www.google.com/">http://www.google.com/</a>`},
+ {"https://www.google.com/", `<a href="https://www.google.com/">https://www.google.com/</a>`},
+ {"http://www.google.com/path.", `<a href="http://www.google.com/path">http://www.google.com/path</a>.`},
+ {"(http://www.google.com/)", `(<a href="http://www.google.com/">http://www.google.com/</a>)`},
+ {"Foo bar http://example.com/ quux!", `Foo bar <a href="http://example.com/">http://example.com/</a> quux!`},
+ {"Hello http://example.com/%2f/ /world.", `Hello <a href="http://example.com/%2f/">http://example.com/%2f/</a> /world.`},
+ {"Lorem http: ipsum //host/path", "Lorem http: ipsum //host/path"},
+ {"javascript://is/not/linked", "javascript://is/not/linked"},
+}
+
+func TestEmphasize(t *testing.T) {
+ for i, tt := range emphasizeTests {
+ var buf bytes.Buffer
+ emphasize(&buf, tt.in, nil, true)
+ out := buf.String()
+ if out != tt.out {
+ t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, out, tt.out)
+ }
+ }
+}
diff --git a/src/pkg/go/doc/doc.go b/src/pkg/go/doc/doc.go
index c7fed9784..9c606315d 100644
--- a/src/pkg/go/doc/doc.go
+++ b/src/pkg/go/doc/doc.go
@@ -8,634 +8,90 @@ package doc
import (
"go/ast"
"go/token"
- "regexp"
- "sort"
)
-// ----------------------------------------------------------------------------
+// Package is the documentation for an entire package.
+type Package struct {
+ Doc string
+ Name string
+ ImportPath string
+ Imports []string
+ Filenames []string
+ Bugs []string
-type typeDoc struct {
- // len(decl.Specs) == 1, and the element type is *ast.TypeSpec
- // if the type declaration hasn't been seen yet, decl is nil
- decl *ast.GenDecl
- // values, factory functions, and methods associated with the type
- values []*ast.GenDecl // consts and vars
- factories map[string]*ast.FuncDecl
- methods map[string]*ast.FuncDecl
+ // declarations
+ Consts []*Value
+ Types []*Type
+ Vars []*Value
+ Funcs []*Func
}
-// docReader accumulates documentation for a single package.
-// It modifies the AST: Comments (declaration documentation)
-// that have been collected by the DocReader are set to nil
-// in the respective AST nodes so that they are not printed
-// twice (once when printing the documentation and once when
-// printing the corresponding AST node).
-//
-type docReader struct {
- doc *ast.CommentGroup // package documentation, if any
- pkgName string
- values []*ast.GenDecl // consts and vars
- types map[string]*typeDoc
- funcs map[string]*ast.FuncDecl
- bugs []*ast.CommentGroup
-}
-
-func (doc *docReader) init(pkgName string) {
- doc.pkgName = pkgName
- doc.types = make(map[string]*typeDoc)
- doc.funcs = make(map[string]*ast.FuncDecl)
-}
-
-func (doc *docReader) addDoc(comments *ast.CommentGroup) {
- if doc.doc == nil {
- // common case: just one package comment
- doc.doc = comments
- return
- }
-
- // More than one package comment: Usually there will be only
- // one file with a package comment, but it's better to collect
- // all comments than drop them on the floor.
- // (This code isn't particularly clever - no amortized doubling is
- // used - but this situation occurs rarely and is not time-critical.)
- n1 := len(doc.doc.List)
- n2 := len(comments.List)
- list := make([]*ast.Comment, n1+1+n2) // + 1 for separator line
- copy(list, doc.doc.List)
- list[n1] = &ast.Comment{token.NoPos, "//"} // separator line
- copy(list[n1+1:], comments.List)
- doc.doc = &ast.CommentGroup{list}
-}
-
-func (doc *docReader) addType(decl *ast.GenDecl) {
- spec := decl.Specs[0].(*ast.TypeSpec)
- typ := doc.lookupTypeDoc(spec.Name.Name)
- // typ should always be != nil since declared types
- // are always named - be conservative and check
- if typ != nil {
- // a type should be added at most once, so typ.decl
- // should be nil - if it isn't, simply overwrite it
- typ.decl = decl
- }
-}
-
-func (doc *docReader) lookupTypeDoc(name string) *typeDoc {
- if name == "" {
- return nil // no type docs for anonymous types
- }
- if tdoc, found := doc.types[name]; found {
- return tdoc
- }
- // type wasn't found - add one without declaration
- tdoc := &typeDoc{nil, nil, make(map[string]*ast.FuncDecl), make(map[string]*ast.FuncDecl)}
- doc.types[name] = tdoc
- return tdoc
-}
-
-func baseTypeName(typ ast.Expr) string {
- switch t := typ.(type) {
- case *ast.Ident:
- // if the type is not exported, the effect to
- // a client is as if there were no type name
- if t.IsExported() {
- return t.Name
- }
- case *ast.StarExpr:
- return baseTypeName(t.X)
- }
- return ""
-}
-
-func (doc *docReader) addValue(decl *ast.GenDecl) {
- // determine if decl should be associated with a type
- // Heuristic: For each typed entry, determine the type name, if any.
- // If there is exactly one type name that is sufficiently
- // frequent, associate the decl with the respective type.
- domName := ""
- domFreq := 0
- prev := ""
- for _, s := range decl.Specs {
- if v, ok := s.(*ast.ValueSpec); ok {
- name := ""
- switch {
- case v.Type != nil:
- // a type is present; determine its name
- name = baseTypeName(v.Type)
- case decl.Tok == token.CONST:
- // no type is present but we have a constant declaration;
- // use the previous type name (w/o more type information
- // we cannot handle the case of unnamed variables with
- // initializer expressions except for some trivial cases)
- name = prev
- }
- if name != "" {
- // entry has a named type
- if domName != "" && domName != name {
- // more than one type name - do not associate
- // with any type
- domName = ""
- break
- }
- domName = name
- domFreq++
- }
- prev = name
- }
- }
-
- // determine values list
- const threshold = 0.75
- values := &doc.values
- if domName != "" && domFreq >= int(float64(len(decl.Specs))*threshold) {
- // typed entries are sufficiently frequent
- typ := doc.lookupTypeDoc(domName)
- if typ != nil {
- values = &typ.values // associate with that type
- }
- }
-
- *values = append(*values, decl)
-}
-
-// Helper function to set the table entry for function f. Makes sure that
-// at least one f with associated documentation is stored in table, if there
-// are multiple f's with the same name.
-func setFunc(table map[string]*ast.FuncDecl, f *ast.FuncDecl) {
- name := f.Name.Name
- if g, exists := table[name]; exists && g.Doc != nil {
- // a function with the same name has already been registered;
- // since it has documentation, assume f is simply another
- // implementation and ignore it
- // TODO(gri) consider collecting all functions, or at least
- // all comments
- return
- }
- // function doesn't exist or has no documentation; use f
- table[name] = f
-}
-
-func (doc *docReader) addFunc(fun *ast.FuncDecl) {
- name := fun.Name.Name
-
- // determine if it should be associated with a type
- if fun.Recv != nil {
- // method
- typ := doc.lookupTypeDoc(baseTypeName(fun.Recv.List[0].Type))
- if typ != nil {
- // exported receiver type
- setFunc(typ.methods, fun)
- }
- // otherwise don't show the method
- // TODO(gri): There may be exported methods of non-exported types
- // that can be called because of exported values (consts, vars, or
- // function results) of that type. Could determine if that is the
- // case and then show those methods in an appropriate section.
- return
- }
-
- // perhaps a factory function
- // determine result type, if any
- if fun.Type.Results.NumFields() >= 1 {
- res := fun.Type.Results.List[0]
- if len(res.Names) <= 1 {
- // exactly one (named or anonymous) result associated
- // with the first type in result signature (there may
- // be more than one result)
- tname := baseTypeName(res.Type)
- typ := doc.lookupTypeDoc(tname)
- if typ != nil {
- // named and exported result type
-
- // Work-around for failure of heuristic: In package os
- // too many functions are considered factory functions
- // for the Error type. Eliminate manually for now as
- // this appears to be the only important case in the
- // current library where the heuristic fails.
- if doc.pkgName == "os" && tname == "Error" &&
- name != "NewError" && name != "NewSyscallError" {
- // not a factory function for os.Error
- setFunc(doc.funcs, fun) // treat as ordinary function
- return
- }
-
- setFunc(typ.factories, fun)
- return
- }
- }
- }
-
- // ordinary function
- setFunc(doc.funcs, fun)
-}
-
-func (doc *docReader) addDecl(decl ast.Decl) {
- switch d := decl.(type) {
- case *ast.GenDecl:
- if len(d.Specs) > 0 {
- switch d.Tok {
- case token.CONST, token.VAR:
- // constants and variables are always handled as a group
- doc.addValue(d)
- case token.TYPE:
- // types are handled individually
- for _, spec := range d.Specs {
- // make a (fake) GenDecl node for this TypeSpec
- // (we need to do this here - as opposed to just
- // for printing - so we don't lose the GenDecl
- // documentation)
- //
- // TODO(gri): Consider just collecting the TypeSpec
- // node (and copy in the GenDecl.doc if there is no
- // doc in the TypeSpec - this is currently done in
- // makeTypeDocs below). Simpler data structures, but
- // would lose GenDecl documentation if the TypeSpec
- // has documentation as well.
- doc.addType(&ast.GenDecl{d.Doc, d.Pos(), token.TYPE, token.NoPos, []ast.Spec{spec}, token.NoPos})
- // A new GenDecl node is created, no need to nil out d.Doc.
- }
- }
- }
- case *ast.FuncDecl:
- doc.addFunc(d)
- }
-}
-
-func copyCommentList(list []*ast.Comment) []*ast.Comment {
- return append([]*ast.Comment(nil), list...)
-}
-
-var (
- bug_markers = regexp.MustCompile("^/[/*][ \t]*BUG\\(.*\\):[ \t]*") // BUG(uid):
- bug_content = regexp.MustCompile("[^ \n\r\t]+") // at least one non-whitespace char
-)
-
-// addFile adds the AST for a source file to the docReader.
-// Adding the same AST multiple times is a no-op.
-//
-func (doc *docReader) addFile(src *ast.File) {
- // add package documentation
- if src.Doc != nil {
- doc.addDoc(src.Doc)
- src.Doc = nil // doc consumed - remove from ast.File node
- }
-
- // add all declarations
- for _, decl := range src.Decls {
- doc.addDecl(decl)
- }
-
- // collect BUG(...) comments
- for _, c := range src.Comments {
- text := c.List[0].Text
- if m := bug_markers.FindStringIndex(text); m != nil {
- // found a BUG comment; maybe empty
- if btxt := text[m[1]:]; bug_content.MatchString(btxt) {
- // non-empty BUG comment; collect comment without BUG prefix
- list := copyCommentList(c.List)
- list[0].Text = text[m[1]:]
- doc.bugs = append(doc.bugs, &ast.CommentGroup{list})
- }
- }
- }
- src.Comments = nil // consumed unassociated comments - remove from ast.File node
-}
-
-func NewFileDoc(file *ast.File) *PackageDoc {
- var r docReader
- r.init(file.Name.Name)
- r.addFile(file)
- return r.newDoc("", nil)
-}
-
-func NewPackageDoc(pkg *ast.Package, importpath string) *PackageDoc {
- var r docReader
- r.init(pkg.Name)
- filenames := make([]string, len(pkg.Files))
- i := 0
- for filename, f := range pkg.Files {
- r.addFile(f)
- filenames[i] = filename
- i++
- }
- return r.newDoc(importpath, filenames)
-}
-
-// ----------------------------------------------------------------------------
-// Conversion to external representation
-
-// ValueDoc is the documentation for a group of declared
-// values, either vars or consts.
-//
-type ValueDoc struct {
+// Value is the documentation for a (possibly grouped) var or const declaration.
+type Value struct {
Doc string
+ Names []string // var or const names in declaration order
Decl *ast.GenDecl
- order int
-}
-
-type sortValueDoc []*ValueDoc
-func (p sortValueDoc) Len() int { return len(p) }
-func (p sortValueDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-func declName(d *ast.GenDecl) string {
- if len(d.Specs) != 1 {
- return ""
- }
-
- switch v := d.Specs[0].(type) {
- case *ast.ValueSpec:
- return v.Names[0].Name
- case *ast.TypeSpec:
- return v.Name.Name
- }
-
- return ""
+ order int
}
-func (p sortValueDoc) Less(i, j int) bool {
- // sort by name
- // pull blocks (name = "") up to top
- // in original order
- if ni, nj := declName(p[i].Decl), declName(p[j].Decl); ni != nj {
- return ni < nj
- }
- return p[i].order < p[j].order
-}
+// Type is the documentation for a type declaration.
+type Type struct {
+ Doc string
+ Name string
+ Decl *ast.GenDecl
-func makeValueDocs(list []*ast.GenDecl, tok token.Token) []*ValueDoc {
- d := make([]*ValueDoc, len(list)) // big enough in any case
- n := 0
- for i, decl := range list {
- if decl.Tok == tok {
- d[n] = &ValueDoc{CommentText(decl.Doc), decl, i}
- n++
- decl.Doc = nil // doc consumed - removed from AST
- }
- }
- d = d[0:n]
- sort.Sort(sortValueDoc(d))
- return d
+ // associated declarations
+ Consts []*Value // sorted list of constants of (mostly) this type
+ Vars []*Value // sorted list of variables of (mostly) this type
+ Funcs []*Func // sorted list of functions returning this type
+ Methods []*Func // sorted list of methods (including embedded ones) of this type
}
-// FuncDoc is the documentation for a func declaration,
-// either a top-level function or a method function.
-//
-type FuncDoc struct {
+// Func is the documentation for a func declaration.
+type Func struct {
Doc string
- Recv ast.Expr // TODO(rsc): Would like string here
Name string
Decl *ast.FuncDecl
-}
-
-type sortFuncDoc []*FuncDoc
-func (p sortFuncDoc) Len() int { return len(p) }
-func (p sortFuncDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p sortFuncDoc) Less(i, j int) bool { return p[i].Name < p[j].Name }
-
-func makeFuncDocs(m map[string]*ast.FuncDecl) []*FuncDoc {
- d := make([]*FuncDoc, len(m))
- i := 0
- for _, f := range m {
- doc := new(FuncDoc)
- doc.Doc = CommentText(f.Doc)
- f.Doc = nil // doc consumed - remove from ast.FuncDecl node
- if f.Recv != nil {
- doc.Recv = f.Recv.List[0].Type
- }
- doc.Name = f.Name.Name
- doc.Decl = f
- d[i] = doc
- i++
- }
- sort.Sort(sortFuncDoc(d))
- return d
+ // methods
+ // (for functions, these fields have the respective zero value)
+ Recv string // actual receiver "T" or "*T"
+ Orig string // original receiver "T" or "*T"
+ Level int // embedding level; 0 means not embedded
}
-// TypeDoc is the documentation for a declared type.
-// Consts and Vars are sorted lists of constants and variables of (mostly) that type.
-// Factories is a sorted list of factory functions that return that type.
-// Methods is a sorted list of method functions on that type.
-type TypeDoc struct {
- Doc string
- Type *ast.TypeSpec
- Consts []*ValueDoc
- Vars []*ValueDoc
- Factories []*FuncDoc
- Methods []*FuncDoc
- Decl *ast.GenDecl
- order int
-}
+// Mode values control the operation of New.
+type Mode int
-type sortTypeDoc []*TypeDoc
+const (
+ // extract documentation for all package-level declarations,
+ // not just exported ones
+ AllDecls Mode = 1 << iota
-func (p sortTypeDoc) Len() int { return len(p) }
-func (p sortTypeDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-func (p sortTypeDoc) Less(i, j int) bool {
- // sort by name
- // pull blocks (name = "") up to top
- // in original order
- if ni, nj := p[i].Type.Name.Name, p[j].Type.Name.Name; ni != nj {
- return ni < nj
- }
- return p[i].order < p[j].order
-}
-
-// NOTE(rsc): This would appear not to be correct for type ( )
-// blocks, but the doc extractor above has split them into
-// individual declarations.
-func (doc *docReader) makeTypeDocs(m map[string]*typeDoc) []*TypeDoc {
- d := make([]*TypeDoc, len(m))
- i := 0
- for _, old := range m {
- // all typeDocs should have a declaration associated with
- // them after processing an entire package - be conservative
- // and check
- if decl := old.decl; decl != nil {
- typespec := decl.Specs[0].(*ast.TypeSpec)
- t := new(TypeDoc)
- doc := typespec.Doc
- typespec.Doc = nil // doc consumed - remove from ast.TypeSpec node
- if doc == nil {
- // no doc associated with the spec, use the declaration doc, if any
- doc = decl.Doc
- }
- decl.Doc = nil // doc consumed - remove from ast.Decl node
- t.Doc = CommentText(doc)
- t.Type = typespec
- t.Consts = makeValueDocs(old.values, token.CONST)
- t.Vars = makeValueDocs(old.values, token.VAR)
- t.Factories = makeFuncDocs(old.factories)
- t.Methods = makeFuncDocs(old.methods)
- t.Decl = old.decl
- t.order = i
- d[i] = t
- i++
- } else {
- // no corresponding type declaration found - move any associated
- // values, factory functions, and methods back to the top-level
- // so that they are not lost (this should only happen if a package
- // file containing the explicit type declaration is missing or if
- // an unqualified type name was used after a "." import)
- // 1) move values
- doc.values = append(doc.values, old.values...)
- // 2) move factory functions
- for name, f := range old.factories {
- doc.funcs[name] = f
- }
- // 3) move methods
- for name, f := range old.methods {
- // don't overwrite functions with the same name
- if _, found := doc.funcs[name]; !found {
- doc.funcs[name] = f
- }
- }
- }
- }
- d = d[0:i] // some types may have been ignored
- sort.Sort(sortTypeDoc(d))
- return d
-}
-
-func makeBugDocs(list []*ast.CommentGroup) []string {
- d := make([]string, len(list))
- for i, g := range list {
- d[i] = CommentText(g)
- }
- return d
-}
-
-// PackageDoc is the documentation for an entire package.
-//
-type PackageDoc struct {
- PackageName string
- ImportPath string
- Filenames []string
- Doc string
- Consts []*ValueDoc
- Types []*TypeDoc
- Vars []*ValueDoc
- Funcs []*FuncDoc
- Bugs []string
-}
+ // show all embedded methods, not just the ones of
+ // invisible (unexported) anonymous fields
+ AllMethods
+)
-// newDoc returns the accumulated documentation for the package.
+// New computes the package documentation for the given package AST.
+// New takes ownership of the AST pkg and may edit or overwrite it.
//
-func (doc *docReader) newDoc(importpath string, filenames []string) *PackageDoc {
- p := new(PackageDoc)
- p.PackageName = doc.pkgName
- p.ImportPath = importpath
- sort.Strings(filenames)
- p.Filenames = filenames
- p.Doc = CommentText(doc.doc)
- // makeTypeDocs may extend the list of doc.values and
- // doc.funcs and thus must be called before any other
- // function consuming those lists
- p.Types = doc.makeTypeDocs(doc.types)
- p.Consts = makeValueDocs(doc.values, token.CONST)
- p.Vars = makeValueDocs(doc.values, token.VAR)
- p.Funcs = makeFuncDocs(doc.funcs)
- p.Bugs = makeBugDocs(doc.bugs)
- return p
-}
-
-// ----------------------------------------------------------------------------
-// Filtering by name
-
-type Filter func(string) bool
-
-func matchFields(fields *ast.FieldList, f Filter) bool {
- if fields != nil {
- for _, field := range fields.List {
- for _, name := range field.Names {
- if f(name.Name) {
- return true
- }
- }
- }
+func New(pkg *ast.Package, importPath string, mode Mode) *Package {
+ var r reader
+ r.readPackage(pkg, mode)
+ r.computeMethodSets()
+ r.cleanupTypes()
+ return &Package{
+ Doc: r.doc,
+ Name: pkg.Name,
+ ImportPath: importPath,
+ Imports: sortedKeys(r.imports),
+ Filenames: r.filenames,
+ Bugs: r.bugs,
+ Consts: sortedValues(r.values, token.CONST),
+ Types: sortedTypes(r.types, mode&AllMethods != 0),
+ Vars: sortedValues(r.values, token.VAR),
+ Funcs: sortedFuncs(r.funcs, true),
}
- return false
-}
-
-func matchDecl(d *ast.GenDecl, f Filter) bool {
- for _, d := range d.Specs {
- switch v := d.(type) {
- case *ast.ValueSpec:
- for _, name := range v.Names {
- if f(name.Name) {
- return true
- }
- }
- case *ast.TypeSpec:
- if f(v.Name.Name) {
- return true
- }
- switch t := v.Type.(type) {
- case *ast.StructType:
- if matchFields(t.Fields, f) {
- return true
- }
- case *ast.InterfaceType:
- if matchFields(t.Methods, f) {
- return true
- }
- }
- }
- }
- return false
-}
-
-func filterValueDocs(a []*ValueDoc, f Filter) []*ValueDoc {
- w := 0
- for _, vd := range a {
- if matchDecl(vd.Decl, f) {
- a[w] = vd
- w++
- }
- }
- return a[0:w]
-}
-
-func filterFuncDocs(a []*FuncDoc, f Filter) []*FuncDoc {
- w := 0
- for _, fd := range a {
- if f(fd.Name) {
- a[w] = fd
- w++
- }
- }
- return a[0:w]
-}
-
-func filterTypeDocs(a []*TypeDoc, f Filter) []*TypeDoc {
- w := 0
- for _, td := range a {
- n := 0 // number of matches
- if matchDecl(td.Decl, f) {
- n = 1
- } else {
- // type name doesn't match, but we may have matching consts, vars, factories or methods
- td.Consts = filterValueDocs(td.Consts, f)
- td.Vars = filterValueDocs(td.Vars, f)
- td.Factories = filterFuncDocs(td.Factories, f)
- td.Methods = filterFuncDocs(td.Methods, f)
- n += len(td.Consts) + len(td.Vars) + len(td.Factories) + len(td.Methods)
- }
- if n > 0 {
- a[w] = td
- w++
- }
- }
- return a[0:w]
-}
-
-// Filter eliminates documentation for names that don't pass through the filter f.
-// TODO: Recognize "Type.Method" as a name.
-//
-func (p *PackageDoc) Filter(f Filter) {
- p.Consts = filterValueDocs(p.Consts, f)
- p.Vars = filterValueDocs(p.Vars, f)
- p.Types = filterTypeDocs(p.Types, f)
- p.Funcs = filterFuncDocs(p.Funcs, f)
- p.Doc = "" // don't show top-level package doc
}
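A hedged sketch of driving the new doc.New API end to end ("./mypkg" and the import path are placeholders): parse a directory with comments, hand each *ast.Package to New, and walk the resulting Package. Note that New takes ownership of the AST and may modify it, as its doc comment says.

	package main

	import (
		"fmt"
		"go/doc"
		"go/parser"
		"go/token"
	)

	func main() {
		fset := token.NewFileSet()
		// ParseComments keeps doc comments in the AST for New to read.
		pkgs, err := parser.ParseDir(fset, "./mypkg", nil, parser.ParseComments)
		if err != nil {
			panic(err)
		}
		for _, pkg := range pkgs {
			p := doc.New(pkg, "example.com/mypkg", 0) // New may rewrite the AST
			fmt.Println("package", p.Name, ":", p.Doc)
			for _, t := range p.Types {
				fmt.Printf("type %s (%d methods)\n", t.Name, len(t.Methods))
			}
			for _, f := range p.Funcs {
				fmt.Println("func", f.Name)
			}
		}
	}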
diff --git a/src/pkg/go/doc/doc_test.go b/src/pkg/go/doc/doc_test.go
new file mode 100644
index 000000000..f957ede4a
--- /dev/null
+++ b/src/pkg/go/doc/doc_test.go
@@ -0,0 +1,136 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+ "text/template"
+)
+
+var update = flag.Bool("update", false, "update golden (.out) files")
+var files = flag.String("files", "", "consider only Go test files matching this regular expression")
+
+const dataDir = "testdata"
+
+var templateTxt = readTemplate("template.txt")
+
+func readTemplate(filename string) *template.Template {
+ t := template.New(filename)
+ t.Funcs(template.FuncMap{
+ "node": nodeFmt,
+ "synopsis": synopsisFmt,
+ })
+ return template.Must(t.ParseFiles(filepath.Join(dataDir, filename)))
+}
+
+func nodeFmt(node interface{}, fset *token.FileSet) string {
+ var buf bytes.Buffer
+ printer.Fprint(&buf, fset, node)
+ return strings.Replace(strings.TrimSpace(buf.String()), "\n", "\n\t", -1)
+}
+
+func synopsisFmt(s string) string {
+ const n = 64
+ if len(s) > n {
+ // cut off excess text and go back to a word boundary
+ s = s[0:n]
+ if i := strings.LastIndexAny(s, "\t\n "); i >= 0 {
+ s = s[0:i]
+ }
+ s = strings.TrimSpace(s) + " ..."
+ }
+ return "// " + strings.Replace(s, "\n", " ", -1)
+}
+
+func isGoFile(fi os.FileInfo) bool {
+ name := fi.Name()
+ return !fi.IsDir() &&
+ len(name) > 0 && name[0] != '.' && // ignore .files
+ filepath.Ext(name) == ".go"
+}
+
+type bundle struct {
+ *Package
+ FSet *token.FileSet
+}
+
+func test(t *testing.T, mode Mode) {
+ // determine file filter
+ filter := isGoFile
+ if *files != "" {
+ rx, err := regexp.Compile(*files)
+ if err != nil {
+ t.Fatal(err)
+ }
+ filter = func(fi os.FileInfo) bool {
+ return isGoFile(fi) && rx.MatchString(fi.Name())
+ }
+ }
+
+ // get packages
+ fset := token.NewFileSet()
+ pkgs, err := parser.ParseDir(fset, dataDir, filter, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test packages
+ for _, pkg := range pkgs {
+ importpath := dataDir + "/" + pkg.Name
+ doc := New(pkg, importpath, mode)
+
+ // golden files always use / in filenames - canonicalize them
+ for i, filename := range doc.Filenames {
+ doc.Filenames[i] = filepath.ToSlash(filename)
+ }
+
+ // print documentation
+ var buf bytes.Buffer
+ if err := templateTxt.Execute(&buf, bundle{doc, fset}); err != nil {
+ t.Error(err)
+ continue
+ }
+ got := buf.Bytes()
+
+ // update golden file if necessary
+ golden := filepath.Join(dataDir, fmt.Sprintf("%s.%d.golden", pkg.Name, mode))
+ if *update {
+ err := ioutil.WriteFile(golden, got, 0644)
+ if err != nil {
+ t.Error(err)
+ }
+ continue
+ }
+
+ // get golden file
+ want, err := ioutil.ReadFile(golden)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ // compare
+ if bytes.Compare(got, want) != 0 {
+ t.Errorf("package %s\n\tgot:\n%s\n\twant:\n%s", pkg.Name, got, want)
+ }
+ }
+}
+
+func Test(t *testing.T) {
+ test(t, 0)
+ test(t, AllDecls)
+ test(t, AllMethods)
+}
diff --git a/src/pkg/go/doc/example.go b/src/pkg/go/doc/example.go
new file mode 100644
index 000000000..a7e0e250a
--- /dev/null
+++ b/src/pkg/go/doc/example.go
@@ -0,0 +1,117 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Extract example functions from file ASTs.
+
+package doc
+
+import (
+ "go/ast"
+ "go/token"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type Example struct {
+ Name string // name of the item being exemplified
+ Doc string // example function doc string
+ Code ast.Node
+ Comments []*ast.CommentGroup
+ Output string // expected output
+}
+
+func Examples(files ...*ast.File) []*Example {
+ var list []*Example
+ for _, file := range files {
+ hasTests := false // file contains tests or benchmarks
+ numDecl := 0 // number of non-import declarations in the file
+ var flist []*Example
+ for _, decl := range file.Decls {
+ if g, ok := decl.(*ast.GenDecl); ok && g.Tok != token.IMPORT {
+ numDecl++
+ continue
+ }
+ f, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ numDecl++
+ name := f.Name.Name
+ if isTest(name, "Test") || isTest(name, "Benchmark") {
+ hasTests = true
+ continue
+ }
+ if !isTest(name, "Example") {
+ continue
+ }
+ var doc string
+ if f.Doc != nil {
+ doc = f.Doc.Text()
+ }
+ flist = append(flist, &Example{
+ Name: name[len("Example"):],
+ Doc: doc,
+ Code: f.Body,
+ Comments: file.Comments,
+ Output: exampleOutput(f, file.Comments),
+ })
+ }
+ if !hasTests && numDecl > 1 && len(flist) == 1 {
+ // If this file only has one example function, some
+ // other top-level declarations, and no tests or
+ // benchmarks, use the whole file as the example.
+ flist[0].Code = file
+ }
+ list = append(list, flist...)
+ }
+ sort.Sort(exampleByName(list))
+ return list
+}
+
+var outputPrefix = regexp.MustCompile(`(?i)^[[:space:]]*output:`)
+
+func exampleOutput(fun *ast.FuncDecl, comments []*ast.CommentGroup) string {
+ // find the last comment in the function
+ var last *ast.CommentGroup
+ for _, cg := range comments {
+ if cg.Pos() < fun.Pos() {
+ continue
+ }
+ if cg.End() > fun.End() {
+ break
+ }
+ last = cg
+ }
+ if last != nil {
+ // test that it begins with the correct prefix
+ text := last.Text()
+ if loc := outputPrefix.FindStringIndex(text); loc != nil {
+ return strings.TrimSpace(text[loc[1]:])
+ }
+ }
+ return "" // no suitable comment found
+}
+
+// isTest tells whether name looks like a test, example, or benchmark.
+// It is a Test (say) if there is a character after Test that is not a
+// lower-case letter. (We don't want Testiness.)
+func isTest(name, prefix string) bool {
+ if !strings.HasPrefix(name, prefix) {
+ return false
+ }
+ if len(name) == len(prefix) { // "Test" is ok
+ return true
+ }
+ rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
+ return !unicode.IsLower(rune)
+}
+
+type exampleByName []*Example
+
+func (s exampleByName) Len() int { return len(s) }
+func (s exampleByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s exampleByName) Less(i, j int) bool { return s[i].Name < s[j].Name }
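A minimal sketch of calling Examples on a parsed test file ("example_test.go" is a placeholder path); ParseComments matters because exampleOutput recovers the expected output from the function's trailing comment.

	package main

	import (
		"fmt"
		"go/doc"
		"go/parser"
		"go/token"
	)

	func main() {
		fset := token.NewFileSet()
		// ParseComments is needed so the trailing "output:" comment
		// survives into file.Comments for exampleOutput to find.
		f, err := parser.ParseFile(fset, "example_test.go", nil, parser.ParseComments)
		if err != nil {
			panic(err)
		}
		for _, ex := range doc.Examples(f) {
			fmt.Printf("Example%s: want output %q\n", ex.Name, ex.Output)
		}
	}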
diff --git a/src/pkg/go/doc/exports.go b/src/pkg/go/doc/exports.go
new file mode 100644
index 000000000..146be5d87
--- /dev/null
+++ b/src/pkg/go/doc/exports.go
@@ -0,0 +1,199 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements export filtering of an AST.
+
+package doc
+
+import "go/ast"
+
+// filterIdentList removes unexported names from list in place
+// and returns the resulting list.
+//
+func filterIdentList(list []*ast.Ident) []*ast.Ident {
+ j := 0
+ for _, x := range list {
+ if ast.IsExported(x.Name) {
+ list[j] = x
+ j++
+ }
+ }
+ return list[0:j]
+}
+
+// removeErrorField removes anonymous fields named "error" from an interface.
+// This is called when "error" has been determined to be a local name,
+// not the predeclared type.
+//
+func removeErrorField(ityp *ast.InterfaceType) {
+ list := ityp.Methods.List // we know that ityp.Methods != nil
+ j := 0
+ for _, field := range list {
+ keepField := true
+ if n := len(field.Names); n == 0 {
+ // anonymous field
+ if fname, _ := baseTypeName(field.Type); fname == "error" {
+ keepField = false
+ }
+ }
+ if keepField {
+ list[j] = field
+ j++
+ }
+ }
+ if j < len(list) {
+ ityp.Incomplete = true
+ }
+ ityp.Methods.List = list[0:j]
+}
+
+// filterFieldList removes unexported fields (field names) from the field list
+// in place and returns true if fields were removed. Anonymous fields are
+// recorded with the parent type. filterType is called with the types of
+// all remaining fields.
+//
+func (r *reader) filterFieldList(parent *namedType, fields *ast.FieldList, ityp *ast.InterfaceType) (removedFields bool) {
+ if fields == nil {
+ return
+ }
+ list := fields.List
+ j := 0
+ for _, field := range list {
+ keepField := false
+ if n := len(field.Names); n == 0 {
+ // anonymous field
+ fname := r.recordAnonymousField(parent, field.Type)
+ if ast.IsExported(fname) {
+ keepField = true
+ } else if ityp != nil && fname == "error" {
+ // possibly the predeclared error interface; keep
+ // it for now but remember this interface so that
+ // it can be fixed if error is also defined locally
+ keepField = true
+ r.remember(ityp)
+ }
+ } else {
+ field.Names = filterIdentList(field.Names)
+ if len(field.Names) < n {
+ removedFields = true
+ }
+ if len(field.Names) > 0 {
+ keepField = true
+ }
+ }
+ if keepField {
+ r.filterType(nil, field.Type)
+ list[j] = field
+ j++
+ }
+ }
+ if j < len(list) {
+ removedFields = true
+ }
+ fields.List = list[0:j]
+ return
+}
+
+// filterParamList applies filterType to each parameter type in fields.
+//
+func (r *reader) filterParamList(fields *ast.FieldList) {
+ if fields != nil {
+ for _, f := range fields.List {
+ r.filterType(nil, f.Type)
+ }
+ }
+}
+
+// filterType strips any unexported struct fields or method types from typ
+// in place. If fields (or methods) have been removed, the corresponding
+// struct or interface type has the Incomplete field set to true.
+//
+func (r *reader) filterType(parent *namedType, typ ast.Expr) {
+ switch t := typ.(type) {
+ case *ast.Ident:
+ // nothing to do
+ case *ast.ParenExpr:
+ r.filterType(nil, t.X)
+ case *ast.ArrayType:
+ r.filterType(nil, t.Elt)
+ case *ast.StructType:
+ if r.filterFieldList(parent, t.Fields, nil) {
+ t.Incomplete = true
+ }
+ case *ast.FuncType:
+ r.filterParamList(t.Params)
+ r.filterParamList(t.Results)
+ case *ast.InterfaceType:
+ if r.filterFieldList(parent, t.Methods, t) {
+ t.Incomplete = true
+ }
+ case *ast.MapType:
+ r.filterType(nil, t.Key)
+ r.filterType(nil, t.Value)
+ case *ast.ChanType:
+ r.filterType(nil, t.Value)
+ }
+}
+
+func (r *reader) filterSpec(spec ast.Spec) bool {
+ switch s := spec.(type) {
+ case *ast.ImportSpec:
+ // always keep imports so we can collect them
+ return true
+ case *ast.ValueSpec:
+ s.Names = filterIdentList(s.Names)
+ if len(s.Names) > 0 {
+ r.filterType(nil, s.Type)
+ return true
+ }
+ case *ast.TypeSpec:
+ if name := s.Name.Name; ast.IsExported(name) {
+ r.filterType(r.lookupType(s.Name.Name), s.Type)
+ return true
+ } else if name == "error" {
+ // special case: remember that error is declared locally
+ r.errorDecl = true
+ }
+ }
+ return false
+}
+
+func (r *reader) filterSpecList(list []ast.Spec) []ast.Spec {
+ j := 0
+ for _, s := range list {
+ if r.filterSpec(s) {
+ list[j] = s
+ j++
+ }
+ }
+ return list[0:j]
+}
+
+func (r *reader) filterDecl(decl ast.Decl) bool {
+ switch d := decl.(type) {
+ case *ast.GenDecl:
+ d.Specs = r.filterSpecList(d.Specs)
+ return len(d.Specs) > 0
+ case *ast.FuncDecl:
+ // ok to filter these methods early because any
+ // conflicting method will be filtered here, too -
+ // thus, removing these methods early will not lead
+ // to the false removal of possible conflicts
+ return ast.IsExported(d.Name.Name)
+ }
+ return false
+}
+
+// fileExports removes unexported declarations from src in place.
+//
+func (r *reader) fileExports(src *ast.File) {
+ j := 0
+ for _, d := range src.Decls {
+ if r.filterDecl(d) {
+ src.Decls[j] = d
+ j++
+ }
+ }
+ src.Decls = src.Decls[0:j]
+}
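filterIdentList, filterSpecList, and fileExports above all share one in-place compaction idiom: copy keepers forward with a write index, then truncate. A standalone sketch of that idiom (keepIf is an illustrative helper; the uppercase test is a crude ASCII stand-in for ast.IsExported):

	package main

	import "fmt"

	// keepIf filters s in place, keeping elements for which keep
	// returns true; order is preserved and the backing array reused.
	func keepIf(s []string, keep func(string) bool) []string {
		j := 0
		for _, x := range s {
			if keep(x) {
				s[j] = x
				j++
			}
		}
		return s[:j]
	}

	func main() {
		names := []string{"Exported", "unexported", "Also"}
		exported := keepIf(names, func(n string) bool {
			return n != "" && n[0] >= 'A' && n[0] <= 'Z'
		})
		fmt.Println(exported) // [Exported Also]
	}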
diff --git a/src/pkg/go/doc/filter.go b/src/pkg/go/doc/filter.go
new file mode 100644
index 000000000..02b66ccef
--- /dev/null
+++ b/src/pkg/go/doc/filter.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import "go/ast"
+
+type Filter func(string) bool
+
+func matchFields(fields *ast.FieldList, f Filter) bool {
+ if fields != nil {
+ for _, field := range fields.List {
+ for _, name := range field.Names {
+ if f(name.Name) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func matchDecl(d *ast.GenDecl, f Filter) bool {
+ for _, d := range d.Specs {
+ switch v := d.(type) {
+ case *ast.ValueSpec:
+ for _, name := range v.Names {
+ if f(name.Name) {
+ return true
+ }
+ }
+ case *ast.TypeSpec:
+ if f(v.Name.Name) {
+ return true
+ }
+ switch t := v.Type.(type) {
+ case *ast.StructType:
+ if matchFields(t.Fields, f) {
+ return true
+ }
+ case *ast.InterfaceType:
+ if matchFields(t.Methods, f) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func filterValues(a []*Value, f Filter) []*Value {
+ w := 0
+ for _, vd := range a {
+ if matchDecl(vd.Decl, f) {
+ a[w] = vd
+ w++
+ }
+ }
+ return a[0:w]
+}
+
+func filterFuncs(a []*Func, f Filter) []*Func {
+ w := 0
+ for _, fd := range a {
+ if f(fd.Name) {
+ a[w] = fd
+ w++
+ }
+ }
+ return a[0:w]
+}
+
+func filterTypes(a []*Type, f Filter) []*Type {
+ w := 0
+ for _, td := range a {
+ n := 0 // number of matches
+ if matchDecl(td.Decl, f) {
+ n = 1
+ } else {
+ // type name doesn't match, but we may have matching consts, vars, factories or methods
+ td.Consts = filterValues(td.Consts, f)
+ td.Vars = filterValues(td.Vars, f)
+ td.Funcs = filterFuncs(td.Funcs, f)
+ td.Methods = filterFuncs(td.Methods, f)
+ n += len(td.Consts) + len(td.Vars) + len(td.Funcs) + len(td.Methods)
+ }
+ if n > 0 {
+ a[w] = td
+ w++
+ }
+ }
+ return a[0:w]
+}
+
+// Filter eliminates documentation for names that don't pass through the filter f.
+// TODO: Recognize "Type.Method" as a name.
+//
+func (p *Package) Filter(f Filter) {
+ p.Consts = filterValues(p.Consts, f)
+ p.Vars = filterValues(p.Vars, f)
+ p.Types = filterTypes(p.Types, f)
+ p.Funcs = filterFuncs(p.Funcs, f)
+ p.Doc = "" // don't show top-level package doc
+}
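A brief usage sketch for Filter ("./mypkg" and the predicate are invented): it prunes consts, vars, types, and funcs in place and, as noted in its body, clears the top-level package doc string.

	package main

	import (
		"fmt"
		"go/doc"
		"go/parser"
		"go/token"
		"strings"
	)

	func main() {
		fset := token.NewFileSet()
		pkgs, err := parser.ParseDir(fset, "./mypkg", nil, parser.ParseComments)
		if err != nil {
			panic(err)
		}
		for _, pkg := range pkgs {
			p := doc.New(pkg, "example.com/mypkg", 0)
			// Keep only documentation whose declared names mention "Read".
			p.Filter(func(name string) bool { return strings.Contains(name, "Read") })
			for _, f := range p.Funcs {
				fmt.Println("func", f.Name)
			}
		}
	}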
diff --git a/src/pkg/go/doc/headscan.go b/src/pkg/go/doc/headscan.go
new file mode 100644
index 000000000..f55934763
--- /dev/null
+++ b/src/pkg/go/doc/headscan.go
@@ -0,0 +1,113 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+ The headscan command extracts comment headings from package files;
+ it is used to detect false positives which may require an adjustment
+ to the comment formatting heuristics in comment.go.
+
+ Usage: headscan [-root root_directory]
+
+ By default, the $GOROOT/src directory is scanned.
+*/
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/doc"
+ "go/parser"
+ "go/token"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+var (
+ root = flag.String("root", filepath.Join(runtime.GOROOT(), "src"), "root of filesystem tree to scan")
+ verbose = flag.Bool("v", false, "verbose mode")
+)
+
+const (
+ html_h = "<h3>"
+ html_endh = "</h3>\n"
+)
+
+func isGoFile(fi os.FileInfo) bool {
+ return strings.HasSuffix(fi.Name(), ".go") &&
+ !strings.HasSuffix(fi.Name(), "_test.go")
+}
+
+func appendHeadings(list []string, comment string) []string {
+ var buf bytes.Buffer
+ doc.ToHTML(&buf, comment, nil)
+ for s := buf.String(); ; {
+ i := strings.Index(s, html_h)
+ if i < 0 {
+ break
+ }
+ i += len(html_h)
+ j := strings.Index(s, html_endh)
+ if j < 0 {
+ list = append(list, s[i:]) // incorrect HTML
+ break
+ }
+ list = append(list, s[i:j])
+ s = s[j+len(html_endh):]
+ }
+ return list
+}
+
+func main() {
+ flag.Parse()
+ fset := token.NewFileSet()
+ nheadings := 0
+ err := filepath.Walk(*root, func(path string, fi os.FileInfo, err error) error {
+ if !fi.IsDir() {
+ return nil
+ }
+ pkgs, err := parser.ParseDir(fset, path, isGoFile, parser.ParseComments)
+ if err != nil {
+ if *verbose {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ return nil
+ }
+ for _, pkg := range pkgs {
+ d := doc.New(pkg, path, doc.Mode(0))
+ list := appendHeadings(nil, d.Doc)
+ for _, d := range d.Consts {
+ list = appendHeadings(list, d.Doc)
+ }
+ for _, d := range d.Types {
+ list = appendHeadings(list, d.Doc)
+ }
+ for _, d := range d.Vars {
+ list = appendHeadings(list, d.Doc)
+ }
+ for _, d := range d.Funcs {
+ list = appendHeadings(list, d.Doc)
+ }
+ if len(list) > 0 {
+ // directories may contain multiple packages;
+ // print path and package name
+ fmt.Printf("%s (package %s)\n", path, pkg.Name)
+ for _, h := range list {
+ fmt.Printf("\t%s\n", h)
+ }
+ nheadings += len(list)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ fmt.Println(nheadings, "headings found")
+}
diff --git a/src/pkg/go/doc/reader.go b/src/pkg/go/doc/reader.go
new file mode 100644
index 000000000..5eaae37b7
--- /dev/null
+++ b/src/pkg/go/doc/reader.go
@@ -0,0 +1,774 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import (
+ "go/ast"
+ "go/token"
+ "regexp"
+ "sort"
+ "strconv"
+)
+
+// ----------------------------------------------------------------------------
+// function/method sets
+//
+// Internally, we treat functions like methods and collect them in method sets.
+
+// A methodSet describes a set of methods. Entries where Decl == nil are conflict
+// entries (more than one method with the same name at the same embedding level).
+//
+type methodSet map[string]*Func
+
+// recvString returns a string representation of recv of the
+// form "T", "*T", or "BADRECV" (if not a proper receiver type).
+//
+func recvString(recv ast.Expr) string {
+ switch t := recv.(type) {
+ case *ast.Ident:
+ return t.Name
+ case *ast.StarExpr:
+ return "*" + recvString(t.X)
+ }
+ return "BADRECV"
+}
+
+// set creates the corresponding Func for f and adds it to mset.
+// If there are multiple f's with the same name, set keeps the first
+// one with documentation; conflicts are ignored.
+//
+func (mset methodSet) set(f *ast.FuncDecl) {
+ name := f.Name.Name
+ if g := mset[name]; g != nil && g.Doc != "" {
+ // A function with the same name has already been registered;
+ // since it has documentation, assume f is simply another
+ // implementation and ignore it. This does not happen if the
+ // caller is using go/build.ScanDir to determine the list of
+ // files implementing a package.
+ return
+ }
+ // function doesn't exist or has no documentation; use f
+ recv := ""
+ if f.Recv != nil {
+ var typ ast.Expr
+ // be careful in case of incorrect ASTs
+ if list := f.Recv.List; len(list) == 1 {
+ typ = list[0].Type
+ }
+ recv = recvString(typ)
+ }
+ mset[name] = &Func{
+ Doc: f.Doc.Text(),
+ Name: name,
+ Decl: f,
+ Recv: recv,
+ Orig: recv,
+ }
+ f.Doc = nil // doc consumed - remove from AST
+}
+
+// add adds method m to the method set; m is ignored if the method set
+// already contains a method with the same name at the same or a higher
+// level then m.
+//
+func (mset methodSet) add(m *Func) {
+ old := mset[m.Name]
+ if old == nil || m.Level < old.Level {
+ mset[m.Name] = m
+ return
+ }
+ if old != nil && m.Level == old.Level {
+ // conflict - mark it using a method with nil Decl
+ mset[m.Name] = &Func{
+ Name: m.Name,
+ Level: m.Level,
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Named types
+
+// baseTypeName returns the name of the base type of x (or "")
+// and whether the type is imported or not.
+//
+func baseTypeName(x ast.Expr) (name string, imported bool) {
+ switch t := x.(type) {
+ case *ast.Ident:
+ return t.Name, false
+ case *ast.SelectorExpr:
+ if _, ok := t.X.(*ast.Ident); ok {
+ // only possible for qualified type names;
+ // assume type is imported
+ return t.Sel.Name, true
+ }
+ case *ast.StarExpr:
+ return baseTypeName(t.X)
+ }
+ return
+}
+
+// An embeddedSet describes a set of embedded types.
+type embeddedSet map[*namedType]bool
+
+// A namedType represents a named unqualified (package local, or possibly
+// predeclared) type. The namedType for a type name is always found via
+// reader.lookupType.
+//
+type namedType struct {
+ doc string // doc comment for type
+ name string // type name
+ decl *ast.GenDecl // nil if declaration hasn't been seen yet
+
+ isEmbedded bool // true if this type is embedded
+ isStruct bool // true if this type is a struct
+ embedded embeddedSet // set of embedded types; the map value records whether the type is embedded via a pointer
+
+ // associated declarations
+ values []*Value // consts and vars
+ funcs methodSet
+ methods methodSet
+}
+
+// ----------------------------------------------------------------------------
+// AST reader
+
+// reader accumulates documentation for a single package.
+// It modifies the AST: Comments (declaration documentation)
+// that have been collected by the reader are set to nil
+// in the respective AST nodes so that they are not printed
+// twice (once when printing the documentation and once when
+// printing the corresponding AST node).
+//
+type reader struct {
+ mode Mode
+
+ // package properties
+ doc string // package documentation, if any
+ filenames []string
+ bugs []string
+
+ // declarations
+ imports map[string]int
+ values []*Value // consts and vars
+ types map[string]*namedType
+ funcs methodSet
+
+ // support for package-local error type declarations
+ errorDecl bool // if set, type "error" was declared locally
+ fixlist []*ast.InterfaceType // list of interfaces containing anonymous field "error"
+}
+
+func (r *reader) isVisible(name string) bool {
+ return r.mode&AllDecls != 0 || ast.IsExported(name)
+}
+
+// lookupType returns the base type with the given name.
+// If the base type has not been encountered yet, a new
+// type with the given name but no associated declaration
+// is added to the type map.
+//
+func (r *reader) lookupType(name string) *namedType {
+ if name == "" || name == "_" {
+ return nil // no type docs for anonymous types
+ }
+ if typ, found := r.types[name]; found {
+ return typ
+ }
+ // type not found - add one without declaration
+ typ := &namedType{
+ name: name,
+ embedded: make(embeddedSet),
+ funcs: make(methodSet),
+ methods: make(methodSet),
+ }
+ r.types[name] = typ
+ return typ
+}
+
+// recordAnonymousField registers fieldType as the type of an
+// anonymous field in the parent type. If the field is imported
+// (qualified name) or the parent is nil, the field is ignored.
+// The function returns the field name.
+//
+func (r *reader) recordAnonymousField(parent *namedType, fieldType ast.Expr) (fname string) {
+ fname, imp := baseTypeName(fieldType)
+ if parent == nil || imp {
+ return
+ }
+ if ftype := r.lookupType(fname); ftype != nil {
+ ftype.isEmbedded = true
+ _, ptr := fieldType.(*ast.StarExpr)
+ parent.embedded[ftype] = ptr
+ }
+ return
+}
+
+func (r *reader) readDoc(comment *ast.CommentGroup) {
+ // By convention there should be only one package comment
+	// but collect all of them if there is more than one.
+ text := comment.Text()
+ if r.doc == "" {
+ r.doc = text
+ return
+ }
+ r.doc += "\n" + text
+}
+
+func (r *reader) remember(typ *ast.InterfaceType) {
+ r.fixlist = append(r.fixlist, typ)
+}
+
+func specNames(specs []ast.Spec) []string {
+ names := make([]string, 0, len(specs)) // reasonable estimate
+ for _, s := range specs {
+ // s guaranteed to be an *ast.ValueSpec by readValue
+ for _, ident := range s.(*ast.ValueSpec).Names {
+ names = append(names, ident.Name)
+ }
+ }
+ return names
+}
+
+// readValue processes a const or var declaration.
+//
+func (r *reader) readValue(decl *ast.GenDecl) {
+ // determine if decl should be associated with a type
+ // Heuristic: For each typed entry, determine the type name, if any.
+ // If there is exactly one type name that is sufficiently
+ // frequent, associate the decl with the respective type.
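+	// Example: in "const ( A T = iota; B; C )" every entry has or
+	// inherits type T, so the group is associated with type T
+	// (assuming T is a visible package-local type).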
+ domName := ""
+ domFreq := 0
+ prev := ""
+ n := 0
+ for _, spec := range decl.Specs {
+ s, ok := spec.(*ast.ValueSpec)
+ if !ok {
+ continue // should not happen, but be conservative
+ }
+ name := ""
+ switch {
+ case s.Type != nil:
+ // a type is present; determine its name
+ if n, imp := baseTypeName(s.Type); !imp {
+ name = n
+ }
+ case decl.Tok == token.CONST:
+ // no type is present but we have a constant declaration;
+ // use the previous type name (w/o more type information
+ // we cannot handle the case of unnamed variables with
+ // initializer expressions except for some trivial cases)
+ name = prev
+ }
+ if name != "" {
+ // entry has a named type
+ if domName != "" && domName != name {
+ // more than one type name - do not associate
+ // with any type
+ domName = ""
+ break
+ }
+ domName = name
+ domFreq++
+ }
+ prev = name
+ n++
+ }
+
+ // nothing to do w/o a legal declaration
+ if n == 0 {
+ return
+ }
+
+ // determine values list with which to associate the Value for this decl
+ values := &r.values
+ const threshold = 0.75
+ if domName != "" && r.isVisible(domName) && domFreq >= int(float64(len(decl.Specs))*threshold) {
+ // typed entries are sufficiently frequent
+ if typ := r.lookupType(domName); typ != nil {
+ values = &typ.values // associate with that type
+ }
+ }
+
+ *values = append(*values, &Value{
+ Doc: decl.Doc.Text(),
+ Names: specNames(decl.Specs),
+ Decl: decl,
+ order: len(*values),
+ })
+ decl.Doc = nil // doc consumed - remove from AST
+}
+
+// fields returns a struct's fields or an interface's methods.
+//
+func fields(typ ast.Expr) (list []*ast.Field, isStruct bool) {
+ var fields *ast.FieldList
+ switch t := typ.(type) {
+ case *ast.StructType:
+ fields = t.Fields
+ isStruct = true
+ case *ast.InterfaceType:
+ fields = t.Methods
+ }
+ if fields != nil {
+ list = fields.List
+ }
+ return
+}
+
+// readType processes a type declaration.
+//
+func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) {
+ typ := r.lookupType(spec.Name.Name)
+ if typ == nil {
+ return // no name or blank name - ignore the type
+ }
+
+ // A type should be added at most once, so typ.decl
+ // should be nil - if it is not, simply overwrite it.
+ typ.decl = decl
+
+ // compute documentation
+ doc := spec.Doc
+ spec.Doc = nil // doc consumed - remove from AST
+ if doc == nil {
+ // no doc associated with the spec, use the declaration doc, if any
+ doc = decl.Doc
+ }
+ decl.Doc = nil // doc consumed - remove from AST
+ typ.doc = doc.Text()
+
+ // record anonymous fields (they may contribute methods)
+ // (some fields may have been recorded already when filtering
+ // exports, but that's ok)
+ var list []*ast.Field
+ list, typ.isStruct = fields(spec.Type)
+ for _, field := range list {
+ if len(field.Names) == 0 {
+ r.recordAnonymousField(typ, field.Type)
+ }
+ }
+}
+
+// readFunc processes a func or method declaration.
+//
+func (r *reader) readFunc(fun *ast.FuncDecl) {
+ // strip function body
+ fun.Body = nil
+
+ // associate methods with the receiver type, if any
+ if fun.Recv != nil {
+ // method
+ recvTypeName, imp := baseTypeName(fun.Recv.List[0].Type)
+ if imp {
+ // should not happen (incorrect AST);
+ // don't show this method
+ return
+ }
+ if typ := r.lookupType(recvTypeName); typ != nil {
+ typ.methods.set(fun)
+ }
+ // otherwise ignore the method
+ // TODO(gri): There may be exported methods of non-exported types
+ // that can be called because of exported values (consts, vars, or
+ // function results) of that type. Could determine if that is the
+ // case and then show those methods in an appropriate section.
+ return
+ }
+
+ // associate factory functions with the first visible result type, if any
+ if fun.Type.Results.NumFields() >= 1 {
+ res := fun.Type.Results.List[0]
+ if len(res.Names) <= 1 {
+ // exactly one (named or anonymous) result associated
+ // with the first type in result signature (there may
+ // be more than one result)
+ if n, imp := baseTypeName(res.Type); !imp && r.isVisible(n) {
+ if typ := r.lookupType(n); typ != nil {
+ // associate function with typ
+ typ.funcs.set(fun)
+ return
+ }
+ }
+ }
+ }
+
+ // just an ordinary function
+ r.funcs.set(fun)
+}
+
+var (
+	bug_markers = regexp.MustCompile(`^/[/*][ \t]*BUG\(.*\):[ \t]*`) // BUG(uid):
+	bug_content = regexp.MustCompile(`[^ \n\r\t]+`)                  // at least one non-whitespace char
+)
+
+// readFile adds the AST for a source file to the reader.
+//
+func (r *reader) readFile(src *ast.File) {
+ // add package documentation
+ if src.Doc != nil {
+ r.readDoc(src.Doc)
+ src.Doc = nil // doc consumed - remove from AST
+ }
+
+ // add all declarations
+ for _, decl := range src.Decls {
+ switch d := decl.(type) {
+ case *ast.GenDecl:
+ switch d.Tok {
+ case token.IMPORT:
+ // imports are handled individually
+ for _, spec := range d.Specs {
+ if s, ok := spec.(*ast.ImportSpec); ok {
+ if import_, err := strconv.Unquote(s.Path.Value); err == nil {
+ r.imports[import_] = 1
+ }
+ }
+ }
+ case token.CONST, token.VAR:
+ // constants and variables are always handled as a group
+ r.readValue(d)
+ case token.TYPE:
+ // types are handled individually
+ if len(d.Specs) == 1 && !d.Lparen.IsValid() {
+ // common case: single declaration w/o parentheses
+ // (if a single declaration is parenthesized,
+ // create a new fake declaration below, so that
+ // go/doc type declarations always appear w/o
+ // parentheses)
+ if s, ok := d.Specs[0].(*ast.TypeSpec); ok {
+ r.readType(d, s)
+ }
+ break
+ }
+ for _, spec := range d.Specs {
+ if s, ok := spec.(*ast.TypeSpec); ok {
+ // use an individual (possibly fake) declaration
+ // for each type; this also ensures that each type
+ // gets to (re-)use the declaration documentation
+ // if there's none associated with the spec itself
+ fake := &ast.GenDecl{
+ Doc: d.Doc,
+ // don't use the existing TokPos because it
+ // will lead to the wrong selection range for
+ // the fake declaration if there are more
+ // than one type in the group (this affects
+ // src/cmd/godoc/godoc.go's posLink_urlFunc)
+ TokPos: s.Pos(),
+ Tok: token.TYPE,
+ Specs: []ast.Spec{s},
+ }
+ r.readType(fake, s)
+ }
+ }
+ }
+ case *ast.FuncDecl:
+ r.readFunc(d)
+ }
+ }
+
+ // collect BUG(...) comments
+ for _, c := range src.Comments {
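+		// only the first comment of a group can introduce a BUG marker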
+ text := c.List[0].Text
+ if m := bug_markers.FindStringIndex(text); m != nil {
+ // found a BUG comment; maybe empty
+ if btxt := text[m[1]:]; bug_content.MatchString(btxt) {
+ // non-empty BUG comment; collect comment without BUG prefix
+ list := append([]*ast.Comment(nil), c.List...) // make a copy
+ list[0].Text = text[m[1]:]
+ r.bugs = append(r.bugs, (&ast.CommentGroup{List: list}).Text())
+ }
+ }
+ }
+ src.Comments = nil // consumed unassociated comments - remove from AST
+}
+
+func (r *reader) readPackage(pkg *ast.Package, mode Mode) {
+ // initialize reader
+ r.filenames = make([]string, len(pkg.Files))
+ r.imports = make(map[string]int)
+ r.mode = mode
+ r.types = make(map[string]*namedType)
+ r.funcs = make(methodSet)
+
+ // sort package files before reading them so that the
+	// result does not depend on map iteration order
+ i := 0
+ for filename := range pkg.Files {
+ r.filenames[i] = filename
+ i++
+ }
+ sort.Strings(r.filenames)
+
+ // process files in sorted order
+ for _, filename := range r.filenames {
+ f := pkg.Files[filename]
+ if mode&AllDecls == 0 {
+ r.fileExports(f)
+ }
+ r.readFile(f)
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+var predeclaredTypes = map[string]bool{
+ "bool": true,
+ "byte": true,
+ "complex64": true,
+ "complex128": true,
+ "error": true,
+ "float32": true,
+ "float64": true,
+ "int": true,
+ "int8": true,
+ "int16": true,
+ "int32": true,
+ "int64": true,
+ "rune": true,
+ "string": true,
+ "uint": true,
+ "uint8": true,
+ "uint16": true,
+ "uint32": true,
+ "uint64": true,
+ "uintptr": true,
+}
+
+func customizeRecv(f *Func, recvTypeName string, embeddedIsPtr bool, level int) *Func {
+ if f == nil || f.Decl == nil || f.Decl.Recv == nil || len(f.Decl.Recv.List) != 1 {
+ return f // shouldn't happen, but be safe
+ }
+
+ // copy existing receiver field and set new type
+ newField := *f.Decl.Recv.List[0]
+ _, origRecvIsPtr := newField.Type.(*ast.StarExpr)
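+	// Show a pointer receiver only if the original method had one and
+	// the type is not embedded via a pointer: embedding via a pointer
+	// promotes the method into the embedding type's value method set.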
+ var typ ast.Expr = ast.NewIdent(recvTypeName)
+ if !embeddedIsPtr && origRecvIsPtr {
+ typ = &ast.StarExpr{X: typ}
+ }
+ newField.Type = typ
+
+ // copy existing receiver field list and set new receiver field
+ newFieldList := *f.Decl.Recv
+ newFieldList.List = []*ast.Field{&newField}
+
+ // copy existing function declaration and set new receiver field list
+ newFuncDecl := *f.Decl
+ newFuncDecl.Recv = &newFieldList
+
+ // copy existing function documentation and set new declaration
+ newF := *f
+ newF.Decl = &newFuncDecl
+ newF.Recv = recvString(typ)
+ // the Orig field never changes
+ newF.Level = level
+
+ return &newF
+}
+
+// collectEmbeddedMethods collects the embedded methods of typ in mset.
+//
+func (r *reader) collectEmbeddedMethods(mset methodSet, typ *namedType, recvTypeName string, embeddedIsPtr bool, level int, visited embeddedSet) {
+ visited[typ] = true
+ for embedded, isPtr := range typ.embedded {
+ // Once an embedded type is embedded as a pointer type
+ // all embedded types in those types are treated like
+ // pointer types for the purpose of the receiver type
+ // computation; i.e., embeddedIsPtr is sticky for this
+ // embedding hierarchy.
+ thisEmbeddedIsPtr := embeddedIsPtr || isPtr
+ for _, m := range embedded.methods {
+ // only top-level methods are embedded
+ if m.Level == 0 {
+ mset.add(customizeRecv(m, recvTypeName, thisEmbeddedIsPtr, level))
+ }
+ }
+ if !visited[embedded] {
+ r.collectEmbeddedMethods(mset, embedded, recvTypeName, thisEmbeddedIsPtr, level+1, visited)
+ }
+ }
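+	// Unmark typ: visited tracks only the current embedding path, so
+	// cycles (e.g. type U1 struct{ *U1 }) terminate while other
+	// embedding paths may still reach typ.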
+ delete(visited, typ)
+}
+
+// computeMethodSets determines the actual method sets for each type encountered.
+//
+func (r *reader) computeMethodSets() {
+ for _, t := range r.types {
+ // collect embedded methods for t
+ if t.isStruct {
+ // struct
+ r.collectEmbeddedMethods(t.methods, t, t.name, false, 1, make(embeddedSet))
+ } else {
+ // interface
+ // TODO(gri) fix this
+ }
+ }
+
+	// if error was declared locally, don't treat it as an exported field anymore
+ if r.errorDecl {
+ for _, ityp := range r.fixlist {
+ removeErrorField(ityp)
+ }
+ }
+}
+
+// cleanupTypes removes the association of functions and methods with
+// types that have no declaration. Instead, these functions and methods
+// are shown at the package level. It also removes types with missing
+// declarations or which are not visible.
+//
+func (r *reader) cleanupTypes() {
+ for _, t := range r.types {
+ visible := r.isVisible(t.name)
+ if t.decl == nil && (predeclaredTypes[t.name] || t.isEmbedded && visible) {
+ // t.name is a predeclared type (and was not redeclared in this package),
+ // or it was embedded somewhere but its declaration is missing (because
+ // the AST is incomplete): move any associated values, funcs, and methods
+ // back to the top-level so that they are not lost.
+ // 1) move values
+ r.values = append(r.values, t.values...)
+ // 2) move factory functions
+ for name, f := range t.funcs {
+ // in a correct AST, package-level function names
+ // are all different - no need to check for conflicts
+ r.funcs[name] = f
+ }
+ // 3) move methods
+ for name, m := range t.methods {
+ // don't overwrite functions with the same name - drop them
+ if _, found := r.funcs[name]; !found {
+ r.funcs[name] = m
+ }
+ }
+ }
+ // remove types w/o declaration or which are not visible
+ if t.decl == nil || !visible {
+ delete(r.types, t.name)
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Sorting
+
+type data struct {
+ n int
+ swap func(i, j int)
+ less func(i, j int) bool
+}
+
+func (d *data) Len() int { return d.n }
+func (d *data) Swap(i, j int) { d.swap(i, j) }
+func (d *data) Less(i, j int) bool { return d.less(i, j) }
+
+// sortBy sorts n elements using the given less and swap functions.
+func sortBy(less func(i, j int) bool, swap func(i, j int), n int) {
+ sort.Sort(&data{n, swap, less})
+}
+
+func sortedKeys(m map[string]int) []string {
+ list := make([]string, len(m))
+ i := 0
+ for key := range m {
+ list[i] = key
+ i++
+ }
+ sort.Strings(list)
+ return list
+}
+
+// sortingName returns the name to use when sorting d into place.
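+// Grouped declarations return "" and thus sort before all single
+// declarations while keeping their original relative order.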
+//
+func sortingName(d *ast.GenDecl) string {
+ if len(d.Specs) == 1 {
+ if s, ok := d.Specs[0].(*ast.ValueSpec); ok {
+ return s.Names[0].Name
+ }
+ }
+ return ""
+}
+
+func sortedValues(m []*Value, tok token.Token) []*Value {
+ list := make([]*Value, len(m)) // big enough in any case
+ i := 0
+ for _, val := range m {
+ if val.Decl.Tok == tok {
+ list[i] = val
+ i++
+ }
+ }
+ list = list[0:i]
+
+ sortBy(
+ func(i, j int) bool {
+ if ni, nj := sortingName(list[i].Decl), sortingName(list[j].Decl); ni != nj {
+ return ni < nj
+ }
+ return list[i].order < list[j].order
+ },
+ func(i, j int) { list[i], list[j] = list[j], list[i] },
+ len(list),
+ )
+
+ return list
+}
+
+func sortedTypes(m map[string]*namedType, allMethods bool) []*Type {
+ list := make([]*Type, len(m))
+ i := 0
+ for _, t := range m {
+ list[i] = &Type{
+ Doc: t.doc,
+ Name: t.name,
+ Decl: t.decl,
+ Consts: sortedValues(t.values, token.CONST),
+ Vars: sortedValues(t.values, token.VAR),
+ Funcs: sortedFuncs(t.funcs, true),
+ Methods: sortedFuncs(t.methods, allMethods),
+ }
+ i++
+ }
+
+ sortBy(
+ func(i, j int) bool { return list[i].Name < list[j].Name },
+ func(i, j int) { list[i], list[j] = list[j], list[i] },
+ len(list),
+ )
+
+ return list
+}
+
+func removeStar(s string) string {
+ if len(s) > 0 && s[0] == '*' {
+ return s[1:]
+ }
+ return s
+}
+
+func sortedFuncs(m methodSet, allMethods bool) []*Func {
+ list := make([]*Func, len(m))
+ i := 0
+ for _, m := range m {
+ // determine which methods to include
+ switch {
+ case m.Decl == nil:
+ // exclude conflict entry
+ case allMethods, m.Level == 0, !ast.IsExported(removeStar(m.Orig)):
+ // forced inclusion, method not embedded, or method
+ // embedded but original receiver type not exported
+ list[i] = m
+ i++
+ }
+ }
+ list = list[0:i]
+ sortBy(
+ func(i, j int) bool { return list[i].Name < list[j].Name },
+ func(i, j int) { list[i], list[j] = list[j], list[i] },
+ len(list),
+ )
+ return list
+}
diff --git a/src/pkg/go/doc/synopsis.go b/src/pkg/go/doc/synopsis.go
new file mode 100644
index 000000000..2192d78c0
--- /dev/null
+++ b/src/pkg/go/doc/synopsis.go
@@ -0,0 +1,52 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import "unicode"
+
+// firstSentenceLen returns the length of the first sentence in s.
+// The sentence ends after the first period followed by space and
+// not preceded by exactly one uppercase letter.
+//
+func firstSentenceLen(s string) int {
+ var ppp, pp, p rune
+ for i, q := range s {
+ if q == '\n' || q == '\r' || q == '\t' {
+ q = ' '
+ }
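+		// A period followed by a space ends the sentence unless the
+		// period is preceded by exactly one uppercase letter (an
+		// initial, as in "T. S. Eliot"): pp is the rune before the
+		// period, ppp the rune before that.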
+ if q == ' ' && p == '.' && (!unicode.IsUpper(pp) || unicode.IsUpper(ppp)) {
+ return i
+ }
+ ppp, pp, p = pp, p, q
+ }
+ return len(s)
+}
+
+// Synopsis returns a cleaned version of the first sentence in s.
+// That sentence ends after the first period followed by a space and
+// not preceded by exactly one uppercase letter. The result string
+// has no \n, \r, or \t characters and uses only single spaces between
+// words.
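+// For example, Synopsis("A sentence. And more.") returns "A sentence.".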
+//
+func Synopsis(s string) string {
+ n := firstSentenceLen(s)
+ var b []byte
+ p := byte(' ')
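+	// p holds the previously written byte; seeding it with ' ' drops
+	// leading blanks and collapses runs of whitespace to one space.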
+ for i := 0; i < n; i++ {
+ q := s[i]
+ if q == '\n' || q == '\r' || q == '\t' {
+ q = ' '
+ }
+ if q != ' ' || p != ' ' {
+ b = append(b, q)
+ p = q
+ }
+ }
+ // remove trailing blank, if any
+ if n := len(b); n > 0 && p == ' ' {
+ b = b[0 : n-1]
+ }
+ return string(b)
+}
diff --git a/src/pkg/go/doc/synopsis_test.go b/src/pkg/go/doc/synopsis_test.go
new file mode 100644
index 000000000..dfc6598af
--- /dev/null
+++ b/src/pkg/go/doc/synopsis_test.go
@@ -0,0 +1,44 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import "testing"
+
+var tests = []struct {
+ txt string
+ fsl int
+ syn string
+}{
+ {"", 0, ""},
+ {"foo", 3, "foo"},
+ {"foo.", 4, "foo."},
+ {"foo.bar", 7, "foo.bar"},
+ {" foo. ", 6, "foo."},
+ {" foo\t bar.\n", 12, "foo bar."},
+ {" foo\t bar.\n", 12, "foo bar."},
+ {"a b\n\nc\r\rd\t\t", 12, "a b c d"},
+ {"a b\n\nc\r\rd\t\t . BLA", 15, "a b c d ."},
+ {"Package poems by T.S.Eliot. To rhyme...", 27, "Package poems by T.S.Eliot."},
+ {"Package poems by T. S. Eliot. To rhyme...", 29, "Package poems by T. S. Eliot."},
+ {"foo implements the foo ABI. The foo ABI is...", 27, "foo implements the foo ABI."},
+ {"Package\nfoo. ..", 12, "Package foo."},
+ {"P . Q.", 3, "P ."},
+ {"P. Q. ", 8, "P. Q."},
+ {"Package Καλημέρα κόσμε.", 36, "Package Καλημέρα κόσμε."},
+ {"Package こんにちは 世界\n", 31, "Package こんにちは 世界"},
+}
+
+func TestSynopsis(t *testing.T) {
+ for _, e := range tests {
+ fsl := firstSentenceLen(e.txt)
+ if fsl != e.fsl {
+ t.Errorf("got fsl = %d; want %d for %q\n", fsl, e.fsl, e.txt)
+ }
+ syn := Synopsis(e.txt)
+ if syn != e.syn {
+ t.Errorf("got syn = %q; want %q for %q\n", syn, e.syn, e.txt)
+ }
+ }
+}
diff --git a/src/pkg/go/doc/testdata/a.0.golden b/src/pkg/go/doc/testdata/a.0.golden
new file mode 100644
index 000000000..24db02d34
--- /dev/null
+++ b/src/pkg/go/doc/testdata/a.0.golden
@@ -0,0 +1,13 @@
+// comment 0 comment 1
+PACKAGE a
+
+IMPORTPATH
+ testdata/a
+
+FILENAMES
+ testdata/a0.go
+ testdata/a1.go
+
+BUGS
+ // bug0
+ // bug1
diff --git a/src/pkg/go/doc/testdata/a.1.golden b/src/pkg/go/doc/testdata/a.1.golden
new file mode 100644
index 000000000..24db02d34
--- /dev/null
+++ b/src/pkg/go/doc/testdata/a.1.golden
@@ -0,0 +1,13 @@
+// comment 0 comment 1
+PACKAGE a
+
+IMPORTPATH
+ testdata/a
+
+FILENAMES
+ testdata/a0.go
+ testdata/a1.go
+
+BUGS
+ // bug0
+ // bug1
diff --git a/src/pkg/go/doc/testdata/a.2.golden b/src/pkg/go/doc/testdata/a.2.golden
new file mode 100644
index 000000000..24db02d34
--- /dev/null
+++ b/src/pkg/go/doc/testdata/a.2.golden
@@ -0,0 +1,13 @@
+// comment 0 comment 1
+PACKAGE a
+
+IMPORTPATH
+ testdata/a
+
+FILENAMES
+ testdata/a0.go
+ testdata/a1.go
+
+BUGS
+ // bug0
+ // bug1
diff --git a/src/pkg/go/build/cgotest/cgotest.h b/src/pkg/go/doc/testdata/a0.go
index 9c73643b6..dc552989e 100644
--- a/src/pkg/go/build/cgotest/cgotest.h
+++ b/src/pkg/go/doc/testdata/a0.go
@@ -1,5 +1,8 @@
-// Copyright 2011 The Go Authors. All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-extern int Add(int, int, int *);
+// comment 0
+package a
+
+//BUG(uid): bug0
diff --git a/src/pkg/go/build/cgotest/cgotest.c b/src/pkg/go/doc/testdata/a1.go
index b13acb227..098776c1b 100644
--- a/src/pkg/go/build/cgotest/cgotest.c
+++ b/src/pkg/go/doc/testdata/a1.go
@@ -1,9 +1,8 @@
-// Copyright 2011 The Go Authors. All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-int
-Add(int x, int y, int *sum)
-{
- sum = x+y;
-}
+// comment 1
+package a
+
+//BUG(uid): bug1
diff --git a/src/pkg/go/doc/testdata/b.0.golden b/src/pkg/go/doc/testdata/b.0.golden
new file mode 100644
index 000000000..9d93392ea
--- /dev/null
+++ b/src/pkg/go/doc/testdata/b.0.golden
@@ -0,0 +1,71 @@
+//
+PACKAGE b
+
+IMPORTPATH
+ testdata/b
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/b.go
+
+CONSTANTS
+ //
+ const (
+ C1 notExported = iota
+ C2
+
+ C4
+ C5
+ )
+
+ //
+ const C notExported = 0
+
+ //
+ const Pi = 3.14 // Pi
+
+
+VARIABLES
+ //
+ var (
+ U1, U2, U4, U5 notExported
+
+ U7 notExported = 7
+ )
+
+ //
+ var MaxInt int // MaxInt
+
+ //
+ var V notExported
+
+ //
+ var V1, V2, V4, V5 notExported
+
+
+FUNCTIONS
+ //
+ func F(x int) int
+
+ //
+ func F1() notExported
+
+ // Always under the package functions list.
+ func NotAFactory() int
+
+ // Associated with uint type if AllDecls is set.
+ func UintFactory() uint
+
+
+TYPES
+ //
+ type T struct{} // T
+
+ //
+ var V T // v
+
+ //
+ func (x *T) M()
+
diff --git a/src/pkg/go/doc/testdata/b.1.golden b/src/pkg/go/doc/testdata/b.1.golden
new file mode 100644
index 000000000..66c47b5c2
--- /dev/null
+++ b/src/pkg/go/doc/testdata/b.1.golden
@@ -0,0 +1,83 @@
+//
+PACKAGE b
+
+IMPORTPATH
+ testdata/b
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/b.go
+
+CONSTANTS
+ //
+ const Pi = 3.14 // Pi
+
+
+VARIABLES
+ //
+ var MaxInt int // MaxInt
+
+
+FUNCTIONS
+ //
+ func F(x int) int
+
+ // Always under the package functions list.
+ func NotAFactory() int
+
+
+TYPES
+ //
+ type T struct{} // T
+
+ //
+ var V T // v
+
+ //
+ func (x *T) M()
+
+ //
+ type notExported int
+
+ //
+ const (
+ C1 notExported = iota
+ C2
+ c3
+ C4
+ C5
+ )
+
+ //
+ const C notExported = 0
+
+ //
+ var (
+ U1, U2, u3, U4, U5 notExported
+ u6 notExported
+ U7 notExported = 7
+ )
+
+ //
+ var V notExported
+
+ //
+ var V1, V2, v3, V4, V5 notExported
+
+ //
+ func F1() notExported
+
+ //
+ func f2() notExported
+
+ // Should only appear if AllDecls is set.
+ type uint struct{} // overrides a predeclared type uint
+
+ // Associated with uint type if AllDecls is set.
+ func UintFactory() uint
+
+ // Associated with uint type if AllDecls is set.
+ func uintFactory() uint
+
diff --git a/src/pkg/go/doc/testdata/b.2.golden b/src/pkg/go/doc/testdata/b.2.golden
new file mode 100644
index 000000000..9d93392ea
--- /dev/null
+++ b/src/pkg/go/doc/testdata/b.2.golden
@@ -0,0 +1,71 @@
+//
+PACKAGE b
+
+IMPORTPATH
+ testdata/b
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/b.go
+
+CONSTANTS
+ //
+ const (
+ C1 notExported = iota
+ C2
+
+ C4
+ C5
+ )
+
+ //
+ const C notExported = 0
+
+ //
+ const Pi = 3.14 // Pi
+
+
+VARIABLES
+ //
+ var (
+ U1, U2, U4, U5 notExported
+
+ U7 notExported = 7
+ )
+
+ //
+ var MaxInt int // MaxInt
+
+ //
+ var V notExported
+
+ //
+ var V1, V2, V4, V5 notExported
+
+
+FUNCTIONS
+ //
+ func F(x int) int
+
+ //
+ func F1() notExported
+
+ // Always under the package functions list.
+ func NotAFactory() int
+
+ // Associated with uint type if AllDecls is set.
+ func UintFactory() uint
+
+
+TYPES
+ //
+ type T struct{} // T
+
+ //
+ var V T // v
+
+ //
+ func (x *T) M()
+
diff --git a/src/pkg/go/doc/testdata/b.go b/src/pkg/go/doc/testdata/b.go
new file mode 100644
index 000000000..e50663b3d
--- /dev/null
+++ b/src/pkg/go/doc/testdata/b.go
@@ -0,0 +1,58 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "a"
+
+// ----------------------------------------------------------------------------
+// Basic declarations
+
+const Pi = 3.14 // Pi
+var MaxInt int // MaxInt
+type T struct{} // T
+var V T // v
+func F(x int) int {} // F
+func (x *T) M() {} // M
+
+// Corner cases: association with (presumed) predeclared types
+
+// Always under the package functions list.
+func NotAFactory() int {}
+
+// Associated with uint type if AllDecls is set.
+func UintFactory() uint {}
+
+// Associated with uint type if AllDecls is set.
+func uintFactory() uint {}
+
+// Should only appear if AllDecls is set.
+type uint struct{} // overrides a predeclared type uint
+
+// ----------------------------------------------------------------------------
+// Exported declarations associated with non-exported types must always be shown.
+
+type notExported int
+
+const C notExported = 0
+
+const (
+ C1 notExported = iota
+ C2
+ c3
+ C4
+ C5
+)
+
+var V notExported
+var V1, V2, v3, V4, V5 notExported
+
+var (
+ U1, U2, u3, U4, U5 notExported
+ u6 notExported
+ U7 notExported = 7
+)
+
+func F1() notExported {}
+func f2() notExported {}
diff --git a/src/pkg/go/doc/testdata/benchmark.go b/src/pkg/go/doc/testdata/benchmark.go
new file mode 100644
index 000000000..0aded5bb4
--- /dev/null
+++ b/src/pkg/go/doc/testdata/benchmark.go
@@ -0,0 +1,293 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "runtime"
+ "time"
+)
+
+var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
+var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
+
+// An internal type but exported because it is cross-package; part of the implementation
+// of go test.
+type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+}
+
+// B is a type passed to Benchmark functions to manage benchmark
+// timing and to specify the number of iterations to run.
+type B struct {
+ common
+ N int
+ benchmark InternalBenchmark
+ bytes int64
+ timerOn bool
+ result BenchmarkResult
+}
+
+// StartTimer starts timing a test. This function is called automatically
+// before a benchmark starts, but it can also be used to resume timing after
+// a call to StopTimer.
+func (b *B) StartTimer() {
+ if !b.timerOn {
+ b.start = time.Now()
+ b.timerOn = true
+ }
+}
+
+// StopTimer stops timing a test. This can be used to pause the timer
+// while performing complex initialization that you don't
+// want to measure.
+func (b *B) StopTimer() {
+ if b.timerOn {
+ b.duration += time.Now().Sub(b.start)
+ b.timerOn = false
+ }
+}
+
+// ResetTimer sets the elapsed benchmark time to zero.
+// It does not affect whether the timer is running.
+func (b *B) ResetTimer() {
+ if b.timerOn {
+ b.start = time.Now()
+ }
+ b.duration = 0
+}
+
+// SetBytes records the number of bytes processed in a single operation.
+// If this is called, the benchmark will report ns/op and MB/s.
+func (b *B) SetBytes(n int64) { b.bytes = n }
+
+func (b *B) nsPerOp() int64 {
+ if b.N <= 0 {
+ return 0
+ }
+ return b.duration.Nanoseconds() / int64(b.N)
+}
+
+// runN runs a single benchmark for the specified number of iterations.
+func (b *B) runN(n int) {
+ // Try to get a comparable environment for each run
+ // by clearing garbage from previous runs.
+ runtime.GC()
+ b.N = n
+ b.ResetTimer()
+ b.StartTimer()
+ b.benchmark.F(b)
+ b.StopTimer()
+}
+
+func min(x, y int) int {
+ if x > y {
+ return y
+ }
+ return x
+}
+
+func max(x, y int) int {
+ if x < y {
+ return y
+ }
+ return x
+}
+
+// roundDown10 rounds a number down to the nearest power of 10.
+func roundDown10(n int) int {
+ var tens = 0
+ // tens = floor(log_10(n))
+	for n >= 10 {
+ n = n / 10
+ tens++
+ }
+ // result = 10^tens
+ result := 1
+ for i := 0; i < tens; i++ {
+ result *= 10
+ }
+ return result
+}
+
+// roundUp rounds n up to a number of the form [1eX, 2eX, 5eX].
+func roundUp(n int) int {
+	base := roundDown10(n)
+	if n <= base {
+		return base
+	}
+	if n <= (2 * base) {
+		return 2 * base
+	}
+	if n <= (5 * base) {
+		return 5 * base
+	}
+	return 10 * base
+}
+
+// run times the benchmark function in a separate goroutine.
+func (b *B) run() BenchmarkResult {
+ go b.launch()
+ <-b.signal
+ return b.result
+}
+
+// launch launches the benchmark function. It gradually increases the number
+// of benchmark iterations until the benchmark runs for a second in order
+// to get a reasonable measurement. It prints timing information in this form
+// testing.BenchmarkHello 100000 19 ns/op
+// launch is run by the run function as a separate goroutine.
+func (b *B) launch() {
+ // Run the benchmark for a single iteration in case it's expensive.
+ n := 1
+
+ // Signal that we're done whether we return normally
+ // or by FailNow's runtime.Goexit.
+ defer func() {
+ b.signal <- b
+ }()
+
+ b.runN(n)
+ // Run the benchmark for at least the specified amount of time.
+ d := time.Duration(*benchTime * float64(time.Second))
+ for !b.failed && b.duration < d && n < 1e9 {
+ last := n
+ // Predict iterations/sec.
+ if b.nsPerOp() == 0 {
+ n = 1e9
+ } else {
+ n = int(d.Nanoseconds() / b.nsPerOp())
+ }
+ // Run more iterations than we think we'll need for a second (1.5x).
+ // Don't grow too fast in case we had timing errors previously.
+ // Be sure to run at least one more than last time.
+ n = max(min(n+n/2, 100*last), last+1)
+ // Round up to something easy to read.
+ n = roundUp(n)
+ b.runN(n)
+ }
+ b.result = BenchmarkResult{b.N, b.duration, b.bytes}
+}
+
+// The results of a benchmark run.
+type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+}
+
+func (r BenchmarkResult) NsPerOp() int64 {
+ if r.N <= 0 {
+ return 0
+ }
+ return r.T.Nanoseconds() / int64(r.N)
+}
+
+func (r BenchmarkResult) mbPerSec() float64 {
+ if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
+ return 0
+ }
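+	// total bytes processed (Bytes per op times N ops), in MB,
+	// divided by the elapsed time in seconds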
+ return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
+}
+
+func (r BenchmarkResult) String() string {
+ mbs := r.mbPerSec()
+ mb := ""
+ if mbs != 0 {
+ mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
+ }
+ nsop := r.NsPerOp()
+ ns := fmt.Sprintf("%10d ns/op", nsop)
+ if r.N > 0 && nsop < 100 {
+ // The format specifiers here make sure that
+ // the ones digits line up for all three possible formats.
+ if nsop < 10 {
+ ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
+ } else {
+ ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
+ }
+ }
+ return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
+}
+
+// An internal function but exported because it is cross-package; part of the implementation
+// of go test.
+func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
+ // If no flag was specified, don't run benchmarks.
+ if len(*matchBenchmarks) == 0 {
+ return
+ }
+ for _, Benchmark := range benchmarks {
+ matched, err := matchString(*matchBenchmarks, Benchmark.Name)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.bench: %s\n", err)
+ os.Exit(1)
+ }
+ if !matched {
+ continue
+ }
+ for _, procs := range cpuList {
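+			// cpuList holds the GOMAXPROCS values to benchmark with;
+			// in package testing it is derived from the -test.cpu flag.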
+ runtime.GOMAXPROCS(procs)
+ b := &B{
+ common: common{
+ signal: make(chan interface{}),
+ },
+ benchmark: Benchmark,
+ }
+ benchName := Benchmark.Name
+ if procs != 1 {
+ benchName = fmt.Sprintf("%s-%d", Benchmark.Name, procs)
+ }
+ fmt.Printf("%s\t", benchName)
+ r := b.run()
+ if b.failed {
+ // The output could be very long here, but probably isn't.
+ // We print it all, regardless, because we don't want to trim the reason
+ // the benchmark failed.
+ fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
+ continue
+ }
+ fmt.Printf("%v\n", r)
+ // Unlike with tests, we ignore the -chatty flag and always print output for
+ // benchmarks since the output generation time will skew the results.
+ if len(b.output) > 0 {
+ b.trimOutput()
+ fmt.Printf("--- BENCH: %s\n%s", benchName, b.output)
+ }
+ if p := runtime.GOMAXPROCS(-1); p != procs {
+ fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
+ }
+ }
+ }
+}
+
+// trimOutput shortens the output from a benchmark, which can be very long.
+func (b *B) trimOutput() {
+ // The output is likely to appear multiple times because the benchmark
+ // is run multiple times, but at least it will be seen. This is not a big deal
+ // because benchmarks rarely print, but just in case, we trim it if it's too long.
+ const maxNewlines = 10
+ for nlCount, j := 0, 0; j < len(b.output); j++ {
+ if b.output[j] == '\n' {
+ nlCount++
+ if nlCount >= maxNewlines {
+ b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
+ break
+ }
+ }
+ }
+}
+
+// Benchmark benchmarks a single function. Useful for creating
+// custom benchmarks that do not use go test.
+func Benchmark(f func(b *B)) BenchmarkResult {
+ b := &B{
+ common: common{
+ signal: make(chan interface{}),
+ },
+ benchmark: InternalBenchmark{"", f},
+ }
+ return b.run()
+}
diff --git a/src/pkg/go/doc/testdata/c.0.golden b/src/pkg/go/doc/testdata/c.0.golden
new file mode 100644
index 000000000..e21959b19
--- /dev/null
+++ b/src/pkg/go/doc/testdata/c.0.golden
@@ -0,0 +1,48 @@
+//
+PACKAGE c
+
+IMPORTPATH
+ testdata/c
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/c.go
+
+TYPES
+ // A (should see this)
+ type A struct{}
+
+ // B (should see this)
+ type B struct{}
+
+ // C (should see this)
+ type C struct{}
+
+ // D (should see this)
+ type D struct{}
+
+ // E1 (should see this)
+ type E1 struct{}
+
+ // E (should see this for E2 and E3)
+ type E2 struct{}
+
+ // E (should see this for E2 and E3)
+ type E3 struct{}
+
+ // E4 (should see this)
+ type E4 struct{}
+
+ //
+ type T1 struct{}
+
+ //
+ func (t1 *T1) M()
+
+ // T2 must not show methods of local T1
+ type T2 struct {
+ a.T1 // not the same as locally declared T1
+ }
+
diff --git a/src/pkg/go/doc/testdata/c.1.golden b/src/pkg/go/doc/testdata/c.1.golden
new file mode 100644
index 000000000..e21959b19
--- /dev/null
+++ b/src/pkg/go/doc/testdata/c.1.golden
@@ -0,0 +1,48 @@
+//
+PACKAGE c
+
+IMPORTPATH
+ testdata/c
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/c.go
+
+TYPES
+ // A (should see this)
+ type A struct{}
+
+ // B (should see this)
+ type B struct{}
+
+ // C (should see this)
+ type C struct{}
+
+ // D (should see this)
+ type D struct{}
+
+ // E1 (should see this)
+ type E1 struct{}
+
+ // E (should see this for E2 and E3)
+ type E2 struct{}
+
+ // E (should see this for E2 and E3)
+ type E3 struct{}
+
+ // E4 (should see this)
+ type E4 struct{}
+
+ //
+ type T1 struct{}
+
+ //
+ func (t1 *T1) M()
+
+ // T2 must not show methods of local T1
+ type T2 struct {
+ a.T1 // not the same as locally declared T1
+ }
+
diff --git a/src/pkg/go/doc/testdata/c.2.golden b/src/pkg/go/doc/testdata/c.2.golden
new file mode 100644
index 000000000..e21959b19
--- /dev/null
+++ b/src/pkg/go/doc/testdata/c.2.golden
@@ -0,0 +1,48 @@
+//
+PACKAGE c
+
+IMPORTPATH
+ testdata/c
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/c.go
+
+TYPES
+ // A (should see this)
+ type A struct{}
+
+ // B (should see this)
+ type B struct{}
+
+ // C (should see this)
+ type C struct{}
+
+ // D (should see this)
+ type D struct{}
+
+ // E1 (should see this)
+ type E1 struct{}
+
+ // E (should see this for E2 and E3)
+ type E2 struct{}
+
+ // E (should see this for E2 and E3)
+ type E3 struct{}
+
+ // E4 (should see this)
+ type E4 struct{}
+
+ //
+ type T1 struct{}
+
+ //
+ func (t1 *T1) M()
+
+ // T2 must not show methods of local T1
+ type T2 struct {
+ a.T1 // not the same as locally declared T1
+ }
+
diff --git a/src/pkg/go/doc/testdata/c.go b/src/pkg/go/doc/testdata/c.go
new file mode 100644
index 000000000..e0f39196d
--- /dev/null
+++ b/src/pkg/go/doc/testdata/c.go
@@ -0,0 +1,62 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package c
+
+import "a"
+
+// ----------------------------------------------------------------------------
+// Test that empty declarations don't cause problems
+
+const ()
+
+type ()
+
+var ()
+
+// ----------------------------------------------------------------------------
+// Test that types with documentation on both the Decl and the Spec node
+// are handled correctly.
+
+// A (should see this)
+type A struct{}
+
+// B (should see this)
+type (
+ B struct{}
+)
+
+type (
+ // C (should see this)
+ C struct{}
+)
+
+// D (should not see this)
+type (
+ // D (should see this)
+ D struct{}
+)
+
+// E (should see this for E2 and E3)
+type (
+ // E1 (should see this)
+ E1 struct{}
+ E2 struct{}
+ E3 struct{}
+ // E4 (should see this)
+ E4 struct{}
+)
+
+// ----------------------------------------------------------------------------
+// Test that local and imported types are different when
+// handling anonymous fields.
+
+type T1 struct{}
+
+func (t1 *T1) M() {}
+
+// T2 must not show methods of local T1
+type T2 struct {
+ a.T1 // not the same as locally declared T1
+}
diff --git a/src/pkg/go/doc/testdata/d.0.golden b/src/pkg/go/doc/testdata/d.0.golden
new file mode 100644
index 000000000..c00519953
--- /dev/null
+++ b/src/pkg/go/doc/testdata/d.0.golden
@@ -0,0 +1,104 @@
+//
+PACKAGE d
+
+IMPORTPATH
+ testdata/d
+
+FILENAMES
+ testdata/d1.go
+ testdata/d2.go
+
+CONSTANTS
+ // CBx constants should appear before CAx constants.
+ const (
+ CB2 = iota // before CB1
+ CB1 // before CB0
+ CB0 // at end
+ )
+
+ // CAx constants should appear after CBx constants.
+ const (
+ CA2 = iota // before CA1
+ CA1 // before CA0
+ CA0 // at end
+ )
+
+ // C0 should be first.
+ const C0 = 0
+
+ // C1 should be second.
+ const C1 = 1
+
+ // C2 should be third.
+ const C2 = 2
+
+ //
+ const (
+ // Single const declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Cungrouped = 0
+ )
+
+
+VARIABLES
+ // VBx variables should appear before VAx variables.
+ var (
+ VB2 int // before VB1
+ VB1 int // before VB0
+ VB0 int // at end
+ )
+
+ // VAx variables should appear after VBx variables.
+ var (
+ VA2 int // before VA1
+ VA1 int // before VA0
+ VA0 int // at end
+ )
+
+ // V0 should be first.
+ var V0 uintptr
+
+ // V1 should be second.
+ var V1 uint
+
+ // V2 should be third.
+ var V2 int
+
+ //
+ var (
+ // Single var declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Vungrouped = 0
+ )
+
+
+FUNCTIONS
+ // F0 should be first.
+ func F0()
+
+ // F1 should be second.
+ func F1()
+
+ // F2 should be third.
+ func F2()
+
+
+TYPES
+ // T0 should be first.
+ type T0 struct{}
+
+ // T1 should be second.
+ type T1 struct{}
+
+ // T2 should be third.
+ type T2 struct{}
+
+ // TG0 should be first.
+ type TG0 struct{}
+
+ // TG1 should be second.
+ type TG1 struct{}
+
+ // TG2 should be third.
+ type TG2 struct{}
+
diff --git a/src/pkg/go/doc/testdata/d.1.golden b/src/pkg/go/doc/testdata/d.1.golden
new file mode 100644
index 000000000..c00519953
--- /dev/null
+++ b/src/pkg/go/doc/testdata/d.1.golden
@@ -0,0 +1,104 @@
+//
+PACKAGE d
+
+IMPORTPATH
+ testdata/d
+
+FILENAMES
+ testdata/d1.go
+ testdata/d2.go
+
+CONSTANTS
+ // CBx constants should appear before CAx constants.
+ const (
+ CB2 = iota // before CB1
+ CB1 // before CB0
+ CB0 // at end
+ )
+
+ // CAx constants should appear after CBx constants.
+ const (
+ CA2 = iota // before CA1
+ CA1 // before CA0
+ CA0 // at end
+ )
+
+ // C0 should be first.
+ const C0 = 0
+
+ // C1 should be second.
+ const C1 = 1
+
+ // C2 should be third.
+ const C2 = 2
+
+ //
+ const (
+ // Single const declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Cungrouped = 0
+ )
+
+
+VARIABLES
+ // VBx variables should appear before VAx variables.
+ var (
+ VB2 int // before VB1
+ VB1 int // before VB0
+ VB0 int // at end
+ )
+
+ // VAx variables should appear after VBx variables.
+ var (
+ VA2 int // before VA1
+ VA1 int // before VA0
+ VA0 int // at end
+ )
+
+ // V0 should be first.
+ var V0 uintptr
+
+ // V1 should be second.
+ var V1 uint
+
+ // V2 should be third.
+ var V2 int
+
+ //
+ var (
+ // Single var declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Vungrouped = 0
+ )
+
+
+FUNCTIONS
+ // F0 should be first.
+ func F0()
+
+ // F1 should be second.
+ func F1()
+
+ // F2 should be third.
+ func F2()
+
+
+TYPES
+ // T0 should be first.
+ type T0 struct{}
+
+ // T1 should be second.
+ type T1 struct{}
+
+ // T2 should be third.
+ type T2 struct{}
+
+ // TG0 should be first.
+ type TG0 struct{}
+
+ // TG1 should be second.
+ type TG1 struct{}
+
+ // TG2 should be third.
+ type TG2 struct{}
+
diff --git a/src/pkg/go/doc/testdata/d.2.golden b/src/pkg/go/doc/testdata/d.2.golden
new file mode 100644
index 000000000..c00519953
--- /dev/null
+++ b/src/pkg/go/doc/testdata/d.2.golden
@@ -0,0 +1,104 @@
+//
+PACKAGE d
+
+IMPORTPATH
+ testdata/d
+
+FILENAMES
+ testdata/d1.go
+ testdata/d2.go
+
+CONSTANTS
+ // CBx constants should appear before CAx constants.
+ const (
+ CB2 = iota // before CB1
+ CB1 // before CB0
+ CB0 // at end
+ )
+
+ // CAx constants should appear after CBx constants.
+ const (
+ CA2 = iota // before CA1
+ CA1 // before CA0
+ CA0 // at end
+ )
+
+ // C0 should be first.
+ const C0 = 0
+
+ // C1 should be second.
+ const C1 = 1
+
+ // C2 should be third.
+ const C2 = 2
+
+ //
+ const (
+ // Single const declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Cungrouped = 0
+ )
+
+
+VARIABLES
+ // VBx variables should appear before VAx variables.
+ var (
+ VB2 int // before VB1
+ VB1 int // before VB0
+ VB0 int // at end
+ )
+
+ // VAx variables should appear after VBx variables.
+ var (
+ VA2 int // before VA1
+ VA1 int // before VA0
+ VA0 int // at end
+ )
+
+ // V0 should be first.
+ var V0 uintptr
+
+ // V1 should be second.
+ var V1 uint
+
+ // V2 should be third.
+ var V2 int
+
+ //
+ var (
+ // Single var declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Vungrouped = 0
+ )
+
+
+FUNCTIONS
+ // F0 should be first.
+ func F0()
+
+ // F1 should be second.
+ func F1()
+
+ // F2 should be third.
+ func F2()
+
+
+TYPES
+ // T0 should be first.
+ type T0 struct{}
+
+ // T1 should be second.
+ type T1 struct{}
+
+ // T2 should be third.
+ type T2 struct{}
+
+ // TG0 should be first.
+ type TG0 struct{}
+
+ // TG1 should be second.
+ type TG1 struct{}
+
+ // TG2 should be third.
+ type TG2 struct{}
+
diff --git a/src/pkg/go/doc/testdata/d1.go b/src/pkg/go/doc/testdata/d1.go
new file mode 100644
index 000000000..ebd694195
--- /dev/null
+++ b/src/pkg/go/doc/testdata/d1.go
@@ -0,0 +1,57 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test cases for sort order of declarations.
+
+package d
+
+// C2 should be third.
+const C2 = 2
+
+// V2 should be third.
+var V2 int
+
+// CBx constants should appear before CAx constants.
+const (
+ CB2 = iota // before CB1
+ CB1 // before CB0
+ CB0 // at end
+)
+
+// VBx variables should appear before VAx variables.
+var (
+ VB2 int // before VB1
+ VB1 int // before VB0
+ VB0 int // at end
+)
+
+const (
+ // Single const declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Cungrouped = 0
+)
+
+var (
+ // Single var declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Vungrouped = 0
+)
+
+// T2 should be third.
+type T2 struct{}
+
+// Grouped types are sorted nevertheless.
+type (
+ // TG2 should be third.
+ TG2 struct{}
+
+ // TG1 should be second.
+ TG1 struct{}
+
+ // TG0 should be first.
+ TG0 struct{}
+)
+
+// F2 should be third.
+func F2() {}
diff --git a/src/pkg/go/doc/testdata/d2.go b/src/pkg/go/doc/testdata/d2.go
new file mode 100644
index 000000000..2f56f4fa4
--- /dev/null
+++ b/src/pkg/go/doc/testdata/d2.go
@@ -0,0 +1,45 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test cases for sort order of declarations.
+
+package d
+
+// C1 should be second.
+const C1 = 1
+
+// C0 should be first.
+const C0 = 0
+
+// V1 should be second.
+var V1 uint
+
+// V0 should be first.
+var V0 uintptr
+
+// CAx constants should appear after CBx constants.
+const (
+ CA2 = iota // before CA1
+ CA1 // before CA0
+ CA0 // at end
+)
+
+// VAx variables should appear after VBx variables.
+var (
+ VA2 int // before VA1
+ VA1 int // before VA0
+ VA0 int // at end
+)
+
+// T1 should be second.
+type T1 struct{}
+
+// T0 should be first.
+type T0 struct{}
+
+// F1 should be second.
+func F1() {}
+
+// F0 should be first.
+func F0() {}
diff --git a/src/pkg/go/doc/testdata/e.0.golden b/src/pkg/go/doc/testdata/e.0.golden
new file mode 100644
index 000000000..6987e5867
--- /dev/null
+++ b/src/pkg/go/doc/testdata/e.0.golden
@@ -0,0 +1,109 @@
+// The package e is a go/doc test for embedded methods.
+PACKAGE e
+
+IMPORTPATH
+ testdata/e
+
+FILENAMES
+ testdata/e.go
+
+TYPES
+ // T1 has no embedded (level 1) M method due to conflict.
+ type T1 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T2 has only M as top-level method.
+ type T2 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T2.M should appear as method of T2.
+ func (T2) M()
+
+ // T3 has only M as top-level method.
+ type T3 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T3.M should appear as method of T3.
+ func (T3) M()
+
+ //
+ type T4 struct{}
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T4) M()
+
+ //
+ type T5 struct {
+ T4
+ }
+
+ //
+ type U1 struct {
+ *U1
+ }
+
+ // U1.M should appear as method of U1.
+ func (*U1) M()
+
+ //
+ type U2 struct {
+ *U3
+ }
+
+ // U2.M should appear as method of U2 and as method of U3 only if ...
+ func (*U2) M()
+
+ //
+ type U3 struct {
+ *U2
+ }
+
+ // U3.N should appear as method of U3 and as method of U2 only if ...
+ func (*U3) N()
+
+ //
+ type U4 struct {
+ // contains filtered or unexported fields
+ }
+
+ // U4.M should appear as method of U4.
+ func (*U4) M()
+
+ //
+ type V1 struct {
+ *V2
+ *V5
+ }
+
+ //
+ type V2 struct {
+ *V3
+ }
+
+ //
+ type V3 struct {
+ *V4
+ }
+
+ //
+ type V4 struct {
+ *V5
+ }
+
+ // V4.M should appear as method of V2 and V3 if AllMethods is set.
+ func (*V4) M()
+
+ //
+ type V5 struct {
+ *V6
+ }
+
+ //
+ type V6 struct{}
+
+ // V6.M should appear as method of V1 and V5 if AllMethods is set.
+ func (*V6) M()
+
diff --git a/src/pkg/go/doc/testdata/e.1.golden b/src/pkg/go/doc/testdata/e.1.golden
new file mode 100644
index 000000000..cbe22e0bf
--- /dev/null
+++ b/src/pkg/go/doc/testdata/e.1.golden
@@ -0,0 +1,144 @@
+// The package e is a go/doc test for embedded methods.
+PACKAGE e
+
+IMPORTPATH
+ testdata/e
+
+FILENAMES
+ testdata/e.go
+
+TYPES
+ // T1 has no embedded (level 1) M method due to conflict.
+ type T1 struct {
+ t1
+ t2
+ }
+
+ // T2 has only M as top-level method.
+ type T2 struct {
+ t1
+ }
+
+ // T2.M should appear as method of T2.
+ func (T2) M()
+
+ // T3 has only M as top-level method.
+ type T3 struct {
+ t1e
+ t2e
+ }
+
+ // T3.M should appear as method of T3.
+ func (T3) M()
+
+ //
+ type T4 struct{}
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T4) M()
+
+ //
+ type T5 struct {
+ T4
+ }
+
+ //
+ type U1 struct {
+ *U1
+ }
+
+ // U1.M should appear as method of U1.
+ func (*U1) M()
+
+ //
+ type U2 struct {
+ *U3
+ }
+
+ // U2.M should appear as method of U2 and as method of U3 only if ...
+ func (*U2) M()
+
+ //
+ type U3 struct {
+ *U2
+ }
+
+ // U3.N should appear as method of U3 and as method of U2 only if ...
+ func (*U3) N()
+
+ //
+ type U4 struct {
+ *u5
+ }
+
+ // U4.M should appear as method of U4.
+ func (*U4) M()
+
+ //
+ type V1 struct {
+ *V2
+ *V5
+ }
+
+ //
+ type V2 struct {
+ *V3
+ }
+
+ //
+ type V3 struct {
+ *V4
+ }
+
+ //
+ type V4 struct {
+ *V5
+ }
+
+ // V4.M should appear as method of V2 and V3 if AllMethods is set.
+ func (*V4) M()
+
+ //
+ type V5 struct {
+ *V6
+ }
+
+ //
+ type V6 struct{}
+
+ // V6.M should appear as method of V1 and V5 if AllMethods is set.
+ func (*V6) M()
+
+ //
+ type t1 struct{}
+
+ // t1.M should not appear as method in a Tx type.
+ func (t1) M()
+
+ //
+ type t1e struct {
+ t1
+ }
+
+ // t1.M should not appear as method in a Tx type.
+ func (t1e) M()
+
+ //
+ type t2 struct{}
+
+ // t2.M should not appear as method in a Tx type.
+ func (t2) M()
+
+ //
+ type t2e struct {
+ t2
+ }
+
+ // t2.M should not appear as method in a Tx type.
+ func (t2e) M()
+
+ //
+ type u5 struct {
+ *U4
+ }
+
diff --git a/src/pkg/go/doc/testdata/e.2.golden b/src/pkg/go/doc/testdata/e.2.golden
new file mode 100644
index 000000000..e7b05e80f
--- /dev/null
+++ b/src/pkg/go/doc/testdata/e.2.golden
@@ -0,0 +1,130 @@
+// The package e is a go/doc test for embedded methods.
+PACKAGE e
+
+IMPORTPATH
+ testdata/e
+
+FILENAMES
+ testdata/e.go
+
+TYPES
+ // T1 has no embedded (level 1) M method due to conflict.
+ type T1 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T2 has only M as top-level method.
+ type T2 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T2.M should appear as method of T2.
+ func (T2) M()
+
+ // T3 has only M as top-level method.
+ type T3 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T3.M should appear as method of T3.
+ func (T3) M()
+
+ //
+ type T4 struct{}
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T4) M()
+
+ //
+ type T5 struct {
+ T4
+ }
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T5) M()
+
+ //
+ type U1 struct {
+ *U1
+ }
+
+ // U1.M should appear as method of U1.
+ func (*U1) M()
+
+ //
+ type U2 struct {
+ *U3
+ }
+
+ // U2.M should appear as method of U2 and as method of U3 only if ...
+ func (*U2) M()
+
+ // U3.N should appear as method of U3 and as method of U2 only if ...
+ func (U2) N()
+
+ //
+ type U3 struct {
+ *U2
+ }
+
+ // U2.M should appear as method of U2 and as method of U3 only if ...
+ func (U3) M()
+
+ // U3.N should appear as method of U3 and as method of U2 only if ...
+ func (*U3) N()
+
+ //
+ type U4 struct {
+ // contains filtered or unexported fields
+ }
+
+ // U4.M should appear as method of U4.
+ func (*U4) M()
+
+ //
+ type V1 struct {
+ *V2
+ *V5
+ }
+
+ // V6.M should appear as method of V1 and V5 if AllMethods is set.
+ func (V1) M()
+
+ //
+ type V2 struct {
+ *V3
+ }
+
+ // V4.M should appear as method of V2 and V3 if AllMethods is set.
+ func (V2) M()
+
+ //
+ type V3 struct {
+ *V4
+ }
+
+ // V4.M should appear as method of V2 and V3 if AllMethods is set.
+ func (V3) M()
+
+ //
+ type V4 struct {
+ *V5
+ }
+
+ // V4.M should appear as method of V2 and V3 if AllMethods is set.
+ func (*V4) M()
+
+ //
+ type V5 struct {
+ *V6
+ }
+
+ // V6.M should appear as method of V1 and V5 if AllMethods is set.
+ func (V5) M()
+
+ //
+ type V6 struct{}
+
+ // V6.M should appear as method of V1 and V5 if AllMethods is set.
+ func (*V6) M()
+
diff --git a/src/pkg/go/doc/testdata/e.go b/src/pkg/go/doc/testdata/e.go
new file mode 100644
index 000000000..19dd138cf
--- /dev/null
+++ b/src/pkg/go/doc/testdata/e.go
@@ -0,0 +1,147 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The package e is a go/doc test for embedded methods.
+package e
+
+// ----------------------------------------------------------------------------
+// Conflicting methods M must not show up.
+
+type t1 struct{}
+
+// t1.M should not appear as method in a Tx type.
+func (t1) M() {}
+
+type t2 struct{}
+
+// t2.M should not appear as method in a Tx type.
+func (t2) M() {}
+
+// T1 has no embedded (level 1) M method due to conflict.
+type T1 struct {
+ t1
+ t2
+}
+
+// ----------------------------------------------------------------------------
+// Higher-level method M wins over lower-level method M.
+
+// T2 has only M as top-level method.
+type T2 struct {
+ t1
+}
+
+// T2.M should appear as method of T2.
+func (T2) M() {}
+
+// ----------------------------------------------------------------------------
+// Higher-level method M wins over lower-level conflicting methods M.
+
+type t1e struct {
+ t1
+}
+
+type t2e struct {
+ t2
+}
+
+// T3 has only M as top-level method.
+type T3 struct {
+ t1e
+ t2e
+}
+
+// T3.M should appear as method of T3.
+func (T3) M() {}
+
+// ----------------------------------------------------------------------------
+// Don't show conflicting methods M embedded via an exported and non-exported
+// type.
+
+// T4 has no embedded (level 1) M method due to conflict.
+type T4 struct {
+ t2
+ T2
+}
+
+// ----------------------------------------------------------------------------
+// Don't show embedded methods of exported anonymous fields unless AllMethods
+// is set.
+
+type T4 struct{}
+
+// T4.M should appear as method of T5 only if AllMethods is set.
+func (*T4) M() {}
+
+type T5 struct {
+ T4
+}
+
+// ----------------------------------------------------------------------------
+// Recursive type declarations must not lead to endless recursion.
+
+type U1 struct {
+ *U1
+}
+
+// U1.M should appear as method of U1.
+func (*U1) M() {}
+
+type U2 struct {
+ *U3
+}
+
+// U2.M should appear as method of U2 and as method of U3 only if AllMethods is set.
+func (*U2) M() {}
+
+type U3 struct {
+ *U2
+}
+
+// U3.N should appear as method of U3 and as method of U2 only if AllMethods is set.
+func (*U3) N() {}
+
+type U4 struct {
+ *u5
+}
+
+// U4.M should appear as method of U4.
+func (*U4) M() {}
+
+type u5 struct {
+ *U4
+}
+
+// ----------------------------------------------------------------------------
+// A higher-level embedded type (and its methods) wins over the same type (and
+// its methods) embedded at a lower level.
+
+type V1 struct {
+ *V2
+ *V5
+}
+
+type V2 struct {
+ *V3
+}
+
+type V3 struct {
+ *V4
+}
+
+type V4 struct {
+ *V5
+}
+
+type V5 struct {
+ *V6
+}
+
+type V6 struct{}
+
+// V4.M should appear as method of V2 and V3 if AllMethods is set.
+func (*V4) M() {}
+
+// V6.M should appear as method of V1 and V5 if AllMethods is set.
+func (*V6) M() {}
diff --git a/src/pkg/go/doc/testdata/error1.0.golden b/src/pkg/go/doc/testdata/error1.0.golden
new file mode 100644
index 000000000..6c6fe5d49
--- /dev/null
+++ b/src/pkg/go/doc/testdata/error1.0.golden
@@ -0,0 +1,30 @@
+//
+PACKAGE error1
+
+IMPORTPATH
+ testdata/error1
+
+FILENAMES
+ testdata/error1.go
+
+TYPES
+ //
+ type I0 interface {
+ // When embedded, the predeclared error interface
+ // must remain visible in interface types.
+ error
+ }
+
+ //
+ type S0 struct {
+ // contains filtered or unexported fields
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // error should be visible
+ error
+ }
+ }
+
diff --git a/src/pkg/go/doc/testdata/error1.1.golden b/src/pkg/go/doc/testdata/error1.1.golden
new file mode 100644
index 000000000..a8dc2e71d
--- /dev/null
+++ b/src/pkg/go/doc/testdata/error1.1.golden
@@ -0,0 +1,32 @@
+//
+PACKAGE error1
+
+IMPORTPATH
+ testdata/error1
+
+FILENAMES
+ testdata/error1.go
+
+TYPES
+ //
+ type I0 interface {
+ // When embedded, the predeclared error interface
+ // must remain visible in interface types.
+ error
+ }
+
+ //
+ type S0 struct {
+ // In struct types, an embedded error must only be visible
+ // if AllDecls is set.
+ error
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // error should be visible
+ error
+ }
+ }
+
diff --git a/src/pkg/go/doc/testdata/error1.2.golden b/src/pkg/go/doc/testdata/error1.2.golden
new file mode 100644
index 000000000..6c6fe5d49
--- /dev/null
+++ b/src/pkg/go/doc/testdata/error1.2.golden
@@ -0,0 +1,30 @@
+//
+PACKAGE error1
+
+IMPORTPATH
+ testdata/error1
+
+FILENAMES
+ testdata/error1.go
+
+TYPES
+ //
+ type I0 interface {
+ // When embedded, the predeclared error interface
+ // must remain visible in interface types.
+ error
+ }
+
+ //
+ type S0 struct {
+ // contains filtered or unexported fields
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // error should be visible
+ error
+ }
+ }
+
diff --git a/src/pkg/go/doc/testdata/error1.go b/src/pkg/go/doc/testdata/error1.go
new file mode 100644
index 000000000..3c777a780
--- /dev/null
+++ b/src/pkg/go/doc/testdata/error1.go
@@ -0,0 +1,24 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package error1
+
+type I0 interface {
+ // When embedded, the predeclared error interface
+ // must remain visible in interface types.
+ error
+}
+
+type T0 struct {
+ ExportedField interface {
+ // error should be visible
+ error
+ }
+}
+
+type S0 struct {
+ // In struct types, an embedded error must only be visible
+ // if AllDecls is set.
+ error
+}
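The three golden variants above presumably correspond to the three go/doc modes the test driver runs, with the numeric filename suffix matching the mode's index (0 = default, 1 = AllDecls, 2 = AllMethods). A minimal sketch of that assumption — doc.New and the Mode constants are the real go/doc API, while astPkg and compare are illustrative names:

	for i, mode := range []doc.Mode{0, doc.AllDecls, doc.AllMethods} {
		d := doc.New(astPkg, "testdata/error1", mode) // astPkg: the parsed *ast.Package
		compare(d, fmt.Sprintf("error1.%d.golden", i)) // diff against the golden file
	}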
diff --git a/src/pkg/go/doc/testdata/error2.0.golden b/src/pkg/go/doc/testdata/error2.0.golden
new file mode 100644
index 000000000..dedfe412a
--- /dev/null
+++ b/src/pkg/go/doc/testdata/error2.0.golden
@@ -0,0 +1,27 @@
+//
+PACKAGE error2
+
+IMPORTPATH
+ testdata/error2
+
+FILENAMES
+ testdata/error2.go
+
+TYPES
+ //
+ type I0 interface {
+ // contains filtered or unexported methods
+ }
+
+ //
+ type S0 struct {
+ // contains filtered or unexported fields
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // contains filtered or unexported methods
+ }
+ }
+
diff --git a/src/pkg/go/doc/testdata/error2.1.golden b/src/pkg/go/doc/testdata/error2.1.golden
new file mode 100644
index 000000000..776bd1b3e
--- /dev/null
+++ b/src/pkg/go/doc/testdata/error2.1.golden
@@ -0,0 +1,37 @@
+//
+PACKAGE error2
+
+IMPORTPATH
+ testdata/error2
+
+FILENAMES
+ testdata/error2.go
+
+TYPES
+ //
+ type I0 interface {
+ // When embedded, the locally declared error interface
+ // is only visible if all declarations are shown.
+ error
+ }
+
+ //
+ type S0 struct {
+ // In struct types, an embedded error must only be visible
+ // if AllDecls is set.
+ error
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // error should not be visible
+ error
+ }
+ }
+
+ // This error declaration shadows the predeclared error type.
+ type error interface {
+ Error() string
+ }
+
diff --git a/src/pkg/go/doc/testdata/error2.2.golden b/src/pkg/go/doc/testdata/error2.2.golden
new file mode 100644
index 000000000..dedfe412a
--- /dev/null
+++ b/src/pkg/go/doc/testdata/error2.2.golden
@@ -0,0 +1,27 @@
+//
+PACKAGE error2
+
+IMPORTPATH
+ testdata/error2
+
+FILENAMES
+ testdata/error2.go
+
+TYPES
+ //
+ type I0 interface {
+ // contains filtered or unexported methods
+ }
+
+ //
+ type S0 struct {
+ // contains filtered or unexported fields
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // contains filtered or unexported methods
+ }
+ }
+
diff --git a/src/pkg/go/doc/testdata/error2.go b/src/pkg/go/doc/testdata/error2.go
new file mode 100644
index 000000000..6cc36feef
--- /dev/null
+++ b/src/pkg/go/doc/testdata/error2.go
@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package error2
+
+type I0 interface {
+ // When embedded, the locally declared error interface
+ // is only visible if all declarations are shown.
+ error
+}
+
+type T0 struct {
+ ExportedField interface {
+ // error should not be visible
+ error
+ }
+}
+
+type S0 struct {
+ // In struct types, an embedded error must only be visible
+ // if AllDecls is set.
+ error
+}
+
+// This error declaration shadows the predeclared error type.
+type error interface {
+ Error() string
+}
diff --git a/src/pkg/go/doc/testdata/example.go b/src/pkg/go/doc/testdata/example.go
new file mode 100644
index 000000000..fdeda137e
--- /dev/null
+++ b/src/pkg/go/doc/testdata/example.go
@@ -0,0 +1,81 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+)
+
+type InternalExample struct {
+ Name string
+ F func()
+ Output string
+}
+
+func RunExamples(examples []InternalExample) (ok bool) {
+ ok = true
+
+ var eg InternalExample
+
+ stdout, stderr := os.Stdout, os.Stderr
+ defer func() {
+ os.Stdout, os.Stderr = stdout, stderr
+ if e := recover(); e != nil {
+ fmt.Printf("--- FAIL: %s\npanic: %v\n", eg.Name, e)
+ os.Exit(1)
+ }
+ }()
+
+ for _, eg = range examples {
+ if *chatty {
+ fmt.Printf("=== RUN: %s\n", eg.Name)
+ }
+
+ // capture stdout and stderr
+ r, w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ os.Stdout, os.Stderr = w, w
+ outC := make(chan string)
+ go func() {
+ buf := new(bytes.Buffer)
+ _, err := io.Copy(buf, r)
+ if err != nil {
+ fmt.Fprintf(stderr, "testing: copying pipe: %v\n", err)
+ os.Exit(1)
+ }
+ outC <- buf.String()
+ }()
+
+ // run example
+ t0 := time.Now()
+ eg.F()
+ dt := time.Now().Sub(t0)
+
+ // close pipe, restore stdout/stderr, get output
+ w.Close()
+ os.Stdout, os.Stderr = stdout, stderr
+ out := <-outC
+
+ // report any errors
+ tstr := fmt.Sprintf("(%.2f seconds)", dt.Seconds())
+ if g, e := strings.TrimSpace(out), strings.TrimSpace(eg.Output); g != e {
+ fmt.Printf("--- FAIL: %s %s\ngot:\n%s\nwant:\n%s\n",
+ eg.Name, tstr, g, e)
+ ok = false
+ } else if *chatty {
+ fmt.Printf("--- PASS: %s %s\n", eg.Name, tstr)
+ }
+ }
+
+ return
+}
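RunExamples above swaps os.Stdout and os.Stderr for a pipe, runs each example, and compares the trimmed captured output against the recorded Output string. A hedged sketch of a caller, as a generated test main might look (the example function and its expected output are invented for illustration; imports assumed: fmt, os, plus this testing package):

	func ExampleHello() {
		fmt.Println("hello")
	}

	func main() {
		ok := RunExamples([]InternalExample{
			{Name: "ExampleHello", F: ExampleHello, Output: "hello\n"},
		})
		if !ok {
			os.Exit(1)
		}
	}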
diff --git a/src/pkg/go/doc/testdata/f.0.golden b/src/pkg/go/doc/testdata/f.0.golden
new file mode 100644
index 000000000..817590186
--- /dev/null
+++ b/src/pkg/go/doc/testdata/f.0.golden
@@ -0,0 +1,13 @@
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+ testdata/f
+
+FILENAMES
+ testdata/f.go
+
+FUNCTIONS
+ // Exported must always be visible. Was issue 2824.
+ func Exported() private
+
diff --git a/src/pkg/go/doc/testdata/f.1.golden b/src/pkg/go/doc/testdata/f.1.golden
new file mode 100644
index 000000000..ba68e884c
--- /dev/null
+++ b/src/pkg/go/doc/testdata/f.1.golden
@@ -0,0 +1,16 @@
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+ testdata/f
+
+FILENAMES
+ testdata/f.go
+
+TYPES
+ //
+ type private struct{}
+
+ // Exported must always be visible. Was issue 2824.
+ func Exported() private
+
diff --git a/src/pkg/go/doc/testdata/f.2.golden b/src/pkg/go/doc/testdata/f.2.golden
new file mode 100644
index 000000000..817590186
--- /dev/null
+++ b/src/pkg/go/doc/testdata/f.2.golden
@@ -0,0 +1,13 @@
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+ testdata/f
+
+FILENAMES
+ testdata/f.go
+
+FUNCTIONS
+ // Exported must always be visible. Was issue 2824.
+ func Exported() private
+
diff --git a/src/pkg/go/doc/testdata/f.go b/src/pkg/go/doc/testdata/f.go
new file mode 100644
index 000000000..7e9add907
--- /dev/null
+++ b/src/pkg/go/doc/testdata/f.go
@@ -0,0 +1,14 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The package f is a go/doc test for functions and factory methods.
+package f
+
+// ----------------------------------------------------------------------------
+// Factory functions for non-exported types must not get lost.
+
+type private struct{}
+
+// Exported must always be visible. Was issue 2824.
+func Exported() private {}
diff --git a/src/pkg/go/doc/testdata/template.txt b/src/pkg/go/doc/testdata/template.txt
new file mode 100644
index 000000000..32e331cdd
--- /dev/null
+++ b/src/pkg/go/doc/testdata/template.txt
@@ -0,0 +1,65 @@
+{{synopsis .Doc}}
+PACKAGE {{.Name}}
+
+IMPORTPATH
+ {{.ImportPath}}
+
+{{with .Imports}}IMPORTS
+{{range .}} {{.}}
+{{end}}
+{{end}}{{/*
+
+*/}}FILENAMES
+{{range .Filenames}} {{.}}
+{{end}}{{/*
+
+*/}}{{with .Consts}}
+CONSTANTS
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Vars}}
+VARIABLES
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Funcs}}
+FUNCTIONS
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Types}}
+TYPES
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{range .Consts}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Vars}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Funcs}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Methods}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{end}}{{/*
+
+*/}}{{with .Bugs}}
+BUGS
+{{range .}} {{synopsis .}}
+{{end}}{{end}}
\ No newline at end of file
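The template above reads doc.Package fields plus $.FSet and calls two helper functions, synopsis and node, so whatever drives it must install those helpers and hand over both the package docs and the file set. A sketch under those assumptions (bundle and render are illustrative names; doc.Synopsis and printer.Fprint are real APIs; imports assumed: bytes, go/doc, go/printer, go/token, io, text/template):

	// bundle carries everything the template dereferences: the
	// doc.Package fields directly, and FSet for the $.FSet argument.
	type bundle struct {
		*doc.Package
		FSet *token.FileSet
	}

	func render(w io.Writer, pkg *doc.Package, fset *token.FileSet) error {
		funcs := template.FuncMap{
			"synopsis": doc.Synopsis, // first sentence of a doc string
			"node": func(node interface{}, fset *token.FileSet) (string, error) {
				var buf bytes.Buffer
				err := printer.Fprint(&buf, fset, node) // pretty-print the AST node
				return buf.String(), err
			},
		}
		t, err := template.New("template.txt").Funcs(funcs).ParseFiles("testdata/template.txt")
		if err != nil {
			return err
		}
		return t.Execute(w, &bundle{pkg, fset})
	}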
diff --git a/src/pkg/go/doc/testdata/testing.0.golden b/src/pkg/go/doc/testdata/testing.0.golden
new file mode 100644
index 000000000..15a903986
--- /dev/null
+++ b/src/pkg/go/doc/testdata/testing.0.golden
@@ -0,0 +1,156 @@
+// Package testing provides support for automated testing of Go ...
+PACKAGE testing
+
+IMPORTPATH
+ testdata/testing
+
+IMPORTS
+ bytes
+ flag
+ fmt
+ io
+ os
+ runtime
+ runtime/pprof
+ strconv
+ strings
+ time
+
+FILENAMES
+ testdata/benchmark.go
+ testdata/example.go
+ testdata/testing.go
+
+FUNCTIONS
+ // An internal function but exported because it is cross-package; ...
+ func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
+
+ // An internal function but exported because it is cross-package; ...
+ func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
+
+ //
+ func RunExamples(examples []InternalExample) (ok bool)
+
+ //
+ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
+
+ // Short reports whether the -test.short flag is set.
+ func Short() bool
+
+
+TYPES
+ // B is a type passed to Benchmark functions to manage benchmark ...
+ type B struct {
+ N int
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *B) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *B) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *B) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *B) FailNow()
+
+ // Failed returns whether the function has failed.
+ func (c *B) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *B) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *B) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *B) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *B) Logf(format string, args ...interface{})
+
+ // ResetTimer sets the elapsed benchmark time to zero. It does not ...
+ func (b *B) ResetTimer()
+
+ // SetBytes records the number of bytes processed in a single ...
+ func (b *B) SetBytes(n int64)
+
+ // StartTimer starts timing a test. This function is called ...
+ func (b *B) StartTimer()
+
+ // StopTimer stops timing a test. This can be used to pause the ...
+ func (b *B) StopTimer()
+
+ // The results of a benchmark run.
+ type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+ }
+
+ // Benchmark benchmarks a single function. Useful for creating ...
+ func Benchmark(f func(b *B)) BenchmarkResult
+
+ //
+ func (r BenchmarkResult) NsPerOp() int64
+
+ //
+ func (r BenchmarkResult) String() string
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+ }
+
+ //
+ type InternalExample struct {
+ Name string
+ F func()
+ Output string
+ }
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalTest struct {
+ Name string
+ F func(*T)
+ }
+
+ // T is a type passed to Test functions to manage test state and ...
+ type T struct {
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *T) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *T) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *T) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *T) FailNow()
+
+ // Failed returns whether the function has failed.
+ func (c *T) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *T) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *T) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *T) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *T) Logf(format string, args ...interface{})
+
+ // Parallel signals that this test is to be run in parallel with ...
+ func (t *T) Parallel()
+
diff --git a/src/pkg/go/doc/testdata/testing.1.golden b/src/pkg/go/doc/testdata/testing.1.golden
new file mode 100644
index 000000000..d26a4685c
--- /dev/null
+++ b/src/pkg/go/doc/testdata/testing.1.golden
@@ -0,0 +1,298 @@
+// Package testing provides support for automated testing of Go ...
+PACKAGE testing
+
+IMPORTPATH
+ testdata/testing
+
+IMPORTS
+ bytes
+ flag
+ fmt
+ io
+ os
+ runtime
+ runtime/pprof
+ strconv
+ strings
+ time
+
+FILENAMES
+ testdata/benchmark.go
+ testdata/example.go
+ testdata/testing.go
+
+VARIABLES
+ //
+ var (
+ // The short flag requests that tests run more quickly, but its functionality
+ // is provided by test writers themselves. The testing package is just its
+ // home. The all.bash installation script sets it to make installation more
+ // efficient, but by default the flag is off so a plain "go test" will do a
+ // full test of the package.
+ short = flag.Bool("test.short", false, "run smaller test suite to save time")
+
+ // Report as tests are run; default is silent for success.
+ chatty = flag.Bool("test.v", false, "verbose: print additional output")
+ match = flag.String("test.run", "", "regular expression to select tests to run")
+ memProfile = flag.String("test.memprofile", "", "write a memory profile to the named file after execution")
+ memProfileRate = flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate")
+ cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
+ timeout = flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests")
+ cpuListStr = flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test")
+ parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism")
+
+ cpuList []int
+ )
+
+ //
+ var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
+
+ //
+ var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
+
+ //
+ var timer *time.Timer
+
+
+FUNCTIONS
+ // An internal function but exported because it is cross-package; ...
+ func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
+
+ // An internal function but exported because it is cross-package; ...
+ func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
+
+ //
+ func RunExamples(examples []InternalExample) (ok bool)
+
+ //
+ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
+
+ // Short reports whether the -test.short flag is set.
+ func Short() bool
+
+ // after runs after all testing.
+ func after()
+
+ // alarm is called if the timeout expires.
+ func alarm()
+
+ // before runs before all testing.
+ func before()
+
+ // decorate inserts the final newline if needed and indentation ...
+ func decorate(s string, addFileLine bool) string
+
+ //
+ func max(x, y int) int
+
+ //
+ func min(x, y int) int
+
+ //
+ func parseCpuList()
+
+ // roundDown10 rounds a number down to the nearest power of 10.
+ func roundDown10(n int) int
+
+ // roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
+ func roundUp(n int) int
+
+ // startAlarm starts an alarm if requested.
+ func startAlarm()
+
+ // stopAlarm turns off the alarm.
+ func stopAlarm()
+
+ //
+ func tRunner(t *T, test *InternalTest)
+
+
+TYPES
+ // B is a type passed to Benchmark functions to manage benchmark ...
+ type B struct {
+ common
+ N int
+ benchmark InternalBenchmark
+ bytes int64
+ timerOn bool
+ result BenchmarkResult
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *B) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *B) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *B) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *B) FailNow()
+
+ // Failed returns whether the function has failed.
+ func (c *B) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *B) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *B) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *B) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *B) Logf(format string, args ...interface{})
+
+ // ResetTimer sets the elapsed benchmark time to zero. It does not ...
+ func (b *B) ResetTimer()
+
+ // SetBytes records the number of bytes processed in a single ...
+ func (b *B) SetBytes(n int64)
+
+ // StartTimer starts timing a test. This function is called ...
+ func (b *B) StartTimer()
+
+ // StopTimer stops timing a test. This can be used to pause the ...
+ func (b *B) StopTimer()
+
+ // launch launches the benchmark function. It gradually increases ...
+ func (b *B) launch()
+
+ // log generates the output. It's always at the same stack depth.
+ func (c *B) log(s string)
+
+ //
+ func (b *B) nsPerOp() int64
+
+ // run times the benchmark function in a separate goroutine.
+ func (b *B) run() BenchmarkResult
+
+ // runN runs a single benchmark for the specified number of ...
+ func (b *B) runN(n int)
+
+ // trimOutput shortens the output from a benchmark, which can be ...
+ func (b *B) trimOutput()
+
+ // The results of a benchmark run.
+ type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+ }
+
+ // Benchmark benchmarks a single function. Useful for creating ...
+ func Benchmark(f func(b *B)) BenchmarkResult
+
+ //
+ func (r BenchmarkResult) NsPerOp() int64
+
+ //
+ func (r BenchmarkResult) String() string
+
+ //
+ func (r BenchmarkResult) mbPerSec() float64
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+ }
+
+ //
+ type InternalExample struct {
+ Name string
+ F func()
+ Output string
+ }
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalTest struct {
+ Name string
+ F func(*T)
+ }
+
+ // T is a type passed to Test functions to manage test state and ...
+ type T struct {
+ common
+ name string // Name of test.
+ startParallel chan bool // Parallel tests will wait on this.
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *T) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *T) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *T) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *T) FailNow()
+
+ // Failed returns whether the function has failed.
+ func (c *T) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *T) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *T) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *T) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *T) Logf(format string, args ...interface{})
+
+ // Parallel signals that this test is to be run in parallel with ...
+ func (t *T) Parallel()
+
+ // log generates the output. It's always at the same stack depth.
+ func (c *T) log(s string)
+
+ //
+ func (t *T) report()
+
+ // common holds the elements common between T and B and captures ...
+ type common struct {
+ output []byte // Output generated by test or benchmark.
+ failed bool // Test or benchmark has failed.
+ start time.Time // Time test or benchmark started
+ duration time.Duration
+ self interface{} // To be sent on signal channel when done.
+ signal chan interface{} // Output for serial tests.
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *common) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *common) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *common) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *common) FailNow()
+
+ // Failed returns whether the function has failed.
+ func (c *common) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *common) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *common) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *common) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *common) Logf(format string, args ...interface{})
+
+ // log generates the output. It's always at the same stack depth.
+ func (c *common) log(s string)
+
diff --git a/src/pkg/go/doc/testdata/testing.2.golden b/src/pkg/go/doc/testdata/testing.2.golden
new file mode 100644
index 000000000..15a903986
--- /dev/null
+++ b/src/pkg/go/doc/testdata/testing.2.golden
@@ -0,0 +1,156 @@
+// Package testing provides support for automated testing of Go ...
+PACKAGE testing
+
+IMPORTPATH
+ testdata/testing
+
+IMPORTS
+ bytes
+ flag
+ fmt
+ io
+ os
+ runtime
+ runtime/pprof
+ strconv
+ strings
+ time
+
+FILENAMES
+ testdata/benchmark.go
+ testdata/example.go
+ testdata/testing.go
+
+FUNCTIONS
+ // An internal function but exported because it is cross-package; ...
+ func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
+
+ // An internal function but exported because it is cross-package; ...
+ func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
+
+ //
+ func RunExamples(examples []InternalExample) (ok bool)
+
+ //
+ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
+
+ // Short reports whether the -test.short flag is set.
+ func Short() bool
+
+
+TYPES
+ // B is a type passed to Benchmark functions to manage benchmark ...
+ type B struct {
+ N int
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *B) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *B) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *B) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *B) FailNow()
+
+ // Failed returns whether the function has failed.
+ func (c *B) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *B) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *B) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *B) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *B) Logf(format string, args ...interface{})
+
+ // ResetTimer sets the elapsed benchmark time to zero. It does not ...
+ func (b *B) ResetTimer()
+
+ // SetBytes records the number of bytes processed in a single ...
+ func (b *B) SetBytes(n int64)
+
+ // StartTimer starts timing a test. This function is called ...
+ func (b *B) StartTimer()
+
+ // StopTimer stops timing a test. This can be used to pause the ...
+ func (b *B) StopTimer()
+
+ // The results of a benchmark run.
+ type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+ }
+
+ // Benchmark benchmarks a single function. Useful for creating ...
+ func Benchmark(f func(b *B)) BenchmarkResult
+
+ //
+ func (r BenchmarkResult) NsPerOp() int64
+
+ //
+ func (r BenchmarkResult) String() string
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+ }
+
+ //
+ type InternalExample struct {
+ Name string
+ F func()
+ Output string
+ }
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalTest struct {
+ Name string
+ F func(*T)
+ }
+
+ // T is a type passed to Test functions to manage test state and ...
+ type T struct {
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *T) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *T) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *T) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *T) FailNow()
+
+ // Failed returns whether the function has failed.
+ func (c *T) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *T) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *T) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *T) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *T) Logf(format string, args ...interface{})
+
+ // Parallel signals that this test is to be run in parallel with ...
+ func (t *T) Parallel()
+
diff --git a/src/pkg/go/doc/testdata/testing.go b/src/pkg/go/doc/testdata/testing.go
new file mode 100644
index 000000000..71c1d1eaf
--- /dev/null
+++ b/src/pkg/go/doc/testdata/testing.go
@@ -0,0 +1,404 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testing provides support for automated testing of Go packages.
+// It is intended to be used in concert with the ``go test'' utility, which automates
+// execution of any function of the form
+// func TestXxx(*testing.T)
+// where Xxx can be any alphanumeric string (but the first letter must not be in
+// [a-z]) and serves to identify the test routine.
+// These TestXxx routines should be declared within the package they are testing.
+//
+// Functions of the form
+// func BenchmarkXxx(*testing.B)
+// are considered benchmarks, and are executed by go test when the -test.bench
+// flag is provided.
+//
+// A sample benchmark function looks like this:
+// func BenchmarkHello(b *testing.B) {
+// for i := 0; i < b.N; i++ {
+// fmt.Sprintf("hello")
+// }
+// }
+// The benchmark package will vary b.N until the benchmark function lasts
+// long enough to be timed reliably. The output
+// testing.BenchmarkHello 10000000 282 ns/op
+// means that the loop ran 10000000 times at a speed of 282 ns per loop.
+//
+// If a benchmark needs some expensive setup before running, the timer
+// may be stopped:
+// func BenchmarkBigLen(b *testing.B) {
+// b.StopTimer()
+// big := NewBig()
+// b.StartTimer()
+// for i := 0; i < b.N; i++ {
+// big.Len()
+// }
+// }
+package testing
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ // The short flag requests that tests run more quickly, but its functionality
+ // is provided by test writers themselves. The testing package is just its
+ // home. The all.bash installation script sets it to make installation more
+ // efficient, but by default the flag is off so a plain "go test" will do a
+ // full test of the package.
+ short = flag.Bool("test.short", false, "run smaller test suite to save time")
+
+ // Report as tests are run; default is silent for success.
+ chatty = flag.Bool("test.v", false, "verbose: print additional output")
+ match = flag.String("test.run", "", "regular expression to select tests to run")
+ memProfile = flag.String("test.memprofile", "", "write a memory profile to the named file after execution")
+ memProfileRate = flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate")
+ cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
+ timeout = flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests")
+ cpuListStr = flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test")
+ parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism")
+
+ cpuList []int
+)
+
+// common holds the elements common between T and B and
+// captures common methods such as Errorf.
+type common struct {
+ output []byte // Output generated by test or benchmark.
+ failed bool // Test or benchmark has failed.
+ start time.Time // Time test or benchmark started
+ duration time.Duration
+ self interface{} // To be sent on signal channel when done.
+ signal chan interface{} // Output for serial tests.
+}
+
+// Short reports whether the -test.short flag is set.
+func Short() bool {
+ return *short
+}
+
+// decorate inserts the final newline if needed and indentation tabs for formatting.
+// If addFileLine is true, it also prefixes the string with the file and line of the call site.
+func decorate(s string, addFileLine bool) string {
+ if addFileLine {
+ _, file, line, ok := runtime.Caller(3) // decorate + log + public function.
+ if ok {
+ // Truncate file name at last file name separator.
+ if index := strings.LastIndex(file, "/"); index >= 0 {
+ file = file[index+1:]
+ } else if index = strings.LastIndex(file, "\\"); index >= 0 {
+ file = file[index+1:]
+ }
+ } else {
+ file = "???"
+ line = 1
+ }
+ s = fmt.Sprintf("%s:%d: %s", file, line, s)
+ }
+ s = "\t" + s // Every line is indented at least one tab.
+ n := len(s)
+ if n > 0 && s[n-1] != '\n' {
+ s += "\n"
+ n++
+ }
+ for i := 0; i < n-1; i++ { // -1 to avoid final newline
+ if s[i] == '\n' {
+ // Second and subsequent lines are indented an extra tab.
+ return s[0:i+1] + "\t" + decorate(s[i+1:n], false)
+ }
+ }
+ return s
+}
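+
+// Illustration (hypothetical call, not part of the original fixture):
+// via t.Log, decorate("oops", true) yields roughly
+//	"\tmy_test.go:17: oops\n"
+// where the file:line prefix comes from three frames up the stack, and
+// each further line of a multi-line message gets one extra tab.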
+
+// T is a type passed to Test functions to manage test state and support formatted test logs.
+// Logs are accumulated during execution and dumped to standard error when done.
+type T struct {
+ common
+ name string // Name of test.
+ startParallel chan bool // Parallel tests will wait on this.
+}
+
+// Fail marks the function as having failed but continues execution.
+func (c *common) Fail() { c.failed = true }
+
+// Failed returns whether the function has failed.
+func (c *common) Failed() bool { return c.failed }
+
+// FailNow marks the function as having failed and stops its execution.
+// Execution will continue at the next Test.
+func (c *common) FailNow() {
+ c.Fail()
+
+ // Calling runtime.Goexit will exit the goroutine, which
+ // will run the deferred functions in this goroutine,
+ // which will eventually run the deferred lines in tRunner,
+ // which will signal to the test loop that this test is done.
+ //
+ // A previous version of this code said:
+ //
+ // c.duration = ...
+ // c.signal <- c.self
+ // runtime.Goexit()
+ //
+ // This previous version duplicated code (those lines are in
+ // tRunner no matter what), but worse the goroutine teardown
+ // implicit in runtime.Goexit was not guaranteed to complete
+ // before the test exited. If a test deferred an important cleanup
+ // function (like removing temporary files), there was no guarantee
+ // it would run on a test failure. Because we send on c.signal during
+ // a top-of-stack deferred function now, we know that the send
+ // only happens after any other stacked defers have completed.
+ runtime.Goexit()
+}
+
+// log generates the output. It's always at the same stack depth.
+func (c *common) log(s string) {
+ c.output = append(c.output, decorate(s, true)...)
+}
+
+// Log formats its arguments using default formatting, analogous to Println(),
+// and records the text in the error log.
+func (c *common) Log(args ...interface{}) { c.log(fmt.Sprintln(args...)) }
+
+// Logf formats its arguments according to the format, analogous to Printf(),
+// and records the text in the error log.
+func (c *common) Logf(format string, args ...interface{}) { c.log(fmt.Sprintf(format, args...)) }
+
+// Error is equivalent to Log() followed by Fail().
+func (c *common) Error(args ...interface{}) {
+ c.log(fmt.Sprintln(args...))
+ c.Fail()
+}
+
+// Errorf is equivalent to Logf() followed by Fail().
+func (c *common) Errorf(format string, args ...interface{}) {
+ c.log(fmt.Sprintf(format, args...))
+ c.Fail()
+}
+
+// Fatal is equivalent to Log() followed by FailNow().
+func (c *common) Fatal(args ...interface{}) {
+ c.log(fmt.Sprintln(args...))
+ c.FailNow()
+}
+
+// Fatalf is equivalent to Logf() followed by FailNow().
+func (c *common) Fatalf(format string, args ...interface{}) {
+ c.log(fmt.Sprintf(format, args...))
+ c.FailNow()
+}
+
+// Parallel signals that this test is to be run in parallel with (and only with)
+// other parallel tests in this CPU group.
+func (t *T) Parallel() {
+ t.signal <- (*T)(nil) // Release main testing loop
+ <-t.startParallel // Wait for serial tests to finish
+}
+
+// An internal type but exported because it is cross-package; part of the implementation
+// of go test.
+type InternalTest struct {
+ Name string
+ F func(*T)
+}
+
+func tRunner(t *T, test *InternalTest) {
+ t.start = time.Now()
+
+ // When this goroutine is done, either because test.F(t)
+ // returned normally or because a test failure triggered
+ // a call to runtime.Goexit, record the duration and send
+ // a signal saying that the test is done.
+ defer func() {
+ t.duration = time.Now().Sub(t.start)
+ t.signal <- t
+ }()
+
+ test.F(t)
+}
+
+// An internal function but exported because it is cross-package; part of the implementation
+// of go test.
+func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {
+ flag.Parse()
+ parseCpuList()
+
+ before()
+ startAlarm()
+ testOk := RunTests(matchString, tests)
+ exampleOk := RunExamples(examples)
+ if !testOk || !exampleOk {
+ fmt.Println("FAIL")
+ os.Exit(1)
+ }
+ fmt.Println("PASS")
+ stopAlarm()
+ RunBenchmarks(matchString, benchmarks)
+ after()
+}
+
+func (t *T) report() {
+ tstr := fmt.Sprintf("(%.2f seconds)", t.duration.Seconds())
+ format := "--- %s: %s %s\n%s"
+ if t.failed {
+ fmt.Printf(format, "FAIL", t.name, tstr, t.output)
+ } else if *chatty {
+ fmt.Printf(format, "PASS", t.name, tstr, t.output)
+ }
+}
+
+func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {
+ ok = true
+ if len(tests) == 0 {
+ fmt.Fprintln(os.Stderr, "testing: warning: no tests to run")
+ return
+ }
+ for _, procs := range cpuList {
+ runtime.GOMAXPROCS(procs)
+ // We build a new channel tree for each run of the loop.
+ // collector merges in one channel all the upstream signals from parallel tests.
+ // If all tests pump to the same channel, a bug can occur where a test
+ // kicks off a goroutine that Fails, yet the test still delivers a completion signal,
+ // which skews the counting.
+ var collector = make(chan interface{})
+
+ numParallel := 0
+ startParallel := make(chan bool)
+
+ for i := 0; i < len(tests); i++ {
+ matched, err := matchString(*match, tests[i].Name)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.run: %s\n", err)
+ os.Exit(1)
+ }
+ if !matched {
+ continue
+ }
+ testName := tests[i].Name
+ if procs != 1 {
+ testName = fmt.Sprintf("%s-%d", tests[i].Name, procs)
+ }
+ t := &T{
+ common: common{
+ signal: make(chan interface{}),
+ },
+ name: testName,
+ startParallel: startParallel,
+ }
+ t.self = t
+ if *chatty {
+ fmt.Printf("=== RUN %s\n", t.name)
+ }
+ go tRunner(t, &tests[i])
+ out := (<-t.signal).(*T)
+ if out == nil { // Parallel run.
+ go func() {
+ collector <- <-t.signal
+ }()
+ numParallel++
+ continue
+ }
+ t.report()
+ ok = ok && !out.failed
+ }
+
+ running := 0
+ for numParallel+running > 0 {
+ if running < *parallel && numParallel > 0 {
+ startParallel <- true
+ running++
+ numParallel--
+ continue
+ }
+ t := (<-collector).(*T)
+ t.report()
+ ok = ok && !t.failed
+ running--
+ }
+ }
+ return
+}
+
+// before runs before all testing.
+func before() {
+ if *memProfileRate > 0 {
+ runtime.MemProfileRate = *memProfileRate
+ }
+ if *cpuProfile != "" {
+ f, err := os.Create(*cpuProfile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s", err)
+ return
+ }
+ if err := pprof.StartCPUProfile(f); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s", err)
+ f.Close()
+ return
+ }
+ // Could save f so after can call f.Close; not worth the effort.
+ }
+}
+
+// after runs after all testing.
+func after() {
+ if *cpuProfile != "" {
+ pprof.StopCPUProfile() // flushes profile to disk
+ }
+ if *memProfile != "" {
+ f, err := os.Create(*memProfile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s", err)
+ return
+ }
+ if err = pprof.WriteHeapProfile(f); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't write %s: %s", *memProfile, err)
+ }
+ f.Close()
+ }
+}
+
+var timer *time.Timer
+
+// startAlarm starts an alarm if requested.
+func startAlarm() {
+ if *timeout > 0 {
+ timer = time.AfterFunc(*timeout, alarm)
+ }
+}
+
+// stopAlarm turns off the alarm.
+func stopAlarm() {
+ if *timeout > 0 {
+ timer.Stop()
+ }
+}
+
+// alarm is called if the timeout expires.
+func alarm() {
+ panic("test timed out")
+}
+
+func parseCpuList() {
+ if len(*cpuListStr) == 0 {
+ cpuList = append(cpuList, runtime.GOMAXPROCS(-1))
+ } else {
+ for _, val := range strings.Split(*cpuListStr, ",") {
+ cpu, err := strconv.Atoi(val)
+ if err != nil || cpu <= 0 {
+ fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu", val)
+ os.Exit(1)
+ }
+ cpuList = append(cpuList, cpu)
+ }
+ }
+}
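Main above is invoked from a generated test main; a hedged reconstruction of that wiring (the matchString helper mirrors what the go tool generates, written here directly in terms of regexp.MatchString; tests, benchmarks, and examples stand in for the generated slices):

	func matchString(pat, str string) (bool, error) {
		return regexp.MatchString(pat, str)
	}

	func main() {
		testing.Main(matchString, tests, benchmarks, examples)
	}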
diff --git a/src/pkg/go/parser/Makefile b/src/pkg/go/parser/Makefile
deleted file mode 100644
index d301f41eb..000000000
--- a/src/pkg/go/parser/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../../Make.inc
-
-TARG=go/parser
-GOFILES=\
- interface.go\
- parser.go\
-
-include ../../../Make.pkg
diff --git a/src/pkg/go/parser/error_test.go b/src/pkg/go/parser/error_test.go
new file mode 100644
index 000000000..377c8b80c
--- /dev/null
+++ b/src/pkg/go/parser/error_test.go
@@ -0,0 +1,166 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a parser test harness. The files in the testdata
+// directory are parsed and the errors reported are compared against the
+// error messages expected in the test files. The test files must end in
+// .src rather than .go so that they are not disturbed by gofmt runs.
+//
+// Expected errors are indicated in the test files by putting a comment
+// of the form /* ERROR "rx" */ immediately following an offending token.
+// The harness will verify that an error matching the regular expression
+// rx is reported at that source position.
+//
+// For instance, the following test file indicates that a "not declared"
+// error should be reported for the undeclared variable x:
+//
+// package p
+// func f() {
+// _ = x /* ERROR "not declared" */ + 1
+// }
+
+package parser
+
+import (
+ "go/scanner"
+ "go/token"
+ "io/ioutil"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+const testdata = "testdata"
+
+// getFile assumes that each filename occurs at most once
+func getFile(filename string) (file *token.File) {
+ fset.Iterate(func(f *token.File) bool {
+ if f.Name() == filename {
+ if file != nil {
+ panic(filename + " used multiple times")
+ }
+ file = f
+ }
+ return true
+ })
+ return file
+}
+
+func getPos(filename string, offset int) token.Pos {
+ if f := getFile(filename); f != nil {
+ return f.Pos(offset)
+ }
+ return token.NoPos
+}
+
+// ERROR comments must be of the form /* ERROR "rx" */ and rx is
+// a regular expression that matches the expected error message.
+//
+var errRx = regexp.MustCompile(`^/\* *ERROR *"([^"]*)" *\*/$`)
+
+// expectedErrors collects the regular expressions of ERROR comments found
+// in files and returns them as a map of error positions to error messages.
+//
+func expectedErrors(t *testing.T, filename string, src []byte) map[token.Pos]string {
+ errors := make(map[token.Pos]string)
+
+ var s scanner.Scanner
+ // file was parsed already - do not add it again to the file
+ // set otherwise the position information returned here will
+ // not match the position information collected by the parser
+ s.Init(getFile(filename), src, nil, scanner.ScanComments)
+ var prev token.Pos // position of last non-comment, non-semicolon token
+
+ for {
+ pos, tok, lit := s.Scan()
+ switch tok {
+ case token.EOF:
+ return errors
+ case token.COMMENT:
+ s := errRx.FindStringSubmatch(lit)
+ if len(s) == 2 {
+ errors[prev] = string(s[1])
+ }
+ default:
+ prev = pos
+ }
+ }
+
+ panic("unreachable")
+}
+
+// compareErrors compares the map of expected error messages with the list
+// of found errors and reports discrepancies.
+//
+func compareErrors(t *testing.T, expected map[token.Pos]string, found scanner.ErrorList) {
+ for _, error := range found {
+ // error.Pos is a token.Position, but we want
+ // a token.Pos so we can do a map lookup
+ pos := getPos(error.Pos.Filename, error.Pos.Offset)
+ if msg, found := expected[pos]; found {
+ // we expect a message at pos; check if it matches
+ rx, err := regexp.Compile(msg)
+ if err != nil {
+ t.Errorf("%s: %v", error.Pos, err)
+ continue
+ }
+ if match := rx.MatchString(error.Msg); !match {
+ t.Errorf("%s: %q does not match %q", error.Pos, error.Msg, msg)
+ continue
+ }
+ // we have a match - eliminate this error
+ delete(expected, pos)
+ } else {
+ // To keep in mind when analyzing failed test output:
+ // If the same error position occurs multiple times in errors,
+ // this message will be triggered (because the first error at
+ // the position removes this position from the expected errors).
+ t.Errorf("%s: unexpected error: %s", error.Pos, error.Msg)
+ }
+ }
+
+ // there should be no expected errors left
+ if len(expected) > 0 {
+ t.Errorf("%d errors not reported:", len(expected))
+ for pos, msg := range expected {
+ t.Errorf("%s: %s\n", fset.Position(pos), msg)
+ }
+ }
+}
+
+func checkErrors(t *testing.T, filename string, input interface{}) {
+ src, err := readSource(filename, input)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ _, err = ParseFile(fset, filename, src, DeclarationErrors)
+ found, ok := err.(scanner.ErrorList)
+ if err != nil && !ok {
+ t.Error(err)
+ return
+ }
+
+ // we are expecting the following errors
+ // (collect these after parsing a file so that it is found in the file set)
+ expected := expectedErrors(t, filename, src)
+
+ // verify errors returned by the parser
+ compareErrors(t, expected, found)
+}
+
+func TestErrors(t *testing.T) {
+ list, err := ioutil.ReadDir(testdata)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, fi := range list {
+ name := fi.Name()
+ if !fi.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".src") {
+ checkErrors(t, filepath.Join(testdata, name), nil)
+ }
+ }
+}
diff --git a/src/pkg/go/parser/example_test.go b/src/pkg/go/parser/example_test.go
new file mode 100644
index 000000000..3c58e63a9
--- /dev/null
+++ b/src/pkg/go/parser/example_test.go
@@ -0,0 +1,34 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parser_test
+
+import (
+ "fmt"
+ "go/parser"
+ "go/token"
+)
+
+func ExampleParseFile() {
+ fset := token.NewFileSet() // positions are relative to fset
+
+ // Parse the file containing this very example
+ // but stop after processing the imports.
+ f, err := parser.ParseFile(fset, "example_test.go", nil, parser.ImportsOnly)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ // Print the imports from the file's AST.
+ for _, s := range f.Imports {
+ fmt.Println(s.Path.Value)
+ }
+
+ // output:
+ //
+ // "fmt"
+ // "go/parser"
+ // "go/token"
+}
diff --git a/src/pkg/go/parser/interface.go b/src/pkg/go/parser/interface.go
index 4f980fc65..5c203a784 100644
--- a/src/pkg/go/parser/interface.go
+++ b/src/pkg/go/parser/interface.go
@@ -8,8 +8,8 @@ package parser
import (
"bytes"
+ "errors"
"go/ast"
- "go/scanner"
"go/token"
"io"
"io/ioutil"
@@ -21,7 +21,7 @@ import (
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
//
-func readSource(filename string, src interface{}) ([]byte, os.Error) {
+func readSource(filename string, src interface{}) ([]byte, error) {
if src != nil {
switch s := src.(type) {
case string:
@@ -35,86 +35,30 @@ func readSource(filename string, src interface{}) ([]byte, os.Error) {
}
case io.Reader:
var buf bytes.Buffer
- _, err := io.Copy(&buf, s)
- if err != nil {
+ if _, err := io.Copy(&buf, s); err != nil {
return nil, err
}
return buf.Bytes(), nil
- default:
- return nil, os.NewError("invalid source")
}
+ return nil, errors.New("invalid source")
}
-
return ioutil.ReadFile(filename)
}
-func (p *parser) errors() os.Error {
- mode := scanner.Sorted
- if p.mode&SpuriousErrors == 0 {
- mode = scanner.NoMultiples
- }
- return p.GetError(mode)
-}
-
-// ParseExpr parses a Go expression and returns the corresponding
-// AST node. The fset, filename, and src arguments have the same interpretation
-// as for ParseFile. If there is an error, the result expression
-// may be nil or contain a partial AST.
-//
-func ParseExpr(fset *token.FileSet, filename string, src interface{}) (ast.Expr, os.Error) {
- data, err := readSource(filename, src)
- if err != nil {
- return nil, err
- }
-
- var p parser
- p.init(fset, filename, data, 0)
- x := p.parseRhs()
- if p.tok == token.SEMICOLON {
- p.next() // consume automatically inserted semicolon, if any
- }
- p.expect(token.EOF)
-
- return x, p.errors()
-}
-
-// ParseStmtList parses a list of Go statements and returns the list
-// of corresponding AST nodes. The fset, filename, and src arguments have the same
-// interpretation as for ParseFile. If there is an error, the node
-// list may be nil or contain partial ASTs.
+// A Mode value is a set of flags (or 0).
+// They control the amount of source code parsed and other optional
+// parser functionality.
//
-func ParseStmtList(fset *token.FileSet, filename string, src interface{}) ([]ast.Stmt, os.Error) {
- data, err := readSource(filename, src)
- if err != nil {
- return nil, err
- }
-
- var p parser
- p.init(fset, filename, data, 0)
- list := p.parseStmtList()
- p.expect(token.EOF)
-
- return list, p.errors()
-}
-
-// ParseDeclList parses a list of Go declarations and returns the list
-// of corresponding AST nodes. The fset, filename, and src arguments have the same
-// interpretation as for ParseFile. If there is an error, the node
-// list may be nil or contain partial ASTs.
-//
-func ParseDeclList(fset *token.FileSet, filename string, src interface{}) ([]ast.Decl, os.Error) {
- data, err := readSource(filename, src)
- if err != nil {
- return nil, err
- }
-
- var p parser
- p.init(fset, filename, data, 0)
- list := p.parseDeclList()
- p.expect(token.EOF)
-
- return list, p.errors()
-}
+type Mode uint
+
+const (
+ PackageClauseOnly Mode = 1 << iota // parsing stops after package clause
+ ImportsOnly // parsing stops after import declarations
+ ParseComments // parse comments and add them to AST
+ Trace // print a trace of parsed productions
+ DeclarationErrors // report declaration errors
+ SpuriousErrors // report all (not just the first) errors per line
+)
// ParseFile parses the source code of a single Go source file and returns
// the corresponding ast.File node. The source code may be provided via
@@ -123,7 +67,6 @@ func ParseDeclList(fset *token.FileSet, filename string, src interface{}) ([]ast
// If src != nil, ParseFile parses the source from src and the filename is
// only used when recording position information. The type of the argument
// for the src parameter must be string, []byte, or io.Reader.
-//
// If src == nil, ParseFile parses the file specified by filename.
//
// The mode parameter controls the amount of source text parsed and other
@@ -132,49 +75,30 @@ func ParseDeclList(fset *token.FileSet, filename string, src interface{}) ([]ast
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
-// errors were found, the result is a partial AST (with ast.BadX nodes
+// errors were found, the result is a partial AST (with ast.Bad* nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
-func ParseFile(fset *token.FileSet, filename string, src interface{}, mode uint) (*ast.File, os.Error) {
- data, err := readSource(filename, src)
+func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode) (*ast.File, error) {
+ // get source
+ text, err := readSource(filename, src)
if err != nil {
return nil, err
}
+ // parse source
var p parser
- p.init(fset, filename, data, mode)
- file := p.parseFile() // parseFile reads to EOF
+ p.init(fset, filename, text, mode)
+ f := p.parseFile()
- return file, p.errors()
-}
-
-// ParseFiles calls ParseFile for each file in the filenames list and returns
-// a map of package name -> package AST with all the packages found. The mode
-// bits are passed to ParseFile unchanged. Position information is recorded
-// in the file set fset.
-//
-// Files with parse errors are ignored. In this case the map of packages may
-// be incomplete (missing packages and/or incomplete packages) and the first
-// error encountered is returned.
-//
-func ParseFiles(fset *token.FileSet, filenames []string, mode uint) (pkgs map[string]*ast.Package, first os.Error) {
- pkgs = make(map[string]*ast.Package)
- for _, filename := range filenames {
- if src, err := ParseFile(fset, filename, nil, mode); err == nil {
- name := src.Name.Name
- pkg, found := pkgs[name]
- if !found {
- // TODO(gri) Use NewPackage here; reconsider ParseFiles API.
- pkg = &ast.Package{name, nil, nil, make(map[string]*ast.File)}
- pkgs[name] = pkg
- }
- pkg.Files[filename] = src
- } else if first == nil {
- first = err
- }
+ // sort errors
+ if p.mode&SpuriousErrors == 0 {
+ p.errors.RemoveMultiples()
+ } else {
+ p.errors.Sort()
}
- return
+
+ return f, p.errors.Err()
}
// ParseDir calls ParseFile for the files in the directory specified by path and
@@ -185,9 +109,9 @@ func ParseFiles(fset *token.FileSet, filenames []string, mode uint) (pkgs map[st
//
// If the directory couldn't be read, a nil map and the respective error are
// returned. If a parse error occurred, a non-nil but incomplete map and the
-// error are returned.
+// first error encountered are returned.
//
-func ParseDir(fset *token.FileSet, path string, filter func(*os.FileInfo) bool, mode uint) (map[string]*ast.Package, os.Error) {
+func ParseDir(fset *token.FileSet, path string, filter func(os.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error) {
fd, err := os.Open(path)
if err != nil {
return nil, err
@@ -199,16 +123,41 @@ func ParseDir(fset *token.FileSet, path string, filter func(*os.FileInfo) bool,
return nil, err
}
- filenames := make([]string, len(list))
- n := 0
- for i := 0; i < len(list); i++ {
- d := &list[i]
+ pkgs = make(map[string]*ast.Package)
+ for _, d := range list {
if filter == nil || filter(d) {
- filenames[n] = filepath.Join(path, d.Name)
- n++
+ filename := filepath.Join(path, d.Name())
+ if src, err := ParseFile(fset, filename, nil, mode); err == nil {
+ name := src.Name.Name
+ pkg, found := pkgs[name]
+ if !found {
+ pkg = &ast.Package{
+ Name: name,
+ Files: make(map[string]*ast.File),
+ }
+ pkgs[name] = pkg
+ }
+ pkg.Files[filename] = src
+ } else if first == nil {
+ first = err
+ }
}
}
- filenames = filenames[0:n]
- return ParseFiles(fset, filenames, mode)
+ return
+}
+
+// ParseExpr is a convenience function for obtaining the AST of an expression x.
+// The position information recorded in the AST is undefined.
+//
+func ParseExpr(x string) (ast.Expr, error) {
+ // parse x within the context of a complete package for correct scopes;
+ // use //line directive for correct positions in error messages and put
+ // x alone on a separate line (handles line comments), followed by a ';'
+ // to force an error if the expression is incomplete
+ file, err := ParseFile(token.NewFileSet(), "", "package p;func _(){_=\n//line :1\n"+x+"\n;}", 0)
+ if err != nil {
+ return nil, err
+ }
+ return file.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.AssignStmt).Rhs[0], nil
}
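A hedged usage note for the new ParseExpr (positions in the result are undefined, per its doc comment): the package wrapper and //line directive never surface in the returned AST, so callers receive just the expression node. Imports assumed: fmt, go/ast, go/parser, log.

	expr, err := parser.ParseExpr("x + 2*y")
	if err != nil {
		log.Fatal(err)
	}
	bin := expr.(*ast.BinaryExpr) // the top-level "+"
	fmt.Println(bin.Op)           // +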
diff --git a/src/pkg/go/parser/parser.go b/src/pkg/go/parser/parser.go
index 9c14d1667..e362e13a7 100644
--- a/src/pkg/go/parser/parser.go
+++ b/src/pkg/go/parser/parser.go
@@ -14,29 +14,19 @@ import (
"go/ast"
"go/scanner"
"go/token"
-)
-
-// The mode parameter to the Parse* functions is a set of flags (or 0).
-// They control the amount of source code parsed and other optional
-// parser functionality.
-//
-const (
- PackageClauseOnly uint = 1 << iota // parsing stops after package clause
- ImportsOnly // parsing stops after import declarations
- ParseComments // parse comments and add them to AST
- Trace // print a trace of parsed productions
- DeclarationErrors // report declaration errors
- SpuriousErrors // report all (not just the first) errors per line
+ "strconv"
+ "strings"
+ "unicode"
)
// The parser structure holds the parser's internal state.
type parser struct {
- file *token.File
- scanner.ErrorVector
+ file *token.File
+ errors scanner.ErrorList
scanner scanner.Scanner
// Tracing/debugging
- mode uint // parsing mode
+ mode Mode // parsing mode
trace bool // == (mode & Trace != 0)
indent uint // indentation used for tracing output
@@ -50,6 +40,13 @@ type parser struct {
tok token.Token // one token look-ahead
lit string // token literal
+ // Error recovery
+ // (used to limit the number of calls to syncXXX functions
+ // w/o making scanning progress - avoids potential endless
+ // loops across multiple parser functions during error recovery)
+ syncPos token.Pos // last synchronization position
+ syncCnt int // number of calls to syncXXX without progress
+
// Non-syntactic parser control
exprLev int // < 0: in control clause, >= 0: in expression
@@ -65,18 +62,14 @@ type parser struct {
targetStack [][]*ast.Ident // stack of unresolved labels
}
-// scannerMode returns the scanner mode bits given the parser's mode bits.
-func scannerMode(mode uint) uint {
- var m uint = scanner.InsertSemis
+func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
+ p.file = fset.AddFile(filename, fset.Base(), len(src))
+ var m scanner.Mode
if mode&ParseComments != 0 {
- m |= scanner.ScanComments
+ m = scanner.ScanComments
}
- return m
-}
-
-func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
- p.file = fset.AddFile(filename, fset.Base(), len(src))
- p.scanner.Init(p.file, src, p, scannerMode(mode))
+ eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
+ p.scanner.Init(p.file, src, eh, m)
p.mode = mode
p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
@@ -144,28 +137,31 @@ func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjK
}
}
-func (p *parser) shortVarDecl(idents []*ast.Ident) {
+func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
// Go spec: A short variable declaration may redeclare variables
// provided they were originally declared in the same block with
// the same type, and at least one of the non-blank variables is new.
n := 0 // number of new variables
- for _, ident := range idents {
- assert(ident.Obj == nil, "identifier already declared or resolved")
- obj := ast.NewObj(ast.Var, ident.Name)
- // short var declarations cannot have redeclaration errors
- // and are not global => no need to remember the respective
- // declaration
- ident.Obj = obj
- if ident.Name != "_" {
- if alt := p.topScope.Insert(obj); alt != nil {
- ident.Obj = alt // redeclaration
- } else {
- n++ // new declaration
+ for _, x := range list {
+ if ident, isIdent := x.(*ast.Ident); isIdent {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ obj := ast.NewObj(ast.Var, ident.Name)
+ // remember corresponding assignment for other tools
+ obj.Decl = decl
+ ident.Obj = obj
+ if ident.Name != "_" {
+ if alt := p.topScope.Insert(obj); alt != nil {
+ ident.Obj = alt // redeclaration
+ } else {
+ n++ // new declaration
+ }
}
+ } else {
+ p.errorExpected(x.Pos(), "identifier")
}
}
if n == 0 && p.mode&DeclarationErrors != 0 {
- p.error(idents[0].Pos(), "no new variables on left side of :=")
+ p.error(list[0].Pos(), "no new variables on left side of :=")
}
}
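
A sketch of what the new Obj.Decl back-link gives tools (the source
string is illustrative):

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
	)

	func main() {
		src := "package p; func f() { x := 1; _ = x }"
		f, _ := parser.ParseFile(token.NewFileSet(), "", src, 0)
		as := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.AssignStmt)
		x := as.Lhs[0].(*ast.Ident)
		fmt.Println(x.Obj.Decl == as) // true: Obj.Decl records the := statement
	}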
@@ -263,7 +259,7 @@ func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
}
}
- comment = &ast.Comment{p.pos, p.lit}
+ comment = &ast.Comment{Slash: p.pos, Text: p.lit}
p.next0()
return
@@ -284,7 +280,7 @@ func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int)
}
// add comment group to the comments list
- comments = &ast.CommentGroup{list}
+ comments = &ast.CommentGroup{List: list}
p.comments = append(p.comments, comments)
return
@@ -341,7 +337,7 @@ func (p *parser) next() {
}
func (p *parser) error(pos token.Pos, msg string) {
- p.Error(p.file.Position(pos), msg)
+ p.errors.Add(p.file.Position(pos), msg)
}
func (p *parser) errorExpected(pos token.Pos, msg string) {
@@ -349,7 +345,7 @@ func (p *parser) errorExpected(pos token.Pos, msg string) {
if pos == p.pos {
// the error happened at the current position;
// make the error message more specific
- if p.tok == token.SEMICOLON && p.lit[0] == '\n' {
+ if p.tok == token.SEMICOLON && p.lit == "\n" {
msg += ", found newline"
} else {
msg += ", found '" + p.tok.String() + "'"
@@ -370,10 +366,39 @@ func (p *parser) expect(tok token.Token) token.Pos {
return pos
}
+// expectClosing is like expect but provides a better error message
+// for the common case of a missing comma before a newline.
+//
+func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
+ if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
+ p.error(p.pos, "missing ',' before newline in "+context)
+ p.next()
+ }
+ return p.expect(tok)
+}
+
func (p *parser) expectSemi() {
+ // semicolon is optional before a closing ')' or '}'
if p.tok != token.RPAREN && p.tok != token.RBRACE {
- p.expect(token.SEMICOLON)
+ if p.tok == token.SEMICOLON {
+ p.next()
+ } else {
+ p.errorExpected(p.pos, "';'")
+ syncStmt(p)
+ }
+ }
+}
+
+func (p *parser) atComma(context string) bool {
+ if p.tok == token.COMMA {
+ return true
}
+ if p.tok == token.SEMICOLON && p.lit == "\n" {
+ p.error(p.pos, "missing ',' before newline in "+context)
+ return true // "insert" the comma and continue
+ }
+ return false
}
func assert(cond bool, msg string) {
@@ -382,6 +407,68 @@ func assert(cond bool, msg string) {
}
}
+// syncStmt advances to the next statement.
+// Used for synchronization after an error.
+//
+func syncStmt(p *parser) {
+ for {
+ switch p.tok {
+ case token.BREAK, token.CONST, token.CONTINUE, token.DEFER,
+ token.FALLTHROUGH, token.FOR, token.GO, token.GOTO,
+ token.IF, token.RETURN, token.SELECT, token.SWITCH,
+ token.TYPE, token.VAR:
+ // Return only if parser made some progress since last
+ // sync or if it has not reached 10 sync calls without
+ // progress. Otherwise consume at least one token to
+ // avoid an endless parser loop (it is possible that
+ // both parseOperand and parseStmt call syncStmt and
+ // correctly do not advance, thus the need for the
+ // invocation limit p.syncCnt).
+ if p.pos == p.syncPos && p.syncCnt < 10 {
+ p.syncCnt++
+ return
+ }
+ if p.pos > p.syncPos {
+ p.syncPos = p.pos
+ p.syncCnt = 0
+ return
+ }
+ // Reaching here indicates a parser bug, likely an
+ // incorrect token list in this function, but it only
+ // leads to skipping of possibly correct code if a
+ // previous error is present, and thus is preferred
+ // over a non-terminating parse.
+ case token.EOF:
+ return
+ }
+ p.next()
+ }
+}
+
+// syncDecl advances to the next declaration.
+// Used for synchronization after an error.
+//
+func syncDecl(p *parser) {
+ for {
+ switch p.tok {
+ case token.CONST, token.TYPE, token.VAR:
+ // see comments in syncStmt
+ if p.pos == p.syncPos && p.syncCnt < 10 {
+ p.syncCnt++
+ return
+ }
+ if p.pos > p.syncPos {
+ p.syncPos = p.pos
+ p.syncCnt = 0
+ return
+ }
+ case token.EOF:
+ return
+ }
+ p.next()
+ }
+}
+
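
The effect of the sync functions, sketched: one bad token no longer
derails the rest of the file, because the parser skips ahead to the
next statement or declaration keyword (with syncCnt guarding against
endless loops):

	package main

	import (
		"fmt"
		"go/parser"
		"go/token"
	)

	func main() {
		src := "package p\n+\nvar x int\n" // '+' cannot start a declaration
		f, err := parser.ParseFile(token.NewFileSet(), "", src, 0)
		fmt.Println(err != nil)   // true: the bad token is reported
		fmt.Println(len(f.Decls)) // 2: a BadDecl, then the recovered var decl
	}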
// ----------------------------------------------------------------------------
// Identifiers
@@ -394,7 +481,7 @@ func (p *parser) parseIdent() *ast.Ident {
} else {
p.expect(token.IDENT) // use expect() error handling
}
- return &ast.Ident{pos, name, nil}
+ return &ast.Ident{NamePos: pos, Name: name}
}
func (p *parser) parseIdentList() (list []*ast.Ident) {
@@ -434,7 +521,9 @@ func (p *parser) parseLhsList() []ast.Expr {
switch p.tok {
case token.DEFINE:
// lhs of a short variable declaration
- p.shortVarDecl(p.makeIdentList(list))
+ // but doesn't enter scope until later:
+ // the caller must call p.shortVarDecl with the enclosing
+ // *ast.AssignStmt at the appropriate time (as parseSimpleStmt
+ // and parseCommClause do).
case token.COLON:
// lhs of a label declaration or a communication clause of a select
// statement (parseLhsList is not called when parsing the case clause
@@ -470,7 +559,7 @@ func (p *parser) parseType() ast.Expr {
pos := p.pos
p.errorExpected(pos, "type")
p.next() // make progress
- return &ast.BadExpr{pos, p.pos}
+ return &ast.BadExpr{From: pos, To: p.pos}
}
return typ
@@ -490,7 +579,7 @@ func (p *parser) parseTypeName() ast.Expr {
p.next()
p.resolve(ident)
sel := p.parseIdent()
- return &ast.SelectorExpr{ident, sel}
+ return &ast.SelectorExpr{X: ident, Sel: sel}
}
return ident
@@ -504,7 +593,7 @@ func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
lbrack := p.expect(token.LBRACK)
var len ast.Expr
if ellipsisOk && p.tok == token.ELLIPSIS {
- len = &ast.Ellipsis{p.pos, nil}
+ len = &ast.Ellipsis{Ellipsis: p.pos}
p.next()
} else if p.tok != token.RBRACK {
len = p.parseRhs()
@@ -512,7 +601,7 @@ func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
p.expect(token.RBRACK)
elt := p.parseType()
- return &ast.ArrayType{lbrack, len, elt}
+ return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
}
func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
@@ -520,9 +609,11 @@ func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
for i, x := range list {
ident, isIdent := x.(*ast.Ident)
if !isIdent {
- pos := x.(ast.Expr).Pos()
- p.errorExpected(pos, "identifier")
- ident = &ast.Ident{pos, "_", nil}
+ if _, isBad := x.(*ast.BadExpr); !isBad {
+ // only report error if it's a new one
+ p.errorExpected(x.Pos(), "identifier")
+ }
+ ident = &ast.Ident{NamePos: x.Pos(), Name: "_"}
}
idents[i] = ident
}
@@ -542,7 +633,7 @@ func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
// optional tag
var tag *ast.BasicLit
if p.tok == token.STRING {
- tag = &ast.BasicLit{p.pos, p.tok, p.lit}
+ tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
p.next()
}
@@ -558,13 +649,13 @@ func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
if n := len(list); n > 1 || !isTypeName(deref(typ)) {
pos := typ.Pos()
p.errorExpected(pos, "anonymous field")
- typ = &ast.BadExpr{pos, list[n-1].End()}
+ typ = &ast.BadExpr{From: pos, To: list[n-1].End()}
}
}
p.expectSemi() // call before accessing p.linecomment
- field := &ast.Field{doc, idents, typ, tag, p.lineComment}
+ field := &ast.Field{Doc: doc, Names: idents, Type: typ, Tag: tag, Comment: p.lineComment}
p.declare(field, nil, scope, ast.Var, idents...)
return field
@@ -587,8 +678,14 @@ func (p *parser) parseStructType() *ast.StructType {
}
rbrace := p.expect(token.RBRACE)
- // TODO(gri): store struct scope in AST
- return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
+ return &ast.StructType{
+ Struct: pos,
+ Fields: &ast.FieldList{
+ Opening: lbrace,
+ List: list,
+ Closing: rbrace,
+ },
+ }
}
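
The switch from positional to keyed composite literals recurs through
this whole patch; a brief sketch of the rationale (the BadExpr fields
are real, the surrounding program is illustrative):

	package main

	import (
		"fmt"
		"go/ast"
		"go/token"
	)

	func main() {
		var from, to token.Pos = 1, 5
		// positional, old style: &ast.BadExpr{from, to} breaks silently
		// if the struct ever gains or reorders fields; keyed fields are
		// robust against that and easier for tools to check
		bad := &ast.BadExpr{From: from, To: to}
		fmt.Println(bad.Pos(), bad.End()) // 1 5
	}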
func (p *parser) parsePointerType() *ast.StarExpr {
@@ -599,7 +696,7 @@ func (p *parser) parsePointerType() *ast.StarExpr {
star := p.expect(token.MUL)
base := p.parseType()
- return &ast.StarExpr{star, base}
+ return &ast.StarExpr{Star: star, X: base}
}
func (p *parser) tryVarType(isParam bool) ast.Expr {
@@ -609,12 +706,9 @@ func (p *parser) tryVarType(isParam bool) ast.Expr {
typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
if typ == nil {
p.error(pos, "'...' parameter is missing type")
- typ = &ast.BadExpr{pos, p.pos}
+ typ = &ast.BadExpr{From: pos, To: p.pos}
}
- if p.tok != token.RPAREN {
- p.error(pos, "can use '...' with last parameter type only")
- }
- return &ast.Ellipsis{pos, typ}
+ return &ast.Ellipsis{Ellipsis: pos, Elt: typ}
}
return p.tryIdentOrType(false)
}
@@ -625,7 +719,7 @@ func (p *parser) parseVarType(isParam bool) ast.Expr {
pos := p.pos
p.errorExpected(pos, "type")
p.next() // make progress
- typ = &ast.BadExpr{pos, p.pos}
+ typ = &ast.BadExpr{From: pos, To: p.pos}
}
return typ
}
@@ -636,21 +730,21 @@ func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
}
// a list of identifiers looks like a list of type names
- for {
- // parseVarType accepts any type (including parenthesized ones)
- // even though the syntax does not permit them here: we
- // accept them all for more robust parsing and complain
- // afterwards
- list = append(list, p.parseVarType(isParam))
+ //
+ // parse/tryVarType accepts any type (including parenthesized
+ // ones) even though the syntax does not permit them here: we
+ // accept them all for more robust parsing and complain later
+ for typ := p.parseVarType(isParam); typ != nil; {
+ list = append(list, typ)
if p.tok != token.COMMA {
break
}
p.next()
+ typ = p.tryVarType(isParam) // maybe nil as in: func f(int,) {}
}
// if we had a list of identifiers, it must be followed by a type
- typ = p.tryVarType(isParam)
- if typ != nil {
+ if typ = p.tryVarType(isParam); typ != nil {
p.resolve(typ)
}
@@ -666,7 +760,7 @@ func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params [
if typ != nil {
// IdentifierList Type
idents := p.makeIdentList(list)
- field := &ast.Field{nil, idents, typ, nil, nil}
+ field := &ast.Field{Names: idents, Type: typ}
params = append(params, field)
// Go spec: The scope of an identifier denoting a function
// parameter or result variable is the function body.
@@ -678,12 +772,12 @@ func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params [
for p.tok != token.RPAREN && p.tok != token.EOF {
idents := p.parseIdentList()
typ := p.parseVarType(ellipsisOk)
- field := &ast.Field{nil, idents, typ, nil, nil}
+ field := &ast.Field{Names: idents, Type: typ}
params = append(params, field)
// Go spec: The scope of an identifier denoting a function
// parameter or result variable is the function body.
p.declare(field, nil, scope, ast.Var, idents...)
- if p.tok != token.COMMA {
+ if !p.atComma("parameter list") {
break
}
p.next()
@@ -713,7 +807,7 @@ func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldLi
}
rparen := p.expect(token.RPAREN)
- return &ast.FieldList{lparen, params, rparen}
+ return &ast.FieldList{Opening: lparen, List: params, Closing: rparen}
}
func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
@@ -755,7 +849,7 @@ func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
scope := ast.NewScope(p.topScope) // function scope
params, results := p.parseSignature(scope)
- return &ast.FuncType{pos, params, results}, scope
+ return &ast.FuncType{Func: pos, Params: params, Results: results}, scope
}
func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
@@ -772,7 +866,7 @@ func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
idents = []*ast.Ident{ident}
scope := ast.NewScope(nil) // method scope
params, results := p.parseSignature(scope)
- typ = &ast.FuncType{token.NoPos, params, results}
+ typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
} else {
// embedded interface
typ = x
@@ -780,7 +874,7 @@ func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
}
p.expectSemi() // call before accessing p.linecomment
- spec := &ast.Field{doc, idents, typ, nil, p.lineComment}
+ spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
p.declare(spec, nil, scope, ast.Fun, idents...)
return spec
@@ -800,8 +894,14 @@ func (p *parser) parseInterfaceType() *ast.InterfaceType {
}
rbrace := p.expect(token.RBRACE)
- // TODO(gri): store interface scope in AST
- return &ast.InterfaceType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
+ return &ast.InterfaceType{
+ Interface: pos,
+ Methods: &ast.FieldList{
+ Opening: lbrace,
+ List: list,
+ Closing: rbrace,
+ },
+ }
}
func (p *parser) parseMapType() *ast.MapType {
@@ -815,7 +915,7 @@ func (p *parser) parseMapType() *ast.MapType {
p.expect(token.RBRACK)
value := p.parseType()
- return &ast.MapType{pos, key, value}
+ return &ast.MapType{Map: pos, Key: key, Value: value}
}
func (p *parser) parseChanType() *ast.ChanType {
@@ -838,7 +938,7 @@ func (p *parser) parseChanType() *ast.ChanType {
}
value := p.parseType()
- return &ast.ChanType{pos, dir, value}
+ return &ast.ChanType{Begin: pos, Dir: dir, Value: value}
}
// If the result is an identifier, it is not resolved.
@@ -866,7 +966,7 @@ func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
p.next()
typ := p.parseType()
rparen := p.expect(token.RPAREN)
- return &ast.ParenExpr{lparen, typ, rparen}
+ return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
}
// no type found
@@ -909,7 +1009,7 @@ func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
p.closeScope()
rbrace := p.expect(token.RBRACE)
- return &ast.BlockStmt{lbrace, list, rbrace}
+ return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
func (p *parser) parseBlockStmt() *ast.BlockStmt {
@@ -923,7 +1023,7 @@ func (p *parser) parseBlockStmt() *ast.BlockStmt {
p.closeScope()
rbrace := p.expect(token.RBRACE)
- return &ast.BlockStmt{lbrace, list, rbrace}
+ return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// ----------------------------------------------------------------------------
@@ -944,7 +1044,7 @@ func (p *parser) parseFuncTypeOrLit() ast.Expr {
body := p.parseBody(scope)
p.exprLev--
- return &ast.FuncLit{typ, body}
+ return &ast.FuncLit{Type: typ, Body: body}
}
// parseOperand may return an expression or a raw type (incl. array
@@ -965,7 +1065,7 @@ func (p *parser) parseOperand(lhs bool) ast.Expr {
return x
case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
- x := &ast.BasicLit{p.pos, p.tok, p.lit}
+ x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
p.next()
return x
@@ -976,24 +1076,24 @@ func (p *parser) parseOperand(lhs bool) ast.Expr {
x := p.parseRhsOrType() // types may be parenthesized: (some type)
p.exprLev--
rparen := p.expect(token.RPAREN)
- return &ast.ParenExpr{lparen, x, rparen}
+ return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
case token.FUNC:
return p.parseFuncTypeOrLit()
+ }
- default:
- if typ := p.tryIdentOrType(true); typ != nil {
- // could be type for composite literal or conversion
- _, isIdent := typ.(*ast.Ident)
- assert(!isIdent, "type cannot be identifier")
- return typ
- }
+ if typ := p.tryIdentOrType(true); typ != nil {
+ // could be type for composite literal or conversion
+ _, isIdent := typ.(*ast.Ident)
+ assert(!isIdent, "type cannot be identifier")
+ return typ
}
+ // we have an error
pos := p.pos
p.errorExpected(pos, "operand")
- p.next() // make progress
- return &ast.BadExpr{pos, p.pos}
+ syncStmt(p)
+ return &ast.BadExpr{From: pos, To: p.pos}
}
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
@@ -1003,7 +1103,7 @@ func (p *parser) parseSelector(x ast.Expr) ast.Expr {
sel := p.parseIdent()
- return &ast.SelectorExpr{x, sel}
+ return &ast.SelectorExpr{X: x, Sel: sel}
}
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
@@ -1021,7 +1121,7 @@ func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
}
p.expect(token.RPAREN)
- return &ast.TypeAssertExpr{x, typ}
+ return &ast.TypeAssertExpr{X: x, Type: typ}
}
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
@@ -1047,9 +1147,9 @@ func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
rbrack := p.expect(token.RBRACK)
if isSlice {
- return &ast.SliceExpr{x, lbrack, low, high, rbrack}
+ return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: low, High: high, Rbrack: rbrack}
}
- return &ast.IndexExpr{x, lbrack, low, rbrack}
+ return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: low, Rbrack: rbrack}
}
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
@@ -1067,15 +1167,15 @@ func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
ellipsis = p.pos
p.next()
}
- if p.tok != token.COMMA {
+ if !p.atComma("argument list") {
break
}
p.next()
}
p.exprLev--
- rparen := p.expect(token.RPAREN)
+ rparen := p.expectClosing(token.RPAREN, "argument list")
- return &ast.CallExpr{fun, lparen, list, ellipsis, rparen}
+ return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
func (p *parser) parseElement(keyOk bool) ast.Expr {
@@ -1092,7 +1192,7 @@ func (p *parser) parseElement(keyOk bool) ast.Expr {
if p.tok == token.COLON {
colon := p.pos
p.next()
- return &ast.KeyValueExpr{x, colon, p.parseElement(false)}
+ return &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseElement(false)}
}
p.resolve(x) // not a map key
}
@@ -1107,7 +1207,7 @@ func (p *parser) parseElementList() (list []ast.Expr) {
for p.tok != token.RBRACE && p.tok != token.EOF {
list = append(list, p.parseElement(true))
- if p.tok != token.COMMA {
+ if !p.atComma("composite literal") {
break
}
p.next()
@@ -1128,13 +1228,13 @@ func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
elts = p.parseElementList()
}
p.exprLev--
- rbrace := p.expect(token.RBRACE)
- return &ast.CompositeLit{typ, lbrace, elts, rbrace}
+ rbrace := p.expectClosing(token.RBRACE, "composite literal")
+ return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
}
// checkExpr checks that x is an expression (and not a type).
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
- switch t := unparen(x).(type) {
+ switch unparen(x).(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.BasicLit:
@@ -1158,7 +1258,7 @@ func (p *parser) checkExpr(x ast.Expr) ast.Expr {
default:
// all other nodes are not proper expressions
p.errorExpected(x.Pos(), "expression")
- x = &ast.BadExpr{x.Pos(), x.End()}
+ x = &ast.BadExpr{From: x.Pos(), To: x.End()}
}
return x
}
@@ -1221,7 +1321,7 @@ func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
case *ast.ArrayType:
if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
p.error(len.Pos(), "expected array length, found '...'")
- x = &ast.BadExpr{x.Pos(), x.End()}
+ x = &ast.BadExpr{From: x.Pos(), To: x.End()}
}
}
@@ -1251,9 +1351,9 @@ L:
x = p.parseTypeAssertion(p.checkExpr(x))
default:
pos := p.pos
- p.next() // make progress
p.errorExpected(pos, "selector or type assertion")
- x = &ast.BadExpr{pos, p.pos}
+ p.next() // make progress
+ x = &ast.BadExpr{From: pos, To: p.pos}
}
case token.LBRACK:
if lhs {
@@ -1294,7 +1394,7 @@ func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
pos, op := p.pos, p.tok
p.next()
x := p.parseUnaryExpr(false)
- return &ast.UnaryExpr{pos, op, p.checkExpr(x)}
+ return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}
case token.ARROW:
// channel type or receive expression
@@ -1303,18 +1403,18 @@ func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
if p.tok == token.CHAN {
p.next()
value := p.parseType()
- return &ast.ChanType{pos, ast.RECV, value}
+ return &ast.ChanType{Begin: pos, Dir: ast.RECV, Value: value}
}
x := p.parseUnaryExpr(false)
- return &ast.UnaryExpr{pos, token.ARROW, p.checkExpr(x)}
+ return &ast.UnaryExpr{OpPos: pos, Op: token.ARROW, X: p.checkExpr(x)}
case token.MUL:
// pointer type or unary "*" expression
pos := p.pos
p.next()
x := p.parseUnaryExpr(false)
- return &ast.StarExpr{pos, p.checkExprOrType(x)}
+ return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
}
return p.parsePrimaryExpr(lhs)
@@ -1336,7 +1436,7 @@ func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
lhs = false
}
y := p.parseBinaryExpr(false, prec+1)
- x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)}
+ x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
}
}
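
parseBinaryExpr is standard precedence climbing; a quick sketch
confirming the tree shape it produces:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
	)

	func main() {
		x, _ := parser.ParseExpr("a + b*c")
		add := x.(*ast.BinaryExpr)
		fmt.Println(add.Op)                     // + at the root
		fmt.Println(add.Y.(*ast.BinaryExpr).Op) // * nested, binds tighter
	}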
@@ -1398,12 +1498,16 @@ func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
pos := p.pos
p.next()
- y = []ast.Expr{&ast.UnaryExpr{pos, token.RANGE, p.parseRhs()}}
+ y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
isRange = true
} else {
y = p.parseRhsList()
}
- return &ast.AssignStmt{x, pos, tok, y}, isRange
+ as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
+ if tok == token.DEFINE {
+ p.shortVarDecl(as, x)
+ }
+ return as, isRange
}
if len(x) > 1 {
@@ -1420,7 +1524,7 @@ func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
// Go spec: The scope of a label is the body of the function
// in which it is declared and excludes the body of any nested
// function.
- stmt := &ast.LabeledStmt{label, colon, p.parseStmt()}
+ stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
p.declare(stmt, nil, p.labelScope, ast.Lbl, label)
return stmt, false
}
@@ -1431,24 +1535,24 @@ func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
// before the ':' that caused the problem. Thus, use the (latest) colon
// position for error reporting.
p.error(colon, "illegal label declaration")
- return &ast.BadStmt{x[0].Pos(), colon + 1}, false
+ return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
case token.ARROW:
// send statement
arrow := p.pos
- p.next() // consume "<-"
+ p.next()
y := p.parseRhs()
- return &ast.SendStmt{x[0], arrow, y}, false
+ return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
case token.INC, token.DEC:
// increment or decrement
- s := &ast.IncDecStmt{x[0], p.pos, p.tok}
- p.next() // consume "++" or "--"
+ s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
+ p.next()
return s, false
}
// expression
- return &ast.ExprStmt{x[0]}, false
+ return &ast.ExprStmt{X: x[0]}, false
}
func (p *parser) parseCallExpr() *ast.CallExpr {
@@ -1456,7 +1560,10 @@ func (p *parser) parseCallExpr() *ast.CallExpr {
if call, isCall := x.(*ast.CallExpr); isCall {
return call
}
- p.errorExpected(x.Pos(), "function/method call")
+ if _, isBad := x.(*ast.BadExpr); !isBad {
+ // only report error if it's a new one
+ p.errorExpected(x.Pos(), "function/method call")
+ }
return nil
}
@@ -1469,10 +1576,10 @@ func (p *parser) parseGoStmt() ast.Stmt {
call := p.parseCallExpr()
p.expectSemi()
if call == nil {
- return &ast.BadStmt{pos, pos + 2} // len("go")
+ return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
}
- return &ast.GoStmt{pos, call}
+ return &ast.GoStmt{Go: pos, Call: call}
}
func (p *parser) parseDeferStmt() ast.Stmt {
@@ -1484,10 +1591,10 @@ func (p *parser) parseDeferStmt() ast.Stmt {
call := p.parseCallExpr()
p.expectSemi()
if call == nil {
- return &ast.BadStmt{pos, pos + 5} // len("defer")
+ return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
}
- return &ast.DeferStmt{pos, call}
+ return &ast.DeferStmt{Defer: pos, Call: call}
}
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
@@ -1503,7 +1610,7 @@ func (p *parser) parseReturnStmt() *ast.ReturnStmt {
}
p.expectSemi()
- return &ast.ReturnStmt{pos, x}
+ return &ast.ReturnStmt{Return: pos, Results: x}
}
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
@@ -1521,7 +1628,7 @@ func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
}
p.expectSemi()
- return &ast.BranchStmt{pos, tok, label}
+ return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
}
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
@@ -1532,7 +1639,7 @@ func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
return p.checkExpr(es.X)
}
p.error(s.Pos(), "expected condition, found simple statement")
- return &ast.BadExpr{s.Pos(), s.End()}
+ return &ast.BadExpr{From: s.Pos(), To: s.End()}
}
func (p *parser) parseIfStmt() *ast.IfStmt {
@@ -1574,7 +1681,7 @@ func (p *parser) parseIfStmt() *ast.IfStmt {
p.expectSemi()
}
- return &ast.IfStmt{pos, s, x, body, else_}
+ return &ast.IfStmt{If: pos, Init: s, Cond: x, Body: body, Else: else_}
}
func (p *parser) parseTypeList() (list []ast.Expr) {
@@ -1591,7 +1698,7 @@ func (p *parser) parseTypeList() (list []ast.Expr) {
return
}
-func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
+func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
if p.trace {
defer un(trace(p, "CaseClause"))
}
@@ -1600,10 +1707,10 @@ func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
var list []ast.Expr
if p.tok == token.CASE {
p.next()
- if exprSwitch {
- list = p.parseRhsList()
- } else {
+ if typeSwitch {
list = p.parseTypeList()
+ } else {
+ list = p.parseRhsList()
}
} else {
p.expect(token.DEFAULT)
@@ -1614,18 +1721,22 @@ func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
body := p.parseStmtList()
p.closeScope()
- return &ast.CaseClause{pos, list, colon, body}
+ return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
}
-func isExprSwitch(s ast.Stmt) bool {
- if s == nil {
- return true
- }
- if e, ok := s.(*ast.ExprStmt); ok {
- if a, ok := e.X.(*ast.TypeAssertExpr); ok {
- return a.Type != nil // regular type assertion
- }
- return true
+func isTypeSwitchAssert(x ast.Expr) bool {
+ a, ok := x.(*ast.TypeAssertExpr)
+ return ok && a.Type == nil
+}
+
+func isTypeSwitchGuard(s ast.Stmt) bool {
+ switch t := s.(type) {
+ case *ast.ExprStmt:
+ // x.(type)  (appears as x.(nil) in the AST: Type is nil)
+ return isTypeSwitchAssert(t.X)
+ case *ast.AssignStmt:
+ // v := x.(type)  (appears as v := x.(nil) in the AST: Type is nil)
+ return len(t.Lhs) == 1 && t.Tok == token.DEFINE && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0])
}
return false
}
@@ -1651,28 +1762,41 @@ func (p *parser) parseSwitchStmt() ast.Stmt {
s1 = s2
s2 = nil
if p.tok != token.LBRACE {
+ // A TypeSwitchGuard may declare a variable in addition
+ // to the variable declared in the initial SimpleStmt.
+ // Introduce extra scope to avoid redeclaration errors:
+ //
+ // switch t := 0; t := x.(T) { ... }
+ //
+ // (this code is not valid Go because the first t
+ // cannot be accessed and thus is never used; the extra
+ // scope is needed for the correct error message).
+ //
+ // If we don't have a type switch, s2 must be an expression.
+ // Having the extra nested but empty scope won't affect it.
+ p.openScope()
+ defer p.closeScope()
s2, _ = p.parseSimpleStmt(basic)
}
}
p.exprLev = prevLev
}
- exprSwitch := isExprSwitch(s2)
+ typeSwitch := isTypeSwitchGuard(s2)
lbrace := p.expect(token.LBRACE)
var list []ast.Stmt
for p.tok == token.CASE || p.tok == token.DEFAULT {
- list = append(list, p.parseCaseClause(exprSwitch))
+ list = append(list, p.parseCaseClause(typeSwitch))
}
rbrace := p.expect(token.RBRACE)
p.expectSemi()
- body := &ast.BlockStmt{lbrace, list, rbrace}
+ body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
- if exprSwitch {
- return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
+ if typeSwitch {
+ return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
}
- // type switch
- // TODO(gri): do all the checks!
- return &ast.TypeSwitchStmt{pos, s1, s2, body}
+
+ return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2), Body: body}
}
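
A sketch of the new behavior: the guard itself, not the case clauses,
now selects between SwitchStmt and TypeSwitchStmt:

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
	)

	func main() {
		src := "package p; func f(x interface{}) { switch v := x.(type) { default: _ = v } }"
		f, _ := parser.ParseFile(token.NewFileSet(), "", src, 0)
		stmt := f.Decls[0].(*ast.FuncDecl).Body.List[0]
		_, ok := stmt.(*ast.TypeSwitchStmt)
		fmt.Println(ok) // true: the := guard yields a TypeSwitchStmt
	}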
func (p *parser) parseCommClause() *ast.CommClause {
@@ -1695,34 +1819,31 @@ func (p *parser) parseCommClause() *ast.CommClause {
arrow := p.pos
p.next()
rhs := p.parseRhs()
- comm = &ast.SendStmt{lhs[0], arrow, rhs}
+ comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
} else {
// RecvStmt
- pos := p.pos
- tok := p.tok
- var rhs ast.Expr
- if tok == token.ASSIGN || tok == token.DEFINE {
+ if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
// RecvStmt with assignment
if len(lhs) > 2 {
p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
// continue with first two expressions
lhs = lhs[0:2]
}
+ pos := p.pos
p.next()
- rhs = p.parseRhs()
+ rhs := p.parseRhs()
+ as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
+ if tok == token.DEFINE {
+ p.shortVarDecl(as, lhs)
+ }
+ comm = as
} else {
- // rhs must be single receive operation
+ // lhs must be a single receive operation
if len(lhs) > 1 {
p.errorExpected(lhs[0].Pos(), "1 expression")
// continue with first expression
}
- rhs = lhs[0]
- lhs = nil // there is no lhs
- }
- if lhs != nil {
- comm = &ast.AssignStmt{lhs, pos, tok, []ast.Expr{rhs}}
- } else {
- comm = &ast.ExprStmt{rhs}
+ comm = &ast.ExprStmt{X: lhs[0]}
}
}
} else {
@@ -1733,7 +1854,7 @@ func (p *parser) parseCommClause() *ast.CommClause {
body := p.parseStmtList()
p.closeScope()
- return &ast.CommClause{pos, comm, colon, body}
+ return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
func (p *parser) parseSelectStmt() *ast.SelectStmt {
@@ -1749,9 +1870,9 @@ func (p *parser) parseSelectStmt() *ast.SelectStmt {
}
rbrace := p.expect(token.RBRACE)
p.expectSemi()
- body := &ast.BlockStmt{lbrace, list, rbrace}
+ body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
- return &ast.SelectStmt{pos, body}
+ return &ast.SelectStmt{Select: pos, Body: body}
}
func (p *parser) parseForStmt() ast.Stmt {
@@ -1800,16 +1921,30 @@ func (p *parser) parseForStmt() ast.Stmt {
key = as.Lhs[0]
default:
p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
- return &ast.BadStmt{pos, body.End()}
+ return &ast.BadStmt{From: pos, To: body.End()}
}
// parseSimpleStmt returned a right-hand side that
// is a single unary expression of the form "range x"
x := as.Rhs[0].(*ast.UnaryExpr).X
- return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, x, body}
+ return &ast.RangeStmt{
+ For: pos,
+ Key: key,
+ Value: value,
+ TokPos: as.TokPos,
+ Tok: as.Tok,
+ X: x,
+ Body: body,
+ }
}
// regular for statement
- return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
+ return &ast.ForStmt{
+ For: pos,
+ Init: s1,
+ Cond: p.makeExpr(s2),
+ Post: s3,
+ Body: body,
+ }
}
func (p *parser) parseStmt() (s ast.Stmt) {
@@ -1819,12 +1954,12 @@ func (p *parser) parseStmt() (s ast.Stmt) {
switch p.tok {
case token.CONST, token.TYPE, token.VAR:
- s = &ast.DeclStmt{p.parseDecl()}
+ s = &ast.DeclStmt{Decl: p.parseDecl(syncStmt)}
case
- // tokens that may start a top-level expression
- token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand
- token.LBRACK, token.STRUCT, // composite type
- token.MUL, token.AND, token.ARROW, token.ADD, token.SUB, token.XOR: // unary operators
+ // tokens that may start an expression
+ token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
+ token.LBRACK, token.STRUCT, // composite types
+ token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
s, _ = p.parseSimpleStmt(labelOk)
// because of the required look-ahead, labeled statements are
// parsed by parseSimpleStmt - don't expect a semicolon after
@@ -1852,17 +1987,17 @@ func (p *parser) parseStmt() (s ast.Stmt) {
case token.FOR:
s = p.parseForStmt()
case token.SEMICOLON:
- s = &ast.EmptyStmt{p.pos}
+ s = &ast.EmptyStmt{Semicolon: p.pos}
p.next()
case token.RBRACE:
// a semicolon may be omitted before a closing "}"
- s = &ast.EmptyStmt{p.pos}
+ s = &ast.EmptyStmt{Semicolon: p.pos}
default:
// no statement found
pos := p.pos
p.errorExpected(pos, "statement")
- p.next() // make progress
- s = &ast.BadStmt{pos, p.pos}
+ syncStmt(p)
+ s = &ast.BadStmt{From: pos, To: p.pos}
}
return
@@ -1873,6 +2008,17 @@ func (p *parser) parseStmt() (s ast.Stmt) {
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
+func isValidImport(lit string) bool {
+ const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+ s, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
+ for _, r := range s {
+ if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+ return false
+ }
+ }
+ return s != ""
+}
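
A sketch of the check in action (the rejected rune classes are exactly
those listed in isValidImport above):

	package main

	import (
		"fmt"
		"go/parser"
		"go/token"
	)

	func main() {
		for _, path := range []string{`"fmt"`, `"a b"`, `""`} {
			src := "package p; import " + path
			_, err := parser.ParseFile(token.NewFileSet(), "", src, 0)
			fmt.Printf("%s valid=%v\n", path, err == nil) // only "fmt" passes
		}
	}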
+
func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "ImportSpec"))
@@ -1881,7 +2027,7 @@ func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
var ident *ast.Ident
switch p.tok {
case token.PERIOD:
- ident = &ast.Ident{p.pos, ".", nil}
+ ident = &ast.Ident{NamePos: p.pos, Name: "."}
p.next()
case token.IDENT:
ident = p.parseIdent()
@@ -1889,7 +2035,10 @@ func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
var path *ast.BasicLit
if p.tok == token.STRING {
- path = &ast.BasicLit{p.pos, p.tok, p.lit}
+ if !isValidImport(p.lit) {
+ p.error(p.pos, "invalid import path: "+p.lit)
+ }
+ path = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
p.next()
} else {
p.expect(token.STRING) // use expect() error handling
@@ -1897,7 +2046,12 @@ func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
p.expectSemi() // call before accessing p.linecomment
// collect imports
- spec := &ast.ImportSpec{doc, ident, path, p.lineComment}
+ spec := &ast.ImportSpec{
+ Doc: doc,
+ Name: ident,
+ Path: path,
+ Comment: p.lineComment,
+ }
p.imports = append(p.imports, spec)
return spec
@@ -1921,7 +2075,13 @@ func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
// a function begins at the end of the ConstSpec or VarSpec and ends at
// the end of the innermost containing block.
// (Global identifiers are resolved in a separate phase after parsing.)
- spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ spec := &ast.ValueSpec{
+ Doc: doc,
+ Names: idents,
+ Type: typ,
+ Values: values,
+ Comment: p.lineComment,
+ }
p.declare(spec, iota, p.topScope, ast.Con, idents...)
return spec
@@ -1938,7 +2098,7 @@ func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
// at the identifier in the TypeSpec and ends at the end of the innermost
// containing block.
// (Global identifiers are resolved in a separate phase after parsing.)
- spec := &ast.TypeSpec{doc, ident, nil, nil}
+ spec := &ast.TypeSpec{Doc: doc, Name: ident}
p.declare(spec, nil, p.topScope, ast.Typ, ident)
spec.Type = p.parseType()
@@ -1966,7 +2126,13 @@ func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
// a function begins at the end of the ConstSpec or VarSpec and ends at
// the end of the innermost containing block.
// (Global identifiers are resolved in a separate phase after parsing.)
- spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ spec := &ast.ValueSpec{
+ Doc: doc,
+ Names: idents,
+ Type: typ,
+ Values: values,
+ Comment: p.lineComment,
+ }
p.declare(spec, nil, p.topScope, ast.Var, idents...)
return spec
@@ -1993,7 +2159,14 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.Gen
list = append(list, f(p, nil, 0))
}
- return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen}
+ return &ast.GenDecl{
+ Doc: doc,
+ TokPos: pos,
+ Tok: keyword,
+ Lparen: lparen,
+ Specs: list,
+ Rparen: rparen,
+ }
}
func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
@@ -2001,14 +2174,12 @@ func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
defer un(trace(p, "Receiver"))
}
- pos := p.pos
par := p.parseParameters(scope, false)
// must have exactly one receiver
if par.NumFields() != 1 {
- p.errorExpected(pos, "exactly one receiver")
- // TODO determine a better range for BadExpr below
- par.List = []*ast.Field{&ast.Field{Type: &ast.BadExpr{pos, pos}}}
+ p.errorExpected(par.Opening, "exactly one receiver")
+ par.List = []*ast.Field{{Type: &ast.BadExpr{From: par.Opening, To: par.Closing + 1}}}
return par
}
@@ -2016,8 +2187,13 @@ func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
recv := par.List[0]
base := deref(recv.Type)
if _, isIdent := base.(*ast.Ident); !isIdent {
- p.errorExpected(base.Pos(), "(unqualified) identifier")
- par.List = []*ast.Field{&ast.Field{Type: &ast.BadExpr{recv.Pos(), recv.End()}}}
+ if _, isBad := base.(*ast.BadExpr); !isBad {
+ // only report error if it's a new one
+ p.errorExpected(base.Pos(), "(unqualified) identifier")
+ }
+ par.List = []*ast.Field{
+ {Type: &ast.BadExpr{From: recv.Pos(), To: recv.End()}},
+ }
}
return par
@@ -2047,7 +2223,17 @@ func (p *parser) parseFuncDecl() *ast.FuncDecl {
}
p.expectSemi()
- decl := &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}
+ decl := &ast.FuncDecl{
+ Doc: doc,
+ Recv: recv,
+ Name: ident,
+ Type: &ast.FuncType{
+ Func: pos,
+ Params: params,
+ Results: results,
+ },
+ Body: body,
+ }
if recv == nil {
// Go spec: The scope of an identifier denoting a constant, type,
// variable, or function (but not method) declared at top level
@@ -2063,7 +2249,7 @@ func (p *parser) parseFuncDecl() *ast.FuncDecl {
return decl
}
-func (p *parser) parseDecl() ast.Decl {
+func (p *parser) parseDecl(sync func(*parser)) ast.Decl {
if p.trace {
defer un(trace(p, "Declaration"))
}
@@ -2085,26 +2271,13 @@ func (p *parser) parseDecl() ast.Decl {
default:
pos := p.pos
p.errorExpected(pos, "declaration")
- p.next() // make progress
- decl := &ast.BadDecl{pos, p.pos}
- return decl
+ sync(p)
+ return &ast.BadDecl{From: pos, To: p.pos}
}
return p.parseGenDecl(p.tok, f)
}
-func (p *parser) parseDeclList() (list []ast.Decl) {
- if p.trace {
- defer un(trace(p, "DeclList"))
- }
-
- for p.tok != token.EOF {
- list = append(list, p.parseDecl())
- }
-
- return
-}
-
// ----------------------------------------------------------------------------
// Source files
@@ -2129,7 +2302,7 @@ func (p *parser) parseFile() *ast.File {
// Don't bother parsing the rest if we had errors already.
// Likely not a Go source file at all.
- if p.ErrorCount() == 0 && p.mode&PackageClauseOnly == 0 {
+ if p.errors.Len() == 0 && p.mode&PackageClauseOnly == 0 {
// import decls
for p.tok == token.IMPORT {
decls = append(decls, p.parseGenDecl(token.IMPORT, parseImportSpec))
@@ -2138,7 +2311,7 @@ func (p *parser) parseFile() *ast.File {
if p.mode&ImportsOnly == 0 {
// rest of package body
for p.tok != token.EOF {
- decls = append(decls, p.parseDecl())
+ decls = append(decls, p.parseDecl(syncDecl))
}
}
}
@@ -2157,5 +2330,14 @@ func (p *parser) parseFile() *ast.File {
}
}
- return &ast.File{doc, pos, ident, decls, p.pkgScope, p.imports, p.unresolved[0:i], p.comments}
+ return &ast.File{
+ Doc: doc,
+ Package: pos,
+ Name: ident,
+ Decls: decls,
+ Scope: p.pkgScope,
+ Imports: p.imports,
+ Unresolved: p.unresolved[0:i],
+ Comments: p.comments,
+ }
}
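
A sketch of consuming the fields parseFile now fills in by name (the
source string is illustrative):

	package main

	import (
		"fmt"
		"go/parser"
		"go/token"
	)

	func main() {
		src := `package p; import "fmt"; var _ = fmt.Sprint(undeclared)`
		f, _ := parser.ParseFile(token.NewFileSet(), "", src, 0)
		for _, s := range f.Imports {
			fmt.Println("import:", s.Path.Value)
		}
		for _, id := range f.Unresolved {
			fmt.Println("unresolved:", id.Name) // resolved in a later phase
		}
	}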
diff --git a/src/pkg/go/parser/parser_test.go b/src/pkg/go/parser/parser_test.go
index 39a78e515..5e45acd00 100644
--- a/src/pkg/go/parser/parser_test.go
+++ b/src/pkg/go/parser/parser_test.go
@@ -5,6 +5,8 @@
package parser
import (
+ "fmt"
+ "go/ast"
"go/token"
"os"
"testing"
@@ -12,81 +14,14 @@ import (
var fset = token.NewFileSet()
-var illegalInputs = []interface{}{
- nil,
- 3.14,
- []byte(nil),
- "foo!",
- `package p; func f() { if /* should have condition */ {} };`,
- `package p; func f() { if ; /* should have condition */ {} };`,
- `package p; func f() { if f(); /* should have condition */ {} };`,
- `package p; const c; /* should have constant value */`,
- `package p; func f() { if _ = range x; true {} };`,
- `package p; func f() { switch _ = range x; true {} };`,
- `package p; func f() { for _ = range x ; ; {} };`,
- `package p; func f() { for ; ; _ = range x {} };`,
- `package p; func f() { for ; _ = range x ; {} };`,
- `package p; var a = [1]int; /* illegal expression */`,
- `package p; var a = [...]int; /* illegal expression */`,
- `package p; var a = struct{} /* illegal expression */`,
- `package p; var a = func(); /* illegal expression */`,
- `package p; var a = interface{} /* illegal expression */`,
- `package p; var a = []int /* illegal expression */`,
- `package p; var a = map[int]int /* illegal expression */`,
- `package p; var a = chan int; /* illegal expression */`,
- `package p; var a = []int{[]int}; /* illegal expression */`,
- `package p; var a = ([]int); /* illegal expression */`,
- `package p; var a = a[[]int:[]int]; /* illegal expression */`,
- `package p; var a = <- chan int; /* illegal expression */`,
- `package p; func f() { select { case _ <- chan int: } };`,
-}
-
-func TestParseIllegalInputs(t *testing.T) {
- for _, src := range illegalInputs {
- _, err := ParseFile(fset, "", src, 0)
- if err == nil {
- t.Errorf("ParseFile(%v) should have failed", src)
- }
- }
-}
-
-var validPrograms = []interface{}{
- "package p\n",
- `package p;`,
- `package p; import "fmt"; func f() { fmt.Println("Hello, World!") };`,
- `package p; func f() { if f(T{}) {} };`,
- `package p; func f() { _ = (<-chan int)(x) };`,
- `package p; func f() { _ = (<-chan <-chan int)(x) };`,
- `package p; func f(func() func() func());`,
- `package p; func f(...T);`,
- `package p; func f(float, ...int);`,
- `package p; func f(x int, a ...int) { f(0, a...); f(1, a...,) };`,
- `package p; type T []int; var a []bool; func f() { if a[T{42}[0]] {} };`,
- `package p; type T []int; func g(int) bool { return true }; func f() { if g(T{42}[0]) {} };`,
- `package p; type T []int; func f() { for _ = range []int{T{42}[0]} {} };`,
- `package p; var a = T{{1, 2}, {3, 4}}`,
- `package p; func f() { select { case <- c: case c <- d: case c <- <- d: case <-c <- d: } };`,
- `package p; func f() { select { case x := (<-c): } };`,
- `package p; func f() { if ; true {} };`,
- `package p; func f() { switch ; {} };`,
- `package p; func f() { for _ = range "foo" + "bar" {} };`,
-}
-
-func TestParseValidPrograms(t *testing.T) {
- for _, src := range validPrograms {
- _, err := ParseFile(fset, "", src, 0)
- if err != nil {
- t.Errorf("ParseFile(%q): %v", src, err)
- }
- }
-}
-
var validFiles = []string{
"parser.go",
"parser_test.go",
+ "error_test.go",
+ "short_test.go",
}
-func TestParse3(t *testing.T) {
+func TestParse(t *testing.T) {
for _, filename := range validFiles {
_, err := ParseFile(fset, filename, nil, DeclarationErrors)
if err != nil {
@@ -106,9 +41,9 @@ func nameFilter(filename string) bool {
return true
}
-func dirFilter(f *os.FileInfo) bool { return nameFilter(f.Name) }
+func dirFilter(f os.FileInfo) bool { return nameFilter(f.Name()) }
-func TestParse4(t *testing.T) {
+func TestParseDir(t *testing.T) {
path := "."
pkgs, err := ParseDir(fset, path, dirFilter, 0)
if err != nil {
@@ -128,3 +63,117 @@ func TestParse4(t *testing.T) {
}
}
}
+
+func TestParseExpr(t *testing.T) {
+ // just kicking the tires:
+ // a valid expression
+ src := "a + b"
+ x, err := ParseExpr(src)
+ if err != nil {
+ t.Errorf("ParseExpr(%s): %v", src, err)
+ }
+ // sanity check
+ if _, ok := x.(*ast.BinaryExpr); !ok {
+ t.Errorf("ParseExpr(%s): got %T, expected *ast.BinaryExpr", src, x)
+ }
+
+ // an invalid expression
+ src = "a + *"
+ _, err = ParseExpr(src)
+ if err == nil {
+ t.Errorf("ParseExpr(%s): %v", src, err)
+ }
+
+ // it must not crash
+ for _, src := range valids {
+ ParseExpr(src)
+ }
+}
+
+func TestColonEqualsScope(t *testing.T) {
+ f, err := ParseFile(fset, "", `package p; func f() { x, y, z := x, y, z }`, 0)
+ if err != nil {
+ t.Errorf("parse: %s", err)
+ }
+
+ // RHS refers to undefined globals; LHS does not.
+ as := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.AssignStmt)
+ for _, v := range as.Rhs {
+ id := v.(*ast.Ident)
+ if id.Obj != nil {
+ t.Errorf("rhs %s has Obj, should not", id.Name)
+ }
+ }
+ for _, v := range as.Lhs {
+ id := v.(*ast.Ident)
+ if id.Obj == nil {
+ t.Errorf("lhs %s does not have Obj, should", id.Name)
+ }
+ }
+}
+
+func TestVarScope(t *testing.T) {
+ f, err := ParseFile(fset, "", `package p; func f() { var x, y, z = x, y, z }`, 0)
+ if err != nil {
+ t.Errorf("parse: %s", err)
+ }
+
+ // RHS refers to undefined globals; LHS does not.
+ as := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.DeclStmt).Decl.(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
+ for _, v := range as.Values {
+ id := v.(*ast.Ident)
+ if id.Obj != nil {
+ t.Errorf("rhs %s has Obj, should not", id.Name)
+ }
+ }
+ for _, id := range as.Names {
+ if id.Obj == nil {
+ t.Errorf("lhs %s does not have Obj, should", id.Name)
+ }
+ }
+}
+
+var imports = map[string]bool{
+ `"a"`: true,
+ "`a`": true,
+ `"a/b"`: true,
+ `"a.b"`: true,
+ `"m\x61th"`: true,
+ `"greek/αβ"`: true,
+ `""`: false,
+
+ // Each of these pairs tests both `` vs "" strings
+ // and also the use of invalid characters spelled out as
+ // escape sequences and written directly.
+ // For example `"\x00"` tests import "\x00"
+ // while "`\x00`" tests import `<actual-NUL-byte>`.
+ `"\x00"`: false,
+ "`\x00`": false,
+ `"\x7f"`: false,
+ "`\x7f`": false,
+ `"a!"`: false,
+ "`a!`": false,
+ `"a b"`: false,
+ "`a b`": false,
+ `"a\\b"`: false,
+ "`a\\b`": false,
+ "\"`a`\"": false,
+ "`\"a\"`": false,
+ `"\x80\x80"`: false,
+ "`\x80\x80`": false,
+ `"\xFFFD"`: false,
+ "`\xFFFD`": false,
+}
+
+func TestImports(t *testing.T) {
+ for path, isValid := range imports {
+ src := fmt.Sprintf("package p; import %s", path)
+ _, err := ParseFile(fset, "", src, 0)
+ switch {
+ case err != nil && isValid:
+ t.Errorf("ParseFile(%s): got %v; expected no error", src, err)
+ case err == nil && !isValid:
+ t.Errorf("ParseFile(%s): got no error; expected one", src)
+ }
+ }
+}
diff --git a/src/pkg/go/parser/short_test.go b/src/pkg/go/parser/short_test.go
new file mode 100644
index 000000000..238492bf3
--- /dev/null
+++ b/src/pkg/go/parser/short_test.go
@@ -0,0 +1,75 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for short valid and invalid programs.
+
+package parser
+
+import "testing"
+
+var valids = []string{
+ "package p\n",
+ `package p;`,
+ `package p; import "fmt"; func f() { fmt.Println("Hello, World!") };`,
+ `package p; func f() { if f(T{}) {} };`,
+ `package p; func f() { _ = (<-chan int)(x) };`,
+ `package p; func f() { _ = (<-chan <-chan int)(x) };`,
+ `package p; func f(func() func() func());`,
+ `package p; func f(...T);`,
+ `package p; func f(float, ...int);`,
+ `package p; func f(x int, a ...int) { f(0, a...); f(1, a...,) };`,
+ `package p; func f(int,) {};`,
+ `package p; func f(...int,) {};`,
+ `package p; func f(x ...int,) {};`,
+ `package p; type T []int; var a []bool; func f() { if a[T{42}[0]] {} };`,
+ `package p; type T []int; func g(int) bool { return true }; func f() { if g(T{42}[0]) {} };`,
+ `package p; type T []int; func f() { for _ = range []int{T{42}[0]} {} };`,
+ `package p; var a = T{{1, 2}, {3, 4}}`,
+ `package p; func f() { select { case <- c: case c <- d: case c <- <- d: case <-c <- d: } };`,
+ `package p; func f() { select { case x := (<-c): } };`,
+ `package p; func f() { if ; true {} };`,
+ `package p; func f() { switch ; {} };`,
+ `package p; func f() { for _ = range "foo" + "bar" {} };`,
+}
+
+func TestValid(t *testing.T) {
+ for _, src := range valids {
+ checkErrors(t, src, src)
+ }
+}
+
+var invalids = []string{
+ `foo /* ERROR "expected 'package'" */ !`,
+ `package p; func f() { if { /* ERROR "expected operand" */ } };`,
+ `package p; func f() { if ; { /* ERROR "expected operand" */ } };`,
+ `package p; func f() { if f(); { /* ERROR "expected operand" */ } };`,
+ `package p; const c; /* ERROR "expected '='" */`,
+ `package p; func f() { if _ /* ERROR "expected condition" */ = range x; true {} };`,
+ `package p; func f() { switch _ /* ERROR "expected condition" */ = range x; true {} };`,
+ `package p; func f() { for _ = range x ; /* ERROR "expected '{'" */ ; {} };`,
+ `package p; func f() { for ; ; _ = range /* ERROR "expected operand" */ x {} };`,
+ `package p; func f() { for ; _ /* ERROR "expected condition" */ = range x ; {} };`,
+ `package p; func f() { switch t /* ERROR "expected condition" */ = t.(type) {} };`,
+ `package p; func f() { switch t /* ERROR "expected condition" */ , t = t.(type) {} };`,
+ `package p; func f() { switch t /* ERROR "expected condition" */ = t.(type), t {} };`,
+ `package p; var a = [ /* ERROR "expected expression" */ 1]int;`,
+ `package p; var a = [ /* ERROR "expected expression" */ ...]int;`,
+ `package p; var a = struct /* ERROR "expected expression" */ {}`,
+ `package p; var a = func /* ERROR "expected expression" */ ();`,
+ `package p; var a = interface /* ERROR "expected expression" */ {}`,
+ `package p; var a = [ /* ERROR "expected expression" */ ]int`,
+ `package p; var a = map /* ERROR "expected expression" */ [int]int`,
+ `package p; var a = chan /* ERROR "expected expression" */ int;`,
+ `package p; var a = []int{[ /* ERROR "expected expression" */ ]int};`,
+ `package p; var a = ( /* ERROR "expected expression" */ []int);`,
+ `package p; var a = a[[ /* ERROR "expected expression" */ ]int:[]int];`,
+ `package p; var a = <- /* ERROR "expected expression" */ chan int;`,
+ `package p; func f() { select { case _ <- chan /* ERROR "expected expression" */ int: } };`,
+}
+
+func TestInvalid(t *testing.T) {
+ for _, src := range invalids {
+ checkErrors(t, src, src)
+ }
+}
diff --git a/src/pkg/go/parser/testdata/commas.src b/src/pkg/go/parser/testdata/commas.src
new file mode 100644
index 000000000..af6e70645
--- /dev/null
+++ b/src/pkg/go/parser/testdata/commas.src
@@ -0,0 +1,19 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for error messages/parser synchronization
+// after missing commas.
+
+package p
+
+var _ = []int{
+ 0 /* ERROR "missing ','" */
+}
+
+var _ = []int{
+ 0,
+ 1,
+ 2,
+ 3 /* ERROR "missing ','" */
+}
diff --git a/src/pkg/go/parser/testdata/issue3106.src b/src/pkg/go/parser/testdata/issue3106.src
new file mode 100644
index 000000000..82796c8ce
--- /dev/null
+++ b/src/pkg/go/parser/testdata/issue3106.src
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 3106: Better synchronization of
+// parser after certain syntax errors.
+
+package main
+
+func f() {
+ var m Mutex
+ c := MakeCond(&m)
+ percent := 0
+ const step = 10
+ for i := 0; i < 5; i++ {
+ go func() {
+ for {
+ // Emulates some useful work.
+ time.Sleep(1e8)
+ m.Lock()
+ defer
+ if /* ERROR "expected operand, found 'if'" */ percent == 100 {
+ m.Unlock()
+ break
+ }
+ percent++
+ if percent % step == 0 {
+ //c.Signal()
+ }
+ m.Unlock()
+ }
+ }()
+ }
+ for {
+ m.Lock()
+ if percent == 0 || percent % step != 0 {
+ c.Wait()
+ }
+ fmt.Print(",")
+ if percent == 100 {
+ m.Unlock()
+ break
+ }
+ m.Unlock()
+ }
+}
diff --git a/src/pkg/go/printer/Makefile b/src/pkg/go/printer/Makefile
deleted file mode 100644
index 6a71efc93..000000000
--- a/src/pkg/go/printer/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../../Make.inc
-
-TARG=go/printer
-GOFILES=\
- printer.go\
- nodes.go\
-
-include ../../../Make.pkg
diff --git a/src/pkg/go/printer/example_test.go b/src/pkg/go/printer/example_test.go
new file mode 100644
index 000000000..e570040ba
--- /dev/null
+++ b/src/pkg/go/printer/example_test.go
@@ -0,0 +1,67 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package printer_test
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "strings"
+ "testing"
+)
+
+// Dummy test function so that godoc does not use the entire file as example.
+func Test(*testing.T) {}
+
+func parseFunc(filename, functionname string) (fun *ast.FuncDecl, fset *token.FileSet) {
+ fset = token.NewFileSet()
+ if file, err := parser.ParseFile(fset, filename, nil, 0); err == nil {
+ for _, d := range file.Decls {
+ if f, ok := d.(*ast.FuncDecl); ok && f.Name.Name == functionname {
+ fun = f
+ return
+ }
+ }
+ }
+ panic("function not found")
+}
+
+func ExampleFprint() {
+ // Parse source file and extract the AST without comments for
+ // this function, with position information referring to the
+ // file set fset.
+ funcAST, fset := parseFunc("example_test.go", "ExampleFprint")
+
+ // Print the function body into buffer buf.
+ // The file set is provided to the printer so that it knows
+ // about the original source formatting and can add additional
+ // line breaks where they were present in the source.
+ var buf bytes.Buffer
+ printer.Fprint(&buf, fset, funcAST.Body)
+
+ // Remove braces {} enclosing the function body, unindent,
+ // and trim leading and trailing white space.
+ s := buf.String()
+ s = s[1 : len(s)-1]
+ s = strings.TrimSpace(strings.Replace(s, "\n\t", "\n", -1))
+
+ // Print the cleaned-up body text to stdout.
+ fmt.Println(s)
+
+ // output:
+ // funcAST, fset := parseFunc("example_test.go", "ExampleFprint")
+ //
+ // var buf bytes.Buffer
+ // printer.Fprint(&buf, fset, funcAST.Body)
+ //
+ // s := buf.String()
+ // s = s[1 : len(s)-1]
+ // s = strings.TrimSpace(strings.Replace(s, "\n\t", "\n", -1))
+ //
+ // fmt.Println(s)
+}
diff --git a/src/pkg/go/printer/nodes.go b/src/pkg/go/printer/nodes.go
index 9cd975ec1..727d2a371 100644
--- a/src/pkg/go/printer/nodes.go
+++ b/src/pkg/go/printer/nodes.go
@@ -12,9 +12,10 @@ import (
"bytes"
"go/ast"
"go/token"
+ "unicode/utf8"
)
-// Other formatting issues:
+// Formatting issues:
// - better comment formatting for /*-style comments at the end of a line (e.g. a declaration)
// when the comment spans multiple lines; if such a comment is just two lines, formatting is
// not idempotent
@@ -39,7 +40,10 @@ import (
// future (not yet interspersed) comments in this function.
//
func (p *printer) linebreak(line, min int, ws whiteSpace, newSection bool) (printedBreak bool) {
- n := p.nlines(line-p.pos.Line, min)
+ n := nlimit(line - p.pos.Line)
+ if n < min {
+ n = min
+ }
if n > 0 {
p.print(ws)
if newSection {
@@ -69,74 +73,61 @@ func (p *printer) setComment(g *ast.CommentGroup) {
// for some reason there are pending comments; this
// should never happen - handle gracefully and flush
// all comments up to g, ignore anything after that
- p.flush(p.fset.Position(g.List[0].Pos()), token.ILLEGAL)
+ p.flush(p.posFor(g.List[0].Pos()), token.ILLEGAL)
}
p.comments[0] = g
p.cindex = 0
+ p.nextComment() // get comment ready for use
}
type exprListMode uint
const (
- blankStart exprListMode = 1 << iota // print a blank before a non-empty list
- blankEnd // print a blank after a non-empty list
- commaSep // elements are separated by commas
- commaTerm // list is optionally terminated by a comma
- noIndent // no extra indentation in multi-line lists
- periodSep // elements are separated by periods
+ commaTerm exprListMode = 1 << iota // list is optionally terminated by a comma
+ noIndent // no extra indentation in multi-line lists
)
-// Sets multiLine to true if the identifier list spans multiple lines.
// If indent is set, a multi-line identifier list is indented after the
// first linebreak encountered.
-func (p *printer) identList(list []*ast.Ident, indent bool, multiLine *bool) {
+func (p *printer) identList(list []*ast.Ident, indent bool) {
// convert into an expression list so we can re-use exprList formatting
xlist := make([]ast.Expr, len(list))
for i, x := range list {
xlist[i] = x
}
- mode := commaSep
+ var mode exprListMode
if !indent {
- mode |= noIndent
+ mode = noIndent
}
- p.exprList(token.NoPos, xlist, 1, mode, multiLine, token.NoPos)
+ p.exprList(token.NoPos, xlist, 1, mode, token.NoPos)
}
// Print a list of expressions. If the list spans multiple
// source lines, the original line breaks are respected between
-// expressions. Sets multiLine to true if the list spans multiple
-// lines.
+// expressions.
//
// TODO(gri) Consider rewriting this to be independent of []ast.Expr
// so that we can use the algorithm for any kind of list
// (e.g., pass list via a channel over which to range).
-func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, multiLine *bool, next0 token.Pos) {
+func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, next0 token.Pos) {
if len(list) == 0 {
return
}
- if mode&blankStart != 0 {
- p.print(blank)
- }
-
- prev := p.fset.Position(prev0)
- next := p.fset.Position(next0)
- line := p.fset.Position(list[0].Pos()).Line
- endLine := p.fset.Position(list[len(list)-1].End()).Line
+ prev := p.posFor(prev0)
+ next := p.posFor(next0)
+ line := p.lineFor(list[0].Pos())
+ endLine := p.lineFor(list[len(list)-1].End())
if prev.IsValid() && prev.Line == line && line == endLine {
// all list entries on a single line
for i, x := range list {
if i > 0 {
- if mode&commaSep != 0 {
- p.print(token.COMMA)
- }
- p.print(blank)
+ // use position of expression following the comma as
+ // comma position for correct comment placement
+ p.print(x.Pos(), token.COMMA, blank)
}
- p.expr0(x, depth, multiLine)
- }
- if mode&blankEnd != 0 {
- p.print(blank)
+ p.expr0(x, depth)
}
return
}
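
Giving the comma the position of the following expression means an interior comment is flushed before the comma rather than after it; e.g., in the (hypothetical) call below the comment keeps its place:

	f(a /* keep me here */, b)
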
@@ -156,7 +147,6 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
prevBreak := -1 // index of last expression that was followed by a linebreak
if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) {
ws = ignore
- *multiLine = true
prevBreak = 0
}
@@ -166,7 +156,7 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
// print all list elements
for i, x := range list {
prevLine := line
- line = p.fset.Position(x.Pos()).Line
+ line = p.lineFor(x.Pos())
// determine if the next linebreak, if any, needs to use formfeed:
// in general, use the entire node size to make the decision; for
@@ -209,20 +199,21 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
}
if i > 0 {
- switch {
- case mode&commaSep != 0:
- p.print(token.COMMA)
- case mode&periodSep != 0:
- p.print(token.PERIOD)
+ needsLinebreak := prevLine < line && prevLine > 0 && line > 0
+ // use position of expression following the comma as
+ // comma position for correct comment placement, but
+ // only if the expression is on the same line
+ if !needsLinebreak {
+ p.print(x.Pos())
}
- needsBlank := mode&periodSep == 0 // period-separated list elements don't need a blank
- if prevLine < line && prevLine > 0 && line > 0 {
+ p.print(token.COMMA)
+ needsBlank := true
+ if needsLinebreak {
// lines are broken using newlines so comments remain aligned
// unless forceFF is set or there are multiple expressions on
// the same line in which case formfeed is used
if p.linebreak(line, 0, ws, useFF || prevBreak+1 < i) {
ws = ignore
- *multiLine = true
prevBreak = i
needsBlank = false // we got a line break instead
}
@@ -236,11 +227,11 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
// we have a key:value expression that fits onto one line and
// is in a list with more than one entry: use a column for the
// key such that consecutive entries can align if possible
- p.expr(pair.Key, multiLine)
+ p.expr(pair.Key)
p.print(pair.Colon, token.COLON, vtab)
- p.expr(pair.Value, multiLine)
+ p.expr(pair.Value)
} else {
- p.expr0(x, depth, multiLine)
+ p.expr0(x, depth)
}
}
@@ -255,67 +246,95 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
return
}
- if mode&blankEnd != 0 {
- p.print(blank)
- }
-
if ws == ignore && mode&noIndent == 0 {
// unindent if we indented
p.print(unindent)
}
}
-// Sets multiLine to true if the the parameter list spans multiple lines.
-func (p *printer) parameters(fields *ast.FieldList, multiLine *bool) {
+func (p *printer) parameters(fields *ast.FieldList) {
p.print(fields.Opening, token.LPAREN)
if len(fields.List) > 0 {
- var prevLine, line int
+ prevLine := p.lineFor(fields.Opening)
+ ws := indent
for i, par := range fields.List {
+ // determine par begin and end line (may be different
+ // if there are multiple parameter names for this par
+ // or the type is on a separate line)
+ var parLineBeg int
+ var parLineEnd = p.lineFor(par.Type.Pos())
+ if len(par.Names) > 0 {
+ parLineBeg = p.lineFor(par.Names[0].Pos())
+ } else {
+ parLineBeg = parLineEnd
+ }
+ // separating "," if needed
+ needsLinebreak := 0 < prevLine && prevLine < parLineBeg
if i > 0 {
- p.print(token.COMMA)
- if len(par.Names) > 0 {
- line = p.fset.Position(par.Names[0].Pos()).Line
- } else {
- line = p.fset.Position(par.Type.Pos()).Line
- }
- if 0 < prevLine && prevLine < line && p.linebreak(line, 0, ignore, true) {
- *multiLine = true
- } else {
- p.print(blank)
+ // use position of parameter following the comma as
+ // comma position for correct comment placement, but
+ // only if the next parameter is on the same line
+ if !needsLinebreak {
+ p.print(par.Pos())
}
+ p.print(token.COMMA)
}
+ // separator if needed (linebreak or blank)
+ if needsLinebreak && p.linebreak(parLineBeg, 0, ws, true) {
+ // break line if the opening "(" or previous parameter ended on a different line
+ ws = ignore
+ } else if i > 0 {
+ p.print(blank)
+ }
+ // parameter names
if len(par.Names) > 0 {
- p.identList(par.Names, false, multiLine)
+ // Very subtle: If we indented before (ws == ignore), identList
+ // won't indent again. If we didn't (ws == indent), identList will
+ // indent if the identList spans multiple lines, and it will outdent
+ // again at the end (and still ws == indent). Thus, a subsequent indent
+ // by a linebreak call after a type, or in the next multi-line identList
+ // will do the right thing.
+ p.identList(par.Names, ws == indent)
p.print(blank)
}
- p.expr(par.Type, multiLine)
- prevLine = p.fset.Position(par.Type.Pos()).Line
+ // parameter type
+ p.expr(par.Type)
+ prevLine = parLineEnd
+ }
+ // if the closing ")" is on a separate line from the last parameter,
+ // print an additional "," and line break
+ if closing := p.lineFor(fields.Closing); 0 < prevLine && prevLine < closing {
+ p.print(token.COMMA)
+ p.linebreak(closing, 0, ignore, true)
+ }
+ // unindent if we indented
+ if ws == ignore {
+ p.print(unindent)
}
}
p.print(fields.Closing, token.RPAREN)
}
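
One visible effect of the closing-parenthesis check: a parameter list whose ")" sits on its own source line now keeps a terminating comma, e.g. for a hypothetical declaration:

	func process(
		name string,
		values []int,
	) error
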
-// Sets multiLine to true if the signature spans multiple lines.
-func (p *printer) signature(params, result *ast.FieldList, multiLine *bool) {
- p.parameters(params, multiLine)
+func (p *printer) signature(params, result *ast.FieldList) {
+ p.parameters(params)
n := result.NumFields()
if n > 0 {
p.print(blank)
if n == 1 && result.List[0].Names == nil {
// single anonymous result; no ()'s
- p.expr(result.List[0].Type, multiLine)
+ p.expr(result.List[0].Type)
return
}
- p.parameters(result, multiLine)
+ p.parameters(result)
}
}
func identListSize(list []*ast.Ident, maxSize int) (size int) {
for i, x := range list {
if i > 0 {
- size += 2 // ", "
+ size += len(", ")
}
- size += len(x.Name)
+ size += utf8.RuneCountInString(x.Name)
if size >= maxSize {
break
}
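
Counting runes instead of bytes makes the size estimate track columns for non-ASCII identifiers; an illustrative comparison:

	len("naïve")                    // 6 - byte length over-counts the width
	utf8.RuneCountInString("naïve") // 5 - one per character
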
@@ -342,16 +361,21 @@ func (p *printer) isOneLineFieldList(list []*ast.Field) bool {
}
func (p *printer) setLineComment(text string) {
- p.setComment(&ast.CommentGroup{[]*ast.Comment{&ast.Comment{token.NoPos, text}}})
+ p.setComment(&ast.CommentGroup{List: []*ast.Comment{{Slash: token.NoPos, Text: text}}})
+}
+
+func (p *printer) isMultiLine(n ast.Node) bool {
+ return p.lineFor(n.End())-p.lineFor(n.Pos()) > 0
}
func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) {
lbrace := fields.Opening
list := fields.List
rbrace := fields.Closing
- srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.fset.Position(lbrace).Line == p.fset.Position(rbrace).Line
+ hasComments := isIncomplete || p.commentBefore(p.posFor(rbrace))
+ srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.lineFor(lbrace) == p.lineFor(rbrace)
- if !isIncomplete && !p.commentBefore(p.fset.Position(rbrace)) && srcIsOneLine {
+ if !hasComments && srcIsOneLine {
// possibly a one-line struct/interface
if len(list) == 0 {
// no blank between keyword and {} in this case
@@ -364,44 +388,48 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
f := list[0]
for i, x := range f.Names {
if i > 0 {
+ // no comments so no need for comma position
p.print(token.COMMA, blank)
}
- p.expr(x, ignoreMultiLine)
+ p.expr(x)
}
if len(f.Names) > 0 {
p.print(blank)
}
- p.expr(f.Type, ignoreMultiLine)
+ p.expr(f.Type)
p.print(blank, rbrace, token.RBRACE)
return
}
}
+ // hasComments || !srcIsOneLine
+
+ p.print(blank, lbrace, token.LBRACE, indent)
+ if hasComments || len(list) > 0 {
+ p.print(formfeed)
+ }
- // at least one entry or incomplete
- p.print(blank, lbrace, token.LBRACE, indent, formfeed)
if isStruct {
sep := vtab
if len(list) == 1 {
sep = blank
}
- var ml bool
+ newSection := false
for i, f := range list {
if i > 0 {
- p.linebreak(p.fset.Position(f.Pos()).Line, 1, ignore, ml)
+ p.linebreak(p.lineFor(f.Pos()), 1, ignore, newSection)
}
- ml = false
extraTabs := 0
p.setComment(f.Doc)
if len(f.Names) > 0 {
// named fields
- p.identList(f.Names, false, &ml)
+ p.identList(f.Names, false)
p.print(sep)
- p.expr(f.Type, &ml)
+ p.expr(f.Type)
extraTabs = 1
} else {
// anonymous field
- p.expr(f.Type, &ml)
+ p.expr(f.Type)
extraTabs = 2
}
if f.Tag != nil {
@@ -409,7 +437,7 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
p.print(sep)
}
p.print(sep)
- p.expr(f.Tag, &ml)
+ p.expr(f.Tag)
extraTabs = 0
}
if f.Comment != nil {
@@ -418,39 +446,40 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
}
p.setComment(f.Comment)
}
+ newSection = p.isMultiLine(f)
}
if isIncomplete {
if len(list) > 0 {
p.print(formfeed)
}
- p.flush(p.fset.Position(rbrace), token.RBRACE) // make sure we don't lose the last line comment
+ p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment
p.setLineComment("// contains filtered or unexported fields")
}
} else { // interface
- var ml bool
+ newSection := false
for i, f := range list {
if i > 0 {
- p.linebreak(p.fset.Position(f.Pos()).Line, 1, ignore, ml)
+ p.linebreak(p.lineFor(f.Pos()), 1, ignore, newSection)
}
- ml = false
p.setComment(f.Doc)
if ftyp, isFtyp := f.Type.(*ast.FuncType); isFtyp {
// method
- p.expr(f.Names[0], &ml)
- p.signature(ftyp.Params, ftyp.Results, &ml)
+ p.expr(f.Names[0])
+ p.signature(ftyp.Params, ftyp.Results)
} else {
// embedded interface
- p.expr(f.Type, &ml)
+ p.expr(f.Type)
}
p.setComment(f.Comment)
+ newSection = p.isMultiLine(f)
}
if isIncomplete {
if len(list) > 0 {
p.print(formfeed)
}
- p.flush(p.fset.Position(rbrace), token.RBRACE) // make sure we don't lose the last line comment
+ p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment
p.setLineComment("// contains filtered or unexported methods")
}
@@ -585,15 +614,14 @@ func reduceDepth(depth int) int {
// cutoff is 6 (always use spaces) in Normal mode
// and 4 (never use spaces) in Compact mode.
//
-// Sets multiLine to true if the binary expression spans multiple lines.
-func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiLine *bool) {
+func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) {
prec := x.Op.Precedence()
if prec < prec1 {
// parenthesis needed
// Note: The parser inserts an ast.ParenExpr node; thus this case
// can only occur if the AST is created in a different way.
p.print(token.LPAREN)
- p.expr0(x, reduceDepth(depth), multiLine) // parentheses undo one level of depth
+ p.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth
p.print(token.RPAREN)
return
}
@@ -601,26 +629,25 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiL
printBlank := prec < cutoff
ws := indent
- p.expr1(x.X, prec, depth+diffPrec(x.X, prec), multiLine)
+ p.expr1(x.X, prec, depth+diffPrec(x.X, prec))
if printBlank {
p.print(blank)
}
xline := p.pos.Line // before the operator (it may be on the next line!)
- yline := p.fset.Position(x.Y.Pos()).Line
+ yline := p.lineFor(x.Y.Pos())
p.print(x.OpPos, x.Op)
if xline != yline && xline > 0 && yline > 0 {
// at least one line break, but respect an extra empty line
// in the source
if p.linebreak(yline, 1, ws, true) {
ws = ignore
- *multiLine = true
printBlank = false // no blank after line break
}
}
if printBlank {
p.print(blank)
}
- p.expr1(x.Y, prec+1, depth+1, multiLine)
+ p.expr1(x.Y, prec+1, depth+1)
if ws == ignore {
p.print(unindent)
}
@@ -631,65 +658,7 @@ func isBinary(expr ast.Expr) bool {
return ok
}
-// If the expression contains one or more selector expressions, splits it into
-// two expressions at the rightmost period. Writes entire expr to suffix when
-// selector isn't found. Rewrites AST nodes for calls, index expressions and
-// type assertions, all of which may be found in selector chains, to make them
-// parts of the chain.
-func splitSelector(expr ast.Expr) (body, suffix ast.Expr) {
- switch x := expr.(type) {
- case *ast.SelectorExpr:
- body, suffix = x.X, x.Sel
- return
- case *ast.CallExpr:
- body, suffix = splitSelector(x.Fun)
- if body != nil {
- suffix = &ast.CallExpr{suffix, x.Lparen, x.Args, x.Ellipsis, x.Rparen}
- return
- }
- case *ast.IndexExpr:
- body, suffix = splitSelector(x.X)
- if body != nil {
- suffix = &ast.IndexExpr{suffix, x.Lbrack, x.Index, x.Rbrack}
- return
- }
- case *ast.SliceExpr:
- body, suffix = splitSelector(x.X)
- if body != nil {
- suffix = &ast.SliceExpr{suffix, x.Lbrack, x.Low, x.High, x.Rbrack}
- return
- }
- case *ast.TypeAssertExpr:
- body, suffix = splitSelector(x.X)
- if body != nil {
- suffix = &ast.TypeAssertExpr{suffix, x.Type}
- return
- }
- }
- suffix = expr
- return
-}
-
-// Convert an expression into an expression list split at the periods of
-// selector expressions.
-func selectorExprList(expr ast.Expr) (list []ast.Expr) {
- // split expression
- for expr != nil {
- var suffix ast.Expr
- expr, suffix = splitSelector(expr)
- list = append(list, suffix)
- }
-
- // reverse list
- for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {
- list[i], list[j] = list[j], list[i]
- }
-
- return
-}
-
-// Sets multiLine to true if the expression spans multiple lines.
-func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
+func (p *printer) expr1(expr ast.Expr, prec1, depth int) {
p.print(expr.Pos())
switch x := expr.(type) {
@@ -704,12 +673,12 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
p.internalError("depth < 1:", depth)
depth = 1
}
- p.binaryExpr(x, prec1, cutoff(x, depth), depth, multiLine)
+ p.binaryExpr(x, prec1, cutoff(x, depth), depth)
case *ast.KeyValueExpr:
- p.expr(x.Key, multiLine)
+ p.expr(x.Key)
p.print(x.Colon, token.COLON, blank)
- p.expr(x.Value, multiLine)
+ p.expr(x.Value)
case *ast.StarExpr:
const prec = token.UnaryPrec
@@ -717,12 +686,12 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
// parenthesis needed
p.print(token.LPAREN)
p.print(token.MUL)
- p.expr(x.X, multiLine)
+ p.expr(x.X)
p.print(token.RPAREN)
} else {
// no parenthesis needed
p.print(token.MUL)
- p.expr(x.X, multiLine)
+ p.expr(x.X)
}
case *ast.UnaryExpr:
@@ -730,7 +699,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
if prec < prec1 {
// parenthesis needed
p.print(token.LPAREN)
- p.expr(x, multiLine)
+ p.expr(x)
p.print(token.RPAREN)
} else {
// no parenthesis needed
@@ -739,36 +708,41 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
// TODO(gri) Remove this code if it cannot be reached.
p.print(blank)
}
- p.expr1(x.X, prec, depth, multiLine)
+ p.expr1(x.X, prec, depth)
}
case *ast.BasicLit:
p.print(x)
case *ast.FuncLit:
- p.expr(x.Type, multiLine)
- p.funcBody(x.Body, p.distance(x.Type.Pos(), p.pos), true, multiLine)
+ p.expr(x.Type)
+ p.funcBody(x.Body, p.distance(x.Type.Pos(), p.pos), true)
case *ast.ParenExpr:
if _, hasParens := x.X.(*ast.ParenExpr); hasParens {
// don't print parentheses around an already parenthesized expression
// TODO(gri) consider making this more general and incorporate precedence levels
- p.expr0(x.X, reduceDepth(depth), multiLine) // parentheses undo one level of depth
+ p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth
} else {
p.print(token.LPAREN)
- p.expr0(x.X, reduceDepth(depth), multiLine) // parentheses undo one level of depth
+ p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth
p.print(x.Rparen, token.RPAREN)
}
case *ast.SelectorExpr:
- parts := selectorExprList(expr)
- p.exprList(token.NoPos, parts, depth, periodSep, multiLine, token.NoPos)
+ p.expr1(x.X, token.HighestPrec, depth)
+ p.print(token.PERIOD)
+ if line := p.lineFor(x.Sel.Pos()); p.pos.IsValid() && p.pos.Line < line {
+ p.print(indent, newline, x.Sel.Pos(), x.Sel, unindent)
+ } else {
+ p.print(x.Sel.Pos(), x.Sel)
+ }
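
With this SelectorExpr case, a selector that starts on a later source line is indented under its operand instead of being flattened; e.g., the (hypothetical) input below keeps its break:

	result := builder.
		Build()
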
case *ast.TypeAssertExpr:
- p.expr1(x.X, token.HighestPrec, depth, multiLine)
+ p.expr1(x.X, token.HighestPrec, depth)
p.print(token.PERIOD, token.LPAREN)
if x.Type != nil {
- p.expr(x.Type, multiLine)
+ p.expr(x.Type)
} else {
p.print(token.TYPE)
}
@@ -776,17 +750,17 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
case *ast.IndexExpr:
// TODO(gri): should treat [] like parentheses and undo one level of depth
- p.expr1(x.X, token.HighestPrec, 1, multiLine)
+ p.expr1(x.X, token.HighestPrec, 1)
p.print(x.Lbrack, token.LBRACK)
- p.expr0(x.Index, depth+1, multiLine)
+ p.expr0(x.Index, depth+1)
p.print(x.Rbrack, token.RBRACK)
case *ast.SliceExpr:
// TODO(gri): should treat [] like parentheses and undo one level of depth
- p.expr1(x.X, token.HighestPrec, 1, multiLine)
+ p.expr1(x.X, token.HighestPrec, 1)
p.print(x.Lbrack, token.LBRACK)
if x.Low != nil {
- p.expr0(x.Low, depth+1, multiLine)
+ p.expr0(x.Low, depth+1)
}
// blanks around ":" if both sides exist and either side is a binary expression
if depth <= 1 && x.Low != nil && x.High != nil && (isBinary(x.Low) || isBinary(x.High)) {
@@ -795,7 +769,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
p.print(token.COLON)
}
if x.High != nil {
- p.expr0(x.High, depth+1, multiLine)
+ p.expr0(x.High, depth+1)
}
p.print(x.Rbrack, token.RBRACK)
@@ -803,21 +777,26 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
if len(x.Args) > 1 {
depth++
}
- p.expr1(x.Fun, token.HighestPrec, depth, multiLine)
+ p.expr1(x.Fun, token.HighestPrec, depth)
p.print(x.Lparen, token.LPAREN)
- p.exprList(x.Lparen, x.Args, depth, commaSep|commaTerm, multiLine, x.Rparen)
if x.Ellipsis.IsValid() {
+ p.exprList(x.Lparen, x.Args, depth, 0, x.Ellipsis)
p.print(x.Ellipsis, token.ELLIPSIS)
+ if x.Rparen.IsValid() && p.lineFor(x.Ellipsis) < p.lineFor(x.Rparen) {
+ p.print(token.COMMA, formfeed)
+ }
+ } else {
+ p.exprList(x.Lparen, x.Args, depth, commaTerm, x.Rparen)
}
p.print(x.Rparen, token.RPAREN)
case *ast.CompositeLit:
// composite literal elements that are composite literals themselves may have the type omitted
if x.Type != nil {
- p.expr1(x.Type, token.HighestPrec, depth, multiLine)
+ p.expr1(x.Type, token.HighestPrec, depth)
}
p.print(x.Lbrace, token.LBRACE)
- p.exprList(x.Lbrace, x.Elts, 1, commaSep|commaTerm, multiLine, x.Rbrace)
+ p.exprList(x.Lbrace, x.Elts, 1, commaTerm, x.Rbrace)
// do not insert extra line breaks because of comments before
// the closing '}' as it might break the code if there is no
// trailing ','
@@ -826,16 +805,16 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
case *ast.Ellipsis:
p.print(token.ELLIPSIS)
if x.Elt != nil {
- p.expr(x.Elt, multiLine)
+ p.expr(x.Elt)
}
case *ast.ArrayType:
p.print(token.LBRACK)
if x.Len != nil {
- p.expr(x.Len, multiLine)
+ p.expr(x.Len)
}
p.print(token.RBRACK)
- p.expr(x.Elt, multiLine)
+ p.expr(x.Elt)
case *ast.StructType:
p.print(token.STRUCT)
@@ -843,7 +822,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
case *ast.FuncType:
p.print(token.FUNC)
- p.signature(x.Params, x.Results, multiLine)
+ p.signature(x.Params, x.Results)
case *ast.InterfaceType:
p.print(token.INTERFACE)
@@ -851,9 +830,9 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
case *ast.MapType:
p.print(token.MAP, token.LBRACK)
- p.expr(x.Key, multiLine)
+ p.expr(x.Key)
p.print(token.RBRACK)
- p.expr(x.Value, multiLine)
+ p.expr(x.Value)
case *ast.ChanType:
switch x.Dir {
@@ -865,7 +844,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
p.print(token.CHAN, token.ARROW)
}
p.print(blank)
- p.expr(x.Value, multiLine)
+ p.expr(x.Value)
default:
panic("unreachable")
@@ -874,14 +853,13 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
return
}
-func (p *printer) expr0(x ast.Expr, depth int, multiLine *bool) {
- p.expr1(x, token.LowestPrec, depth, multiLine)
+func (p *printer) expr0(x ast.Expr, depth int) {
+ p.expr1(x, token.LowestPrec, depth)
}
-// Sets multiLine to true if the expression spans multiple lines.
-func (p *printer) expr(x ast.Expr, multiLine *bool) {
+func (p *printer) expr(x ast.Expr) {
const depth = 1
- p.expr1(x, token.LowestPrec, depth, multiLine)
+ p.expr1(x, token.LowestPrec, depth)
}
// ----------------------------------------------------------------------------
@@ -895,13 +873,13 @@ func (p *printer) stmtList(list []ast.Stmt, _indent int, nextIsRBrace bool) {
if _indent > 0 {
p.print(indent)
}
- var multiLine bool
+ multiLine := false
for i, s := range list {
// _indent == 0 only for lists of switch/select case clauses;
// in those cases each clause is a new section
- p.linebreak(p.fset.Position(s.Pos()).Line, 1, ignore, i == 0 || _indent == 0 || multiLine)
- multiLine = false
- p.stmt(s, nextIsRBrace && i == len(list)-1, &multiLine)
+ p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || _indent == 0 || multiLine)
+ p.stmt(s, nextIsRBrace && i == len(list)-1)
+ multiLine = p.isMultiLine(s)
}
if _indent > 0 {
p.print(unindent)
@@ -912,7 +890,7 @@ func (p *printer) stmtList(list []ast.Stmt, _indent int, nextIsRBrace bool) {
func (p *printer) block(s *ast.BlockStmt, indent int) {
p.print(s.Pos(), token.LBRACE)
p.stmtList(s.List, indent, true)
- p.linebreak(p.fset.Position(s.Rbrace).Line, 1, ignore, true)
+ p.linebreak(p.lineFor(s.Rbrace), 1, ignore, true)
p.print(s.Rbrace, token.RBRACE)
}
@@ -958,25 +936,25 @@ func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, po
if init == nil && post == nil {
// no semicolons required
if expr != nil {
- p.expr(stripParens(expr), ignoreMultiLine)
+ p.expr(stripParens(expr))
needsBlank = true
}
} else {
// all semicolons required
// (they are not separators, print them explicitly)
if init != nil {
- p.stmt(init, false, ignoreMultiLine)
+ p.stmt(init, false)
}
p.print(token.SEMICOLON, blank)
if expr != nil {
- p.expr(stripParens(expr), ignoreMultiLine)
+ p.expr(stripParens(expr))
needsBlank = true
}
if isForStmt {
p.print(token.SEMICOLON, blank)
needsBlank = false
if post != nil {
- p.stmt(post, false, ignoreMultiLine)
+ p.stmt(post, false)
needsBlank = true
}
}
@@ -986,8 +964,42 @@ func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, po
}
}
-// Sets multiLine to true if the statements spans multiple lines.
-func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
+// indentList reports whether an expression list would look better if it
+// were indented wholesale (starting with the very first element, rather
+// than starting at the first line break).
+//
+func (p *printer) indentList(list []ast.Expr) bool {
+ // Heuristic: indentList returns true if there is more than one
+ // multi-line element in the list, or if any element does not start
+ // on the same line as the previous one ends.
+ if len(list) >= 2 {
+ var b = p.lineFor(list[0].Pos())
+ var e = p.lineFor(list[len(list)-1].End())
+ if 0 < b && b < e {
+ // list spans multiple lines
+ n := 0 // multi-line element count
+ line := b
+ for _, x := range list {
+ xb := p.lineFor(x.Pos())
+ xe := p.lineFor(x.End())
+ if line < xb {
+ // x is not starting on the same
+ // line as the previous one ended
+ return true
+ }
+ if xb < xe {
+ // x is a multi-line element
+ n++
+ }
+ line = xe
+ }
+ return n > 1
+ }
+ }
+ return false
+}
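
A worked (hypothetical) example of the heuristic: the return below has two multi-line elements, so indentList reports true and the whole result list is indented starting at its first element, roughly:

	return &Point{
			X: 1,
			Y: 2,
		}, &Point{
			X: 3,
			Y: 4,
		}
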
+
+func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool) {
p.print(stmt.Pos())
switch s := stmt.(type) {
@@ -995,7 +1007,7 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
p.print("BadStmt")
case *ast.DeclStmt:
- p.decl(s.Decl, multiLine)
+ p.decl(s.Decl)
case *ast.EmptyStmt:
// nothing to do
@@ -1005,7 +1017,7 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
// is applied before the line break if there is no comment
// between (see writeWhitespace)
p.print(unindent)
- p.expr(s.Label, multiLine)
+ p.expr(s.Label)
p.print(s.Colon, token.COLON, indent)
if e, isEmpty := s.Stmt.(*ast.EmptyStmt); isEmpty {
if !nextIsRBrace {
@@ -1013,23 +1025,23 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
break
}
} else {
- p.linebreak(p.fset.Position(s.Stmt.Pos()).Line, 1, ignore, true)
+ p.linebreak(p.lineFor(s.Stmt.Pos()), 1, ignore, true)
}
- p.stmt(s.Stmt, nextIsRBrace, multiLine)
+ p.stmt(s.Stmt, nextIsRBrace)
case *ast.ExprStmt:
const depth = 1
- p.expr0(s.X, depth, multiLine)
+ p.expr0(s.X, depth)
case *ast.SendStmt:
const depth = 1
- p.expr0(s.Chan, depth, multiLine)
+ p.expr0(s.Chan, depth)
p.print(blank, s.Arrow, token.ARROW, blank)
- p.expr0(s.Value, depth, multiLine)
+ p.expr0(s.Value, depth)
case *ast.IncDecStmt:
const depth = 1
- p.expr0(s.X, depth+1, multiLine)
+ p.expr0(s.X, depth+1)
p.print(s.TokPos, s.Tok)
case *ast.AssignStmt:
@@ -1037,56 +1049,66 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
if len(s.Lhs) > 1 && len(s.Rhs) > 1 {
depth++
}
- p.exprList(s.Pos(), s.Lhs, depth, commaSep, multiLine, s.TokPos)
- p.print(blank, s.TokPos, s.Tok)
- p.exprList(s.TokPos, s.Rhs, depth, blankStart|commaSep, multiLine, token.NoPos)
+ p.exprList(s.Pos(), s.Lhs, depth, 0, s.TokPos)
+ p.print(blank, s.TokPos, s.Tok, blank)
+ p.exprList(s.TokPos, s.Rhs, depth, 0, token.NoPos)
case *ast.GoStmt:
p.print(token.GO, blank)
- p.expr(s.Call, multiLine)
+ p.expr(s.Call)
case *ast.DeferStmt:
p.print(token.DEFER, blank)
- p.expr(s.Call, multiLine)
+ p.expr(s.Call)
case *ast.ReturnStmt:
p.print(token.RETURN)
if s.Results != nil {
- p.exprList(s.Pos(), s.Results, 1, blankStart|commaSep, multiLine, token.NoPos)
+ p.print(blank)
+ // Use indentList heuristic to make corner cases look
+ // better (issue 1207). A more systematic approach would
+ // always indent, but this would cause significant
+ // reformatting of the code base and not necessarily
+ // lead to more nicely formatted code in general.
+ if p.indentList(s.Results) {
+ p.print(indent)
+ p.exprList(s.Pos(), s.Results, 1, noIndent, token.NoPos)
+ p.print(unindent)
+ } else {
+ p.exprList(s.Pos(), s.Results, 1, 0, token.NoPos)
+ }
}
case *ast.BranchStmt:
p.print(s.Tok)
if s.Label != nil {
p.print(blank)
- p.expr(s.Label, multiLine)
+ p.expr(s.Label)
}
case *ast.BlockStmt:
p.block(s, 1)
- *multiLine = true
case *ast.IfStmt:
p.print(token.IF)
p.controlClause(false, s.Init, s.Cond, nil)
p.block(s.Body, 1)
- *multiLine = true
if s.Else != nil {
p.print(blank, token.ELSE, blank)
switch s.Else.(type) {
case *ast.BlockStmt, *ast.IfStmt:
- p.stmt(s.Else, nextIsRBrace, ignoreMultiLine)
+ p.stmt(s.Else, nextIsRBrace)
default:
p.print(token.LBRACE, indent, formfeed)
- p.stmt(s.Else, true, ignoreMultiLine)
+ p.stmt(s.Else, true)
p.print(unindent, formfeed, token.RBRACE)
}
}
case *ast.CaseClause:
if s.List != nil {
- p.print(token.CASE)
- p.exprList(s.Pos(), s.List, 1, blankStart|commaSep, multiLine, s.Colon)
+ p.print(token.CASE, blank)
+ p.exprList(s.Pos(), s.List, 1, 0, s.Colon)
} else {
p.print(token.DEFAULT)
}
@@ -1097,25 +1119,23 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
p.print(token.SWITCH)
p.controlClause(false, s.Init, s.Tag, nil)
p.block(s.Body, 0)
- *multiLine = true
case *ast.TypeSwitchStmt:
p.print(token.SWITCH)
if s.Init != nil {
p.print(blank)
- p.stmt(s.Init, false, ignoreMultiLine)
+ p.stmt(s.Init, false)
p.print(token.SEMICOLON)
}
p.print(blank)
- p.stmt(s.Assign, false, ignoreMultiLine)
+ p.stmt(s.Assign, false)
p.print(blank)
p.block(s.Body, 0)
- *multiLine = true
case *ast.CommClause:
if s.Comm != nil {
p.print(token.CASE, blank)
- p.stmt(s.Comm, false, ignoreMultiLine)
+ p.stmt(s.Comm, false)
} else {
p.print(token.DEFAULT)
}
@@ -1125,32 +1145,31 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
case *ast.SelectStmt:
p.print(token.SELECT, blank)
body := s.Body
- if len(body.List) == 0 && !p.commentBefore(p.fset.Position(body.Rbrace)) {
+ if len(body.List) == 0 && !p.commentBefore(p.posFor(body.Rbrace)) {
// print empty select statement w/o comments on one line
p.print(body.Lbrace, token.LBRACE, body.Rbrace, token.RBRACE)
} else {
p.block(body, 0)
- *multiLine = true
}
case *ast.ForStmt:
p.print(token.FOR)
p.controlClause(true, s.Init, s.Cond, s.Post)
p.block(s.Body, 1)
- *multiLine = true
case *ast.RangeStmt:
p.print(token.FOR, blank)
- p.expr(s.Key, multiLine)
+ p.expr(s.Key)
if s.Value != nil {
- p.print(token.COMMA, blank)
- p.expr(s.Value, multiLine)
+ // use position of value following the comma as
+ // comma position for correct comment placement
+ p.print(s.Value.Pos(), token.COMMA, blank)
+ p.expr(s.Value)
}
p.print(blank, s.TokPos, s.Tok, blank, token.RANGE, blank)
- p.expr(stripParens(s.X), multiLine)
+ p.expr(stripParens(s.X))
p.print(blank)
p.block(s.Body, 1)
- *multiLine = true
default:
panic("unreachable")
@@ -1227,20 +1246,20 @@ func keepTypeColumn(specs []ast.Spec) []bool {
return m
}
-func (p *printer) valueSpec(s *ast.ValueSpec, keepType, doIndent bool, multiLine *bool) {
+func (p *printer) valueSpec(s *ast.ValueSpec, keepType bool) {
p.setComment(s.Doc)
- p.identList(s.Names, doIndent, multiLine) // always present
+ p.identList(s.Names, false) // always present
extraTabs := 3
if s.Type != nil || keepType {
p.print(vtab)
extraTabs--
}
if s.Type != nil {
- p.expr(s.Type, multiLine)
+ p.expr(s.Type)
}
if s.Values != nil {
- p.print(vtab, token.ASSIGN)
- p.exprList(token.NoPos, s.Values, 1, blankStart|commaSep, multiLine, token.NoPos)
+ p.print(vtab, token.ASSIGN, blank)
+ p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos)
extraTabs--
}
if s.Comment != nil {
@@ -1254,44 +1273,44 @@ func (p *printer) valueSpec(s *ast.ValueSpec, keepType, doIndent bool, multiLine
// The parameter n is the number of specs in the group. If doIndent is set,
// multi-line identifier lists in the spec are indented when the first
// linebreak is encountered.
-// Sets multiLine to true if the spec spans multiple lines.
//
-func (p *printer) spec(spec ast.Spec, n int, doIndent bool, multiLine *bool) {
+func (p *printer) spec(spec ast.Spec, n int, doIndent bool) {
switch s := spec.(type) {
case *ast.ImportSpec:
p.setComment(s.Doc)
if s.Name != nil {
- p.expr(s.Name, multiLine)
+ p.expr(s.Name)
p.print(blank)
}
- p.expr(s.Path, multiLine)
+ p.expr(s.Path)
p.setComment(s.Comment)
+ p.print(s.EndPos)
case *ast.ValueSpec:
if n != 1 {
p.internalError("expected n = 1; got", n)
}
p.setComment(s.Doc)
- p.identList(s.Names, doIndent, multiLine) // always present
+ p.identList(s.Names, doIndent) // always present
if s.Type != nil {
p.print(blank)
- p.expr(s.Type, multiLine)
+ p.expr(s.Type)
}
if s.Values != nil {
- p.print(blank, token.ASSIGN)
- p.exprList(token.NoPos, s.Values, 1, blankStart|commaSep, multiLine, token.NoPos)
+ p.print(blank, token.ASSIGN, blank)
+ p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos)
}
p.setComment(s.Comment)
case *ast.TypeSpec:
p.setComment(s.Doc)
- p.expr(s.Name, multiLine)
+ p.expr(s.Name)
if n == 1 {
p.print(blank)
} else {
p.print(vtab)
}
- p.expr(s.Type, multiLine)
+ p.expr(s.Type)
p.setComment(s.Comment)
default:
@@ -1299,8 +1318,7 @@ func (p *printer) spec(spec ast.Spec, n int, doIndent bool, multiLine *bool) {
}
}
-// Sets multiLine to true if the declaration spans multiple lines.
-func (p *printer) genDecl(d *ast.GenDecl, multiLine *bool) {
+func (p *printer) genDecl(d *ast.GenDecl) {
p.setComment(d.Doc)
p.print(d.Pos(), d.Tok, blank)
@@ -1313,32 +1331,31 @@ func (p *printer) genDecl(d *ast.GenDecl, multiLine *bool) {
// two or more grouped const/var declarations:
// determine if the type column must be kept
keepType := keepTypeColumn(d.Specs)
- var ml bool
+ newSection := false
for i, s := range d.Specs {
if i > 0 {
- p.linebreak(p.fset.Position(s.Pos()).Line, 1, ignore, ml)
+ p.linebreak(p.lineFor(s.Pos()), 1, ignore, newSection)
}
- ml = false
- p.valueSpec(s.(*ast.ValueSpec), keepType[i], false, &ml)
+ p.valueSpec(s.(*ast.ValueSpec), keepType[i])
+ newSection = p.isMultiLine(s)
}
} else {
- var ml bool
+ newSection := false
for i, s := range d.Specs {
if i > 0 {
- p.linebreak(p.fset.Position(s.Pos()).Line, 1, ignore, ml)
+ p.linebreak(p.lineFor(s.Pos()), 1, ignore, newSection)
}
- ml = false
- p.spec(s, n, false, &ml)
+ p.spec(s, n, false)
+ newSection = p.isMultiLine(s)
}
}
p.print(unindent, formfeed)
- *multiLine = true
}
p.print(d.Rparen, token.RPAREN)
} else {
// single declaration
- p.spec(d.Specs[0], 1, true, multiLine)
+ p.spec(d.Specs[0], 1, true)
}
}
@@ -1364,7 +1381,7 @@ func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
// in RawFormat
cfg := Config{Mode: RawFormat}
var buf bytes.Buffer
- if _, err := cfg.fprint(&buf, p.fset, n, p.nodeSizes); err != nil {
+ if err := cfg.fprint(&buf, p.fset, n, p.nodeSizes); err != nil {
return
}
if buf.Len() <= maxSize {
@@ -1382,11 +1399,11 @@ func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
func (p *printer) isOneLineFunc(b *ast.BlockStmt, headerSize int) bool {
pos1 := b.Pos()
pos2 := b.Rbrace
- if pos1.IsValid() && pos2.IsValid() && p.fset.Position(pos1).Line != p.fset.Position(pos2).Line {
+ if pos1.IsValid() && pos2.IsValid() && p.lineFor(pos1) != p.lineFor(pos2) {
// opening and closing brace are on different lines - don't make it a one-liner
return false
}
- if len(b.List) > 5 || p.commentBefore(p.fset.Position(pos2)) {
+ if len(b.List) > 5 || p.commentBefore(p.posFor(pos2)) {
// too many statements or there is a comment inside - don't make it a one-liner
return false
}
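
Together these checks gate the one-liner form: braces on one source line, at most five statements, no interior comments, and a small enough measured size keep a function like this (illustrative) on a single line:

	func isEven(n int) bool { return n%2 == 0 }
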
@@ -1402,8 +1419,7 @@ func (p *printer) isOneLineFunc(b *ast.BlockStmt, headerSize int) bool {
return headerSize+bodySize <= maxSize
}
-// Sets multiLine to true if the function body spans multiple lines.
-func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLine *bool) {
+func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool) {
if b == nil {
return
}
@@ -1420,7 +1436,7 @@ func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLi
if i > 0 {
p.print(token.SEMICOLON, blank)
}
- p.stmt(s, i == len(b.List)-1, ignoreMultiLine)
+ p.stmt(s, i == len(b.List)-1)
}
p.print(blank)
}
@@ -1430,42 +1446,39 @@ func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLi
p.print(blank)
p.block(b, 1)
- *multiLine = true
}
// distance returns the column difference between from and to if both
// are on the same line; if they are on different lines (or unknown)
// the result is infinity.
func (p *printer) distance(from0 token.Pos, to token.Position) int {
- from := p.fset.Position(from0)
+ from := p.posFor(from0)
if from.IsValid() && to.IsValid() && from.Line == to.Line {
return to.Column - from.Column
}
return infinity
}
-// Sets multiLine to true if the declaration spans multiple lines.
-func (p *printer) funcDecl(d *ast.FuncDecl, multiLine *bool) {
+func (p *printer) funcDecl(d *ast.FuncDecl) {
p.setComment(d.Doc)
p.print(d.Pos(), token.FUNC, blank)
if d.Recv != nil {
- p.parameters(d.Recv, multiLine) // method: print receiver
+ p.parameters(d.Recv) // method: print receiver
p.print(blank)
}
- p.expr(d.Name, multiLine)
- p.signature(d.Type.Params, d.Type.Results, multiLine)
- p.funcBody(d.Body, p.distance(d.Pos(), p.pos), false, multiLine)
+ p.expr(d.Name)
+ p.signature(d.Type.Params, d.Type.Results)
+ p.funcBody(d.Body, p.distance(d.Pos(), p.pos), false)
}
-// Sets multiLine to true if the declaration spans multiple lines.
-func (p *printer) decl(decl ast.Decl, multiLine *bool) {
+func (p *printer) decl(decl ast.Decl) {
switch d := decl.(type) {
case *ast.BadDecl:
p.print(d.Pos(), "BadDecl")
case *ast.GenDecl:
- p.genDecl(d, multiLine)
+ p.genDecl(d)
case *ast.FuncDecl:
- p.funcDecl(d, multiLine)
+ p.funcDecl(d)
default:
panic("unreachable")
}
@@ -1488,7 +1501,7 @@ func declToken(decl ast.Decl) (tok token.Token) {
func (p *printer) file(src *ast.File) {
p.setComment(src.Doc)
p.print(src.Pos(), token.PACKAGE, blank)
- p.expr(src.Name, ignoreMultiLine)
+ p.expr(src.Name)
if len(src.Decls) > 0 {
tok := token.ILLEGAL
@@ -1496,13 +1509,18 @@ func (p *printer) file(src *ast.File) {
prev := tok
tok = declToken(d)
// if the declaration token changed (e.g., from CONST to TYPE)
+ // or the next declaration has documentation associated with it,
// print an empty line between top-level declarations
+ // (because p.linebreak is called with the position of d, which
+ // is past any documentation, the minimum requirement is satisfied
+ // even w/o the extra getDoc(d) nil-check - leave it in case the
+ // linebreak logic improves - there's already a TODO).
min := 1
- if prev != tok {
+ if prev != tok || getDoc(d) != nil {
min = 2
}
- p.linebreak(p.fset.Position(d.Pos()).Line, min, ignore, false)
- p.decl(d, ignoreMultiLine)
+ p.linebreak(p.lineFor(d.Pos()), min, ignore, false)
+ p.decl(d)
}
}
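
The getDoc(d) check forces an empty line before any documented declaration even when the declaration token is unchanged; e.g. (illustrative):

	const a = 1

	// B is documented, so a blank line is forced above it.
	const b = 2
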
diff --git a/src/pkg/go/printer/performance_test.go b/src/pkg/go/printer/performance_test.go
index 84fb2808e..0c6a4e71f 100644
--- a/src/pkg/go/printer/performance_test.go
+++ b/src/pkg/go/printer/performance_test.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// This file implements a simple printer performance benchmark:
-// gotest -bench=BenchmarkPrint
+// go test -bench=BenchmarkPrint
package printer
@@ -20,7 +20,7 @@ import (
var testfile *ast.File
func testprint(out io.Writer, file *ast.File) {
- if _, err := (&Config{TabIndent | UseSpaces, 8}).Fprint(out, fset, file); err != nil {
+ if err := (&Config{TabIndent | UseSpaces, 8}).Fprint(out, fset, file); err != nil {
log.Fatalf("print error: %s", err)
}
}
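
This follows Config.Fprint's new signature, which now returns only an error (the byte count is gone). Typical use after this change, assuming fset and file come from an earlier parse (a sketch):

	cfg := printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 8}
	if err := cfg.Fprint(os.Stdout, fset, file); err != nil {
		log.Fatal(err)
	}
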
diff --git a/src/pkg/go/printer/printer.go b/src/pkg/go/printer/printer.go
index 871fefa0c..a027d32da 100644
--- a/src/pkg/go/printer/printer.go
+++ b/src/pkg/go/printer/printer.go
@@ -6,21 +6,23 @@
package printer
import (
- "bytes"
"fmt"
"go/ast"
"go/token"
"io"
"os"
- "path/filepath"
- "runtime"
- "tabwriter"
+ "strconv"
+ "strings"
+ "text/tabwriter"
)
-const debug = false // enable for debugging
-
+const (
+ maxNewlines = 2 // max. number of newlines emitted between pieces of source text
+ debug = false // enable for debugging
+ infinity = 1 << 30
+)
-type whiteSpace int
+type whiteSpace byte
const (
ignore = whiteSpace(0)
@@ -32,72 +34,98 @@ const (
unindent = whiteSpace('<')
)
-var (
- esc = []byte{tabwriter.Escape}
- htab = []byte{'\t'}
- htabs = []byte("\t\t\t\t\t\t\t\t")
- newlines = []byte("\n\n\n\n\n\n\n\n") // more than the max determined by nlines
- formfeeds = []byte("\f\f\f\f\f\f\f\f") // more than the max determined by nlines
-)
-
-// Special positions
-var noPos token.Position // use noPos when a position is needed but not known
-var infinity = 1 << 30
-
-// Use ignoreMultiLine if the multiLine information is not important.
-var ignoreMultiLine = new(bool)
-
// A pmode value represents the current printer mode.
type pmode int
const (
- inLiteral pmode = 1 << iota
- noExtraLinebreak
+ noExtraLinebreak pmode = 1 << iota
)
type printer struct {
// Configuration (does not change after initialization)
- output io.Writer
Config
- fset *token.FileSet
- errors chan os.Error
+ fset *token.FileSet
// Current state
- written int // number of bytes written
- indent int // current indentation
- mode pmode // current printer mode
- lastTok token.Token // the last token printed (token.ILLEGAL if it's whitespace)
-
- // Reused buffers
- wsbuf []whiteSpace // delayed white space
- litbuf bytes.Buffer // for creation of escaped literals and comments
-
- // The (possibly estimated) position in the generated output;
- // in AST space (i.e., pos is set whenever a token position is
- // known accurately, and updated dependending on what has been
- // written).
- pos token.Position
-
- // The value of pos immediately after the last item has been
- // written using writeItem.
- last token.Position
+ output []byte // raw printer result
+ indent int // current indentation
+ mode pmode // current printer mode
+ impliedSemi bool // if set, a linebreak implies a semicolon
+ lastTok token.Token // the last token printed (token.ILLEGAL if it's whitespace)
+ wsbuf []whiteSpace // delayed white space
+
+ // Positions
+ // The out position differs from the pos position when the result
+ // formatting differs from the source formatting (in the amount of
+ // white space). If there's a difference and SourcePos is set in
+ // ConfigMode, //line comments are used in the output to restore
+ // original source positions for a reader.
+ pos token.Position // current position in AST (source) space
+ out token.Position // current position in output space
+ last token.Position // value of pos after calling writeString
// The list of all source comments, in order of appearance.
comments []*ast.CommentGroup // may be nil
cindex int // current comment index
useNodeComments bool // if not set, ignore lead and line comments of nodes
+ // Information about p.comments[p.cindex]; set up by nextComment.
+ comment *ast.CommentGroup // = p.comments[p.cindex]; or nil
+ commentOffset int // = p.posFor(p.comments[p.cindex].List[0].Pos()).Offset; or infinity
+ commentNewline bool // true if the comment group contains newlines
+
// Cache of already computed node sizes.
nodeSizes map[ast.Node]int
+
+ // Cache of most recently computed line position.
+ cachedPos token.Pos
+ cachedLine int // line corresponding to cachedPos
}
-func (p *printer) init(output io.Writer, cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) {
- p.output = output
+func (p *printer) init(cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) {
p.Config = *cfg
p.fset = fset
- p.errors = make(chan os.Error)
+ p.pos = token.Position{Line: 1, Column: 1}
+ p.out = token.Position{Line: 1, Column: 1}
p.wsbuf = make([]whiteSpace, 0, 16) // whitespace sequences are short
p.nodeSizes = nodeSizes
+ p.cachedPos = -1
+}
+
+// commentsHaveNewline reports whether a list of comments belonging to
+// an *ast.CommentGroup contains newlines. Because the position information
+// may only be partially correct, we also have to read the comment text.
+func (p *printer) commentsHaveNewline(list []*ast.Comment) bool {
+ // len(list) > 0
+ line := p.lineFor(list[0].Pos())
+ for i, c := range list {
+ if i > 0 && p.lineFor(list[i].Pos()) != line {
+ // not all comments on the same line
+ return true
+ }
+ if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) {
+ return true
+ }
+ }
+ _ = line
+ return false
+}
+
+func (p *printer) nextComment() {
+ for p.cindex < len(p.comments) {
+ c := p.comments[p.cindex]
+ p.cindex++
+ if list := c.List; len(list) > 0 {
+ p.comment = c
+ p.commentOffset = p.posFor(list[0].Pos()).Offset
+ p.commentNewline = p.commentsHaveNewline(list)
+ return
+ }
+ // we should not reach here (correct ASTs don't have empty
+ // ast.CommentGroup nodes), but be conservative and try again
+ }
+ // no more comments
+ p.commentOffset = infinity
}
func (p *printer) internalError(msg ...interface{}) {
@@ -108,137 +136,139 @@ func (p *printer) internalError(msg ...interface{}) {
}
}
-// escape escapes string s by bracketing it with tabwriter.Escape.
-// Escaped strings pass through tabwriter unchanged. (Note that
-// valid Go programs cannot contain tabwriter.Escape bytes since
-// they do not appear in legal UTF-8 sequences).
-//
-func (p *printer) escape(s string) string {
- p.litbuf.Reset()
- p.litbuf.WriteByte(tabwriter.Escape)
- p.litbuf.WriteString(s)
- p.litbuf.WriteByte(tabwriter.Escape)
- return p.litbuf.String()
+func (p *printer) posFor(pos token.Pos) token.Position {
+ // not used frequently enough to cache entire token.Position
+ return p.fset.Position(pos)
}
-// nlines returns the adjusted number of linebreaks given the desired number
-// of breaks n such that min <= result <= max.
-//
-func (p *printer) nlines(n, min int) int {
- const max = 2 // max. number of newlines
- switch {
- case n < min:
- return min
- case n > max:
- return max
+func (p *printer) lineFor(pos token.Pos) int {
+ if pos != p.cachedPos {
+ p.cachedPos = pos
+ p.cachedLine = p.fset.Position(pos).Line
}
- return n
+ return p.cachedLine
}
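
lineFor is a one-entry memo: the node-printing code tends to ask for the same position repeatedly, so consecutive identical queries skip the FileSet lookup, e.g.:

	a := p.lineFor(n.Pos()) // computes fset.Position(...).Line and caches it
	b := p.lineFor(n.Pos()) // same pos: served from p.cachedLine
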
-// write0 writes raw (uninterpreted) data to p.output and handles errors.
-// write0 does not indent after newlines, and does not HTML-escape or update p.pos.
-//
-func (p *printer) write0(data []byte) {
- if len(data) > 0 {
- n, err := p.output.Write(data)
- p.written += n
- if err != nil {
- p.errors <- err
- runtime.Goexit()
- }
+// atLineBegin emits a //line comment if necessary and prints indentation.
+func (p *printer) atLineBegin(pos token.Position) {
+ // write a //line comment if necessary
+ if p.Config.Mode&SourcePos != 0 && pos.IsValid() && (p.out.Line != pos.Line || p.out.Filename != pos.Filename) {
+ p.output = append(p.output, tabwriter.Escape) // protect '\n' in //line from tabwriter interpretation
+ p.output = append(p.output, fmt.Sprintf("//line %s:%d\n", pos.Filename, pos.Line)...)
+ p.output = append(p.output, tabwriter.Escape)
+ // p.out must match the //line comment
+ p.out.Filename = pos.Filename
+ p.out.Line = pos.Line
}
-}
-
-// write interprets data and writes it to p.output. It inserts indentation
-// after a line break unless in a tabwriter escape sequence.
-// It updates p.pos as a side-effect.
-//
-func (p *printer) write(data []byte) {
- i0 := 0
- for i, b := range data {
- switch b {
- case '\n', '\f':
- // write segment ending in b
- p.write0(data[i0 : i+1])
-
- // update p.pos
- p.pos.Offset += i + 1 - i0
- p.pos.Line++
- p.pos.Column = 1
-
- if p.mode&inLiteral == 0 {
- // write indentation
- // use "hard" htabs - indentation columns
- // must not be discarded by the tabwriter
- j := p.indent
- for ; j > len(htabs); j -= len(htabs) {
- p.write0(htabs)
- }
- p.write0(htabs[0:j])
- // update p.pos
- p.pos.Offset += p.indent
- p.pos.Column += p.indent
- }
-
- // next segment start
- i0 = i + 1
+ // write indentation
+ // use "hard" htabs - indentation columns
+ // must not be discarded by the tabwriter
+ for i := 0; i < p.indent; i++ {
+ p.output = append(p.output, '\t')
+ }
- case tabwriter.Escape:
- p.mode ^= inLiteral
+ // update positions
+ i := p.indent
+ p.pos.Offset += i
+ p.pos.Column += i
+ p.out.Column += i
+}
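
When Mode includes SourcePos and the output position has drifted from the source position, the text emitted at the start of the line looks like this (illustrative filename):

	//line example.go:42
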
- // ignore escape chars introduced by printer - they are
- // invisible and must not affect p.pos (was issue #1089)
- p.pos.Offset--
- p.pos.Column--
- }
+// writeByte writes ch n times to p.output and updates p.pos.
+func (p *printer) writeByte(ch byte, n int) {
+ if p.out.Column == 1 {
+ p.atLineBegin(p.pos)
}
- // write remaining segment
- p.write0(data[i0:])
-
- // update p.pos
- d := len(data) - i0
- p.pos.Offset += d
- p.pos.Column += d
-}
+ for i := 0; i < n; i++ {
+ p.output = append(p.output, ch)
+ }
-func (p *printer) writeNewlines(n int, useFF bool) {
- if n > 0 {
- n = p.nlines(n, 0)
- if useFF {
- p.write(formfeeds[0:n])
- } else {
- p.write(newlines[0:n])
- }
+ // update positions
+ p.pos.Offset += n
+ if ch == '\n' || ch == '\f' {
+ p.pos.Line += n
+ p.out.Line += n
+ p.pos.Column = 1
+ p.out.Column = 1
+ return
}
+ p.pos.Column += n
+ p.out.Column += n
}
-// writeItem writes data at position pos. data is the text corresponding to
-// a single lexical token, but may also be comment text. pos is the actual
-// (or at least very accurately estimated) position of the data in the original
-// source text. writeItem updates p.last to the position immediately following
-// the data.
+// writeString writes the string s to p.output and updates p.pos, p.out,
+// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters
+// to protect s from being interpreted by the tabwriter.
//
-func (p *printer) writeItem(pos token.Position, data string) {
+// Note: writeString is only used to write Go tokens, literals, and
+// comments, all of which must be written literally. Thus, it is correct
+// to always set isLit = true. However, setting it explicitly only when
+// needed (i.e., when we don't know that s contains no tabs or line breaks)
+// avoids processing extra escape characters and reduces run time of the
+// printer benchmark by up to 10%.
+//
+func (p *printer) writeString(pos token.Position, s string, isLit bool) {
+ if p.out.Column == 1 {
+ p.atLineBegin(pos)
+ }
+
if pos.IsValid() {
- // continue with previous position if we don't have a valid pos
+ // update p.pos (if pos is invalid, continue with existing p.pos)
+ // Note: Must do this after handling line beginnings because
+ // atLineBegin updates p.pos if there's indentation, but p.pos
+ // is the position of s.
+ p.pos = pos
+ // reset state if the file changed
+ // (used when printing merged ASTs of different files
+ // e.g., the result of ast.MergePackageFiles)
if p.last.IsValid() && p.last.Filename != pos.Filename {
- // the file has changed - reset state
- // (used when printing merged ASTs of different files
- // e.g., the result of ast.MergePackageFiles)
p.indent = 0
p.mode = 0
p.wsbuf = p.wsbuf[0:0]
}
- p.pos = pos
}
+
+ if isLit {
+ // Protect s such that it passes through the tabwriter
+ // unchanged. Note that valid Go programs cannot contain
+ // tabwriter.Escape bytes since they do not appear in legal
+ // UTF-8 sequences.
+ p.output = append(p.output, tabwriter.Escape)
+ }
+
if debug {
- // do not update p.pos - use write0
- _, filename := filepath.Split(pos.Filename)
- p.write0([]byte(fmt.Sprintf("[%s:%d:%d]", filename, pos.Line, pos.Column)))
+ p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos!
+ }
+ p.output = append(p.output, s...)
+
+ // update positions
+ nlines := 0
+ var li int // index of last newline; valid if nlines > 0
+ for i := 0; i < len(s); i++ {
+ // Go tokens cannot contain '\f' - no need to look for it
+ if s[i] == '\n' {
+ nlines++
+ li = i
+ }
+ }
+ p.pos.Offset += len(s)
+ if nlines > 0 {
+ p.pos.Line += nlines
+ p.out.Line += nlines
+ c := len(s) - li
+ p.pos.Column = c
+ p.out.Column = c
+ } else {
+ p.pos.Column += len(s)
+ p.out.Column += len(s)
+ }
+
+ if isLit {
+ p.output = append(p.output, tabwriter.Escape)
}
- p.write([]byte(data))
+
p.last = p.pos
}
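
The isLit escaping builds on text/tabwriter's convention that bytes bracketed by tabwriter.Escape pass through uninterpreted. A standalone demonstration, independent of the printer:

	package main

	import (
		"fmt"
		"os"
		"text/tabwriter"
	)

	func main() {
		w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, ' ', 0)
		fmt.Fprintf(w, "a\tb\n")                                         // tab acts as a column separator
		fmt.Fprintf(w, "%ca\tb%c\n", tabwriter.Escape, tabwriter.Escape) // escaped: tab passes through verbatim
		w.Flush()
	}
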
@@ -247,18 +277,17 @@ func (p *printer) writeItem(pos token.Position, data string) {
// it as is likely to help position the comment nicely.
// pos is the comment position, next the position of the item
// after all pending comments, prev is the previous comment in
-// a group of comments (or nil), and isKeyword indicates if the
-// next item is a keyword.
+// a group of comments (or nil), and tok is the next token.
//
-func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, isKeyword bool) {
- if p.written == 0 {
+func (p *printer) writeCommentPrefix(pos, next token.Position, prev, comment *ast.Comment, tok token.Token) {
+ if len(p.output) == 0 {
// the comment is the first item to be printed - don't write any whitespace
return
}
if pos.IsValid() && pos.Filename != p.last.Filename {
- // comment in a different file - separate with newlines (writeNewlines will limit the number)
- p.writeNewlines(10, true)
+ // comment in a different file - separate with newlines
+ p.writeByte('\f', maxNewlines)
return
}
@@ -291,82 +320,106 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment
}
// make sure there is at least one separator
if !hasSep {
+ sep := byte('\t')
if pos.Line == next.Line {
// next item is on the same line as the comment
// (which must be a /*-style comment): separate
// with a blank instead of a tab
- p.write([]byte{' '})
- } else {
- p.write(htab)
+ sep = ' '
}
+ p.writeByte(sep, 1)
}
} else {
// comment on a different line:
// separate with at least one line break
- if prev == nil {
- // first comment of a comment group
- j := 0
- for i, ch := range p.wsbuf {
- switch ch {
- case blank, vtab:
- // ignore any horizontal whitespace before line breaks
- p.wsbuf[i] = ignore
+ droppedLinebreak := false
+ j := 0
+ for i, ch := range p.wsbuf {
+ switch ch {
+ case blank, vtab:
+ // ignore any horizontal whitespace before line breaks
+ p.wsbuf[i] = ignore
+ continue
+ case indent:
+ // apply pending indentation
+ continue
+ case unindent:
+ // if this is not the last unindent, apply it,
+ // as it (likely) belongs to the last construct
+ // (e.g., a multi-line expression list) and is
+ // not part of closing a block
+ if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent {
continue
- case indent:
- // apply pending indentation
+ }
+ // if the next token is not a closing }, apply the unindent
+ // if it appears that the comment is aligned with the
+ // token; otherwise assume the unindent is part of a
+ // closing block and stop (this scenario appears with
+ // comments before a case label where the comments
+ // apply to the next case instead of the current one)
+ if tok != token.RBRACE && pos.Column == next.Column {
continue
- case unindent:
- // if the next token is a keyword, apply the outdent
- // if it appears that the comment is aligned with the
- // keyword; otherwise assume the outdent is part of a
- // closing block and stop (this scenario appears with
- // comments before a case label where the comments
- // apply to the next case instead of the current one)
- if isKeyword && pos.Column == next.Column {
- continue
- }
- case newline, formfeed:
- // TODO(gri): may want to keep formfeed info in some cases
- p.wsbuf[i] = ignore
}
- j = i
- break
+ case newline, formfeed:
+ p.wsbuf[i] = ignore
+ droppedLinebreak = prev == nil // record only if first comment of a group
+ }
+ j = i
+ break
+ }
+ p.writeWhitespace(j)
+
+ // determine number of linebreaks before the comment
+ n := 0
+ if pos.IsValid() && p.last.IsValid() {
+ n = pos.Line - p.last.Line
+ if n < 0 { // should never happen
+ n = 0
}
- p.writeWhitespace(j)
}
- // use formfeeds to break columns before a comment;
- // this is analogous to using formfeeds to separate
- // individual lines of /*-style comments - but make
- // sure there is at least one line break if the previous
- // comment was a line comment
- n := pos.Line - p.last.Line // if !pos.IsValid(), pos.Line == 0, and n will be 0
- if n <= 0 && prev != nil && prev.Text[1] == '/' {
+
+ // at the package scope level only (p.indent == 0),
+ // add an extra newline if we dropped one before:
+ // this preserves a blank line before documentation
+ // comments at the package scope level (issue 2570)
+ if p.indent == 0 && droppedLinebreak {
+ n++
+ }
+
+ // make sure there is at least one line break
+ // if the previous comment was a line comment
+ if n == 0 && prev != nil && prev.Text[1] == '/' {
n = 1
}
- p.writeNewlines(n, true)
+
+ if n > 0 {
+ // use formfeeds to break columns before a comment;
+ // this is analogous to using formfeeds to separate
+ // individual lines of /*-style comments
+ p.writeByte('\f', nlimit(n))
+ }
}
}
-// TODO(gri): It should be possible to convert the code below from using
-// []byte to string and in the process eliminate some conversions.
-
// Split comment text into lines
-func split(text []byte) [][]byte {
+// (using strings.Split(text, "\n") is significantly slower for
+// this specific purpose, as measured with: go test -bench=Print)
+func split(text string) []string {
// count lines (comment text never ends in a newline)
n := 1
- for _, c := range text {
- if c == '\n' {
+ for i := 0; i < len(text); i++ {
+ if text[i] == '\n' {
n++
}
}
// split
- lines := make([][]byte, n)
+ lines := make([]string, n)
n = 0
i := 0
- for j, c := range text {
- if c == '\n' {
+ for j := 0; j < len(text); j++ {
+ if text[j] == '\n' {
lines[n] = text[i:j] // exclude newline
i = j + 1 // discard newline
n++
@@ -377,16 +430,18 @@ func split(text []byte) [][]byte {
return lines
}
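
The hand-rolled split is behaviorally equivalent to the one-liner below; it is kept only because it benchmarks faster in this hot path:

	lines := strings.Split(text, "\n")
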
-func isBlank(s []byte) bool {
- for _, b := range s {
- if b > ' ' {
+// Returns true if s contains only white space
+// (only tabs and blanks can appear in the printer's context).
+func isBlank(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] > ' ' {
return false
}
}
return true
}
-func commonPrefix(a, b []byte) []byte {
+func commonPrefix(a, b string) string {
i := 0
for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') {
i++
@@ -394,7 +449,7 @@ func commonPrefix(a, b []byte) []byte {
return a[0:i]
}
-func stripCommonPrefix(lines [][]byte) {
+func stripCommonPrefix(lines []string) {
if len(lines) < 2 {
return // at most one line - nothing to do
}
@@ -418,19 +473,21 @@ func stripCommonPrefix(lines [][]byte) {
// Note that the first and last line are never empty (they
// contain the opening /* and closing */ respectively) and
// thus they can be ignored by the blank line check.
- var prefix []byte
+ var prefix string
if len(lines) > 2 {
+ first := true
for i, line := range lines[1 : len(lines)-1] {
switch {
case isBlank(line):
- lines[1+i] = nil // range starts at line 1
- case prefix == nil:
+ lines[1+i] = "" // range starts at line 1
+ case first:
prefix = commonPrefix(line, line)
+ first = false
default:
prefix = commonPrefix(prefix, line)
}
}
- } else { // len(lines) == 2
+ } else { // len(lines) == 2, lines cannot be blank (contain /* and */)
line := lines[1]
prefix = commonPrefix(line, line)
}
@@ -439,7 +496,7 @@ func stripCommonPrefix(lines [][]byte) {
* Check for vertical "line of stars" and correct prefix accordingly.
*/
lineOfStars := false
- if i := bytes.Index(prefix, []byte{'*'}); i >= 0 {
+ if i := strings.Index(prefix, "*"); i >= 0 {
// Line of stars present.
if i > 0 && prefix[i-1] == ' ' {
i-- // remove trailing blank from prefix so stars remain aligned
@@ -487,7 +544,7 @@ func stripCommonPrefix(lines [][]byte) {
}
// Shorten the computed common prefix by the length of
// suffix, if it is found as suffix of the prefix.
- if bytes.HasSuffix(prefix, suffix) {
+ if strings.HasSuffix(prefix, string(suffix)) {
prefix = prefix[0 : len(prefix)-len(suffix)]
}
}
@@ -497,19 +554,18 @@ func stripCommonPrefix(lines [][]byte) {
// with the opening /*, otherwise align the text with the other
// lines.
last := lines[len(lines)-1]
- closing := []byte("*/")
- i := bytes.Index(last, closing)
+ closing := "*/"
+ i := strings.Index(last, closing) // i >= 0 (closing is always present)
if isBlank(last[0:i]) {
// last line only contains closing */
- var sep []byte
if lineOfStars {
- // insert an aligning blank
- sep = []byte{' '}
+ closing = " */" // add blank to align final star
}
- lines[len(lines)-1] = bytes.Join([][]byte{prefix, closing}, sep)
+ lines[len(lines)-1] = prefix + closing
} else {
// last line contains more comment text - assume
- // it is aligned like the other lines
+ // it is aligned like the other lines and include
+ // in prefix computation
prefix = commonPrefix(prefix, last)
}
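
To see what commonPrefix and stripCommonPrefix accomplish, here is a minimal standalone sketch; the helper below mirrors the commonPrefix above, while the real code additionally handles lines of stars and the closing */ specially:

package main

import (
	"fmt"
	"strings"
)

// commonPrefix mirrors the printer helper: the longest shared prefix
// of a and b consisting only of white space and '*' characters.
func commonPrefix(a, b string) string {
	i := 0
	for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') {
		i++
	}
	return a[:i]
}

func main() {
	// Interior lines of a /*-style comment, as produced by split.
	lines := []string{"/* foo", "   * bar", "   * baz", "   */"}
	var prefix string
	for i, line := range lines[1 : len(lines)-1] {
		if i == 0 {
			prefix = commonPrefix(line, line)
		} else {
			prefix = commonPrefix(prefix, line)
		}
	}
	fmt.Printf("prefix: %q\n", prefix) // "   * "
	// Strip the computed prefix from the interior lines.
	for i, line := range lines[1 : len(lines)-1] {
		lines[1+i] = strings.TrimPrefix(line, prefix)
	}
	fmt.Printf("%q\n", lines) // ["/* foo" "bar" "baz" "   */"]
}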
@@ -523,29 +579,50 @@ func stripCommonPrefix(lines [][]byte) {
func (p *printer) writeComment(comment *ast.Comment) {
text := comment.Text
+ pos := p.posFor(comment.Pos())
+
+ const linePrefix = "//line "
+ if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) {
+ // possibly a line directive
+ ldir := strings.TrimSpace(text[len(linePrefix):])
+ if i := strings.LastIndex(ldir, ":"); i >= 0 {
+ if line, err := strconv.Atoi(ldir[i+1:]); err == nil && line > 0 {
+ // The line directive we are about to print changed
+ // the Filename and Line number used for subsequent
+ // tokens. We have to update our AST-space position
+ // accordingly and suspend indentation temporarily.
+ indent := p.indent
+ p.indent = 0
+ defer func() {
+ p.pos.Filename = ldir[:i]
+ p.pos.Line = line
+ p.pos.Column = 1
+ p.indent = indent
+ }()
+ }
+ }
+ }
// shortcut common case of //-style comments
if text[1] == '/' {
- p.writeItem(p.fset.Position(comment.Pos()), p.escape(text))
+ p.writeString(pos, text, true)
return
}
// for /*-style comments, print line by line and let the
// write function take care of the proper indentation
- lines := split([]byte(text))
+ lines := split(text)
stripCommonPrefix(lines)
// write comment lines, separated by formfeed,
// without a line break after the last line
- linebreak := formfeeds[0:1]
- pos := p.fset.Position(comment.Pos())
for i, line := range lines {
if i > 0 {
- p.write(linebreak)
+ p.writeByte('\f', 1)
pos = p.pos
}
if len(line) > 0 {
- p.writeItem(pos, p.escape(string(line)))
+ p.writeString(pos, line, true)
}
}
}
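
The directive handling added to writeComment above can be exercised in isolation; a minimal sketch of the same parsing, using a hypothetical comment text:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	const linePrefix = "//line "
	text := "//line foo.go:42" // hypothetical line directive

	if strings.HasPrefix(text, linePrefix) {
		ldir := strings.TrimSpace(text[len(linePrefix):])
		if i := strings.LastIndex(ldir, ":"); i >= 0 {
			if line, err := strconv.Atoi(ldir[i+1:]); err == nil && line > 0 {
				// the printer would now adjust p.pos to this
				// filename/line before printing further tokens
				fmt.Printf("filename=%q line=%d\n", ldir[:i], line) // "foo.go" 42
			}
		}
	}
}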
@@ -553,10 +630,11 @@ func (p *printer) writeComment(comment *ast.Comment) {
// writeCommentSuffix writes a line break after a comment if indicated
// and processes any leftover indentation information. If a line break
// is needed, the kind of break (newline vs formfeed) depends on the
-// pending whitespace. writeCommentSuffix returns true if a pending
-// formfeed was dropped from the whitespace buffer.
+// pending whitespace. The writeCommentSuffix result indicates if a
+// newline was written or if a formfeed was dropped from the whitespace
+// buffer.
//
-func (p *printer) writeCommentSuffix(needsLinebreak bool) (droppedFF bool) {
+func (p *printer) writeCommentSuffix(needsLinebreak bool) (wroteNewline, droppedFF bool) {
for i, ch := range p.wsbuf {
switch ch {
case blank, vtab:
@@ -569,6 +647,7 @@ func (p *printer) writeCommentSuffix(needsLinebreak bool) (droppedFF bool) {
// but remember if we dropped any formfeeds
if needsLinebreak {
needsLinebreak = false
+ wroteNewline = true
} else {
if ch == formfeed {
droppedFF = true
@@ -581,7 +660,8 @@ func (p *printer) writeCommentSuffix(needsLinebreak bool) (droppedFF bool) {
// make sure we have a line break
if needsLinebreak {
- p.write([]byte{'\n'})
+ p.writeByte('\n', 1)
+ wroteNewline = true
}
return
@@ -590,24 +670,27 @@ func (p *printer) writeCommentSuffix(needsLinebreak bool) (droppedFF bool) {
// intersperseComments consumes all comments that appear before the next token
// tok and prints it together with the buffered whitespace (i.e., the whitespace
// that needs to be written before the next token). A heuristic is used to mix
-// the comments and whitespace. intersperseComments returns true if a pending
-// formfeed was dropped from the whitespace buffer.
+// the comments and whitespace. The intersperseComments result indicates if a
+// newline was written or if a formfeed was dropped from the whitespace buffer.
//
-func (p *printer) intersperseComments(next token.Position, tok token.Token) (droppedFF bool) {
+func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
var last *ast.Comment
- for ; p.commentBefore(next); p.cindex++ {
- for _, c := range p.comments[p.cindex].List {
- p.writeCommentPrefix(p.fset.Position(c.Pos()), next, last, tok.IsKeyword())
+ for p.commentBefore(next) {
+ for _, c := range p.comment.List {
+ p.writeCommentPrefix(p.posFor(c.Pos()), next, last, c, tok)
p.writeComment(c)
last = c
}
+ p.nextComment()
}
if last != nil {
- if last.Text[1] == '*' && p.fset.Position(last.Pos()).Line == next.Line {
- // the last comment is a /*-style comment and the next item
- // follows on the same line: separate with an extra blank
- p.write([]byte{' '})
+ // if the last comment is a /*-style comment and the next item
+ // follows on the same line but is not a comma or a "closing"
+ // token, add an extra blank for separation
+ if last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line && tok != token.COMMA &&
+ tok != token.RPAREN && tok != token.RBRACK && tok != token.RBRACE {
+ p.writeByte(' ', 1)
}
// ensure that there is a line break after a //-style comment,
// before a closing '}' unless explicitly disabled, or at eof
@@ -621,13 +704,12 @@ func (p *printer) intersperseComments(next token.Position, tok token.Token) (dro
// no comment was written - we should never reach here since
// intersperseComments should not be called in that case
p.internalError("intersperseComments called without pending comments")
- return false
+ return
}
// writeWhitespace writes the first n whitespace entries.
func (p *printer) writeWhitespace(n int) {
// write entries
- var data [1]byte
for i := 0; i < n; i++ {
switch ch := p.wsbuf[i]; ch {
case ignore:
@@ -659,8 +741,7 @@ func (p *printer) writeWhitespace(n int) {
}
fallthrough
default:
- data[0] = byte(ch)
- p.write(data[0:])
+ p.writeByte(byte(ch), 1)
}
}
@@ -676,6 +757,13 @@ func (p *printer) writeWhitespace(n int) {
// ----------------------------------------------------------------------------
// Printing interface
+// nlimit limits n to maxNewlines.
+func nlimit(n int) int {
+ if n > maxNewlines {
+ n = maxNewlines
+ }
+ return n
+}
func mayCombine(prev token.Token, next byte) (b bool) {
switch prev {
@@ -707,21 +795,24 @@ func mayCombine(prev token.Token, next byte) (b bool) {
// printed, followed by the actual token.
//
func (p *printer) print(args ...interface{}) {
- for _, f := range args {
- next := p.pos // estimated position of next item
+ for _, arg := range args {
+ // information about the current arg
var data string
- var tok token.Token
+ var isLit bool
+ var impliedSemi bool // value for p.impliedSemi after this arg
- switch x := f.(type) {
+ switch x := arg.(type) {
case pmode:
// toggle printer mode
p.mode ^= x
+ continue
+
case whiteSpace:
if x == ignore {
// don't add ignore's to the buffer; they
// may screw up "correcting" unindents (see
// LabeledStmt)
- break
+ continue
}
i := len(p.wsbuf)
if i == cap(p.wsbuf) {
@@ -733,12 +824,27 @@ func (p *printer) print(args ...interface{}) {
}
p.wsbuf = p.wsbuf[0 : i+1]
p.wsbuf[i] = x
+ if x == newline || x == formfeed {
+ // newlines affect the current state (p.impliedSemi)
+ // and not the state after printing arg (impliedSemi)
+ // because comments can be interspersed before the arg
+ // in this case
+ p.impliedSemi = false
+ }
+ p.lastTok = token.ILLEGAL
+ continue
+
case *ast.Ident:
data = x.Name
- tok = token.IDENT
+ impliedSemi = true
+ p.lastTok = token.IDENT
+
case *ast.BasicLit:
- data = p.escape(x.Value)
- tok = x.Kind
+ data = x.Value
+ isLit = true
+ impliedSemi = true
+ p.lastTok = x.Kind
+
case token.Token:
s := x.String()
if mayCombine(p.lastTok, s[0]) {
@@ -755,50 +861,77 @@ func (p *printer) print(args ...interface{}) {
p.wsbuf[0] = ' '
}
data = s
- tok = x
+ // some keywords followed by a newline imply a semicolon
+ switch x {
+ case token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN,
+ token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE:
+ impliedSemi = true
+ }
+ p.lastTok = x
+
case token.Pos:
if x.IsValid() {
- next = p.fset.Position(x) // accurate position of next item
+ p.pos = p.posFor(x) // accurate position of next item
}
- tok = p.lastTok
+ continue
+
+ case string:
+ // incorrect AST - print error message
+ data = x
+ isLit = true
+ impliedSemi = true
+ p.lastTok = token.STRING
+
default:
- fmt.Fprintf(os.Stderr, "print: unsupported argument type %T\n", f)
+ fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg)
panic("go/printer type")
}
- p.lastTok = tok
- p.pos = next
-
- if data != "" {
- droppedFF := p.flush(next, tok)
-
- // intersperse extra newlines if present in the source
- // (don't do this in flush as it will cause extra newlines
- // at the end of a file) - use formfeeds if we dropped one
- // before
- p.writeNewlines(next.Line-p.pos.Line, droppedFF)
-
- p.writeItem(next, data)
+ // data != ""
+
+ next := p.pos // estimated/accurate position of next item
+ wroteNewline, droppedFF := p.flush(next, p.lastTok)
+
+ // intersperse extra newlines if present in the source and
+ // if they don't cause extra semicolons (don't do this in
+ // flush as it will cause extra newlines at the end of a file)
+ if !p.impliedSemi {
+ n := nlimit(next.Line - p.pos.Line)
+ // don't exceed maxNewlines if we already wrote one
+ if wroteNewline && n == maxNewlines {
+ n = maxNewlines - 1
+ }
+ if n > 0 {
+ ch := byte('\n')
+ if droppedFF {
+ ch = '\f' // use formfeed since we dropped one before
+ }
+ p.writeByte(ch, n)
+ impliedSemi = false
+ }
}
+
+ p.writeString(next, data, isLit)
+ p.impliedSemi = impliedSemi
}
}
-// commentBefore returns true iff the current comment occurs
-// before the next position in the source code.
+// commentBefore returns true iff the current comment group occurs
+// before the next position in the source code and printing it does
+// not introduce implicit semicolons.
//
-func (p *printer) commentBefore(next token.Position) bool {
- return p.cindex < len(p.comments) && p.fset.Position(p.comments[p.cindex].List[0].Pos()).Offset < next.Offset
+func (p *printer) commentBefore(next token.Position) (result bool) {
+ return p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline)
}
-// Flush prints any pending comments and whitespace occurring
-// textually before the position of the next token tok. Flush
-// returns true if a pending formfeed character was dropped
-// from the whitespace buffer as a result of interspersing
-// comments.
+// flush prints any pending comments and whitespace occurring textually
+// before the position of the next token tok. The flush result indicates
+// if a newline was written or if a formfeed was dropped from the whitespace
+// buffer.
//
-func (p *printer) flush(next token.Position, tok token.Token) (droppedFF bool) {
+func (p *printer) flush(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
if p.commentBefore(next) {
// if there are comments before the next item, intersperse them
- droppedFF = p.intersperseComments(next, tok)
+ wroteNewline, droppedFF = p.intersperseComments(next, tok)
} else {
// otherwise, write any leftover whitespace
p.writeWhitespace(len(p.wsbuf))
@@ -806,6 +939,101 @@ func (p *printer) flush(next token.Position, tok token.Token) (droppedFF bool) {
return
}
+// getDoc returns the ast.CommentGroup associated with n, if any.
+func getDoc(n ast.Node) *ast.CommentGroup {
+ switch n := n.(type) {
+ case *ast.Field:
+ return n.Doc
+ case *ast.ImportSpec:
+ return n.Doc
+ case *ast.ValueSpec:
+ return n.Doc
+ case *ast.TypeSpec:
+ return n.Doc
+ case *ast.GenDecl:
+ return n.Doc
+ case *ast.FuncDecl:
+ return n.Doc
+ case *ast.File:
+ return n.Doc
+ }
+ return nil
+}
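
getDoc merely centralizes access to the Doc field that these node types all carry. For illustration, a sketch reading the same fields through the public go/ast API:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

func main() {
	const src = `// Package p does things.
package p

// f does nothing.
func f() {}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Doc.Text()) // "Package p does things.\n"
	if fd, ok := f.Decls[0].(*ast.FuncDecl); ok {
		fmt.Println(fd.Doc.Text()) // "f does nothing.\n"
	}
}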
+
+func (p *printer) printNode(node interface{}) error {
+ // unpack *CommentedNode, if any
+ var comments []*ast.CommentGroup
+ if cnode, ok := node.(*CommentedNode); ok {
+ node = cnode.Node
+ comments = cnode.Comments
+ }
+
+ if comments != nil {
+ // commented node - restrict comment list to relevant range
+ n, ok := node.(ast.Node)
+ if !ok {
+ goto unsupported
+ }
+ beg := n.Pos()
+ end := n.End()
+ // if the node has associated documentation,
+ // include that commentgroup in the range
+ // (the comment list is sorted in the order
+ // of the comment appearance in the source code)
+ if doc := getDoc(n); doc != nil {
+ beg = doc.Pos()
+ }
+ // token.Pos values are global offsets, we can
+ // compare them directly
+ i := 0
+ for i < len(comments) && comments[i].End() < beg {
+ i++
+ }
+ j := i
+ for j < len(comments) && comments[j].Pos() < end {
+ j++
+ }
+ if i < j {
+ p.comments = comments[i:j]
+ }
+ } else if n, ok := node.(*ast.File); ok {
+ // use ast.File comments, if any
+ p.comments = n.Comments
+ }
+
+ // if there are no comments, use node comments
+ p.useNodeComments = p.comments == nil
+
+ // get comments ready for use
+ p.nextComment()
+
+ // format node
+ switch n := node.(type) {
+ case ast.Expr:
+ p.expr(n)
+ case ast.Stmt:
+ // A labeled statement will un-indent to position the
+ // label. Set indent to 1 so we don't get indent "underflow".
+ if _, labeledStmt := n.(*ast.LabeledStmt); labeledStmt {
+ p.indent = 1
+ }
+ p.stmt(n, false)
+ case ast.Decl:
+ p.decl(n)
+ case ast.Spec:
+ p.spec(n, 1, false)
+ case *ast.File:
+ p.file(n)
+ default:
+ goto unsupported
+ }
+
+ return nil
+
+unsupported:
+ return fmt.Errorf("go/printer: unsupported node type %T", node)
+}
+
// ----------------------------------------------------------------------------
// Trimmer
@@ -818,7 +1046,7 @@ func (p *printer) flush(next token.Position, tok token.Token) (droppedFF bool) {
type trimmer struct {
output io.Writer
state int
- space bytes.Buffer
+ space []byte
}
// trimmer is implemented as a state machine.
@@ -829,13 +1057,20 @@ const (
inText // inside text
)
+func (p *trimmer) resetSpace() {
+ p.state = inSpace
+ p.space = p.space[0:0]
+}
+
// Design note: It is tempting to eliminate extra blanks occurring in
// whitespace in this function as it could simplify some
// of the blanks logic in the node printing functions.
// However, this would mess up any formatting done by
// the tabwriter.
-func (p *trimmer) Write(data []byte) (n int, err os.Error) {
+var aNewline = []byte("\n")
+
+func (p *trimmer) Write(data []byte) (n int, err error) {
// invariants:
// p.state == inSpace:
// p.space is unwritten
@@ -851,37 +1086,34 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) {
case inSpace:
switch b {
case '\t', ' ':
- p.space.WriteByte(b) // WriteByte returns no errors
+ p.space = append(p.space, b)
case '\n', '\f':
- p.space.Reset() // discard trailing space
- _, err = p.output.Write(newlines[0:1]) // write newline
+ p.resetSpace() // discard trailing space
+ _, err = p.output.Write(aNewline)
case tabwriter.Escape:
- _, err = p.output.Write(p.space.Bytes())
+ _, err = p.output.Write(p.space)
p.state = inEscape
m = n + 1 // +1: skip tabwriter.Escape
default:
- _, err = p.output.Write(p.space.Bytes())
+ _, err = p.output.Write(p.space)
p.state = inText
m = n
}
case inEscape:
if b == tabwriter.Escape {
_, err = p.output.Write(data[m:n])
- p.state = inSpace
- p.space.Reset()
+ p.resetSpace()
}
case inText:
switch b {
case '\t', ' ':
_, err = p.output.Write(data[m:n])
- p.state = inSpace
- p.space.Reset()
- p.space.WriteByte(b) // WriteByte returns no errors
+ p.resetSpace()
+ p.space = append(p.space, b)
case '\n', '\f':
_, err = p.output.Write(data[m:n])
- p.state = inSpace
- p.space.Reset()
- _, err = p.output.Write(newlines[0:1]) // write newline
+ p.resetSpace()
+ _, err = p.output.Write(aNewline)
case tabwriter.Escape:
_, err = p.output.Write(data[m:n])
p.state = inEscape
@@ -899,8 +1131,7 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) {
switch p.state {
case inEscape, inText:
_, err = p.output.Write(data[m:n])
- p.state = inSpace
- p.space.Reset()
+ p.resetSpace()
}
return
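
A rough, whole-buffer approximation of what the trimmer does; the real implementation streams and must also leave tabwriter.Escape-delimited text untouched:

package main

import (
	"fmt"
	"strings"
)

// trimTrailing is a simplified, non-streaming analogue of the trimmer:
// it drops trailing tabs and blanks on each line and turns formfeeds
// into newlines.
func trimTrailing(s string) string {
	lines := strings.Split(strings.Replace(s, "\f", "\n", -1), "\n")
	for i, line := range lines {
		lines[i] = strings.TrimRight(line, " \t")
	}
	return strings.Join(lines, "\n")
}

func main() {
	fmt.Printf("%q\n", trimTrailing("x := 1 \t\nif x {\t\f}"))
	// "x := 1\nif x {\n}"
}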
@@ -909,29 +1140,41 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) {
// ----------------------------------------------------------------------------
// Public interface
-// General printing is controlled with these Config.Mode flags.
+// A Mode value is a set of flags (or 0). They control printing.
+type Mode uint
+
const (
- RawFormat uint = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored
+ RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored
TabIndent // use tabs for indentation independent of UseSpaces
UseSpaces // use spaces instead of tabs for alignment
+ SourcePos // emit //line comments to preserve original source positions
)
// A Config node controls the output of Fprint.
type Config struct {
- Mode uint // default: 0
+ Mode Mode // default: 0
Tabwidth int // default: 8
}
// fprint implements Fprint and takes a nodeSizes map for setting up the printer state.
-func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (int, os.Error) {
+func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (err error) {
+ // print node
+ var p printer
+ p.init(cfg, fset, nodeSizes)
+ if err = p.printNode(node); err != nil {
+ return
+ }
+ // print outstanding comments
+ p.impliedSemi = false // EOF acts like a newline
+ p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF)
+
// redirect output through a trimmer to eliminate trailing whitespace
// (Input to a tabwriter must be untrimmed since trailing tabs provide
// formatting information. The tabwriter could provide trimming
// functionality but no tabwriter is used when RawFormat is set.)
output = &trimmer{output: output}
- // setup tabwriter if needed and redirect output
- var tw *tabwriter.Writer
+ // redirect output through a tabwriter if necessary
if cfg.Mode&RawFormat == 0 {
minwidth := cfg.Tabwidth
@@ -946,67 +1189,42 @@ func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{
twmode |= tabwriter.TabIndent
}
- tw = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode)
- output = tw
+ output = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode)
}
- // setup printer and print node
- var p printer
- p.init(output, cfg, fset, nodeSizes)
- go func() {
- switch n := node.(type) {
- case ast.Expr:
- p.useNodeComments = true
- p.expr(n, ignoreMultiLine)
- case ast.Stmt:
- p.useNodeComments = true
- // A labeled statement will un-indent to position the
- // label. Set indent to 1 so we don't get indent "underflow".
- if _, labeledStmt := n.(*ast.LabeledStmt); labeledStmt {
- p.indent = 1
- }
- p.stmt(n, false, ignoreMultiLine)
- case ast.Decl:
- p.useNodeComments = true
- p.decl(n, ignoreMultiLine)
- case ast.Spec:
- p.useNodeComments = true
- p.spec(n, 1, false, ignoreMultiLine)
- case *ast.File:
- p.comments = n.Comments
- p.useNodeComments = n.Comments == nil
- p.file(n)
- default:
- p.errors <- fmt.Errorf("printer.Fprint: unsupported node type %T", n)
- runtime.Goexit()
- }
- p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF)
- p.errors <- nil // no errors
- }()
- err := <-p.errors // wait for completion of goroutine
+ // write printer result via tabwriter/trimmer to output
+ if _, err = output.Write(p.output); err != nil {
+ return
+ }
// flush tabwriter, if any
- if tw != nil {
- tw.Flush() // ignore errors
+ if tw, _ := (output).(*tabwriter.Writer); tw != nil {
+ err = tw.Flush()
}
- return p.written, err
+ return
+}
+
+// A CommentedNode bundles an AST node and corresponding comments.
+// It may be provided as argument to any of the Fprint functions.
+//
+type CommentedNode struct {
+ Node interface{} // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt
+ Comments []*ast.CommentGroup
}
-// Fprint "pretty-prints" an AST node to output and returns the number
-// of bytes written and an error (if any) for a given configuration cfg.
+// Fprint "pretty-prints" an AST node to output for a given configuration cfg.
// Position information is interpreted relative to the file set fset.
-// The node type must be *ast.File, or assignment-compatible to ast.Expr,
-// ast.Decl, ast.Spec, or ast.Stmt.
+// The node type must be *ast.File, *CommentedNode, or assignment-compatible
+// to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt.
//
-func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) (int, os.Error) {
+func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
return cfg.fprint(output, fset, node, make(map[ast.Node]int))
}
// Fprint "pretty-prints" an AST node to output.
// It calls Config.Fprint with default settings.
//
-func Fprint(output io.Writer, fset *token.FileSet, node interface{}) os.Error {
- _, err := (&Config{Tabwidth: 8}).Fprint(output, fset, node) // don't care about number of bytes written
- return err
+func Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
+ return (&Config{Tabwidth: 8}).Fprint(output, fset, node)
}
diff --git a/src/pkg/go/printer/printer_test.go b/src/pkg/go/printer/printer_test.go
index ff2d906b5..497d671f2 100644
--- a/src/pkg/go/printer/printer_test.go
+++ b/src/pkg/go/printer/printer_test.go
@@ -7,10 +7,10 @@ package printer
import (
"bytes"
"flag"
- "io/ioutil"
"go/ast"
"go/parser"
"go/token"
+ "io/ioutil"
"path/filepath"
"testing"
"time"
@@ -62,11 +62,18 @@ func runcheck(t *testing.T, source, golden string, mode checkMode) {
// format source
var buf bytes.Buffer
- if _, err := cfg.Fprint(&buf, fset, prog); err != nil {
+ if err := cfg.Fprint(&buf, fset, prog); err != nil {
t.Error(err)
}
res := buf.Bytes()
+ // formatted source must be valid
+ if _, err := parser.ParseFile(fset, "", res, 0); err != nil {
+ t.Error(err)
+ t.Logf("\n%s", res)
+ return
+ }
+
// update golden files if necessary
if *update {
if err := ioutil.WriteFile(golden, res, 0644); err != nil {
@@ -107,7 +114,7 @@ func check(t *testing.T, source, golden string, mode checkMode) {
// start a timer to produce a time-out signal
tc := make(chan int)
go func() {
- time.Sleep(10e9) // plenty of a safety margin, even for very slow machines
+ time.Sleep(10 * time.Second) // plenty of a safety margin, even for very slow machines
tc <- 0
}()
@@ -133,7 +140,7 @@ type entry struct {
mode checkMode
}
-// Use gotest -update to create/update the respective golden files.
+// Use go test -update to create/update the respective golden files.
var data = []entry{
{"empty.input", "empty.golden", 0},
{"comments.input", "comments.golden", 0},
@@ -147,15 +154,12 @@ var data = []entry{
}
func TestFiles(t *testing.T) {
- for i, e := range data {
+ for _, e := range data {
source := filepath.Join(dataDir, e.source)
golden := filepath.Join(dataDir, e.golden)
check(t, source, golden, e.mode)
// TODO(gri) check that golden is idempotent
//check(t, golden, golden, e.mode)
- if testing.Short() && i >= 3 {
- break
- }
}
}
@@ -171,14 +175,14 @@ func TestLineComments(t *testing.T) {
`
fset := token.NewFileSet()
- ast1, err1 := parser.ParseFile(fset, "", src, parser.ParseComments)
- if err1 != nil {
- panic(err1)
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ panic(err) // error in test
}
var buf bytes.Buffer
fset = token.NewFileSet() // use the wrong file set
- Fprint(&buf, fset, ast1)
+ Fprint(&buf, fset, f)
nlines := 0
for _, ch := range buf.Bytes() {
@@ -190,5 +194,219 @@ func TestLineComments(t *testing.T) {
const expected = 3
if nlines < expected {
t.Errorf("got %d, expected %d\n", nlines, expected)
+ t.Errorf("result:\n%s", buf.Bytes())
+ }
+}
+
+// Verify that the printer can be invoked during initialization.
+func init() {
+ const name = "foobar"
+ var buf bytes.Buffer
+ if err := Fprint(&buf, fset, &ast.Ident{Name: name}); err != nil {
+ panic(err) // error in test
+ }
+ // in debug mode, the result contains additional information;
+ // ignore it
+ if s := buf.String(); !debug && s != name {
+ panic("got " + s + ", want " + name)
+ }
+}
+
+// Verify that the printer doesn't crash if the AST contains BadXXX nodes.
+func TestBadNodes(t *testing.T) {
+ const src = "package p\n("
+ const res = "package p\nBadDecl\n"
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err == nil {
+ t.Error("expected illegal program") // error in test
+ }
+ var buf bytes.Buffer
+ Fprint(&buf, fset, f)
+ if buf.String() != res {
+ t.Errorf("got %q, expected %q", buf.String(), res)
+ }
+}
+
+// testComment verifies that f can be parsed again after printing it
+// with its first comment set to comment at any possible source offset.
+func testComment(t *testing.T, f *ast.File, srclen int, comment *ast.Comment) {
+ f.Comments[0].List[0] = comment
+ var buf bytes.Buffer
+ for offs := 0; offs <= srclen; offs++ {
+ buf.Reset()
+ // Printing f should result in a correct program no
+ // matter what the (incorrect) comment position is.
+ if err := Fprint(&buf, fset, f); err != nil {
+ t.Error(err)
+ }
+ if _, err := parser.ParseFile(fset, "", buf.Bytes(), 0); err != nil {
+ t.Fatalf("incorrect program for pos = %d:\n%s", comment.Slash, buf.String())
+ }
+ // Position information is just an offset.
+ // Move comment one byte down in the source.
+ comment.Slash++
+ }
+}
+
+// Verify that the printer always produces a correct program
+// even if the position information of comments introducing newlines
+// is incorrect.
+func TestBadComments(t *testing.T) {
+ const src = `
+// first comment - text and position changed by test
+package p
+import "fmt"
+const pi = 3.14 // rough circle
+var (
+ x, y, z int = 1, 2, 3
+ u, v float64
+)
+func fibo(n int) {
+ if n < 2 {
+ return n /* seed values */
+ }
+ return fibo(n-1) + fibo(n-2)
+}
+`
+
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ t.Error(err) // error in test
+ }
+
+ comment := f.Comments[0].List[0]
+ pos := comment.Pos()
+ if fset.Position(pos).Offset != 1 {
+ t.Error("expected offset 1") // error in test
+ }
+
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "//-style comment"})
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment */"})
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style \n comment */"})
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment \n\n\n */"})
+}
+
+type visitor chan *ast.Ident
+
+func (v visitor) Visit(n ast.Node) (w ast.Visitor) {
+ if ident, ok := n.(*ast.Ident); ok {
+ v <- ident
+ }
+ return v
+}
+
+// idents is an iterator that returns all idents in f via the result channel.
+func idents(f *ast.File) <-chan *ast.Ident {
+ v := make(visitor)
+ go func() {
+ ast.Walk(v, f)
+ close(v)
+ }()
+ return v
+}
+
+// identCount returns the number of identifiers found in f.
+func identCount(f *ast.File) int {
+ n := 0
+ for _ = range idents(f) {
+ n++
}
+ return n
+}
+
+// Verify that the SourcePos mode emits correct //line comments
+// by testing that position information for matching identifiers
+// is maintained.
+func TestSourcePos(t *testing.T) {
+ const src = `
+package p
+import ( "go/printer"; "math" )
+const pi = 3.14; var x = 0
+type t struct{ x, y, z int; u, v, w float32 }
+func (t *t) foo(a, b, c int) int {
+ return a*t.x + b*t.y +
+ // two extra lines here
+ // ...
+ c*t.z
+}
+`
+
+ // parse original
+ f1, err := parser.ParseFile(fset, "src", src, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // pretty-print original
+ var buf bytes.Buffer
+ err = (&Config{Mode: UseSpaces | SourcePos, Tabwidth: 8}).Fprint(&buf, fset, f1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // parse pretty printed original
+ // (//line comments must be interpreted even w/o parser.ParseComments set)
+ f2, err := parser.ParseFile(fset, "", buf.Bytes(), 0)
+ if err != nil {
+ t.Fatalf("%s\n%s", err, buf.Bytes())
+ }
+
+ // At this point the position information of identifiers in f2 should
+ // match the position information of corresponding identifiers in f1.
+
+ // number of identifiers must be > 0 (test should run) and must match
+ n1 := identCount(f1)
+ n2 := identCount(f2)
+ if n1 == 0 {
+ t.Fatal("got no idents")
+ }
+ if n2 != n1 {
+ t.Errorf("got %d idents; want %d", n2, n1)
+ }
+
+ // verify that all identifiers have correct line information
+ i2range := idents(f2)
+ for i1 := range idents(f1) {
+ i2 := <-i2range
+
+ if i2.Name != i1.Name {
+ t.Errorf("got ident %s; want %s", i2.Name, i1.Name)
+ }
+
+ l1 := fset.Position(i1.Pos()).Line
+ l2 := fset.Position(i2.Pos()).Line
+ if l2 != l1 {
+ t.Errorf("got line %d; want %d for %s", l2, l1, i1.Name)
+ }
+ }
+
+ if t.Failed() {
+ t.Logf("\n%s", buf.Bytes())
+ }
+}
+
+// TestX is a skeleton test that can be filled in for debugging one-off cases.
+// Do not remove.
+func TestX(t *testing.T) {
+ const src = `
+package p
+func _() {}
+`
+ // parse original
+ f, err := parser.ParseFile(fset, "src", src, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // pretty-print original
+ var buf bytes.Buffer
+ if err = (&Config{Mode: UseSpaces, Tabwidth: 8}).Fprint(&buf, fset, f); err != nil {
+ t.Fatal(err)
+ }
+
+ // parse pretty printed original
+ if _, err := parser.ParseFile(fset, "", buf.Bytes(), 0); err != nil {
+ t.Fatalf("%s\n%s", err, buf.Bytes())
+ }
+
}
diff --git a/src/pkg/go/printer/testdata/comments.golden b/src/pkg/go/printer/testdata/comments.golden
index 7b332252c..d9aa2d82f 100644
--- a/src/pkg/go/printer/testdata/comments.golden
+++ b/src/pkg/go/printer/testdata/comments.golden
@@ -106,7 +106,7 @@ type S3 struct {
var x int // x
var ()
-// This comment SHOULD be associated with the next declaration.
+// This comment SHOULD be associated with f0.
func f0() {
const pi = 3.14 // pi
var s1 struct{} /* an empty struct */ /* foo */
@@ -115,8 +115,9 @@ func f0() {
var s2 struct{} = struct{}{}
x := pi
}
+
//
-// NO SPACE HERE
+// This comment should be associated with f1, with one blank line before the comment.
//
func f1() {
f0()
@@ -167,6 +168,91 @@ func typeswitch(x interface{}) {
// this comment should not be indented
}
+//
+// Indentation of comments after possibly indented multi-line constructs
+// (test cases for issue 3147).
+//
+
+func _() {
+ s := 1 +
+ 2
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+ // should be indented like s
+ _ = 0
+}
+
+// Test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // should be aligned with f()
+ f()
+}
+
+// Modified test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // may not be aligned with f() (source is not aligned)
+ f()
+}
+
+//
+// Test cases for alignment of lines in general comments.
+//
+
func _() {
/* freestanding comment
aligned line
@@ -403,17 +489,18 @@ func _() {
*/
}
-// Some interesting interspersed comments
+// Some interesting interspersed comments.
+// See below for more common cases.
func _( /* this */ x /* is */ /* an */ int) {
}
-func _( /* no params */ ) {}
+func _( /* no params */) {}
func _() {
- f( /* no args */ )
+ f( /* no args */)
}
-func ( /* comment1 */ T /* comment2 */ ) _() {}
+func ( /* comment1 */ T /* comment2 */) _() {}
func _() { /* one-line functions with comments are formatted as multi-line functions */
}
@@ -424,11 +511,32 @@ func _() {
}
func _() {
- _ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */ }
+ _ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
+}
+
+// Test cases from issue 1542:
+// Comments must not be placed before commas and cause invalid programs.
+func _() {
+ var a = []int{1, 2 /*jasldf*/}
+ _ = a
+}
+
+func _() {
+ var a = []int{1, 2}/*jasldf
+ */
+
+ _ = a
+}
+
+func _() {
+ var a = []int{1, 2}// jasldf
+
+ _ = a
}
-// Comments immediately adjacent to punctuation (for which the go/printer
-// may only have estimated position information) must remain after the punctuation.
+// Comments immediately adjacent to punctuation followed by a newline
+// remain after the punctuation (looks better and permits alignment of
+// comments).
func _() {
_ = T{
1, // comment after comma
@@ -458,6 +566,54 @@ func _() {
}
}
+// If there is no newline following punctuation, commas move before the punctuation.
+// This way, commas interspersed in lists stay with the respective expression.
+func f(x /* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
+ f(x /* comment */, y)
+ f(x, /* comment */
+ y)
+ f(
+ x, /* comment */
+ )
+}
+
+func g(
+ x int, /* comment */
+) {
+}
+
+type _ struct {
+ a, b /* comment */, c int
+}
+
+type _ struct {
+ a, b /* comment */, c int
+}
+
+func _() {
+ for a /* comment */, b := range x {
+ }
+}
+
+// Print line directives correctly.
+
+// The following is a legal line directive.
+//line foo:1
+func _() {
+ _ = 0
+ // The following is a legal line directive. It must not be indented:
+//line foo:2
+ _ = 1
+
+ // The following is not a legal line directive (it doesn't start in column 1):
+ //line foo:2
+ _ = 2
+
+ // The following is not a legal line directive (negative line number):
+ //line foo:-3
+ _ = 3
+}
+
// Line comments with tabs
func _() {
var finput *bufio.Reader // input file
diff --git a/src/pkg/go/printer/testdata/comments.input b/src/pkg/go/printer/testdata/comments.input
index 2a9a86b68..6084b3fe4 100644
--- a/src/pkg/go/printer/testdata/comments.input
+++ b/src/pkg/go/printer/testdata/comments.input
@@ -107,7 +107,7 @@ var x int // x
var ()
-// This comment SHOULD be associated with the next declaration.
+// This comment SHOULD be associated with f0.
func f0() {
const pi = 3.14 // pi
var s1 struct {} /* an empty struct */ /* foo */
@@ -117,7 +117,7 @@ func f0() {
x := pi
}
//
-// NO SPACE HERE
+// This comment should be associated with f1, with one blank line before the comment.
//
func f1() {
f0()
@@ -130,7 +130,7 @@ func f1() {
func _() {
- // this comment should be properly indented
+// this comment should be properly indented
}
@@ -171,6 +171,91 @@ func typeswitch(x interface{}) {
// this comment should not be indented
}
+//
+// Indentation of comments after possibly indented multi-line constructs
+// (test cases for issue 3147).
+//
+
+func _() {
+ s := 1 +
+ 2
+// should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+// should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+ // should be indented like s
+ _ = 0
+}
+
+// Test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // should be aligned with f()
+ f()
+}
+
+// Modified test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // may not be aligned with f() (source is not aligned)
+ f()
+}
+
+//
+// Test cases for alignment of lines in general comments.
+//
+
func _() {
/* freestanding comment
aligned line
@@ -410,7 +495,8 @@ func _() {
}
-// Some interesting interspersed comments
+// Some interesting interspersed comments.
+// See below for more common cases.
func _(/* this */x/* is *//* an */ int) {
}
@@ -432,9 +518,30 @@ func _() {
_ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
}
+// Test cases from issue 1542:
+// Comments must not be placed before commas and cause invalid programs.
+func _() {
+ var a = []int{1, 2, /*jasldf*/
+ }
+ _ = a
+}
+
+func _() {
+ var a = []int{1, 2, /*jasldf
+ */
+ }
+ _ = a
+}
-// Comments immediately adjacent to punctuation (for which the go/printer
-// may only have estimated position information) must remain after the punctuation.
+func _() {
+ var a = []int{1, 2, // jasldf
+ }
+ _ = a
+}
+
+// Comments immediately adjacent to punctuation followed by a newline
+// remain after the punctuation (looks better and permits alignment of
+// comments).
func _() {
_ = T{
1, // comment after comma
@@ -466,6 +573,50 @@ func _() {
}
}
+// If there is no newline following punctuation, commas move before the punctuation.
+// This way, commas interspersed in lists stay with the respective expression.
+func f(x/* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
+ f(x /* comment */, y)
+ f(x /* comment */,
+ y)
+ f(
+ x /* comment */,
+ )
+}
+
+func g(
+ x int /* comment */,
+) {}
+
+type _ struct {
+ a, b /* comment */, c int
+}
+
+type _ struct { a, b /* comment */, c int }
+
+func _() {
+ for a /* comment */, b := range x {
+ }
+}
+
+// Print line directives correctly.
+
+// The following is a legal line directive.
+//line foo:1
+func _() {
+ _ = 0
+// The following is a legal line directive. It must not be indented:
+//line foo:2
+ _ = 1
+
+// The following is not a legal line directive (it doesn't start in column 1):
+ //line foo:2
+ _ = 2
+
+// The following is not a legal line directive (negative line number):
+//line foo:-3
+ _ = 3
+}
// Line comments with tabs
func _() {
diff --git a/src/pkg/go/printer/testdata/declarations.golden b/src/pkg/go/printer/testdata/declarations.golden
index 970533e8c..71ed32ed1 100644
--- a/src/pkg/go/printer/testdata/declarations.golden
+++ b/src/pkg/go/printer/testdata/declarations.golden
@@ -83,13 +83,13 @@ import (
// more import examples
import (
"xxx"
- "much longer name" // comment
- "short name" // comment
+ "much_longer_name" // comment
+ "short_name" // comment
)
import (
_ "xxx"
- "much longer name" // comment
+ "much_longer_name" // comment
)
import (
@@ -115,6 +115,18 @@ import _ "io"
var _ int
+// at least one empty line between declarations of the same kind
+// if there is associated documentation (was issue 2570)
+type T1 struct{}
+
+// T2 comment
+type T2 struct {
+} // should be a two-line struct
+
+// T3 comment
+type T2 struct {
+} // should be a two-line struct
+
// printing of constant literals
const (
_ = "foobar"
@@ -286,6 +298,15 @@ type _ struct {
}
}
+// no blank lines in empty structs and interfaces, but leave 1- or 2-line layout alone
+type _ struct{}
+type _ struct {
+}
+
+type _ interface{}
+type _ interface {
+}
+
// no tabs for single or ungrouped decls
func _() {
const xxxxxx = 0
@@ -545,6 +566,17 @@ var (
a4, b4, c4 int // this line should be indented
)
+// Test case from issue 3304: multi-line declarations must end
+// a formatting section and not influence indentation of the
+// next line.
+var (
+ minRefreshTimeSec = flag.Int64("min_refresh_time_sec", 604800,
+ "minimum time window between two refreshes for a given user.")
+ x = flag.Int64("refresh_user_rollout_percent", 100,
+ "temporary flag to ramp up the refresh user rpc")
+ aVeryLongVariableName = stats.GetVarInt("refresh-user-count")
+)
+
func _() {
var privateKey2 = &Block{Type: "RSA PRIVATE KEY",
Headers: map[string]string{},
@@ -692,56 +724,137 @@ func _(x ...chan int)
// these parameter lists must remain multi-line since they are multi-line in the source
func _(bool,
-int) {
+ int) {
}
func _(x bool,
-y int) {
+ y int) {
}
func _(x,
-y bool) {
+ y bool) {
}
func _(bool, // comment
-int) {
+ int) {
}
func _(x bool, // comment
-y int) {
+ y int) {
}
func _(x, // comment
-y bool) {
+ y bool) {
}
func _(bool, // comment
-// comment
-int) {
+ // comment
+ int) {
}
func _(x bool, // comment
-// comment
-y int) {
+ // comment
+ y int) {
}
func _(x, // comment
-// comment
-y bool) {
+ // comment
+ y bool) {
}
func _(bool,
-// comment
-int) {
+ // comment
+ int) {
}
func _(x bool,
-// comment
-y int) {
+ // comment
+ y int) {
}
func _(x,
-// comment
-y bool) {
+ // comment
+ y bool) {
}
func _(x, // comment
-y, // comment
-z bool) {
+ y, // comment
+ z bool) {
}
func _(x, // comment
-y, // comment
-z bool) {
+ y, // comment
+ z bool) {
}
func _(x int, // comment
-y float, // comment
-z bool) {
+ y float, // comment
+ z bool) {
+}
+
+// properly indent multi-line signatures
+func ManageStatus(in <-chan *Status, req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int) {
+}
+
+func MultiLineSignature0(
+ a, b, c int,
+) {
+}
+
+func MultiLineSignature1(
+ a, b, c int,
+ u, v, w float,
+) {
+}
+
+func MultiLineSignature2(
+ a, b,
+ c int,
+) {
+}
+
+func MultiLineSignature3(
+ a, b,
+ c int, u, v,
+ w float,
+ x ...int) {
+}
+
+func MultiLineSignature4(
+ a, b, c int,
+ u, v,
+ w float,
+ x ...int) {
+}
+
+func MultiLineSignature5(
+ a, b, c int,
+ u, v, w float,
+ p, q,
+ r string,
+ x ...int) {
+}
+
+// make sure it also works for methods in interfaces
+type _ interface {
+ MultiLineSignature0(
+ a, b, c int,
+ )
+
+ MultiLineSignature1(
+ a, b, c int,
+ u, v, w float,
+ )
+
+ MultiLineSignature2(
+ a, b,
+ c int,
+ )
+
+ MultiLineSignature3(
+ a, b,
+ c int, u, v,
+ w float,
+ x ...int)
+
+ MultiLineSignature4(
+ a, b, c int,
+ u, v,
+ w float,
+ x ...int)
+
+ MultiLineSignature5(
+ a, b, c int,
+ u, v, w float,
+ p, q,
+ r string,
+ x ...int)
}
diff --git a/src/pkg/go/printer/testdata/declarations.input b/src/pkg/go/printer/testdata/declarations.input
index c6134096b..d74cff25d 100644
--- a/src/pkg/go/printer/testdata/declarations.input
+++ b/src/pkg/go/printer/testdata/declarations.input
@@ -84,13 +84,13 @@ import (
// more import examples
import (
"xxx"
- "much longer name" // comment
- "short name" // comment
+ "much_longer_name" // comment
+ "short_name" // comment
)
import (
_ "xxx"
- "much longer name" // comment
+ "much_longer_name" // comment
)
import (
@@ -115,6 +115,20 @@ import (
import _ "io"
var _ int
+// at least one empty line between declarations of the same kind
+// if there is associated documentation (was issue 2570)
+type T1 struct{}
+// T2 comment
+type T2 struct {
+} // should be a two-line struct
+
+
+// T3 comment
+type T2 struct {
+
+
+} // should be a two-line struct
+
// printing of constant literals
const (
@@ -293,6 +307,18 @@ type _ struct {
}
+// no blank lines in empty structs and interfaces, but leave 1- or 2-line layout alone
+type _ struct{ }
+type _ struct {
+
+}
+
+type _ interface{ }
+type _ interface {
+
+}
+
+
// no tabs for single or ungrouped decls
func _() {
const xxxxxx = 0
@@ -551,6 +577,16 @@ c3, d3 int // this line should be indented
a4, b4, c4 int // this line should be indented
)
+// Test case from issue 3304: multi-line declarations must end
+// a formatting section and not influence indentation of the
+// next line.
+var (
+ minRefreshTimeSec = flag.Int64("min_refresh_time_sec", 604800,
+ "minimum time window between two refreshes for a given user.")
+ x = flag.Int64("refresh_user_rollout_percent", 100,
+ "temporary flag to ramp up the refresh user rpc")
+ aVeryLongVariableName = stats.GetVarInt("refresh-user-count")
+)
func _() {
var privateKey2 = &Block{Type: "RSA PRIVATE KEY",
@@ -755,3 +791,79 @@ func _(x int, // comment
y float, // comment
z bool) {
}
+
+
+// properly indent multi-line signatures
+func ManageStatus(in <-chan *Status, req <-chan Request,
+stat chan<- *TargetInfo,
+TargetHistorySize int) {
+}
+
+func MultiLineSignature0(
+a, b, c int,
+) {}
+
+func MultiLineSignature1(
+a, b, c int,
+u, v, w float,
+) {}
+
+func MultiLineSignature2(
+a, b,
+c int,
+) {}
+
+func MultiLineSignature3(
+a, b,
+c int, u, v,
+w float,
+ x ...int) {}
+
+func MultiLineSignature4(
+a, b, c int,
+u, v,
+w float,
+ x ...int) {}
+
+func MultiLineSignature5(
+a, b, c int,
+u, v, w float,
+p, q,
+r string,
+ x ...int) {}
+
+// make sure it also works for methods in interfaces
+type _ interface {
+MultiLineSignature0(
+a, b, c int,
+)
+
+MultiLineSignature1(
+a, b, c int,
+u, v, w float,
+)
+
+MultiLineSignature2(
+a, b,
+c int,
+)
+
+MultiLineSignature3(
+a, b,
+c int, u, v,
+w float,
+ x ...int)
+
+MultiLineSignature4(
+a, b, c int,
+u, v,
+w float,
+ x ...int)
+
+MultiLineSignature5(
+a, b, c int,
+u, v, w float,
+p, q,
+r string,
+ x ...int)
+}
diff --git a/src/pkg/go/printer/testdata/expressions.golden b/src/pkg/go/printer/testdata/expressions.golden
index d0cf24ad6..45fa4d97a 100644
--- a/src/pkg/go/printer/testdata/expressions.golden
+++ b/src/pkg/go/printer/testdata/expressions.golden
@@ -545,7 +545,7 @@ func _() {
// handle multiline argument list correctly
_ = new(T).
foo(
- 1).
+ 1).
foo(2)
_ = new(T).foo(
@@ -587,12 +587,12 @@ func _() {
_ = new(T).
Field.
Array[3+
- 4].
+ 4].
Table["foo"].
Blob.(*Type).
Slices[1:4].
Method(1, 2,
- 3).
+ 3).
Thingy
_ = a.b.c
@@ -625,3 +625,25 @@ func f() {
log.Fatal(err)
}
}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+ _ = append(s, a...)
+ _ = append(
+ s, a...)
+ _ = append(s,
+ a...)
+ _ = append(
+ s,
+ a...)
+ _ = append(s, a...,
+ )
+ _ = append(s,
+ a...,
+ )
+ _ = append(
+ s,
+ a...,
+ )
+}
diff --git a/src/pkg/go/printer/testdata/expressions.input b/src/pkg/go/printer/testdata/expressions.input
index d11314983..f545c6605 100644
--- a/src/pkg/go/printer/testdata/expressions.input
+++ b/src/pkg/go/printer/testdata/expressions.input
@@ -654,3 +654,25 @@ func f() {
log.Fatal(err)
}
}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+ _ = append(s, a...)
+ _ = append(
+ s, a...)
+ _ = append(s,
+ a...)
+ _ = append(
+ s,
+ a...)
+ _ = append(s, a...,
+ )
+ _ = append(s,
+ a...,
+ )
+ _ = append(
+ s,
+ a...,
+ )
+}
diff --git a/src/pkg/go/printer/testdata/expressions.raw b/src/pkg/go/printer/testdata/expressions.raw
index d7819a3ba..87a4b0083 100644
--- a/src/pkg/go/printer/testdata/expressions.raw
+++ b/src/pkg/go/printer/testdata/expressions.raw
@@ -545,7 +545,7 @@ func _() {
// handle multiline argument list correctly
_ = new(T).
foo(
- 1).
+ 1).
foo(2)
_ = new(T).foo(
@@ -587,12 +587,12 @@ func _() {
_ = new(T).
Field.
Array[3+
- 4].
+ 4].
Table["foo"].
Blob.(*Type).
Slices[1:4].
Method(1, 2,
- 3).
+ 3).
Thingy
_ = a.b.c
@@ -625,3 +625,25 @@ func f() {
log.Fatal(err)
}
}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+ _ = append(s, a...)
+ _ = append(
+ s, a...)
+ _ = append(s,
+ a...)
+ _ = append(
+ s,
+ a...)
+ _ = append(s, a...,
+ )
+ _ = append(s,
+ a...,
+ )
+ _ = append(
+ s,
+ a...,
+ )
+}
diff --git a/src/pkg/go/printer/testdata/linebreaks.golden b/src/pkg/go/printer/testdata/linebreaks.golden
index be780da67..006cf1718 100644
--- a/src/pkg/go/printer/testdata/linebreaks.golden
+++ b/src/pkg/go/printer/testdata/linebreaks.golden
@@ -220,4 +220,56 @@ testLoop:
}
}
+// Respect line breaks in function calls.
+func _() {
+ f(x)
+ f(x,
+ x)
+ f(x,
+ x,
+ )
+ f(
+ x,
+ x)
+ f(
+ x,
+ x,
+ )
+}
+
+// Respect line breaks in function declarations.
+func _(x T) {}
+func _(x T,
+ y T) {
+}
+func _(x T,
+ y T,
+) {
+}
+func _(
+ x T,
+ y T) {
+}
+func _(
+ x T,
+ y T,
+) {
+}
+
+// Example from issue 2597.
+func ManageStatus0(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int) {
+}
+
+func ManageStatus1(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int,
+) {
+}
+
// There should be exactly one linebreak after this comment.
diff --git a/src/pkg/go/printer/testdata/linebreaks.input b/src/pkg/go/printer/testdata/linebreaks.input
index 457b491e6..e782bb044 100644
--- a/src/pkg/go/printer/testdata/linebreaks.input
+++ b/src/pkg/go/printer/testdata/linebreaks.input
@@ -220,4 +220,52 @@ testLoop:
}
}
+// Respect line breaks in function calls.
+func _() {
+ f(x)
+ f(x,
+ x)
+ f(x,
+ x,
+ )
+ f(
+ x,
+ x)
+ f(
+ x,
+ x,
+ )
+}
+
+// Respect line breaks in function declarations.
+func _(x T) {}
+func _(x T,
+ y T) {}
+func _(x T,
+ y T,
+) {}
+func _(
+ x T,
+ y T) {}
+func _(
+ x T,
+ y T,
+) {}
+
+// Example from issue 2597.
+func ManageStatus0(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int) {
+}
+
+func ManageStatus1(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int,
+) {
+}
+
// There should be exactly one linebreak after this comment.
diff --git a/src/pkg/go/printer/testdata/parser.go b/src/pkg/go/printer/testdata/parser.go
index 2d27af499..dba8bbd43 100644
--- a/src/pkg/go/printer/testdata/parser.go
+++ b/src/pkg/go/printer/testdata/parser.go
@@ -6,7 +6,7 @@
// provided in a variety of forms (see the various Parse* functions); the
// output is an abstract syntax tree (AST) representing the Go source. The
// parser is invoked through one of the Parse* functions.
-//
+
package parser
import (
@@ -52,7 +52,7 @@ type parser struct {
// Non-syntactic parser control
exprLev int // < 0: in control clause, >= 0: in expression
- // Ordinary identifer scopes
+ // Ordinary identifier scopes
pkgScope *ast.Scope // pkgScope.Outer == nil
topScope *ast.Scope // top-most scope; may be pkgScope
unresolved []*ast.Ident // unresolved identifiers
@@ -1999,7 +1999,7 @@ func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
if par.NumFields() != 1 {
p.errorExpected(pos, "exactly one receiver")
// TODO determine a better range for BadExpr below
- par.List = []*ast.Field{&ast.Field{Type: &ast.BadExpr{pos, pos}}}
+ par.List = []*ast.Field{{Type: &ast.BadExpr{pos, pos}}}
return par
}
@@ -2008,7 +2008,7 @@ func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
base := deref(recv.Type)
if _, isIdent := base.(*ast.Ident); !isIdent {
p.errorExpected(base.Pos(), "(unqualified) identifier")
- par.List = []*ast.Field{&ast.Field{Type: &ast.BadExpr{recv.Pos(), recv.End()}}}
+ par.List = []*ast.Field{{Type: &ast.BadExpr{recv.Pos(), recv.End()}}}
}
return par
diff --git a/src/pkg/go/printer/testdata/statements.golden b/src/pkg/go/printer/testdata/statements.golden
index a6d85107f..4d70617bf 100644
--- a/src/pkg/go/printer/testdata/statements.golden
+++ b/src/pkg/go/printer/testdata/statements.golden
@@ -8,6 +8,125 @@ var expr bool
func use(x interface{}) {}
+// Formatting of multi-line return statements.
+func _f() {
+ return
+ return x, y, z
+ return T{}
+ return T{1, 2, 3},
+ x, y, z
+ return T{1, 2, 3},
+ x, y,
+ z
+ return T{1,
+ 2,
+ 3}
+ return T{1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ 3}
+ return T{
+ 1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ T{1, 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2,
+ 3},
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ },
+ nil
+ return x + y +
+ z
+ return func() {}
+ return func() {
+ _ = 0
+ }, T{
+ 1, 2,
+ }
+ return func() {
+ _ = 0
+ }
+ return func() T {
+ return T{
+ 1, 2,
+ }
+ }
+}
+
+// Formatting of multi-line returns: test cases from issue 1207.
+func F() (*T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ nil
+}
+
+func G() (*T, *T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ &T{
+ X: 3,
+ Y: 4,
+ },
+ nil
+}
+
+func _() interface{} {
+ return &fileStat{
+ name: basename(file.name),
+ size: mkSize(d.FileSizeHigh, d.FileSizeLow),
+ modTime: mkModTime(d.LastWriteTime),
+ mode: mkMode(d.FileAttributes),
+ sys: mkSysFromFI(&d),
+ }, nil
+}
+
// Formatting of if-statement headers.
func _() {
if true {
@@ -271,7 +390,6 @@ func _() {
// Known bug: The first use call may have more than one empty line before
// (see go/printer/nodes.go, func linebreak).
-
use(x)
if x < x {
@@ -386,7 +504,6 @@ L: // A comment on the same line as the label, followed by a single empty line.
// Known bug: There may be more than one empty line before MoreCode()
// (see go/printer/nodes.go, func linebreak).
-
MoreCode()
}
diff --git a/src/pkg/go/printer/testdata/statements.input b/src/pkg/go/printer/testdata/statements.input
index 86a753c5a..bd03bc98b 100644
--- a/src/pkg/go/printer/testdata/statements.input
+++ b/src/pkg/go/printer/testdata/statements.input
@@ -8,6 +8,125 @@ var expr bool
func use(x interface{}) {}
+// Formatting of multi-line return statements.
+func _f() {
+ return
+ return x, y, z
+ return T{}
+ return T{1, 2, 3},
+ x, y, z
+ return T{1, 2, 3},
+ x, y,
+ z
+ return T{1,
+ 2,
+ 3}
+ return T{1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ 3}
+ return T{
+ 1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ T{1, 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2,
+ 3},
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ },
+ nil
+ return x + y +
+ z
+ return func() {}
+ return func() {
+ _ = 0
+ }, T{
+ 1, 2,
+ }
+ return func() {
+ _ = 0
+ }
+ return func() T {
+ return T {
+ 1, 2,
+ }
+ }
+}
+
+// Formatting of multi-line returns: test cases from issue 1207.
+func F() (*T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ nil
+}
+
+func G() (*T, *T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ &T{
+ X: 3,
+ Y: 4,
+ },
+ nil
+}
+
+func _() interface{} {
+ return &fileStat{
+ name: basename(file.name),
+ size: mkSize(d.FileSizeHigh, d.FileSizeLow),
+ modTime: mkModTime(d.LastWriteTime),
+ mode: mkMode(d.FileAttributes),
+ sys: mkSysFromFI(&d),
+ }, nil
+}
+
// Formatting of if-statement headers.
func _() {
if true {}
diff --git a/src/pkg/go/scanner/Makefile b/src/pkg/go/scanner/Makefile
deleted file mode 100644
index 453faac00..000000000
--- a/src/pkg/go/scanner/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../../Make.inc
-
-TARG=go/scanner
-GOFILES=\
- errors.go\
- scanner.go\
-
-include ../../../Make.pkg
diff --git a/src/pkg/go/scanner/errors.go b/src/pkg/go/scanner/errors.go
index a0927e416..8a75a9650 100644
--- a/src/pkg/go/scanner/errors.go
+++ b/src/pkg/go/scanner/errors.go
@@ -8,48 +8,21 @@ import (
"fmt"
"go/token"
"io"
- "os"
"sort"
)
-// An implementation of an ErrorHandler may be provided to the Scanner.
-// If a syntax error is encountered and a handler was installed, Error
-// is called with a position and an error message. The position points
-// to the beginning of the offending token.
-//
-type ErrorHandler interface {
- Error(pos token.Position, msg string)
-}
-
-// ErrorVector implements the ErrorHandler interface. It maintains a list
-// of errors which can be retrieved with GetErrorList and GetError. The
-// zero value for an ErrorVector is an empty ErrorVector ready to use.
-//
-// A common usage pattern is to embed an ErrorVector alongside a
-// scanner in a data structure that uses the scanner. By passing a
-// reference to an ErrorVector to the scanner's Init call, default
-// error handling is obtained.
-//
-type ErrorVector struct {
- errors []*Error
-}
-
-// Reset resets an ErrorVector to no errors.
-func (h *ErrorVector) Reset() { h.errors = h.errors[:0] }
-
-// ErrorCount returns the number of errors collected.
-func (h *ErrorVector) ErrorCount() int { return len(h.errors) }
-
-// Within ErrorVector, an error is represented by an Error node. The
-// position Pos, if valid, points to the beginning of the offending
-// token, and the error condition is described by Msg.
+// In an ErrorList, an error is represented by an *Error.
+// The position Pos, if valid, points to the beginning of
+// the offending token, and the error condition is described
+// by Msg.
//
type Error struct {
Pos token.Position
Msg string
}
-func (e *Error) String() string {
+// Error implements the error interface.
+func (e Error) Error() string {
if e.Pos.Filename != "" || e.Pos.IsValid() {
// don't print "<unknown position>"
// TODO(gri) reconsider the semantics of Position.IsValid
@@ -58,9 +31,19 @@ func (e *Error) String() string {
return e.Msg
}
-// An ErrorList is a (possibly sorted) list of Errors.
+// ErrorList is a list of *Errors.
+// The zero value for an ErrorList is an empty ErrorList ready to use.
+//
type ErrorList []*Error
+// Add adds an Error with given position and error message to an ErrorList.
+func (p *ErrorList) Add(pos token.Position, msg string) {
+ *p = append(*p, &Error{pos, msg})
+}
+
+// Reset resets an ErrorList to no errors.
+func (p *ErrorList) Reset() { *p = (*p)[0:0] }
+
// ErrorList implements the sort Interface.
func (p ErrorList) Len() int { return len(p) }
func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
@@ -85,79 +68,54 @@ func (p ErrorList) Less(i, j int) bool {
return false
}
-func (p ErrorList) String() string {
- switch len(p) {
- case 0:
- return "unspecified error"
- case 1:
- return p[0].String()
- }
- return fmt.Sprintf("%s (and %d more errors)", p[0].String(), len(p)-1)
-}
-
-// These constants control the construction of the ErrorList
-// returned by GetErrors.
+// Sort sorts an ErrorList. *Error entries are sorted by position,
+// other errors are sorted by error message, and before any *Error
+// entry.
//
-const (
- Raw = iota // leave error list unchanged
- Sorted // sort error list by file, line, and column number
- NoMultiples // sort error list and leave only the first error per line
-)
-
-// GetErrorList returns the list of errors collected by an ErrorVector.
-// The construction of the ErrorList returned is controlled by the mode
-// parameter. If there are no errors, the result is nil.
-//
-func (h *ErrorVector) GetErrorList(mode int) ErrorList {
- if len(h.errors) == 0 {
- return nil
- }
-
- list := make(ErrorList, len(h.errors))
- copy(list, h.errors)
-
- if mode >= Sorted {
- sort.Sort(list)
- }
+func (p ErrorList) Sort() {
+ sort.Sort(p)
+}
- if mode >= NoMultiples {
- var last token.Position // initial last.Line is != any legal error line
- i := 0
- for _, e := range list {
- if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
- last = e.Pos
- list[i] = e
- i++
- }
+// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
+func (p *ErrorList) RemoveMultiples() {
+ sort.Sort(p)
+ var last token.Position // initial last.Line is != any legal error line
+ i := 0
+ for _, e := range *p {
+ if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
+ last = e.Pos
+ (*p)[i] = e
+ i++
}
- list = list[0:i]
}
-
- return list
+ (*p) = (*p)[0:i]
}
-// GetError is like GetErrorList, but it returns an os.Error instead
-// so that a nil result can be assigned to an os.Error variable and
-// remains nil.
-//
-func (h *ErrorVector) GetError(mode int) os.Error {
- if len(h.errors) == 0 {
- return nil
+// An ErrorList implements the error interface.
+func (p ErrorList) Error() string {
+ switch len(p) {
+ case 0:
+ return "no errors"
+ case 1:
+ return p[0].Error()
}
-
- return h.GetErrorList(mode)
+ return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
}
-// ErrorVector implements the ErrorHandler interface.
-func (h *ErrorVector) Error(pos token.Position, msg string) {
- h.errors = append(h.errors, &Error{pos, msg})
+// Err returns an error equivalent to this error list.
+// If the list is empty, Err returns nil.
+func (p ErrorList) Err() error {
+ if len(p) == 0 {
+ return nil
+ }
+ return p
}
// PrintError is a utility function that prints a list of errors to w,
// one error per line, if the err parameter is an ErrorList. Otherwise
// it prints the err string.
//
-func PrintError(w io.Writer, err os.Error) {
+func PrintError(w io.Writer, err error) {
if list, ok := err.(ErrorList); ok {
for _, e := range list {
fmt.Fprintf(w, "%s\n", e)
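Editor's note: the ErrorVector type removed above is replaced by methods on ErrorList itself. The following is a minimal usage sketch assembled only from the signatures in this diff (Add, Sort, RemoveMultiples, Err); the file name and messages are illustrative, and the printed output is indicative rather than verbatim.

	package main

	import (
		"fmt"
		"go/scanner"
		"go/token"
	)

	func main() {
		var list scanner.ErrorList

		// Add appends an *Error with the given position and message.
		list.Add(token.Position{Filename: "x.go", Line: 2, Column: 1}, "second")
		list.Add(token.Position{Filename: "x.go", Line: 1, Column: 1}, "first")
		list.Add(token.Position{Filename: "x.go", Line: 1, Column: 5}, "also first line")

		list.Sort()            // order entries by position
		list.RemoveMultiples() // keep only the first error per line

		// Err returns nil for an empty list, so the result can be
		// assigned directly to a plain error variable.
		if err := list.Err(); err != nil {
			fmt.Println(err) // e.g. "x.go:1:1: first (and 1 more errors)"
		}
	}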
diff --git a/src/pkg/go/scanner/example_test.go b/src/pkg/go/scanner/example_test.go
new file mode 100644
index 000000000..9004a4ad3
--- /dev/null
+++ b/src/pkg/go/scanner/example_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner_test
+
+import (
+ "fmt"
+ "go/scanner"
+ "go/token"
+)
+
+func ExampleScanner_Scan() {
+ // src is the input that we want to tokenize.
+ src := []byte("cos(x) + 1i*sin(x) // Euler")
+
+ // Initialize the scanner.
+ var s scanner.Scanner
+ fset := token.NewFileSet() // positions are relative to fset
+ file := fset.AddFile("", fset.Base(), len(src)) // register input "file"
+ s.Init(file, src, nil /* no error handler */, scanner.ScanComments)
+
+ // Repeated calls to Scan yield the token sequence found in the input.
+ for {
+ pos, tok, lit := s.Scan()
+ if tok == token.EOF {
+ break
+ }
+ fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
+ }
+
+ // output:
+ // 1:1 IDENT "cos"
+ // 1:4 ( ""
+ // 1:5 IDENT "x"
+ // 1:6 ) ""
+ // 1:8 + ""
+ // 1:10 IMAG "1i"
+ // 1:12 * ""
+ // 1:13 IDENT "sin"
+ // 1:16 ( ""
+ // 1:17 IDENT "x"
+ // 1:18 ) ""
+ // 1:20 ; "\n"
+ // 1:20 COMMENT "// Euler"
+}
diff --git a/src/pkg/go/scanner/scanner.go b/src/pkg/go/scanner/scanner.go
index 7f3dd2373..da508747a 100644
--- a/src/pkg/go/scanner/scanner.go
+++ b/src/pkg/go/scanner/scanner.go
@@ -2,21 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package scanner implements a scanner for Go source text. Takes a []byte as
-// source which can then be tokenized through repeated calls to the Scan
-// function. Typical use:
-//
-// var s Scanner
-// fset := token.NewFileSet() // position information is relative to fset
-// file := fset.AddFile(filename, fset.Base(), len(src)) // register file
-// s.Init(file, src, nil /* no error handler */, 0)
-// for {
-// pos, tok, lit := s.Scan()
-// if tok == token.EOF {
-// break
-// }
-// // do something here with pos, tok, and lit
-// }
+// Package scanner implements a scanner for Go source text.
+// It takes a []byte as source which can then be tokenized
+// through repeated calls to the Scan method.
//
package scanner
@@ -27,9 +15,16 @@ import (
"path/filepath"
"strconv"
"unicode"
- "utf8"
+ "unicode/utf8"
)
+// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
+// encountered and a handler was installed, the handler is called with a
+// position and an error message. The position points to the beginning of
+// the offending token.
+//
+type ErrorHandler func(pos token.Position, msg string)
+
// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use.
@@ -40,10 +35,10 @@ type Scanner struct {
dir string // directory portion of file.Name()
src []byte // source
err ErrorHandler // error reporting; or nil
- mode uint // scanning mode
+ mode Mode // scanning mode
// scanning state
- ch int // current character
+ ch rune // current character
offset int // character offset
rdOffset int // reading offset (position after current character)
lineOffset int // current line offset
@@ -53,94 +48,95 @@ type Scanner struct {
ErrorCount int // number of errors encountered
}
-// Read the next Unicode char into S.ch.
-// S.ch < 0 means end-of-file.
+// Read the next Unicode char into s.ch.
+// s.ch < 0 means end-of-file.
//
-func (S *Scanner) next() {
- if S.rdOffset < len(S.src) {
- S.offset = S.rdOffset
- if S.ch == '\n' {
- S.lineOffset = S.offset
- S.file.AddLine(S.offset)
+func (s *Scanner) next() {
+ if s.rdOffset < len(s.src) {
+ s.offset = s.rdOffset
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
}
- r, w := int(S.src[S.rdOffset]), 1
+ r, w := rune(s.src[s.rdOffset]), 1
switch {
case r == 0:
- S.error(S.offset, "illegal character NUL")
+ s.error(s.offset, "illegal character NUL")
case r >= 0x80:
// not ASCII
- r, w = utf8.DecodeRune(S.src[S.rdOffset:])
+ r, w = utf8.DecodeRune(s.src[s.rdOffset:])
if r == utf8.RuneError && w == 1 {
- S.error(S.offset, "illegal UTF-8 encoding")
+ s.error(s.offset, "illegal UTF-8 encoding")
}
}
- S.rdOffset += w
- S.ch = r
+ s.rdOffset += w
+ s.ch = r
} else {
- S.offset = len(S.src)
- if S.ch == '\n' {
- S.lineOffset = S.offset
- S.file.AddLine(S.offset)
+ s.offset = len(s.src)
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
}
- S.ch = -1 // eof
+ s.ch = -1 // eof
}
}
-// The mode parameter to the Init function is a set of flags (or 0).
+// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
+type Mode uint
+
const (
- ScanComments = 1 << iota // return comments as COMMENT tokens
- AllowIllegalChars // do not report an error for illegal chars
- InsertSemis // automatically insert semicolons
+ ScanComments Mode = 1 << iota // return comments as COMMENT tokens
+ dontInsertSemis // do not automatically insert semicolons - for testing only
)
-// Init prepares the scanner S to tokenize the text src by setting the
+// Init prepares the scanner s to tokenize the text src by setting the
// scanner at the beginning of src. The scanner uses the file set file
// for position information and it adds line information for each line.
// It is ok to re-use the same file when re-scanning it, since line
// information that is already present is ignored. Init causes a
// panic if the file size does not match the src size.
//
-// Calls to Scan will use the error handler err if they encounter a
+// Calls to Scan will invoke the error handler err if they encounter a
// syntax error and err is not nil. Also, for each error encountered,
// the Scanner field ErrorCount is incremented by one. The mode parameter
-// determines how comments, illegal characters, and semicolons are handled.
+// determines how comments are handled.
//
// Note that Init may call err if there is an error in the first character
// of the file.
//
-func (S *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode uint) {
+func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
// Explicitly initialize all fields since a scanner may be reused.
if file.Size() != len(src) {
- panic("file size does not match src len")
- }
- S.file = file
- S.dir, _ = filepath.Split(file.Name())
- S.src = src
- S.err = err
- S.mode = mode
-
- S.ch = ' '
- S.offset = 0
- S.rdOffset = 0
- S.lineOffset = 0
- S.insertSemi = false
- S.ErrorCount = 0
-
- S.next()
+ panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
+ }
+ s.file = file
+ s.dir, _ = filepath.Split(file.Name())
+ s.src = src
+ s.err = err
+ s.mode = mode
+
+ s.ch = ' '
+ s.offset = 0
+ s.rdOffset = 0
+ s.lineOffset = 0
+ s.insertSemi = false
+ s.ErrorCount = 0
+
+ s.next()
}
-func (S *Scanner) error(offs int, msg string) {
- if S.err != nil {
- S.err.Error(S.file.Position(S.file.Pos(offs)), msg)
+func (s *Scanner) error(offs int, msg string) {
+ if s.err != nil {
+ s.err(s.file.Position(s.file.Pos(offs)), msg)
}
- S.ErrorCount++
+ s.ErrorCount++
}
var prefix = []byte("//line ")
-func (S *Scanner) interpretLineComment(text []byte) {
+func (s *Scanner) interpretLineComment(text []byte) {
if bytes.HasPrefix(text, prefix) {
// get filename and line number, if any
if i := bytes.LastIndex(text, []byte{':'}); i > 0 {
@@ -149,303 +145,335 @@ func (S *Scanner) interpretLineComment(text []byte) {
filename := filepath.Clean(string(text[len(prefix):i]))
if !filepath.IsAbs(filename) {
// make filename relative to current directory
- filename = filepath.Join(S.dir, filename)
+ filename = filepath.Join(s.dir, filename)
}
// update scanner position
- S.file.AddLineInfo(S.lineOffset, filename, line-1) // -1 since comment applies to next line
+ s.file.AddLineInfo(s.lineOffset+len(text)+1, filename, line) // +len(text)+1 since comment applies to next line
}
}
}
}
-func (S *Scanner) scanComment() {
- // initial '/' already consumed; S.ch == '/' || S.ch == '*'
- offs := S.offset - 1 // position of initial '/'
+func (s *Scanner) scanComment() string {
+ // initial '/' already consumed; s.ch == '/' || s.ch == '*'
+ offs := s.offset - 1 // position of initial '/'
- if S.ch == '/' {
+ if s.ch == '/' {
//-style comment
- S.next()
- for S.ch != '\n' && S.ch >= 0 {
- S.next()
+ s.next()
+ for s.ch != '\n' && s.ch >= 0 {
+ s.next()
}
- if offs == S.lineOffset {
+ if offs == s.lineOffset {
// comment starts at the beginning of the current line
- S.interpretLineComment(S.src[offs:S.offset])
+ s.interpretLineComment(s.src[offs:s.offset])
}
- return
+ goto exit
}
/*-style comment */
- S.next()
- for S.ch >= 0 {
- ch := S.ch
- S.next()
- if ch == '*' && S.ch == '/' {
- S.next()
- return
+ s.next()
+ for s.ch >= 0 {
+ ch := s.ch
+ s.next()
+ if ch == '*' && s.ch == '/' {
+ s.next()
+ goto exit
}
}
- S.error(offs, "comment not terminated")
+ s.error(offs, "comment not terminated")
+
+exit:
+ return string(s.src[offs:s.offset])
}
-func (S *Scanner) findLineEnd() bool {
+func (s *Scanner) findLineEnd() bool {
// initial '/' already consumed
defer func(offs int) {
// reset scanner state to where it was upon calling findLineEnd
- S.ch = '/'
- S.offset = offs
- S.rdOffset = offs + 1
- S.next() // consume initial '/' again
- }(S.offset - 1)
+ s.ch = '/'
+ s.offset = offs
+ s.rdOffset = offs + 1
+ s.next() // consume initial '/' again
+ }(s.offset - 1)
// read ahead until a newline, EOF, or non-comment token is found
- for S.ch == '/' || S.ch == '*' {
- if S.ch == '/' {
+ for s.ch == '/' || s.ch == '*' {
+ if s.ch == '/' {
//-style comment always contains a newline
return true
}
/*-style comment: look for newline */
- S.next()
- for S.ch >= 0 {
- ch := S.ch
+ s.next()
+ for s.ch >= 0 {
+ ch := s.ch
if ch == '\n' {
return true
}
- S.next()
- if ch == '*' && S.ch == '/' {
- S.next()
+ s.next()
+ if ch == '*' && s.ch == '/' {
+ s.next()
break
}
}
- S.skipWhitespace() // S.insertSemi is set
- if S.ch < 0 || S.ch == '\n' {
+ s.skipWhitespace() // s.insertSemi is set
+ if s.ch < 0 || s.ch == '\n' {
return true
}
- if S.ch != '/' {
+ if s.ch != '/' {
// non-comment token
return false
}
- S.next() // consume '/'
+ s.next() // consume '/'
}
return false
}
-func isLetter(ch int) bool {
+func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}
-func isDigit(ch int) bool {
+func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
-func (S *Scanner) scanIdentifier() token.Token {
- offs := S.offset
- for isLetter(S.ch) || isDigit(S.ch) {
- S.next()
+func (s *Scanner) scanIdentifier() string {
+ offs := s.offset
+ for isLetter(s.ch) || isDigit(s.ch) {
+ s.next()
}
- return token.Lookup(S.src[offs:S.offset])
+ return string(s.src[offs:s.offset])
}
-func digitVal(ch int) int {
+func digitVal(ch rune) int {
switch {
case '0' <= ch && ch <= '9':
- return ch - '0'
+ return int(ch - '0')
case 'a' <= ch && ch <= 'f':
- return ch - 'a' + 10
+ return int(ch - 'a' + 10)
case 'A' <= ch && ch <= 'F':
- return ch - 'A' + 10
+ return int(ch - 'A' + 10)
}
return 16 // larger than any legal digit val
}
-func (S *Scanner) scanMantissa(base int) {
- for digitVal(S.ch) < base {
- S.next()
+func (s *Scanner) scanMantissa(base int) {
+ for digitVal(s.ch) < base {
+ s.next()
}
}
-func (S *Scanner) scanNumber(seenDecimalPoint bool) token.Token {
- // digitVal(S.ch) < 10
+func (s *Scanner) scanNumber(seenDecimalPoint bool) (token.Token, string) {
+ // digitVal(s.ch) < 10
+ offs := s.offset
tok := token.INT
if seenDecimalPoint {
+ offs--
tok = token.FLOAT
- S.scanMantissa(10)
+ s.scanMantissa(10)
goto exponent
}
- if S.ch == '0' {
+ if s.ch == '0' {
// int or float
- offs := S.offset
- S.next()
- if S.ch == 'x' || S.ch == 'X' {
+ offs := s.offset
+ s.next()
+ if s.ch == 'x' || s.ch == 'X' {
// hexadecimal int
- S.next()
- S.scanMantissa(16)
- if S.offset-offs <= 2 {
+ s.next()
+ s.scanMantissa(16)
+ if s.offset-offs <= 2 {
// only scanned "0x" or "0X"
- S.error(offs, "illegal hexadecimal number")
+ s.error(offs, "illegal hexadecimal number")
}
} else {
// octal int or float
seenDecimalDigit := false
- S.scanMantissa(8)
- if S.ch == '8' || S.ch == '9' {
+ s.scanMantissa(8)
+ if s.ch == '8' || s.ch == '9' {
// illegal octal int or float
seenDecimalDigit = true
- S.scanMantissa(10)
+ s.scanMantissa(10)
}
- if S.ch == '.' || S.ch == 'e' || S.ch == 'E' || S.ch == 'i' {
+ if s.ch == '.' || s.ch == 'e' || s.ch == 'E' || s.ch == 'i' {
goto fraction
}
// octal int
if seenDecimalDigit {
- S.error(offs, "illegal octal number")
+ s.error(offs, "illegal octal number")
}
}
goto exit
}
// decimal int or float
- S.scanMantissa(10)
+ s.scanMantissa(10)
fraction:
- if S.ch == '.' {
+ if s.ch == '.' {
tok = token.FLOAT
- S.next()
- S.scanMantissa(10)
+ s.next()
+ s.scanMantissa(10)
}
exponent:
- if S.ch == 'e' || S.ch == 'E' {
+ if s.ch == 'e' || s.ch == 'E' {
tok = token.FLOAT
- S.next()
- if S.ch == '-' || S.ch == '+' {
- S.next()
+ s.next()
+ if s.ch == '-' || s.ch == '+' {
+ s.next()
}
- S.scanMantissa(10)
+ s.scanMantissa(10)
}
- if S.ch == 'i' {
+ if s.ch == 'i' {
tok = token.IMAG
- S.next()
+ s.next()
}
exit:
- return tok
+ return tok, string(s.src[offs:s.offset])
}
-func (S *Scanner) scanEscape(quote int) {
- offs := S.offset
+func (s *Scanner) scanEscape(quote rune) {
+ offs := s.offset
var i, base, max uint32
- switch S.ch {
+ switch s.ch {
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
- S.next()
+ s.next()
return
case '0', '1', '2', '3', '4', '5', '6', '7':
i, base, max = 3, 8, 255
case 'x':
- S.next()
+ s.next()
i, base, max = 2, 16, 255
case 'u':
- S.next()
+ s.next()
i, base, max = 4, 16, unicode.MaxRune
case 'U':
- S.next()
+ s.next()
i, base, max = 8, 16, unicode.MaxRune
default:
- S.next() // always make progress
- S.error(offs, "unknown escape sequence")
+ s.next() // always make progress
+ s.error(offs, "unknown escape sequence")
return
}
var x uint32
- for ; i > 0 && S.ch != quote && S.ch >= 0; i-- {
- d := uint32(digitVal(S.ch))
+ for ; i > 0 && s.ch != quote && s.ch >= 0; i-- {
+ d := uint32(digitVal(s.ch))
if d >= base {
- S.error(S.offset, "illegal character in escape sequence")
+ s.error(s.offset, "illegal character in escape sequence")
break
}
x = x*base + d
- S.next()
+ s.next()
}
// in case of an error, consume remaining chars
- for ; i > 0 && S.ch != quote && S.ch >= 0; i-- {
- S.next()
+ for ; i > 0 && s.ch != quote && s.ch >= 0; i-- {
+ s.next()
}
if x > max || 0xd800 <= x && x < 0xe000 {
- S.error(offs, "escape sequence is invalid Unicode code point")
+ s.error(offs, "escape sequence is invalid Unicode code point")
}
}
-func (S *Scanner) scanChar() {
+func (s *Scanner) scanChar() string {
// '\'' opening already consumed
- offs := S.offset - 1
+ offs := s.offset - 1
n := 0
- for S.ch != '\'' {
- ch := S.ch
+ for s.ch != '\'' {
+ ch := s.ch
n++
- S.next()
+ s.next()
if ch == '\n' || ch < 0 {
- S.error(offs, "character literal not terminated")
+ s.error(offs, "character literal not terminated")
n = 1
break
}
if ch == '\\' {
- S.scanEscape('\'')
+ s.scanEscape('\'')
}
}
- S.next()
+ s.next()
if n != 1 {
- S.error(offs, "illegal character literal")
+ s.error(offs, "illegal character literal")
}
+
+ return string(s.src[offs:s.offset])
}
-func (S *Scanner) scanString() {
+func (s *Scanner) scanString() string {
// '"' opening already consumed
- offs := S.offset - 1
+ offs := s.offset - 1
- for S.ch != '"' {
- ch := S.ch
- S.next()
+ for s.ch != '"' {
+ ch := s.ch
+ s.next()
if ch == '\n' || ch < 0 {
- S.error(offs, "string not terminated")
+ s.error(offs, "string not terminated")
break
}
if ch == '\\' {
- S.scanEscape('"')
+ s.scanEscape('"')
}
}
- S.next()
+ s.next()
+
+ return string(s.src[offs:s.offset])
}
-func (S *Scanner) scanRawString() {
- // '`' opening already consumed
- offs := S.offset - 1
+func stripCR(b []byte) []byte {
+ c := make([]byte, len(b))
+ i := 0
+ for _, ch := range b {
+ if ch != '\r' {
+ c[i] = ch
+ i++
+ }
+ }
+ return c[:i]
+}
- for S.ch != '`' {
- ch := S.ch
- S.next()
+func (s *Scanner) scanRawString() string {
+ // '`' opening already consumed
+ offs := s.offset - 1
+
+ hasCR := false
+ for s.ch != '`' {
+ ch := s.ch
+ s.next()
+ if ch == '\r' {
+ hasCR = true
+ }
if ch < 0 {
- S.error(offs, "string not terminated")
+ s.error(offs, "string not terminated")
break
}
}
- S.next()
+ s.next()
+
+ lit := s.src[offs:s.offset]
+ if hasCR {
+ lit = stripCR(lit)
+ }
+
+ return string(lit)
}
-func (S *Scanner) skipWhitespace() {
- for S.ch == ' ' || S.ch == '\t' || S.ch == '\n' && !S.insertSemi || S.ch == '\r' {
- S.next()
+func (s *Scanner) skipWhitespace() {
+ for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !s.insertSemi || s.ch == '\r' {
+ s.next()
}
}
@@ -455,35 +483,35 @@ func (S *Scanner) skipWhitespace() {
// respectively. Otherwise, the result is tok0 if there was no other
// matching character, or tok2 if the matching character was ch2.
-func (S *Scanner) switch2(tok0, tok1 token.Token) token.Token {
- if S.ch == '=' {
- S.next()
+func (s *Scanner) switch2(tok0, tok1 token.Token) token.Token {
+ if s.ch == '=' {
+ s.next()
return tok1
}
return tok0
}
-func (S *Scanner) switch3(tok0, tok1 token.Token, ch2 int, tok2 token.Token) token.Token {
- if S.ch == '=' {
- S.next()
+func (s *Scanner) switch3(tok0, tok1 token.Token, ch2 rune, tok2 token.Token) token.Token {
+ if s.ch == '=' {
+ s.next()
return tok1
}
- if S.ch == ch2 {
- S.next()
+ if s.ch == ch2 {
+ s.next()
return tok2
}
return tok0
}
-func (S *Scanner) switch4(tok0, tok1 token.Token, ch2 int, tok2, tok3 token.Token) token.Token {
- if S.ch == '=' {
- S.next()
+func (s *Scanner) switch4(tok0, tok1 token.Token, ch2 rune, tok2, tok3 token.Token) token.Token {
+ if s.ch == '=' {
+ s.next()
return tok1
}
- if S.ch == ch2 {
- S.next()
- if S.ch == '=' {
- S.next()
+ if s.ch == ch2 {
+ s.next()
+ if s.ch == '=' {
+ s.next()
return tok3
}
return tok2
@@ -491,15 +519,24 @@ func (S *Scanner) switch4(tok0, tok1 token.Token, ch2 int, tok2, tok3 token.Toke
return tok0
}
-// Scan scans the next token and returns the token position,
-// the token, and the literal string corresponding to the
-// token. The source end is indicated by token.EOF.
+// Scan scans the next token and returns the token position, the token,
+// and its literal string if applicable. The source end is indicated by
+// token.EOF.
+//
+// If the returned token is a literal (token.IDENT, token.INT, token.FLOAT,
+// token.IMAG, token.CHAR, token.STRING) or token.COMMENT, the literal string
+// has the corresponding value.
//
// If the returned token is token.SEMICOLON, the corresponding
// literal string is ";" if the semicolon was present in the source,
// and "\n" if the semicolon was inserted because of a newline or
// at EOF.
//
+// If the returned token is token.ILLEGAL, the literal string is the
+// offending character.
+//
+// In all other cases, Scan returns an empty literal string.
+//
// For more tolerant parsing, Scan will return a valid token if
// possible even if a syntax error was encountered. Thus, even
// if the resulting token sequence contains no illegal tokens,
@@ -511,63 +548,63 @@ func (S *Scanner) switch4(tok0, tok1 token.Token, ch2 int, tok2, tok3 token.Toke
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
-func (S *Scanner) Scan() (token.Pos, token.Token, string) {
+func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
scanAgain:
- S.skipWhitespace()
+ s.skipWhitespace()
// current token start
- insertSemi := false
- offs := S.offset
- tok := token.ILLEGAL
+ pos = s.file.Pos(s.offset)
// determine token value
- switch ch := S.ch; {
+ insertSemi := false
+ switch ch := s.ch; {
case isLetter(ch):
- tok = S.scanIdentifier()
+ lit = s.scanIdentifier()
+ tok = token.Lookup(lit)
switch tok {
case token.IDENT, token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN:
insertSemi = true
}
case digitVal(ch) < 10:
insertSemi = true
- tok = S.scanNumber(false)
+ tok, lit = s.scanNumber(false)
default:
- S.next() // always make progress
+ s.next() // always make progress
switch ch {
case -1:
- if S.insertSemi {
- S.insertSemi = false // EOF consumed
- return S.file.Pos(offs), token.SEMICOLON, "\n"
+ if s.insertSemi {
+ s.insertSemi = false // EOF consumed
+ return pos, token.SEMICOLON, "\n"
}
tok = token.EOF
case '\n':
- // we only reach here if S.insertSemi was
+ // we only reach here if s.insertSemi was
// set in the first place and exited early
- // from S.skipWhitespace()
- S.insertSemi = false // newline consumed
- return S.file.Pos(offs), token.SEMICOLON, "\n"
+ // from s.skipWhitespace()
+ s.insertSemi = false // newline consumed
+ return pos, token.SEMICOLON, "\n"
case '"':
insertSemi = true
tok = token.STRING
- S.scanString()
+ lit = s.scanString()
case '\'':
insertSemi = true
tok = token.CHAR
- S.scanChar()
+ lit = s.scanChar()
case '`':
insertSemi = true
tok = token.STRING
- S.scanRawString()
+ lit = s.scanRawString()
case ':':
- tok = S.switch2(token.COLON, token.DEFINE)
+ tok = s.switch2(token.COLON, token.DEFINE)
case '.':
- if digitVal(S.ch) < 10 {
+ if digitVal(s.ch) < 10 {
insertSemi = true
- tok = S.scanNumber(true)
- } else if S.ch == '.' {
- S.next()
- if S.ch == '.' {
- S.next()
+ tok, lit = s.scanNumber(true)
+ } else if s.ch == '.' {
+ s.next()
+ if s.ch == '.' {
+ s.next()
tok = token.ELLIPSIS
}
} else {
@@ -577,6 +614,7 @@ scanAgain:
tok = token.COMMA
case ';':
tok = token.SEMICOLON
+ lit = ";"
case '(':
tok = token.LPAREN
case ')':
@@ -593,78 +631,74 @@ scanAgain:
insertSemi = true
tok = token.RBRACE
case '+':
- tok = S.switch3(token.ADD, token.ADD_ASSIGN, '+', token.INC)
+ tok = s.switch3(token.ADD, token.ADD_ASSIGN, '+', token.INC)
if tok == token.INC {
insertSemi = true
}
case '-':
- tok = S.switch3(token.SUB, token.SUB_ASSIGN, '-', token.DEC)
+ tok = s.switch3(token.SUB, token.SUB_ASSIGN, '-', token.DEC)
if tok == token.DEC {
insertSemi = true
}
case '*':
- tok = S.switch2(token.MUL, token.MUL_ASSIGN)
+ tok = s.switch2(token.MUL, token.MUL_ASSIGN)
case '/':
- if S.ch == '/' || S.ch == '*' {
+ if s.ch == '/' || s.ch == '*' {
// comment
- if S.insertSemi && S.findLineEnd() {
+ if s.insertSemi && s.findLineEnd() {
// reset position to the beginning of the comment
- S.ch = '/'
- S.offset = offs
- S.rdOffset = offs + 1
- S.insertSemi = false // newline consumed
- return S.file.Pos(offs), token.SEMICOLON, "\n"
+ s.ch = '/'
+ s.offset = s.file.Offset(pos)
+ s.rdOffset = s.offset + 1
+ s.insertSemi = false // newline consumed
+ return pos, token.SEMICOLON, "\n"
}
- S.scanComment()
- if S.mode&ScanComments == 0 {
+ lit = s.scanComment()
+ if s.mode&ScanComments == 0 {
// skip comment
- S.insertSemi = false // newline consumed
+ s.insertSemi = false // newline consumed
goto scanAgain
}
tok = token.COMMENT
} else {
- tok = S.switch2(token.QUO, token.QUO_ASSIGN)
+ tok = s.switch2(token.QUO, token.QUO_ASSIGN)
}
case '%':
- tok = S.switch2(token.REM, token.REM_ASSIGN)
+ tok = s.switch2(token.REM, token.REM_ASSIGN)
case '^':
- tok = S.switch2(token.XOR, token.XOR_ASSIGN)
+ tok = s.switch2(token.XOR, token.XOR_ASSIGN)
case '<':
- if S.ch == '-' {
- S.next()
+ if s.ch == '-' {
+ s.next()
tok = token.ARROW
} else {
- tok = S.switch4(token.LSS, token.LEQ, '<', token.SHL, token.SHL_ASSIGN)
+ tok = s.switch4(token.LSS, token.LEQ, '<', token.SHL, token.SHL_ASSIGN)
}
case '>':
- tok = S.switch4(token.GTR, token.GEQ, '>', token.SHR, token.SHR_ASSIGN)
+ tok = s.switch4(token.GTR, token.GEQ, '>', token.SHR, token.SHR_ASSIGN)
case '=':
- tok = S.switch2(token.ASSIGN, token.EQL)
+ tok = s.switch2(token.ASSIGN, token.EQL)
case '!':
- tok = S.switch2(token.NOT, token.NEQ)
+ tok = s.switch2(token.NOT, token.NEQ)
case '&':
- if S.ch == '^' {
- S.next()
- tok = S.switch2(token.AND_NOT, token.AND_NOT_ASSIGN)
+ if s.ch == '^' {
+ s.next()
+ tok = s.switch2(token.AND_NOT, token.AND_NOT_ASSIGN)
} else {
- tok = S.switch3(token.AND, token.AND_ASSIGN, '&', token.LAND)
+ tok = s.switch3(token.AND, token.AND_ASSIGN, '&', token.LAND)
}
case '|':
- tok = S.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR)
+ tok = s.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR)
default:
- if S.mode&AllowIllegalChars == 0 {
- S.error(offs, fmt.Sprintf("illegal character %#U", ch))
- }
- insertSemi = S.insertSemi // preserve insertSemi info
+ s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
+ insertSemi = s.insertSemi // preserve insertSemi info
+ tok = token.ILLEGAL
+ lit = string(ch)
}
}
-
- if S.mode&InsertSemis != 0 {
- S.insertSemi = insertSemi
+ if s.mode&dontInsertSemis == 0 {
+ s.insertSemi = insertSemi
}
- // TODO(gri): The scanner API should change such that the literal string
- // is only valid if an actual literal was scanned. This will
- // permit a more efficient implementation.
- return S.file.Pos(offs), tok, string(S.src[offs:S.offset])
+ return
}
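Editor's note: with this change ErrorHandler is a plain function type rather than an interface, and Scan returns the literal string only for literal tokens. A minimal sketch of the new calling convention, using made-up source text and file name:

	package main

	import (
		"fmt"
		"go/scanner"
		"go/token"
	)

	func main() {
		src := []byte("x := 3 // assign")

		fset := token.NewFileSet()
		file := fset.AddFile("example.go", fset.Base(), len(src))

		// The error handler is now just a func, not an interface value.
		eh := func(pos token.Position, msg string) {
			fmt.Printf("%s: %s\n", pos, msg)
		}

		var s scanner.Scanner
		s.Init(file, src, eh, scanner.ScanComments)

		for {
			pos, tok, lit := s.Scan()
			if tok == token.EOF {
				break
			}
			if lit == "" {
				// Scan returns an empty literal for non-literal tokens.
				lit = tok.String()
			}
			fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
		}
	}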
diff --git a/src/pkg/go/scanner/scanner_test.go b/src/pkg/go/scanner/scanner_test.go
index eb9e1cb81..06223e23b 100644
--- a/src/pkg/go/scanner/scanner_test.go
+++ b/src/pkg/go/scanner/scanner_test.go
@@ -83,6 +83,8 @@ var tokens = [...]elt{
"`",
literal,
},
+ {token.STRING, "`\r`", literal},
+ {token.STRING, "`foo\r\nbar`", literal},
// Operators and delimiters
{token.ADD, "+", operator},
@@ -175,13 +177,14 @@ var tokens = [...]elt{
const whitespace = " \t \n\n\n" // to separate tokens
-type testErrorHandler struct {
- t *testing.T
-}
-
-func (h *testErrorHandler) Error(pos token.Position, msg string) {
- h.t.Errorf("Error() called (msg = %s)", msg)
-}
+var source = func() []byte {
+ var src []byte
+ for _, t := range tokens {
+ src = append(src, t.lit...)
+ src = append(src, whitespace...)
+ }
+ return src
+}()
func newlineCount(s string) int {
n := 0
@@ -212,20 +215,31 @@ func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
// make source
- var src string
- for _, e := range tokens {
- src += e.lit + whitespace
- }
- src_linecount := newlineCount(src)
+ src_linecount := newlineCount(string(source))
whitespace_linecount := newlineCount(whitespace)
+ // error handler
+ eh := func(_ token.Position, msg string) {
+ t.Errorf("error handler called (msg = %s)", msg)
+ }
+
// verify scan
var s Scanner
- s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), &testErrorHandler{t}, ScanComments)
+ s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments|dontInsertSemis)
index := 0
- epos := token.Position{"", 0, 1, 1} // expected position
+ // epos is the expected position
+ epos := token.Position{
+ Filename: "",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ }
for {
pos, tok, lit := s.Scan()
+ if lit == "" {
+ // no literal value for non-literal tokens
+ lit = tok.String()
+ }
e := elt{token.EOF, "", special}
if index < len(tokens) {
e = tokens[index]
@@ -237,10 +251,18 @@ func TestScan(t *testing.T) {
}
checkPos(t, lit, pos, epos)
if tok != e.tok {
- t.Errorf("bad token for %q: got %s, expected %s", lit, tok.String(), e.tok.String())
+ t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
}
- if e.tok.IsLiteral() && lit != e.lit {
- t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
+ if e.tok.IsLiteral() {
+ // no CRs in raw string literals
+ elit := e.lit
+ if elit[0] == '`' {
+ elit = string(stripCR([]byte(elit)))
+ epos.Offset += len(e.lit) - len(lit) // correct position
+ }
+ if lit != elit {
+ t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
+ }
}
if tokenclass(tok) != e.class {
t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
@@ -262,7 +284,7 @@ func TestScan(t *testing.T) {
}
}
-func checkSemi(t *testing.T, line string, mode uint) {
+func checkSemi(t *testing.T, line string, mode Mode) {
var S Scanner
file := fset.AddFile("TestSemis", fset.Base(), len(line))
S.Init(file, []byte(line), nil, mode)
@@ -286,7 +308,7 @@ func checkSemi(t *testing.T, line string, mode uint) {
}
checkPos(t, line, pos, semiPos)
} else {
- t.Errorf("bad token for %q: got %s, expected ;", line, tok.String())
+ t.Errorf("bad token for %q: got %s, expected ;", line, tok)
}
} else if tok == token.SEMICOLON {
t.Errorf("bad token for %q: got ;, expected no ;", line)
@@ -420,14 +442,14 @@ var lines = []string{
func TestSemis(t *testing.T) {
for _, line := range lines {
- checkSemi(t, line, AllowIllegalChars|InsertSemis)
- checkSemi(t, line, AllowIllegalChars|InsertSemis|ScanComments)
+ checkSemi(t, line, 0)
+ checkSemi(t, line, ScanComments)
// if the input ended in newlines, the input must tokenize the
// same with or without those newlines
for i := len(line) - 1; i >= 0 && line[i] == '\n'; i-- {
- checkSemi(t, line[0:i], AllowIllegalChars|InsertSemis)
- checkSemi(t, line[0:i], AllowIllegalChars|InsertSemis|ScanComments)
+ checkSemi(t, line[0:i], 0)
+ checkSemi(t, line[0:i], ScanComments)
}
}
}
@@ -482,11 +504,16 @@ func TestLineComments(t *testing.T) {
// verify scan
var S Scanner
file := fset.AddFile(filepath.Join("dir", "TestLineComments"), fset.Base(), len(src))
- S.Init(file, []byte(src), nil, 0)
+ S.Init(file, []byte(src), nil, dontInsertSemis)
for _, s := range segs {
p, _, lit := S.Scan()
pos := file.Position(p)
- checkPos(t, lit, p, token.Position{s.filename, pos.Offset, s.line, pos.Column})
+ checkPos(t, lit, p, token.Position{
+ Filename: s.filename,
+ Offset: pos.Offset,
+ Line: s.line,
+ Column: pos.Column,
+ })
}
if S.ErrorCount != 0 {
@@ -501,7 +528,7 @@ func TestInit(t *testing.T) {
// 1st init
src1 := "if true { }"
f1 := fset.AddFile("src1", fset.Base(), len(src1))
- s.Init(f1, []byte(src1), nil, 0)
+ s.Init(f1, []byte(src1), nil, dontInsertSemis)
if f1.Size() != len(src1) {
t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
}
@@ -509,40 +536,19 @@ func TestInit(t *testing.T) {
s.Scan() // true
_, tok, _ := s.Scan() // {
if tok != token.LBRACE {
- t.Errorf("bad token: got %s, expected %s", tok.String(), token.LBRACE)
+ t.Errorf("bad token: got %s, expected %s", tok, token.LBRACE)
}
// 2nd init
src2 := "go true { ]"
f2 := fset.AddFile("src2", fset.Base(), len(src2))
- s.Init(f2, []byte(src2), nil, 0)
+ s.Init(f2, []byte(src2), nil, dontInsertSemis)
if f2.Size() != len(src2) {
t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
}
_, tok, _ = s.Scan() // go
if tok != token.GO {
- t.Errorf("bad token: got %s, expected %s", tok.String(), token.GO)
- }
-
- if s.ErrorCount != 0 {
- t.Errorf("found %d errors", s.ErrorCount)
- }
-}
-
-func TestIllegalChars(t *testing.T) {
- var s Scanner
-
- const src = "*?*$*@*"
- file := fset.AddFile("", fset.Base(), len(src))
- s.Init(file, []byte(src), &testErrorHandler{t}, AllowIllegalChars)
- for offs, ch := range src {
- pos, tok, lit := s.Scan()
- if poffs := file.Offset(pos); poffs != offs {
- t.Errorf("bad position for %s: got %d, expected %d", lit, poffs, offs)
- }
- if tok == token.ILLEGAL && lit != string(ch) {
- t.Errorf("bad token: got %s, expected %s", lit, string(ch))
- }
+ t.Errorf("bad token: got %s, expected %s", tok, token.GO)
}
if s.ErrorCount != 0 {
@@ -560,36 +566,37 @@ func TestStdErrorHander(t *testing.T) {
"//line File1:1\n" +
"@ @ @" // original file, line 1 again
- v := new(ErrorVector)
+ var list ErrorList
+ eh := func(pos token.Position, msg string) { list.Add(pos, msg) }
+
var s Scanner
- s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), v, 0)
+ s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), eh, dontInsertSemis)
for {
if _, tok, _ := s.Scan(); tok == token.EOF {
break
}
}
- list := v.GetErrorList(Raw)
+ if len(list) != s.ErrorCount {
+ t.Errorf("found %d errors, expected %d", len(list), s.ErrorCount)
+ }
+
if len(list) != 9 {
t.Errorf("found %d raw errors, expected 9", len(list))
PrintError(os.Stderr, list)
}
- list = v.GetErrorList(Sorted)
+ list.Sort()
if len(list) != 9 {
t.Errorf("found %d sorted errors, expected 9", len(list))
PrintError(os.Stderr, list)
}
- list = v.GetErrorList(NoMultiples)
+ list.RemoveMultiples()
if len(list) != 4 {
t.Errorf("found %d one-per-line errors, expected 4", len(list))
PrintError(os.Stderr, list)
}
-
- if v.ErrorCount() != s.ErrorCount {
- t.Errorf("found %d errors, expected %d", v.ErrorCount(), s.ErrorCount)
- }
}
type errorCollector struct {
@@ -598,16 +605,15 @@ type errorCollector struct {
pos token.Position // last error position encountered
}
-func (h *errorCollector) Error(pos token.Position, msg string) {
- h.cnt++
- h.msg = msg
- h.pos = pos
-}
-
func checkError(t *testing.T, src string, tok token.Token, pos int, err string) {
var s Scanner
var h errorCollector
- s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), &h, ScanComments)
+ eh := func(pos token.Position, msg string) {
+ h.cnt++
+ h.msg = msg
+ h.pos = pos
+ }
+ s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), eh, ScanComments|dontInsertSemis)
_, tok0, _ := s.Scan()
_, tok1, _ := s.Scan()
if tok0 != tok {
@@ -670,3 +676,20 @@ func TestScanErrors(t *testing.T) {
checkError(t, e.src, e.tok, e.pos, e.err)
}
}
+
+func BenchmarkScan(b *testing.B) {
+ b.StopTimer()
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(source))
+ var s Scanner
+ b.StartTimer()
+ for i := b.N - 1; i >= 0; i-- {
+ s.Init(file, source, nil, ScanComments)
+ for {
+ _, tok, _ := s.Scan()
+ if tok == token.EOF {
+ break
+ }
+ }
+ }
+}
diff --git a/src/pkg/go/token/Makefile b/src/pkg/go/token/Makefile
deleted file mode 100644
index 4a4e64dc8..000000000
--- a/src/pkg/go/token/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../../Make.inc
-
-TARG=go/token
-GOFILES=\
- position.go\
- token.go\
-
-include ../../../Make.pkg
diff --git a/src/pkg/go/token/position.go b/src/pkg/go/token/position.go
index c559e19f8..647d1b770 100644
--- a/src/pkg/go/token/position.go
+++ b/src/pkg/go/token/position.go
@@ -12,6 +12,9 @@ import (
"sync"
)
+// -----------------------------------------------------------------------------
+// Positions
+
// Position describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
@@ -81,82 +84,8 @@ func (p Pos) IsValid() bool {
return p != NoPos
}
-func searchFiles(a []*File, x int) int {
- return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
-}
-
-func (s *FileSet) file(p Pos) *File {
- if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
- return f
- }
- if i := searchFiles(s.files, int(p)); i >= 0 {
- f := s.files[i]
- // f.base <= int(p) by definition of searchFiles
- if int(p) <= f.base+f.size {
- s.last = f
- return f
- }
- }
- return nil
-}
-
-// File returns the file which contains the position p.
-// If no such file is found (for instance for p == NoPos),
-// the result is nil.
-//
-func (s *FileSet) File(p Pos) (f *File) {
- if p != NoPos {
- s.mutex.RLock()
- f = s.file(p)
- s.mutex.RUnlock()
- }
- return
-}
-
-func (f *File) position(p Pos) (pos Position) {
- offset := int(p) - f.base
- pos.Offset = offset
- pos.Filename, pos.Line, pos.Column = f.info(offset)
- return
-}
-
-// Position converts a Pos in the fileset into a general Position.
-func (s *FileSet) Position(p Pos) (pos Position) {
- if p != NoPos {
- // TODO(gri) consider optimizing the case where p
- // is in the last file added, or perhaps
- // looked at - will eliminate one level
- // of search
- s.mutex.RLock()
- if f := s.file(p); f != nil {
- pos = f.position(p)
- }
- s.mutex.RUnlock()
- }
- return
-}
-
-type lineInfo struct {
- offset int
- filename string
- line int
-}
-
-// AddLineInfo adds alternative file and line number information for
-// a given file offset. The offset must be larger than the offset for
-// the previously added alternative line info and smaller than the
-// file size; otherwise the information is ignored.
-//
-// AddLineInfo is typically used to register alternative position
-// information for //line filename:line comments in source files.
-//
-func (f *File) AddLineInfo(offset int, filename string, line int) {
- f.set.mutex.Lock()
- if i := len(f.infos); i == 0 || f.infos[i-1].offset < offset && offset < f.size {
- f.infos = append(f.infos, lineInfo{offset, filename, line})
- }
- f.set.mutex.Unlock()
-}
+// -----------------------------------------------------------------------------
+// File
// A File is a handle for a file belonging to a FileSet.
// A File has a name, size, and line offset table.
@@ -251,6 +180,32 @@ func (f *File) SetLinesForContent(content []byte) {
f.set.mutex.Unlock()
}
+// A lineInfo object describes alternative file and line number
+// information (such as provided via a //line comment in a .go
+// file) for a given file offset.
+type lineInfo struct {
+ // fields are exported to make them accessible to gob
+ Offset int
+ Filename string
+ Line int
+}
+
+// AddLineInfo adds alternative file and line number information for
+// a given file offset. The offset must be larger than the offset for
+// the previously added alternative line info and smaller than the
+// file size; otherwise the information is ignored.
+//
+// AddLineInfo is typically used to register alternative position
+// information for //line filename:line comments in source files.
+//
+func (f *File) AddLineInfo(offset int, filename string, line int) {
+ f.set.mutex.Lock()
+ if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
+ f.infos = append(f.infos, lineInfo{offset, filename, line})
+ }
+ f.set.mutex.Unlock()
+}
+
// Pos returns the Pos value for the given file offset;
// the offset must be <= f.Size().
// f.Pos(f.Offset(p)) == p.
@@ -281,43 +236,8 @@ func (f *File) Line(p Pos) int {
return f.Position(p).Line
}
-// Position returns the Position value for the given file position p;
-// p must be a Pos value in that file or NoPos.
-//
-func (f *File) Position(p Pos) (pos Position) {
- if p != NoPos {
- if int(p) < f.base || int(p) > f.base+f.size {
- panic("illegal Pos value")
- }
- pos = f.position(p)
- }
- return
-}
-
-func searchInts(a []int, x int) int {
- // This function body is a manually inlined version of:
- //
- // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
- //
- // With better compiler optimizations, this may not be needed in the
- // future, but at the moment this change improves the go/printer
- // benchmark performance by ~30%. This has a direct impact on the
- // speed of gofmt and thus seems worthwhile (2011-04-29).
- i, j := 0, len(a)
- for i < j {
- h := i + (j-i)/2 // avoid overflow when computing h
- // i ≤ h < j
- if a[h] <= x {
- i = h + 1
- } else {
- j = h
- }
- }
- return i - 1
-}
-
func searchLineInfos(a []lineInfo, x int) int {
- return sort.Search(len(a), func(i int) bool { return a[i].offset > x }) - 1
+ return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
}
// info returns the file name, line, and column number for a file offset.
@@ -330,15 +250,38 @@ func (f *File) info(offset int) (filename string, line, column int) {
// almost no files have extra line infos
if i := searchLineInfos(f.infos, offset); i >= 0 {
alt := &f.infos[i]
- filename = alt.filename
- if i := searchInts(f.lines, alt.offset); i >= 0 {
- line += alt.line - i - 1
+ filename = alt.Filename
+ if i := searchInts(f.lines, alt.Offset); i >= 0 {
+ line += alt.Line - i - 1
}
}
}
return
}
+func (f *File) position(p Pos) (pos Position) {
+ offset := int(p) - f.base
+ pos.Offset = offset
+ pos.Filename, pos.Line, pos.Column = f.info(offset)
+ return
+}
+
+// Position returns the Position value for the given file position p;
+// p must be a Pos value in that file or NoPos.
+//
+func (f *File) Position(p Pos) (pos Position) {
+ if p != NoPos {
+ if int(p) < f.base || int(p) > f.base+f.size {
+ panic("illegal Pos value")
+ }
+ pos = f.position(p)
+ }
+ return
+}
+
+// -----------------------------------------------------------------------------
+// FileSet
+
// A FileSet represents a set of source files.
// Methods of file sets are synchronized; multiple goroutines
// may invoke them concurrently.
@@ -402,23 +345,91 @@ func (s *FileSet) AddFile(filename string, base, size int) *File {
return f
}
-// Files returns the files added to the file set.
-func (s *FileSet) Files() <-chan *File {
- ch := make(chan *File)
- go func() {
- for i := 0; ; i++ {
- var f *File
- s.mutex.RLock()
- if i < len(s.files) {
- f = s.files[i]
- }
- s.mutex.RUnlock()
- if f == nil {
- break
- }
- ch <- f
+// Iterate calls f for the files in the file set in the order they were added
+// until f returns false.
+//
+func (s *FileSet) Iterate(f func(*File) bool) {
+ for i := 0; ; i++ {
+ var file *File
+ s.mutex.RLock()
+ if i < len(s.files) {
+ file = s.files[i]
}
- close(ch)
- }()
- return ch
+ s.mutex.RUnlock()
+ if file == nil || !f(file) {
+ break
+ }
+ }
+}
+
+func searchFiles(a []*File, x int) int {
+ return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
+}
+
+func (s *FileSet) file(p Pos) *File {
+ // common case: p is in last file
+ if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
+ return f
+ }
+ // p is not in last file - search all files
+ if i := searchFiles(s.files, int(p)); i >= 0 {
+ f := s.files[i]
+ // f.base <= int(p) by definition of searchFiles
+ if int(p) <= f.base+f.size {
+ s.last = f
+ return f
+ }
+ }
+ return nil
+}
+
+// File returns the file that contains the position p.
+// If no such file is found (for instance for p == NoPos),
+// the result is nil.
+//
+func (s *FileSet) File(p Pos) (f *File) {
+ if p != NoPos {
+ s.mutex.RLock()
+ f = s.file(p)
+ s.mutex.RUnlock()
+ }
+ return
+}
+
+// Position converts a Pos in the fileset into a general Position.
+func (s *FileSet) Position(p Pos) (pos Position) {
+ if p != NoPos {
+ s.mutex.RLock()
+ if f := s.file(p); f != nil {
+ pos = f.position(p)
+ }
+ s.mutex.RUnlock()
+ }
+ return
+}
+
+// -----------------------------------------------------------------------------
+// Helper functions
+
+func searchInts(a []int, x int) int {
+ // This function body is a manually inlined version of:
+ //
+ // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
+ //
+ // With better compiler optimizations, this may not be needed in the
+ // future, but at the moment this change improves the go/printer
+ // benchmark performance by ~30%. This has a direct impact on the
+ // speed of gofmt and thus seems worthwhile (2011-04-29).
+ // TODO(gri): Remove this when compilers have caught up.
+ i, j := 0, len(a)
+ for i < j {
+ h := i + (j-i)/2 // avoid overflow when computing h
+ // i ≤ h < j
+ if a[h] <= x {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ return i - 1
}
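Editor's note: the channel-based FileSet.Files accessor removed above is replaced by the callback-based Iterate. A short migration sketch with hypothetical file names:

	package main

	import (
		"fmt"
		"go/token"
	)

	func main() {
		fset := token.NewFileSet()
		fset.AddFile("a.go", fset.Base(), 100)
		fset.AddFile("b.go", fset.Base(), 200)

		// Before: for f := range fset.Files() { ... }
		// After: Iterate visits files in the order they were added
		// and stops when the callback returns false.
		fset.Iterate(func(f *token.File) bool {
			fmt.Println(f.Name(), f.Size())
			return true
		})
	}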
diff --git a/src/pkg/go/token/position_test.go b/src/pkg/go/token/position_test.go
index 30bec5991..160107df4 100644
--- a/src/pkg/go/token/position_test.go
+++ b/src/pkg/go/token/position_test.go
@@ -167,12 +167,13 @@ func TestFiles(t *testing.T) {
for i, test := range tests {
fset.AddFile(test.filename, fset.Base(), test.size)
j := 0
- for g := range fset.Files() {
- if g.Name() != tests[j].filename {
- t.Errorf("expected filename = %s; got %s", tests[j].filename, g.Name())
+ fset.Iterate(func(f *File) bool {
+ if f.Name() != tests[j].filename {
+ t.Errorf("expected filename = %s; got %s", tests[j].filename, f.Name())
}
j++
- }
+ return true
+ })
if j != i+1 {
t.Errorf("expected %d files; got %d", i+1, j)
}
diff --git a/src/pkg/go/token/serialize.go b/src/pkg/go/token/serialize.go
new file mode 100644
index 000000000..4adc8f9e3
--- /dev/null
+++ b/src/pkg/go/token/serialize.go
@@ -0,0 +1,56 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+type serializedFile struct {
+ // fields correspond 1:1 to fields with same (lower-case) name in File
+ Name string
+ Base int
+ Size int
+ Lines []int
+ Infos []lineInfo
+}
+
+type serializedFileSet struct {
+ Base int
+ Files []serializedFile
+}
+
+// Read calls decode to deserialize a file set into s; s must not be nil.
+func (s *FileSet) Read(decode func(interface{}) error) error {
+ var ss serializedFileSet
+ if err := decode(&ss); err != nil {
+ return err
+ }
+
+ s.mutex.Lock()
+ s.base = ss.Base
+ files := make([]*File, len(ss.Files))
+ for i := 0; i < len(ss.Files); i++ {
+ f := &ss.Files[i]
+ files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
+ }
+ s.files = files
+ s.last = nil
+ s.mutex.Unlock()
+
+ return nil
+}
+
+// Write calls encode to serialize the file set s.
+func (s *FileSet) Write(encode func(interface{}) error) error {
+ var ss serializedFileSet
+
+ s.mutex.Lock()
+ ss.Base = s.base
+ files := make([]serializedFile, len(s.files))
+ for i, f := range s.files {
+ files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
+ }
+ ss.Files = files
+ s.mutex.Unlock()
+
+ return encode(ss)
+}
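Editor's note: Read and Write deliberately take encode/decode funcs so the caller chooses the wire format. A minimal gob round-trip sketch, mirroring what checkSerialize in the test below does:

	package main

	import (
		"bytes"
		"encoding/gob"
		"go/token"
		"log"
	)

	func main() {
		fset := token.NewFileSet()
		fset.AddFile("a.go", fset.Base(), 100)

		// Write serializes the file set through any encoder; gob is one choice.
		var buf bytes.Buffer
		if err := fset.Write(gob.NewEncoder(&buf).Encode); err != nil {
			log.Fatal(err)
		}

		// Read rebuilds an equivalent file set from the same stream.
		restored := token.NewFileSet()
		if err := restored.Read(gob.NewDecoder(&buf).Decode); err != nil {
			log.Fatal(err)
		}
	}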
diff --git a/src/pkg/go/token/serialize_test.go b/src/pkg/go/token/serialize_test.go
new file mode 100644
index 000000000..4e925adb6
--- /dev/null
+++ b/src/pkg/go/token/serialize_test.go
@@ -0,0 +1,111 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "testing"
+)
+
+// equal returns nil if p and q describe the same file set;
+// otherwise it returns an error describing the discrepancy.
+func equal(p, q *FileSet) error {
+ if p == q {
+ // avoid deadlock if p == q
+ return nil
+ }
+
+ // not strictly needed for the test
+ p.mutex.Lock()
+ q.mutex.Lock()
+ defer q.mutex.Unlock()
+ defer p.mutex.Unlock()
+
+ if p.base != q.base {
+ return fmt.Errorf("different bases: %d != %d", p.base, q.base)
+ }
+
+ if len(p.files) != len(q.files) {
+ return fmt.Errorf("different number of files: %d != %d", len(p.files), len(q.files))
+ }
+
+ for i, f := range p.files {
+ g := q.files[i]
+ if f.set != p {
+ return fmt.Errorf("wrong fileset for %q", f.name)
+ }
+ if g.set != q {
+ return fmt.Errorf("wrong fileset for %q", g.name)
+ }
+ if f.name != g.name {
+ return fmt.Errorf("different filenames: %q != %q", f.name, g.name)
+ }
+ if f.base != g.base {
+ return fmt.Errorf("different base for %q: %d != %d", f.name, f.base, g.base)
+ }
+ if f.size != g.size {
+ return fmt.Errorf("different size for %q: %d != %d", f.name, f.size, g.size)
+ }
+ for j, l := range f.lines {
+ m := g.lines[j]
+ if l != m {
+ return fmt.Errorf("different offsets for %q", f.name)
+ }
+ }
+ for j, l := range f.infos {
+ m := g.infos[j]
+ if l.Offset != m.Offset || l.Filename != m.Filename || l.Line != m.Line {
+ return fmt.Errorf("different infos for %q", f.name)
+ }
+ }
+ }
+
+ // we don't care about .last - it's just a cache
+ return nil
+}
+
+func checkSerialize(t *testing.T, p *FileSet) {
+ var buf bytes.Buffer
+ encode := func(x interface{}) error {
+ return gob.NewEncoder(&buf).Encode(x)
+ }
+ if err := p.Write(encode); err != nil {
+ t.Errorf("writing fileset failed: %s", err)
+ return
+ }
+ q := NewFileSet()
+ decode := func(x interface{}) error {
+ return gob.NewDecoder(&buf).Decode(x)
+ }
+ if err := q.Read(decode); err != nil {
+ t.Errorf("reading fileset failed: %s", err)
+ return
+ }
+ if err := equal(p, q); err != nil {
+ t.Errorf("filesets not identical: %s", err)
+ }
+}
+
+func TestSerialization(t *testing.T) {
+ p := NewFileSet()
+ checkSerialize(t, p)
+ // add some files
+ for i := 0; i < 10; i++ {
+ f := p.AddFile(fmt.Sprintf("file%d", i), p.Base()+i, i*100)
+ checkSerialize(t, p)
+ // add some lines and alternative file infos
+ line := 1000
+ for offs := 0; offs < f.Size(); offs += 40 + i {
+ f.AddLine(offs)
+ if offs%7 == 0 {
+ f.AddLineInfo(offs, fmt.Sprintf("file%d", offs), line)
+ line += 33
+ }
+ }
+ checkSerialize(t, p)
+ }
+}
diff --git a/src/pkg/go/token/token.go b/src/pkg/go/token/token.go
index 557374052..84b6314d5 100644
--- a/src/pkg/go/token/token.go
+++ b/src/pkg/go/token/token.go
@@ -283,10 +283,8 @@ func init() {
// Lookup maps an identifier to its keyword token or IDENT (if not a keyword).
//
-func Lookup(ident []byte) Token {
- // TODO Maps with []byte key are illegal because []byte does not
- // support == . Should find a more efficient solution eventually.
- if tok, is_keyword := keywords[string(ident)]; is_keyword {
+func Lookup(ident string) Token {
+ if tok, is_keyword := keywords[ident]; is_keyword {
return tok
}
return IDENT
@@ -295,16 +293,16 @@ func Lookup(ident []byte) Token {
// Predicates
// IsLiteral returns true for tokens corresponding to identifiers
-// and basic type literals; returns false otherwise.
+// and basic type literals; it returns false otherwise.
//
func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
// IsOperator returns true for tokens corresponding to operators and
-// delimiters; returns false otherwise.
+// delimiters; it returns false otherwise.
//
func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
// IsKeyword returns true for tokens corresponding to keywords;
-// returns false otherwise.
+// it returns false otherwise.
//
func (tok Token) IsKeyword() bool { return keyword_beg < tok && tok < keyword_end }
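Editor's note: Lookup's parameter changed from []byte to string, so callers pass the identifier directly instead of converting. A two-line sketch:

	package main

	import (
		"fmt"
		"go/token"
	)

	func main() {
		fmt.Println(token.Lookup("func")) // keyword: token.FUNC
		fmt.Println(token.Lookup("x"))    // not a keyword: token.IDENT
	}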
diff --git a/src/pkg/go/typechecker/Makefile b/src/pkg/go/typechecker/Makefile
deleted file mode 100644
index 83af3ef4e..000000000
--- a/src/pkg/go/typechecker/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright 2010 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../../Make.inc
-
-TARG=go/typechecker
-GOFILES=\
- scope.go\
- type.go\
- typechecker.go\
- universe.go\
-
-include ../../../Make.pkg
diff --git a/src/pkg/go/typechecker/scope.go b/src/pkg/go/typechecker/scope.go
deleted file mode 100644
index d73d1a450..000000000
--- a/src/pkg/go/typechecker/scope.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// DEPRECATED FILE - WILL GO AWAY EVENTUALLY.
-//
-// Scope handling is now done in go/parser.
-// The functionality here is only present to
-// keep the typechecker running for now.
-
-package typechecker
-
-import "go/ast"
-
-func (tc *typechecker) openScope() *ast.Scope {
- tc.topScope = ast.NewScope(tc.topScope)
- return tc.topScope
-}
-
-func (tc *typechecker) closeScope() {
- tc.topScope = tc.topScope.Outer
-}
-
-// declInScope declares an object of a given kind and name in scope and sets the object's Decl and N fields.
-// It returns the newly allocated object. If an object with the same name already exists in scope, an error
-// is reported and the object is not inserted.
-func (tc *typechecker) declInScope(scope *ast.Scope, kind ast.ObjKind, name *ast.Ident, decl interface{}, n int) *ast.Object {
- obj := ast.NewObj(kind, name.Name)
- obj.Decl = decl
- //obj.N = n
- name.Obj = obj
- if name.Name != "_" {
- if alt := scope.Insert(obj); alt != nil {
- tc.Errorf(name.Pos(), "%s already declared at %s", name.Name, tc.fset.Position(alt.Pos()).String())
- }
- }
- return obj
-}
-
-// decl is the same as declInScope(tc.topScope, ...)
-func (tc *typechecker) decl(kind ast.ObjKind, name *ast.Ident, decl interface{}, n int) *ast.Object {
- return tc.declInScope(tc.topScope, kind, name, decl, n)
-}
-
-// find returns the object with the given name if visible in the current scope hierarchy.
-// If no such object is found, an error is reported and a bad object is returned instead.
-func (tc *typechecker) find(name *ast.Ident) (obj *ast.Object) {
- for s := tc.topScope; s != nil && obj == nil; s = s.Outer {
- obj = s.Lookup(name.Name)
- }
- if obj == nil {
- tc.Errorf(name.Pos(), "%s not declared", name.Name)
- obj = ast.NewObj(ast.Bad, name.Name)
- }
- name.Obj = obj
- return
-}
-
-// findField returns the object with the given name if visible in the type's scope.
-// If no such object is found, an error is reported and a bad object is returned instead.
-func (tc *typechecker) findField(typ *Type, name *ast.Ident) (obj *ast.Object) {
- // TODO(gri) This is simplistic at the moment and ignores anonymous fields.
- obj = typ.Scope.Lookup(name.Name)
- if obj == nil {
- tc.Errorf(name.Pos(), "%s not declared", name.Name)
- obj = ast.NewObj(ast.Bad, name.Name)
- }
- return
-}
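
The deleted scope helpers are thin wrappers around go/ast's Scope type: a scope is a lookup table plus a pointer to its enclosing scope, so openScope and closeScope are just pushing and popping that pointer. A self-contained sketch of the same chain walk (the find closure is illustrative, not part of the deleted API):

package main

import (
	"fmt"
	"go/ast"
)

func main() {
	// The universe scope is outermost; openScope links a new scope to it.
	universe := ast.NewScope(nil)
	universe.Insert(ast.NewObj(ast.Typ, "int"))

	top := ast.NewScope(universe) // openScope
	top.Insert(ast.NewObj(ast.Var, "x"))

	// find walks outward through the Outer links, like typechecker.find.
	find := func(name string) *ast.Object {
		for s := top; s != nil; s = s.Outer {
			if obj := s.Lookup(name); obj != nil {
				return obj
			}
		}
		return nil
	}
	fmt.Println(find("x").Kind, find("int").Kind, find("y")) // var type <nil>
}
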
diff --git a/src/pkg/go/typechecker/testdata/test0.src b/src/pkg/go/typechecker/testdata/test0.src
deleted file mode 100644
index 4e317f214..000000000
--- a/src/pkg/go/typechecker/testdata/test0.src
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// type declarations
-
-package P0
-
-type (
- B bool
- I int32
- A [10]P
- T struct {
- x, y P
- }
- P *T
- R *R
- F func(A) I
- Y interface {
- f(A) I
- }
- S []P
- M map[I]F
- C chan<- I
-)
-
-type (
- a/* ERROR "illegal cycle" */ a
- a/* ERROR "already declared" */ int
-
- b/* ERROR "illegal cycle" */ c
- c d
- d e
- e b /* ERROR "not a type" */
-
- t *t
-
- U V
- V W
- W *U
-
- P1 *S2
- P2 P1
-
- S1 struct {
- a, b, c int
- u, v, a/* ERROR "already declared" */ float
- }
- S2/* ERROR "illegal cycle" */ struct {
- x S2
- }
-
- L1 []L1
- L2 []int
-
- A1 [10]int
- A2/* ERROR "illegal cycle" */ [10]A2
- A3/* ERROR "illegal cycle" */ [10]struct {
- x A4
- }
- A4 [10]A3
-
- F1 func()
- F2 func(x, y, z float)
- F3 func(x, y, x /* ERROR "already declared" */ float)
- F4 func() (x, y, x /* ERROR "already declared" */ float)
- F5 func(x int) (x /* ERROR "already declared" */ float)
-
- I1 interface{}
- I2 interface {
- m1()
- }
- I3 interface {
- m1()
- m1 /* ERROR "already declared" */ ()
- }
- I4 interface {
- m1(x, y, x /* ERROR "already declared" */ float)
- m2() (x, y, x /* ERROR "already declared" */ float)
- m3(x int) (x /* ERROR "already declared" */ float)
- }
- I5 interface {
- m1(I5)
- }
-
- C1 chan int
- C2 <-chan int
- C3 chan<- C3
-
- M1 map[Last]string
- M2 map[string]M2
-
- Last int
-)
diff --git a/src/pkg/go/typechecker/testdata/test1.src b/src/pkg/go/typechecker/testdata/test1.src
deleted file mode 100644
index b5531fb9f..000000000
--- a/src/pkg/go/typechecker/testdata/test1.src
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// const and var declarations
-
-package P1
-
-const (
- c1 = 0
- c2 int = 0
- c3, c4 = 0
-)
diff --git a/src/pkg/go/typechecker/testdata/test3.src b/src/pkg/go/typechecker/testdata/test3.src
deleted file mode 100644
index 2e1a9fa8f..000000000
--- a/src/pkg/go/typechecker/testdata/test3.src
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package P3
-
-// function and method signatures
-
-func _() {}
-func _() {}
-func _(x, x /* ERROR "already declared" */ int) {}
-
-func f() {}
-func f /* ERROR "already declared" */ () {}
-
-func (*foo /* ERROR "invalid receiver" */ ) m() {}
-func (bar /* ERROR "not a type" */ ) m() {}
-
-func f1(x, _, _ int) (_, _ float) {}
-func f2(x, y, x /* ERROR "already declared" */ int) {}
-func f3(x, y int) (a, b, x /* ERROR "already declared" */ int) {}
-
-func (x *T) m1() {}
-func (x *T) m1 /* ERROR "already declared" */ () {}
-func (x T) m1 /* ERROR "already declared" */ () {}
-func (T) m1 /* ERROR "already declared" */ () {}
-
-func (x *T) m2(u, x /* ERROR "already declared" */ int) {}
-func (x *T) m3(a, b, c int) (u, x /* ERROR "already declared" */ int) {}
-// The following are disabled for now because the typechecker
-// is in the process of being rewritten and cannot handle them
-// at the moment
-//func (T) _(x, x /* "already declared" */ int) {}
-//func (T) _() (x, x /* "already declared" */ int) {}
-
-//func (PT) _() {}
-
-var bar int
-
-type T struct{}
-type PT (T)
diff --git a/src/pkg/go/typechecker/testdata/test4.src b/src/pkg/go/typechecker/testdata/test4.src
deleted file mode 100644
index 94d3558f9..000000000
--- a/src/pkg/go/typechecker/testdata/test4.src
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Constant declarations
-
-package P4
-
-const (
- c0 = 0
-)
diff --git a/src/pkg/go/typechecker/type.go b/src/pkg/go/typechecker/type.go
deleted file mode 100644
index 1b88eb54b..000000000
--- a/src/pkg/go/typechecker/type.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typechecker
-
-import "go/ast"
-
-// A Type represents a Go type.
-type Type struct {
- Form Form
- Obj *ast.Object // corresponding type name, or nil
- Scope *ast.Scope // fields and methods, always present
- N uint // basic type id, array length, number of function results, or channel direction
- Key, Elt *Type // map key and array, pointer, slice, map or channel element
- Params *ast.Scope // function (receiver, input and result) parameters, tuple expressions (results of function calls), or nil
- Expr ast.Expr // corresponding AST expression
-}
-
-// NewType creates a new type of a given form.
-func NewType(form Form) *Type {
- return &Type{Form: form, Scope: ast.NewScope(nil)}
-}
-
-// Form describes the form of a type.
-type Form int
-
-// The list of possible type forms.
-const (
- BadType Form = iota // for error handling
- Unresolved // type not fully set up
- Basic
- Array
- Struct
- Pointer
- Function
- Method
- Interface
- Slice
- Map
- Channel
- Tuple
-)
-
-var formStrings = [...]string{
- BadType: "badType",
- Unresolved: "unresolved",
- Basic: "basic",
- Array: "array",
- Struct: "struct",
- Pointer: "pointer",
- Function: "function",
- Method: "method",
- Interface: "interface",
- Slice: "slice",
- Map: "map",
- Channel: "channel",
- Tuple: "tuple",
-}
-
-func (form Form) String() string { return formStrings[form] }
-
-// The list of basic type id's.
-const (
- Bool = iota
- Byte
- Uint
- Int
- Float
- Complex
- Uintptr
- String
-
- Uint8
- Uint16
- Uint32
- Uint64
-
- Int8
- Int16
- Int32
- Int64
-
- Float32
- Float64
-
- Complex64
- Complex128
-
- // TODO(gri) ideal types are missing
-)
-
-var BasicTypes = map[uint]string{
- Bool: "bool",
- Byte: "byte",
- Uint: "uint",
- Int: "int",
- Float: "float",
- Complex: "complex",
- Uintptr: "uintptr",
- String: "string",
-
- Uint8: "uint8",
- Uint16: "uint16",
- Uint32: "uint32",
- Uint64: "uint64",
-
- Int8: "int8",
- Int16: "int16",
- Int32: "int32",
- Int64: "int64",
-
- Float32: "float32",
- Float64: "float64",
-
- Complex64: "complex64",
- Complex128: "complex128",
-}
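
formStrings above is the usual Go idiom for enumerations: a [...]string literal indexed by the iota constants, so each name sits next to its constant and an out-of-range Form fails loudly at runtime. A compact, runnable sketch with a shortened form list:

package main

import "fmt"

type Form int

const (
	BadType Form = iota
	Basic
	Pointer
	Slice
)

// Indexing the array with the constants keeps names and values in
// sync; adding a Form without a string would leave an empty entry.
var formStrings = [...]string{
	BadType: "badType",
	Basic:   "basic",
	Pointer: "pointer",
	Slice:   "slice",
}

func (f Form) String() string { return formStrings[f] }

func main() {
	fmt.Println(Pointer) // pointer
}
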
diff --git a/src/pkg/go/typechecker/typechecker.go b/src/pkg/go/typechecker/typechecker.go
deleted file mode 100644
index 24480165b..000000000
--- a/src/pkg/go/typechecker/typechecker.go
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// DEPRECATED PACKAGE - SEE go/types INSTEAD.
-// This package implements typechecking of a Go AST.
-// The result of the typecheck is an augmented AST
-// with object and type information for each identifier.
-//
-package typechecker
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/scanner"
- "os"
-)
-
-// TODO(gri) don't report errors for objects/types that are marked as bad.
-
-
-const debug = true // set for debugging output
-
-// An importer takes an import path and returns the data describing the
-// respective package's exported interface. The data format is TBD.
-//
-type Importer func(path string) ([]byte, os.Error)
-
-// CheckPackage typechecks a package and augments the AST by setting
-// *ast.Object, *ast.Type, and *ast.Scope fields accordingly. If an
-// importer is provided, it is used to handle imports, otherwise they
-// are ignored (likely leading to typechecking errors).
-//
-// If errors are reported, the AST may be incompletely augmented (fields
-// may be nil) or contain incomplete object, type, or scope information.
-//
-func CheckPackage(fset *token.FileSet, pkg *ast.Package, importer Importer) os.Error {
- var tc typechecker
- tc.fset = fset
- tc.importer = importer
- tc.checkPackage(pkg)
- return tc.GetError(scanner.Sorted)
-}
-
-// CheckFile typechecks a single file, but otherwise behaves like
-// CheckPackage. If the complete package consists of more than just
-// one file, the file may not typecheck without errors.
-//
-func CheckFile(fset *token.FileSet, file *ast.File, importer Importer) os.Error {
- // create a single-file dummy package
- pkg := &ast.Package{file.Name.Name, nil, nil, map[string]*ast.File{fset.Position(file.Name.NamePos).Filename: file}}
- return CheckPackage(fset, pkg, importer)
-}
-
-// ----------------------------------------------------------------------------
-// Typechecker state
-
-type typechecker struct {
- fset *token.FileSet
- scanner.ErrorVector
- importer Importer
- globals []*ast.Object // list of global objects
- topScope *ast.Scope // current top-most scope
- cyclemap map[*ast.Object]bool // for cycle detection
- iota int // current value of iota
-}
-
-func (tc *typechecker) Errorf(pos token.Pos, format string, args ...interface{}) {
- tc.Error(tc.fset.Position(pos), fmt.Sprintf(format, args...))
-}
-
-func assert(pred bool) {
- if !pred {
- panic("internal error")
- }
-}
-
-/*
-Typechecking is done in several phases:
-
-phase 1: declare all global objects; also collect all function and method declarations
- - all objects have kind, name, decl fields; the decl field permits
- quick lookup of an object's declaration
- - constant objects have an iota value
- - type objects have unresolved types with empty scopes, all others have nil types
- - report global double declarations
-
-phase 2: bind methods to their receiver base types
- - receiver base types must be declared in the package, thus for
- each method a corresponding (unresolved) type must exist
- - report method double declarations and errors with base types
-
-phase 3: resolve all global objects
- - sequentially iterate through all objects in the global scope
- - resolve types for all unresolved types and assign types to
- all attached methods
- - assign types to all other objects, possibly by evaluating
- constant and initializer expressions
- - resolution may recurse; a cyclemap is used to detect cycles
- - report global typing errors
-
-phase 4: sequentially typecheck function and method bodies
- - all global objects are declared and have types and values;
- all methods have types
- - sequentially process statements in each body; any object
- referred to must be fully defined at this point
- - report local typing errors
-*/
-
-func (tc *typechecker) checkPackage(pkg *ast.Package) {
- // setup package scope
- tc.topScope = Universe
- tc.openScope()
- defer tc.closeScope()
-
- // TODO(gri) there's no file scope at the moment since we ignore imports
-
- // phase 1: declare all global objects; also collect all function and method declarations
- var funcs []*ast.FuncDecl
- for _, file := range pkg.Files {
- for _, decl := range file.Decls {
- tc.declGlobal(decl)
- if f, isFunc := decl.(*ast.FuncDecl); isFunc {
- funcs = append(funcs, f)
- }
- }
- }
-
- // phase 2: bind methods to their receiver base types
- for _, m := range funcs {
- if m.Recv != nil {
- tc.bindMethod(m)
- }
- }
-
- // phase 3: resolve all global objects
- tc.cyclemap = make(map[*ast.Object]bool)
- for _, obj := range tc.globals {
- tc.resolve(obj)
- }
- assert(len(tc.cyclemap) == 0)
-
- // 4: sequentially typecheck function and method bodies
- for _, f := range funcs {
- ftype, _ := f.Name.Obj.Type.(*Type)
- tc.checkBlock(f.Body.List, ftype)
- }
-
- pkg.Scope = tc.topScope
-}
-
-func (tc *typechecker) declGlobal(global ast.Decl) {
- switch d := global.(type) {
- case *ast.BadDecl:
- // ignore
-
- case *ast.GenDecl:
- iota := 0
- var prev *ast.ValueSpec
- for _, spec := range d.Specs {
- switch s := spec.(type) {
- case *ast.ImportSpec:
- // TODO(gri) imports go into file scope
- case *ast.ValueSpec:
- switch d.Tok {
- case token.CONST:
- if s.Values == nil {
- // create a new spec with type and values from the previous one
- if prev != nil {
- s = &ast.ValueSpec{s.Doc, s.Names, prev.Type, prev.Values, s.Comment}
- } else {
- // TODO(gri) this should probably go into the const decl code
- tc.Errorf(s.Pos(), "missing initializer for const %s", s.Names[0].Name)
- }
- }
- for _, name := range s.Names {
- tc.globals = append(tc.globals, tc.decl(ast.Con, name, s, iota))
- }
- case token.VAR:
- for _, name := range s.Names {
- tc.globals = append(tc.globals, tc.decl(ast.Var, name, s, 0))
- }
- default:
- panic("unreachable")
- }
- prev = s
- iota++
- case *ast.TypeSpec:
- obj := tc.decl(ast.Typ, s.Name, s, 0)
- tc.globals = append(tc.globals, obj)
- // give all type objects an unresolved type so
- // that we can collect methods in the type scope
- typ := NewType(Unresolved)
- obj.Type = typ
- typ.Obj = obj
- default:
- panic("unreachable")
- }
- }
-
- case *ast.FuncDecl:
- if d.Recv == nil {
- tc.globals = append(tc.globals, tc.decl(ast.Fun, d.Name, d, 0))
- }
-
- default:
- panic("unreachable")
- }
-}
-
-// If x is of the form *T, deref returns T, otherwise it returns x.
-func deref(x ast.Expr) ast.Expr {
- if p, isPtr := x.(*ast.StarExpr); isPtr {
- x = p.X
- }
- return x
-}
-
-func (tc *typechecker) bindMethod(method *ast.FuncDecl) {
- // a method is declared in the receiver base type's scope
- var scope *ast.Scope
- base := deref(method.Recv.List[0].Type)
- if name, isIdent := base.(*ast.Ident); isIdent {
- // if base is not an *ast.Ident, we had a syntax
- // error and the parser reported an error already
- obj := tc.topScope.Lookup(name.Name)
- if obj == nil {
- tc.Errorf(name.Pos(), "invalid receiver: %s is not declared in this package", name.Name)
- } else if obj.Kind != ast.Typ {
- tc.Errorf(name.Pos(), "invalid receiver: %s is not a type", name.Name)
- } else {
- typ := obj.Type.(*Type)
- assert(typ.Form == Unresolved)
- scope = typ.Scope
- }
- }
- if scope == nil {
- // no receiver type found; use a dummy scope
- // (we still want to type-check the method
- // body, so make sure there is a name object
- // and type)
- // TODO(gri) should we record the scope so
- // that we don't lose the receiver for type-
- // checking of the method body?
- scope = ast.NewScope(nil)
- }
- tc.declInScope(scope, ast.Fun, method.Name, method, 0)
-}
-
-func (tc *typechecker) resolve(obj *ast.Object) {
- // check for declaration cycles
- if tc.cyclemap[obj] {
- tc.Errorf(obj.Pos(), "illegal cycle in declaration of %s", obj.Name)
- obj.Kind = ast.Bad
- return
- }
- tc.cyclemap[obj] = true
- defer func() {
- tc.cyclemap[obj] = false, false
- }()
-
- // resolve non-type objects
- typ, _ := obj.Type.(*Type)
- if typ == nil {
- switch obj.Kind {
- case ast.Bad:
- // ignore
-
- case ast.Con:
- tc.declConst(obj)
-
- case ast.Var:
- tc.declVar(obj)
- obj.Type = tc.typeFor(nil, obj.Decl.(*ast.ValueSpec).Type, false)
-
- case ast.Fun:
- obj.Type = NewType(Function)
- t := obj.Decl.(*ast.FuncDecl).Type
- tc.declSignature(obj.Type.(*Type), nil, t.Params, t.Results)
-
- default:
- // type objects have non-nil types when resolve is called
- if debug {
- fmt.Printf("kind = %s\n", obj.Kind)
- }
- panic("unreachable")
- }
- return
- }
-
- // resolve type objects
- if typ.Form == Unresolved {
- tc.typeFor(typ, typ.Obj.Decl.(*ast.TypeSpec).Type, false)
-
- // provide types for all methods
- for _, obj := range typ.Scope.Objects {
- if obj.Kind == ast.Fun {
- assert(obj.Type == nil)
- obj.Type = NewType(Method)
- f := obj.Decl.(*ast.FuncDecl)
- t := f.Type
- tc.declSignature(obj.Type.(*Type), f.Recv, t.Params, t.Results)
- }
- }
- }
-}
-
-func (tc *typechecker) checkBlock(body []ast.Stmt, ftype *Type) {
- tc.openScope()
- defer tc.closeScope()
-
- // inject function/method parameters into block scope, if any
- if ftype != nil {
- for _, par := range ftype.Params.Objects {
- if par.Name != "_" {
- alt := tc.topScope.Insert(par)
- assert(alt == nil) // ftype has no double declarations
- }
- }
- }
-
- for _, stmt := range body {
- tc.checkStmt(stmt)
- }
-}
-
-// ----------------------------------------------------------------------------
-// Types
-
-// unparen removes parentheses around x, if any.
-func unparen(x ast.Expr) ast.Expr {
- if ux, hasParens := x.(*ast.ParenExpr); hasParens {
- return unparen(ux.X)
- }
- return x
-}
-
-func (tc *typechecker) declFields(scope *ast.Scope, fields *ast.FieldList, ref bool) (n uint) {
- if fields != nil {
- for _, f := range fields.List {
- typ := tc.typeFor(nil, f.Type, ref)
- for _, name := range f.Names {
- fld := tc.declInScope(scope, ast.Var, name, f, 0)
- fld.Type = typ
- n++
- }
- }
- }
- return n
-}
-
-func (tc *typechecker) declSignature(typ *Type, recv, params, results *ast.FieldList) {
- assert((typ.Form == Method) == (recv != nil))
- typ.Params = ast.NewScope(nil)
- tc.declFields(typ.Params, recv, true)
- tc.declFields(typ.Params, params, true)
- typ.N = tc.declFields(typ.Params, results, true)
-}
-
-func (tc *typechecker) typeFor(def *Type, x ast.Expr, ref bool) (typ *Type) {
- x = unparen(x)
-
- // type name
- if t, isIdent := x.(*ast.Ident); isIdent {
- obj := tc.find(t)
-
- if obj.Kind != ast.Typ {
- tc.Errorf(t.Pos(), "%s is not a type", t.Name)
- if def == nil {
- typ = NewType(BadType)
- } else {
- typ = def
- typ.Form = BadType
- }
- typ.Expr = x
- return
- }
-
- if !ref {
- tc.resolve(obj) // check for cycles even if type resolved
- }
- typ = obj.Type.(*Type)
-
- if def != nil {
- // new type declaration: copy type structure
- def.Form = typ.Form
- def.N = typ.N
- def.Key, def.Elt = typ.Key, typ.Elt
- def.Params = typ.Params
- def.Expr = x
- typ = def
- }
- return
- }
-
- // type literal
- typ = def
- if typ == nil {
- typ = NewType(BadType)
- }
- typ.Expr = x
-
- switch t := x.(type) {
- case *ast.SelectorExpr:
- if debug {
- fmt.Println("qualified identifier unimplemented")
- }
- typ.Form = BadType
-
- case *ast.StarExpr:
- typ.Form = Pointer
- typ.Elt = tc.typeFor(nil, t.X, true)
-
- case *ast.ArrayType:
- if t.Len != nil {
- typ.Form = Array
- // TODO(gri) compute the real length
- // (this may call resolve recursively)
- (*typ).N = 42
- } else {
- typ.Form = Slice
- }
- typ.Elt = tc.typeFor(nil, t.Elt, t.Len == nil)
-
- case *ast.StructType:
- typ.Form = Struct
- tc.declFields(typ.Scope, t.Fields, false)
-
- case *ast.FuncType:
- typ.Form = Function
- tc.declSignature(typ, nil, t.Params, t.Results)
-
- case *ast.InterfaceType:
- typ.Form = Interface
- tc.declFields(typ.Scope, t.Methods, true)
-
- case *ast.MapType:
- typ.Form = Map
- typ.Key = tc.typeFor(nil, t.Key, true)
- typ.Elt = tc.typeFor(nil, t.Value, true)
-
- case *ast.ChanType:
- typ.Form = Channel
- typ.N = uint(t.Dir)
- typ.Elt = tc.typeFor(nil, t.Value, true)
-
- default:
- if debug {
- fmt.Printf("x is %T\n", x)
- }
- panic("unreachable")
- }
-
- return
-}
-
-// ----------------------------------------------------------------------------
-// TODO(gri) implement these place holders
-
-func (tc *typechecker) declConst(*ast.Object) {
-}
-
-func (tc *typechecker) declVar(*ast.Object) {
-}
-
-func (tc *typechecker) checkStmt(ast.Stmt) {
-}
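
resolve's cyclemap is the standard mark-on-entry, unmark-on-exit cycle detector; note that the deleted file still uses the pre-Go 1 map-deletion syntax m[obj] = false, false, which Go 1 spells delete(m, obj). A minimal sketch of the same detector over a made-up object type:

package main

import "fmt"

type object struct {
	name string
	deps []*object
}

// resolve reports a cycle the way typechecker.resolve does: mark the
// object in cyclemap on entry and unmark it on exit.
func resolve(obj *object, cyclemap map[*object]bool) error {
	if cyclemap[obj] {
		return fmt.Errorf("illegal cycle in declaration of %s", obj.name)
	}
	cyclemap[obj] = true
	defer delete(cyclemap, obj)
	for _, dep := range obj.deps {
		if err := resolve(dep, cyclemap); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	a := &object{name: "a"}
	a.deps = []*object{a} // self-reference
	fmt.Println(resolve(a, map[*object]bool{}))
}
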
diff --git a/src/pkg/go/typechecker/typechecker_test.go b/src/pkg/go/typechecker/typechecker_test.go
deleted file mode 100644
index 4bad4499a..000000000
--- a/src/pkg/go/typechecker/typechecker_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements a simple typechecker test harness. Packages found
-// in the testDir directory are typechecked. Error messages reported by
-// the typechecker are compared against the error messages expected for
-// the test files.
-//
-// Expected errors are indicated in the test files by putting a comment
-// of the form /* ERROR "rx" */ immediately following an offending token.
-// The harness will verify that an error matching the regular expression
-// rx is reported at that source position. Consecutive comments may be
-// used to indicate multiple errors for the same token position.
-//
-// For instance, the following test file indicates that a "not declared"
-// error should be reported for the undeclared variable x:
-//
-// package P0
-// func f() {
-// _ = x /* ERROR "not declared" */ + 1
-// }
-//
-// If the -pkg flag is set, only packages with package names matching
-// the regular expression provided via the flag value are tested.
-
-package typechecker
-
-import (
- "flag"
- "fmt"
- "go/ast"
- "go/parser"
- "go/scanner"
- "go/token"
- "io/ioutil"
- "os"
- "regexp"
- "sort"
- "strings"
- "testing"
-)
-
-const testDir = "./testdata" // location of test packages
-
-var fset = token.NewFileSet()
-
-var (
- pkgPat = flag.String("pkg", ".*", "regular expression to select test packages by package name")
- trace = flag.Bool("trace", false, "print package names")
-)
-
-// ERROR comments must be of the form /* ERROR "rx" */ and rx is
-// a regular expression that matches the expected error message.
-var errRx = regexp.MustCompile(`^/\* *ERROR *"([^"]*)" *\*/$`)
-
-// expectedErrors collects the regular expressions of ERROR comments
-// found in the package files of pkg and returns them in sorted order
-// (by filename and position).
-func expectedErrors(t *testing.T, pkg *ast.Package) (list scanner.ErrorList) {
- // scan all package files
- for filename := range pkg.Files {
- src, err := ioutil.ReadFile(filename)
- if err != nil {
- t.Fatalf("expectedErrors(%s): %v", pkg.Name, err)
- }
-
- var s scanner.Scanner
- file := fset.AddFile(filename, fset.Base(), len(src))
- s.Init(file, src, nil, scanner.ScanComments)
- var prev token.Pos // position of last non-comment token
- loop:
- for {
- pos, tok, lit := s.Scan()
- switch tok {
- case token.EOF:
- break loop
- case token.COMMENT:
- s := errRx.FindStringSubmatch(lit)
- if len(s) == 2 {
- list = append(list, &scanner.Error{fset.Position(prev), string(s[1])})
- }
- default:
- prev = pos
- }
- }
- }
- sort.Sort(list) // errors from multiple files may not be in order
- return
-}
-
-func testFilter(f *os.FileInfo) bool {
- return strings.HasSuffix(f.Name, ".src") && f.Name[0] != '.'
-}
-
-func checkError(t *testing.T, expected, found *scanner.Error) {
- rx, err := regexp.Compile(expected.Msg)
- if err != nil {
- t.Errorf("%s: %v", expected.Pos, err)
- return
- }
-
- match := rx.MatchString(found.Msg)
-
- if expected.Pos.Offset != found.Pos.Offset {
- if match {
- t.Errorf("%s: expected error should have been at %s", expected.Pos, found.Pos)
- } else {
- t.Errorf("%s: error matching %q expected", expected.Pos, expected.Msg)
- return
- }
- }
-
- if !match {
- t.Errorf("%s: %q does not match %q", expected.Pos, expected.Msg, found.Msg)
- }
-}
-
-func TestTypeCheck(t *testing.T) {
- flag.Parse()
- pkgRx, err := regexp.Compile(*pkgPat)
- if err != nil {
- t.Fatalf("illegal flag value %q: %s", *pkgPat, err)
- }
-
- pkgs, err := parser.ParseDir(fset, testDir, testFilter, 0)
- if err != nil {
- scanner.PrintError(os.Stderr, err)
- t.Fatalf("packages in %s contain syntax errors", testDir)
- }
-
- for _, pkg := range pkgs {
- if !pkgRx.MatchString(pkg.Name) {
- continue // only test selected packages
- }
-
- if *trace {
- fmt.Println(pkg.Name)
- }
-
- xlist := expectedErrors(t, pkg)
- err := CheckPackage(fset, pkg, nil)
- if err != nil {
- if elist, ok := err.(scanner.ErrorList); ok {
- // verify that errors match
- for i := 0; i < len(xlist) && i < len(elist); i++ {
- checkError(t, xlist[i], elist[i])
- }
- // the correct number of errors must have been found
- if len(xlist) != len(elist) {
- fmt.Fprintf(os.Stderr, "%s\n", pkg.Name)
- scanner.PrintError(os.Stderr, elist)
- fmt.Fprintln(os.Stderr)
- t.Errorf("TypeCheck(%s): %d errors expected but %d reported", pkg.Name, len(xlist), len(elist))
- }
- } else {
- t.Errorf("TypeCheck(%s): %v", pkg.Name, err)
- }
- } else if len(xlist) > 0 {
- t.Errorf("TypeCheck(%s): %d errors expected but 0 reported", pkg.Name, len(xlist))
- }
- }
-}
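
The harness's ERROR-comment convention is carried entirely by the errRx regular expression: submatch 1 is the quoted pattern that the reported message must match. A runnable sketch of just the extraction step:

package main

import (
	"fmt"
	"regexp"
)

// errRx is the same pattern the harness uses: the quoted body of an
// /* ERROR "rx" */ comment is captured as submatch 1.
var errRx = regexp.MustCompile(`^/\* *ERROR *"([^"]*)" *\*/$`)

func main() {
	for _, comment := range []string{
		`/* ERROR "not declared" */`,
		`/* plain comment */`,
	} {
		if m := errRx.FindStringSubmatch(comment); len(m) == 2 {
			fmt.Printf("expect error matching %q\n", m[1])
		}
	}
}
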
diff --git a/src/pkg/go/typechecker/universe.go b/src/pkg/go/typechecker/universe.go
deleted file mode 100644
index 81c14a05e..000000000
--- a/src/pkg/go/typechecker/universe.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typechecker
-
-import "go/ast"
-
-// TODO(gri) should this be in package ast?
-
-// The Universe scope contains all predeclared identifiers.
-var Universe *ast.Scope
-
-func def(obj *ast.Object) {
- alt := Universe.Insert(obj)
- if alt != nil {
- panic("object declared twice")
- }
-}
-
-func init() {
- Universe = ast.NewScope(nil)
-
- // basic types
- for n, name := range BasicTypes {
- typ := NewType(Basic)
- typ.N = n
- obj := ast.NewObj(ast.Typ, name)
- obj.Type = typ
- typ.Obj = obj
- def(obj)
- }
-
- // built-in functions
- // TODO(gri) implement this
-}
diff --git a/src/pkg/go/types/Makefile b/src/pkg/go/types/Makefile
deleted file mode 100644
index 4ca707c73..000000000
--- a/src/pkg/go/types/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2010 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-include ../../../Make.inc
-
-TARG=go/types
-GOFILES=\
- check.go\
- const.go\
- exportdata.go\
- gcimporter.go\
- types.go\
- universe.go\
-
-include ../../../Make.pkg
diff --git a/src/pkg/go/types/check.go b/src/pkg/go/types/check.go
deleted file mode 100644
index 87e3e93da..000000000
--- a/src/pkg/go/types/check.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements the Check function, which typechecks a package.
-
-package types
-
-import (
- "fmt"
- "go/ast"
- "go/scanner"
- "go/token"
- "os"
- "strconv"
-)
-
-const debug = false
-
-type checker struct {
- fset *token.FileSet
- scanner.ErrorVector
- types map[ast.Expr]Type
-}
-
-func (c *checker) errorf(pos token.Pos, format string, args ...interface{}) string {
- msg := fmt.Sprintf(format, args...)
- c.Error(c.fset.Position(pos), msg)
- return msg
-}
-
-// collectFields collects struct fields (tok = token.STRUCT), interface methods
-// (tok = token.INTERFACE), and function arguments/results (tok = token.FUNC).
-func (c *checker) collectFields(tok token.Token, list *ast.FieldList, cycleOk bool) (fields ObjList, tags []string, isVariadic bool) {
- if list != nil {
- for _, field := range list.List {
- ftype := field.Type
- if t, ok := ftype.(*ast.Ellipsis); ok {
- ftype = t.Elt
- isVariadic = true
- }
- typ := c.makeType(ftype, cycleOk)
- tag := ""
- if field.Tag != nil {
- assert(field.Tag.Kind == token.STRING)
- tag, _ = strconv.Unquote(field.Tag.Value)
- }
- if len(field.Names) > 0 {
- // named fields
- for _, name := range field.Names {
- obj := name.Obj
- obj.Type = typ
- fields = append(fields, obj)
- if tok == token.STRUCT {
- tags = append(tags, tag)
- }
- }
- } else {
- // anonymous field
- switch tok {
- case token.STRUCT:
- tags = append(tags, tag)
- fallthrough
- case token.FUNC:
- obj := ast.NewObj(ast.Var, "")
- obj.Type = typ
- fields = append(fields, obj)
- case token.INTERFACE:
- utyp := Underlying(typ)
- if typ, ok := utyp.(*Interface); ok {
- // TODO(gri) This is not good enough. Check for double declarations!
- fields = append(fields, typ.Methods...)
- } else if _, ok := utyp.(*Bad); !ok {
- // if utyp is Bad, don't complain (the root cause was reported before)
- c.errorf(ftype.Pos(), "interface contains embedded non-interface type")
- }
- default:
- panic("unreachable")
- }
- }
- }
- }
- return
-}
-
-// makeType makes a new type for an AST type specification x or returns
-// the type referred to by a type name x. If cycleOk is set, a type may
-// refer to itself directly or indirectly; otherwise cycles are errors.
-//
-func (c *checker) makeType(x ast.Expr, cycleOk bool) (typ Type) {
- if debug {
- fmt.Printf("makeType (cycleOk = %v)\n", cycleOk)
- ast.Print(c.fset, x)
- defer func() {
- fmt.Printf("-> %T %v\n\n", typ, typ)
- }()
- }
-
- switch t := x.(type) {
- case *ast.BadExpr:
- return &Bad{}
-
- case *ast.Ident:
- // type name
- obj := t.Obj
- if obj == nil {
- // unresolved identifier (error has been reported before)
- return &Bad{Msg: "unresolved identifier"}
- }
- if obj.Kind != ast.Typ {
- msg := c.errorf(t.Pos(), "%s is not a type", t.Name)
- return &Bad{Msg: msg}
- }
- c.checkObj(obj, cycleOk)
- if !cycleOk && obj.Type.(*Name).Underlying == nil {
- // TODO(gri) Enable this message again once its position
- // is independent of the underlying map implementation.
- // msg := c.errorf(obj.Pos(), "illegal cycle in declaration of %s", obj.Name)
- msg := "illegal cycle"
- return &Bad{Msg: msg}
- }
- return obj.Type.(Type)
-
- case *ast.ParenExpr:
- return c.makeType(t.X, cycleOk)
-
- case *ast.SelectorExpr:
- // qualified identifier
- // TODO (gri) eventually, this code belongs to expression
- // type checking - here for the time being
- if ident, ok := t.X.(*ast.Ident); ok {
- if obj := ident.Obj; obj != nil {
- if obj.Kind != ast.Pkg {
- msg := c.errorf(ident.Pos(), "%s is not a package", obj.Name)
- return &Bad{Msg: msg}
- }
- // TODO(gri) we have a package name but don't
- // have the mapping from package name to package
- // scope anymore (created in ast.NewPackage).
- return &Bad{} // for now
- }
- }
- // TODO(gri) can this really happen (the parser should have excluded this)?
- msg := c.errorf(t.Pos(), "expected qualified identifier")
- return &Bad{Msg: msg}
-
- case *ast.StarExpr:
- return &Pointer{Base: c.makeType(t.X, true)}
-
- case *ast.ArrayType:
- if t.Len != nil {
- // TODO(gri) compute length
- return &Array{Elt: c.makeType(t.Elt, cycleOk)}
- }
- return &Slice{Elt: c.makeType(t.Elt, true)}
-
- case *ast.StructType:
- fields, tags, _ := c.collectFields(token.STRUCT, t.Fields, cycleOk)
- return &Struct{Fields: fields, Tags: tags}
-
- case *ast.FuncType:
- params, _, _ := c.collectFields(token.FUNC, t.Params, true)
- results, _, isVariadic := c.collectFields(token.FUNC, t.Results, true)
- return &Func{Recv: nil, Params: params, Results: results, IsVariadic: isVariadic}
-
- case *ast.InterfaceType:
- methods, _, _ := c.collectFields(token.INTERFACE, t.Methods, cycleOk)
- methods.Sort()
- return &Interface{Methods: methods}
-
- case *ast.MapType:
- return &Map{Key: c.makeType(t.Key, true), Elt: c.makeType(t.Value, true)}
-
- case *ast.ChanType:
- return &Chan{Dir: t.Dir, Elt: c.makeType(t.Value, true)}
- }
-
- panic(fmt.Sprintf("unreachable (%T)", x))
-}
-
-// checkObj type checks an object.
-func (c *checker) checkObj(obj *ast.Object, ref bool) {
- if obj.Type != nil {
- // object has already been type checked
- return
- }
-
- switch obj.Kind {
- case ast.Bad:
- // ignore
-
- case ast.Con:
- // TODO(gri) complete this
-
- case ast.Typ:
- typ := &Name{Obj: obj}
- obj.Type = typ // "mark" object so recursion terminates
- typ.Underlying = Underlying(c.makeType(obj.Decl.(*ast.TypeSpec).Type, ref))
-
- case ast.Var:
- // TODO(gri) complete this
-
- case ast.Fun:
- // TODO(gri) complete this
-
- default:
- panic("unreachable")
- }
-}
-
-// Check typechecks a package.
-// It augments the AST by assigning types to all ast.Objects and returns a map
-// of types for all expression nodes in statements, and a scanner.ErrorList if
-// there are errors.
-//
-func Check(fset *token.FileSet, pkg *ast.Package) (types map[ast.Expr]Type, err os.Error) {
- var c checker
- c.fset = fset
- c.types = make(map[ast.Expr]Type)
-
- for _, obj := range pkg.Scope.Objects {
- c.checkObj(obj, false)
- }
-
- return c.types, c.GetError(scanner.NoMultiples)
-}
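
makeType is the classic one-switch-arm-per-type-syntax AST walk. The same shape, reduced to a describer over the current go/ast and go/parser APIs (the describe function is illustrative only, not part of the deleted package):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

// describe mirrors the structure of checker.makeType: one switch arm
// per type syntax, recursing into element types.
func describe(x ast.Expr) string {
	switch t := x.(type) {
	case *ast.Ident:
		return t.Name
	case *ast.StarExpr:
		return "pointer to " + describe(t.X)
	case *ast.ArrayType:
		if t.Len == nil {
			return "slice of " + describe(t.Elt)
		}
		return "array of " + describe(t.Elt)
	case *ast.MapType:
		return "map from " + describe(t.Key) + " to " + describe(t.Value)
	case *ast.ChanType:
		return "channel of " + describe(t.Value)
	}
	return fmt.Sprintf("<%T>", x)
}

func main() {
	x, err := parser.ParseExpr("map[string][]*chan int")
	if err != nil {
		panic(err)
	}
	// map from string to slice of pointer to channel of int
	fmt.Println(describe(x))
}
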
diff --git a/src/pkg/go/types/check_test.go b/src/pkg/go/types/check_test.go
deleted file mode 100644
index 8be653fcb..000000000
--- a/src/pkg/go/types/check_test.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements a typechecker test harness. The packages specified
-// in tests are typechecked. Error messages reported by the typechecker are
-// compared against the error messages expected in the test files.
-//
-// Expected errors are indicated in the test files by putting a comment
-// of the form /* ERROR "rx" */ immediately following an offending token.
-// The harness will verify that an error matching the regular expression
-// rx is reported at that source position. Consecutive comments may be
-// used to indicate multiple errors for the same token position.
-//
-// For instance, the following test file indicates that a "not declared"
-// error should be reported for the undeclared variable x:
-//
-// package p
-// func f() {
-// _ = x /* ERROR "not declared" */ + 1
-// }
-
-package types
-
-import (
- "fmt"
- "go/ast"
- "go/parser"
- "go/scanner"
- "go/token"
- "io/ioutil"
- "os"
- "regexp"
- "testing"
-)
-
-// The test filenames do not end in .go so that they are invisible
-// to gofmt since they contain comments that must not change their
-// positions relative to surrounding tokens.
-
-var tests = []struct {
- name string
- files []string
-}{
- {"test0", []string{"testdata/test0.src"}},
-}
-
-var fset = token.NewFileSet()
-
-// TODO(gri) This functionality should be in token.FileSet.
-func getFile(filename string) *token.File {
- for f := range fset.Files() {
- if f.Name() == filename {
- return f
- }
- }
- return nil
-}
-
-// TODO(gri) This functionality should be in token.FileSet.
-func getPos(filename string, offset int) token.Pos {
- if f := getFile(filename); f != nil {
- return f.Pos(offset)
- }
- return token.NoPos
-}
-
-// TODO(gri) Need to revisit parser interface. We should be able to use parser.ParseFiles
-// or a similar function instead.
-func parseFiles(t *testing.T, testname string, filenames []string) (map[string]*ast.File, os.Error) {
- files := make(map[string]*ast.File)
- var errors scanner.ErrorList
- for _, filename := range filenames {
- if _, exists := files[filename]; exists {
- t.Fatalf("%s: duplicate file %s", testname, filename)
- }
- file, err := parser.ParseFile(fset, filename, nil, parser.DeclarationErrors)
- if file == nil {
- t.Fatalf("%s: could not parse file %s", testname, filename)
- }
- files[filename] = file
- if err != nil {
- // if the parser returns a non-scanner.ErrorList error
- // the file couldn't be read in the first place and
- // file == nil; in that case we shouldn't reach here
- errors = append(errors, err.(scanner.ErrorList)...)
- }
-
- }
- return files, errors
-}
-
-// ERROR comments must be of the form /* ERROR "rx" */ and rx is
-// a regular expression that matches the expected error message.
-//
-var errRx = regexp.MustCompile(`^/\* *ERROR *"([^"]*)" *\*/$`)
-
-// expectedErrors collects the regular expressions of ERROR comments found
-// in files and returns them as a map of error positions to error messages.
-//
-func expectedErrors(t *testing.T, testname string, files map[string]*ast.File) map[token.Pos]string {
- errors := make(map[token.Pos]string)
- for filename := range files {
- src, err := ioutil.ReadFile(filename)
- if err != nil {
- t.Fatalf("%s: could not read %s", testname, filename)
- }
-
- var s scanner.Scanner
- // file was parsed already - do not add it again to the file
- // set; otherwise the position information returned here will
- // not match the position information collected by the parser
- s.Init(getFile(filename), src, nil, scanner.ScanComments)
- var prev token.Pos // position of last non-comment token
-
- scanFile:
- for {
- pos, tok, lit := s.Scan()
- switch tok {
- case token.EOF:
- break scanFile
- case token.COMMENT:
- s := errRx.FindStringSubmatch(lit)
- if len(s) == 2 {
- errors[prev] = string(s[1])
- }
- default:
- prev = pos
- }
- }
- }
- return errors
-}
-
-func eliminate(t *testing.T, expected map[token.Pos]string, errors os.Error) {
- if errors == nil {
- return
- }
- for _, error := range errors.(scanner.ErrorList) {
- // error.Pos is a token.Position, but we want
- // a token.Pos so we can do a map lookup
- // TODO(gri) Need to move scanner.Errors over
- // to use token.Pos and file set info.
- pos := getPos(error.Pos.Filename, error.Pos.Offset)
- if msg, found := expected[pos]; found {
- // we expect a message at pos; check if it matches
- rx, err := regexp.Compile(msg)
- if err != nil {
- t.Errorf("%s: %v", error.Pos, err)
- continue
- }
- if match := rx.MatchString(error.Msg); !match {
- t.Errorf("%s: %q does not match %q", error.Pos, error.Msg, msg)
- continue
- }
- // we have a match - eliminate this error
- expected[pos] = "", false
- } else {
- // To keep in mind when analyzing failed test output:
- // If the same error position occurs multiple times in errors,
- // this message will be triggered (because the first error at
- // the position removes this position from the expected errors).
- t.Errorf("%s: no (multiple?) error expected, but found: %s", error.Pos, error.Msg)
- }
- }
-}
-
-func check(t *testing.T, testname string, testfiles []string) {
- // TODO(gri) Eventually all these different phases should be
- // subsumed into a single function call that takes
- // a set of files and creates a fully resolved and
- // type-checked AST.
-
- files, err := parseFiles(t, testname, testfiles)
-
- // we are expecting the following errors
- // (collect these after parsing the files so that
- // they are found in the file set)
- errors := expectedErrors(t, testname, files)
-
- // verify errors returned by the parser
- eliminate(t, errors, err)
-
- // verify errors returned after resolving identifiers
- pkg, err := ast.NewPackage(fset, files, GcImporter, Universe)
- eliminate(t, errors, err)
-
- // verify errors returned by the typechecker
- _, err = Check(fset, pkg)
- eliminate(t, errors, err)
-
- // there should be no expected errors left
- if len(errors) > 0 {
- t.Errorf("%s: %d errors not reported:", testname, len(errors))
- for pos, msg := range errors {
- t.Errorf("%s: %s\n", fset.Position(pos), msg)
- }
- }
-}
-
-func TestCheck(t *testing.T) {
- // For easy debugging w/o changing the testing code,
- // if there is a local test file, only test that file.
- const testfile = "test.go"
- if fi, err := os.Stat(testfile); err == nil && fi.IsRegular() {
- fmt.Printf("WARNING: Testing only %s (remove it to run all tests)\n", testfile)
- check(t, testfile, []string{testfile})
- return
- }
-
- // Otherwise, run all the tests.
- for _, test := range tests {
- check(t, test.name, test.files)
- }
-}
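
expectedErrors relies on go/scanner's ScanComments mode plus a prev variable holding the last non-comment token position, so each ERROR comment is attributed to the token it follows. A self-contained sketch of that trick against the current go/scanner API:

package main

import (
	"fmt"
	"go/scanner"
	"go/token"
)

func main() {
	src := []byte(`package p
func f() { _ = x /* ERROR "not declared" */ + 1 }
`)
	fset := token.NewFileSet()
	file := fset.AddFile("test.src", fset.Base(), len(src))

	var s scanner.Scanner
	s.Init(file, src, nil, scanner.ScanComments)

	// prev tracks the last non-comment token, so a comment is
	// reported at the position of the token it follows.
	var prev token.Pos
	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		if tok == token.COMMENT {
			fmt.Printf("%s: %s\n", fset.Position(prev), lit)
			continue
		}
		prev = pos
	}
}
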
diff --git a/src/pkg/go/types/const.go b/src/pkg/go/types/const.go
deleted file mode 100644
index 1ef95d9f9..000000000
--- a/src/pkg/go/types/const.go
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements operations on ideal constants.
-
-package types
-
-import (
- "big"
- "go/token"
- "strconv"
-)
-
-// TODO(gri) Consider changing the API so Const is an interface
-// and operations on consts don't have to type switch.
-
-// A Const implements an ideal constant Value.
-// The zero value z for a Const is not a valid constant value.
-type Const struct {
- // representation of constant values:
- // ideal bool -> bool
- // ideal int -> *big.Int
- // ideal float -> *big.Rat
- // ideal complex -> cmplx
- // ideal string -> string
- val interface{}
-}
-
-// Representation of complex values.
-type cmplx struct {
- re, im *big.Rat
-}
-
-func assert(cond bool) {
- if !cond {
- panic("go/types internal error: assertion failed")
- }
-}
-
-// MakeConst makes an ideal constant from a literal
-// token and the corresponding literal string.
-func MakeConst(tok token.Token, lit string) Const {
- switch tok {
- case token.INT:
- var x big.Int
- _, ok := x.SetString(lit, 0)
- assert(ok)
- return Const{&x}
- case token.FLOAT:
- var y big.Rat
- _, ok := y.SetString(lit)
- assert(ok)
- return Const{&y}
- case token.IMAG:
- assert(lit[len(lit)-1] == 'i')
- var im big.Rat
- _, ok := im.SetString(lit[0 : len(lit)-1])
- assert(ok)
- return Const{cmplx{big.NewRat(0, 1), &im}}
- case token.CHAR:
- assert(lit[0] == '\'' && lit[len(lit)-1] == '\'')
- code, _, _, err := strconv.UnquoteChar(lit[1:len(lit)-1], '\'')
- assert(err == nil)
- return Const{big.NewInt(int64(code))}
- case token.STRING:
- s, err := strconv.Unquote(lit)
- assert(err == nil)
- return Const{s}
- }
- panic("unreachable")
-}
-
-// MakeZero returns the zero constant for the given type.
-func MakeZero(typ *Type) Const {
- // TODO(gri) fix this
- return Const{0}
-}
-
-// Match attempts to match the internal constant representations of x and y.
-// If the attempt is successful, the results are the values of x and y,
-// if necessary converted to have the same internal representation; otherwise
-// the results are invalid.
-func (x Const) Match(y Const) (u, v Const) {
- switch a := x.val.(type) {
- case bool:
- if _, ok := y.val.(bool); ok {
- u, v = x, y
- }
- case *big.Int:
- switch y.val.(type) {
- case *big.Int:
- u, v = x, y
- case *big.Rat:
- var z big.Rat
- z.SetInt(a)
- u, v = Const{&z}, y
- case cmplx:
- var z big.Rat
- z.SetInt(a)
- u, v = Const{cmplx{&z, big.NewRat(0, 1)}}, y
- }
- case *big.Rat:
- switch y.val.(type) {
- case *big.Int:
- v, u = y.Match(x)
- case *big.Rat:
- u, v = x, y
- case cmplx:
- u, v = Const{cmplx{a, big.NewRat(0, 1)}}, y
- }
- case cmplx:
- switch y.val.(type) {
- case *big.Int, *big.Rat:
- v, u = y.Match(x)
- case cmplx:
- u, v = x, y
- }
- case string:
- if _, ok := y.val.(string); ok {
- u, v = x, y
- }
- default:
- panic("unreachable")
- }
- return
-}
-
-// Convert attempts to convert the constant x to a given type.
-// If the attempt is successful, the result is the new constant;
-// otherwise the result is invalid.
-func (x Const) Convert(typ *Type) Const {
- // TODO(gri) implement this
- switch x := x.val.(type) {
- case bool:
- case *big.Int:
- case *big.Rat:
- case cmplx:
- case string:
- }
- return x
-}
-
-func (x Const) String() string {
- switch x := x.val.(type) {
- case bool:
- if x {
- return "true"
- }
- return "false"
- case *big.Int:
- return x.String()
- case *big.Rat:
- return x.FloatString(10) // 10 digits of precision after decimal point seems fine
- case cmplx:
- // TODO(gri) don't print 0 components
- return x.re.FloatString(10) + " + " + x.im.FloatString(10) + "i"
- case string:
- return x
- }
- panic("unreachable")
-}
-
-func (x Const) UnaryOp(op token.Token) Const {
- panic("unimplemented")
-}
-
-func (x Const) BinaryOp(op token.Token, y Const) Const {
- var z interface{}
- switch x := x.val.(type) {
- case bool:
- z = binaryBoolOp(x, op, y.val.(bool))
- case *big.Int:
- z = binaryIntOp(x, op, y.val.(*big.Int))
- case *big.Rat:
- z = binaryFloatOp(x, op, y.val.(*big.Rat))
- case cmplx:
- z = binaryCmplxOp(x, op, y.val.(cmplx))
- case string:
- z = binaryStringOp(x, op, y.val.(string))
- default:
- panic("unreachable")
- }
- return Const{z}
-}
-
-func binaryBoolOp(x bool, op token.Token, y bool) interface{} {
- switch op {
- case token.EQL:
- return x == y
- case token.NEQ:
- return x != y
- }
- panic("unreachable")
-}
-
-func binaryIntOp(x *big.Int, op token.Token, y *big.Int) interface{} {
- var z big.Int
- switch op {
- case token.ADD:
- return z.Add(x, y)
- case token.SUB:
- return z.Sub(x, y)
- case token.MUL:
- return z.Mul(x, y)
- case token.QUO:
- return z.Quo(x, y)
- case token.REM:
- return z.Rem(x, y)
- case token.AND:
- return z.And(x, y)
- case token.OR:
- return z.Or(x, y)
- case token.XOR:
- return z.Xor(x, y)
- case token.AND_NOT:
- return z.AndNot(x, y)
- case token.SHL:
- panic("unimplemented")
- case token.SHR:
- panic("unimplemented")
- case token.EQL:
- return x.Cmp(y) == 0
- case token.NEQ:
- return x.Cmp(y) != 0
- case token.LSS:
- return x.Cmp(y) < 0
- case token.LEQ:
- return x.Cmp(y) <= 0
- case token.GTR:
- return x.Cmp(y) > 0
- case token.GEQ:
- return x.Cmp(y) >= 0
- }
- panic("unreachable")
-}
-
-func binaryFloatOp(x *big.Rat, op token.Token, y *big.Rat) interface{} {
- var z big.Rat
- switch op {
- case token.ADD:
- return z.Add(x, y)
- case token.SUB:
- return z.Sub(x, y)
- case token.MUL:
- return z.Mul(x, y)
- case token.QUO:
- return z.Quo(x, y)
- case token.EQL:
- return x.Cmp(y) == 0
- case token.NEQ:
- return x.Cmp(y) != 0
- case token.LSS:
- return x.Cmp(y) < 0
- case token.LEQ:
- return x.Cmp(y) <= 0
- case token.GTR:
- return x.Cmp(y) > 0
- case token.GEQ:
- return x.Cmp(y) >= 0
- }
- panic("unreachable")
-}
-
-func binaryCmplxOp(x cmplx, op token.Token, y cmplx) interface{} {
- a, b := x.re, x.im
- c, d := y.re, y.im
- switch op {
- case token.ADD:
- // (a+c) + i(b+d)
- var re, im big.Rat
- re.Add(a, c)
- im.Add(b, d)
- return cmplx{&re, &im}
- case token.SUB:
- // (a-c) + i(b-d)
- var re, im big.Rat
- re.Sub(a, c)
- im.Sub(b, d)
- return cmplx{&re, &im}
- case token.MUL:
- // (ac-bd) + i(bc+ad)
- var ac, bd, bc, ad big.Rat
- ac.Mul(a, c)
- bd.Mul(b, d)
- bc.Mul(b, c)
- ad.Mul(a, d)
- var re, im big.Rat
- re.Sub(&ac, &bd)
- im.Add(&bc, &ad)
- return cmplx{&re, &im}
- case token.QUO:
- // (ac+bd)/s + i(bc-ad)/s, with s = cc + dd
- var ac, bd, bc, ad, s big.Rat
- ac.Mul(a, c)
- bd.Mul(b, d)
- bc.Mul(b, c)
- ad.Mul(a, d)
- s.Add(c.Mul(c, c), d.Mul(d, d))
- var re, im big.Rat
- re.Add(&ac, &bd)
- re.Quo(&re, &s)
- im.Sub(&bc, &ad)
- im.Quo(&im, &s)
- return cmplx{&re, &im}
- case token.EQL:
- return a.Cmp(c) == 0 && b.Cmp(d) == 0
- case token.NEQ:
- return a.Cmp(c) != 0 || b.Cmp(d) != 0
- }
- panic("unreachable")
-}
-
-func binaryStringOp(x string, op token.Token, y string) interface{} {
- switch op {
- case token.ADD:
- return x + y
- case token.EQL:
- return x == y
- case token.NEQ:
- return x != y
- case token.LSS:
- return x < y
- case token.LEQ:
- return x <= y
- case token.GTR:
- return x > y
- case token.GEQ:
- return x >= y
- }
- panic("unreachable")
-}
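
binaryCmplxOp implements exact complex arithmetic over pairs of rationals; its MUL case is the schoolbook (ac-bd) + i(bc+ad). The same computation under Go 1's math/big spelling (the cmplx and mul names mirror the deleted file but are re-declared here for illustration):

package main

import (
	"fmt"
	"math/big"
)

// cmplx is the same representation the deleted file uses: a complex
// value as two exact rationals.
type cmplx struct {
	re, im *big.Rat
}

// mul computes (ac-bd) + i(bc+ad), mirroring binaryCmplxOp's MUL case.
func mul(x, y cmplx) cmplx {
	var ac, bd, bc, ad big.Rat
	ac.Mul(x.re, y.re)
	bd.Mul(x.im, y.im)
	bc.Mul(x.im, y.re)
	ad.Mul(x.re, y.im)
	var re, im big.Rat
	re.Sub(&ac, &bd)
	im.Add(&bc, &ad)
	return cmplx{&re, &im}
}

func main() {
	i := cmplx{big.NewRat(0, 1), big.NewRat(1, 1)}
	ii := mul(i, i) // i*i = -1
	fmt.Println(ii.re.RatString(), ii.im.RatString()) // -1 0
}
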
diff --git a/src/pkg/go/types/exportdata.go b/src/pkg/go/types/exportdata.go
deleted file mode 100644
index 383520320..000000000
--- a/src/pkg/go/types/exportdata.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements ExportData.
-
-package types
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-func readGopackHeader(buf *bufio.Reader) (name string, size int, err os.Error) {
- // See $GOROOT/include/ar.h.
- hdr := make([]byte, 64+12+6+6+8+10+2)
- _, err = io.ReadFull(buf, hdr)
- if err != nil {
- return
- }
- if trace {
- fmt.Printf("header: %s", hdr)
- }
- s := strings.TrimSpace(string(hdr[64+12+6+6+8:][:10]))
- size, err = strconv.Atoi(s)
- if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
- err = os.NewError("invalid archive header")
- return
- }
- name = strings.TrimSpace(string(hdr[:64]))
- return
-}
-
-type dataReader struct {
- *bufio.Reader
- io.Closer
-}
-
-// ExportData returns an io.ReadCloser positioned at the beginning of the
-// export data section of the given object/archive file, or an error.
-// It is the caller's responsibility to close the readCloser.
-//
-func ExportData(filename string) (rc io.ReadCloser, err os.Error) {
- file, err := os.Open(filename)
- if err != nil {
- return
- }
-
- defer func() {
- if err != nil {
- file.Close()
- // Add file name to error.
- err = fmt.Errorf("reading export data: %s: %v", filename, err)
- }
- }()
-
- buf := bufio.NewReader(file)
-
- // Read first line to make sure this is an object file.
- line, err := buf.ReadSlice('\n')
- if err != nil {
- return
- }
- if string(line) == "!<arch>\n" {
- // Archive file. Scan to __.PKGDEF, which should
- // be second archive entry.
- var name string
- var size int
-
- // First entry should be __.SYMDEF.
- // Read and discard.
- if name, size, err = readGopackHeader(buf); err != nil {
- return
- }
- if name != "__.SYMDEF" {
- err = os.NewError("go archive does not begin with __.SYMDEF")
- return
- }
- const block = 4096
- tmp := make([]byte, block)
- for size > 0 {
- n := size
- if n > block {
- n = block
- }
- _, err = io.ReadFull(buf, tmp[:n])
- if err != nil {
- return
- }
- size -= n
- }
-
- // Second entry should be __.PKGDEF.
- if name, size, err = readGopackHeader(buf); err != nil {
- return
- }
- if name != "__.PKGDEF" {
- err = os.NewError("go archive is missing __.PKGDEF")
- return
- }
-
- // Read first line of __.PKGDEF data, so that line
- // is once again the first line of the input.
- line, err = buf.ReadSlice('\n')
- if err != nil {
- return
- }
- }
-
- // Now at __.PKGDEF in archive or still at beginning of file.
- // Either way, line should begin with "go object ".
- if !strings.HasPrefix(string(line), "go object ") {
- err = os.NewError("not a go object file")
- return
- }
-
- // Skip over object header to export data.
- // Begins after first line with $$.
- for line[0] != '$' {
- line, err = buf.ReadSlice('\n')
- if err != nil {
- return
- }
- }
-
- rc = &dataReader{buf, file}
- return
-}
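
readGopackHeader exploits the fact that an archive header is fixed-width text, so every field is a slice at a known offset. A sketch using the standard 60-byte Unix ar layout rather than gc's wider name field (parseArHeader and its test header are hypothetical):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseArHeader slices fixed-width fields out of a standard 60-byte
// Unix ar header: name[0:16], date[16:28], uid[28:34], gid[34:40],
// mode[40:48], size[48:58], terminator[58:60].
func parseArHeader(hdr []byte) (name string, size int, err error) {
	if len(hdr) != 60 || hdr[58] != '`' || hdr[59] != '\n' {
		return "", 0, fmt.Errorf("invalid archive header")
	}
	name = strings.TrimSpace(string(hdr[:16]))
	size, err = strconv.Atoi(strings.TrimSpace(string(hdr[48:58])))
	return name, size, err
}

func main() {
	hdr := fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", "__.PKGDEF", 0, 0, 0, 0644, 1234)
	fmt.Println(parseArHeader([]byte(hdr))) // __.PKGDEF 1234 <nil>
}
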
diff --git a/src/pkg/go/types/gcimporter.go b/src/pkg/go/types/gcimporter.go
deleted file mode 100644
index 6ab1806b6..000000000
--- a/src/pkg/go/types/gcimporter.go
+++ /dev/null
@@ -1,799 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements an ast.Importer for gc generated object files.
-// TODO(gri) Eventually move this into a separate package outside types.
-
-package types
-
-import (
- "big"
- "fmt"
- "go/ast"
- "go/token"
- "io"
- "os"
- "path/filepath"
- "runtime"
- "scanner"
- "strconv"
-)
-
-const trace = false // set to true for debugging
-
-var (
- pkgRoot = filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH)
- pkgExts = [...]string{".a", ".5", ".6", ".8"}
-)
-
-// findPkg returns the filename and package id for an import path.
-// If no file was found, an empty filename is returned.
-func findPkg(path string) (filename, id string) {
- if len(path) == 0 {
- return
- }
-
- id = path
- var noext string
- switch path[0] {
- default:
- // "x" -> "$GOROOT/pkg/$GOOS_$GOARCH/x.ext", "x"
- noext = filepath.Join(pkgRoot, path)
-
- case '.':
- // "./x" -> "/this/directory/x.ext", "/this/directory/x"
- cwd, err := os.Getwd()
- if err != nil {
- return
- }
- noext = filepath.Join(cwd, path)
- id = noext
-
- case '/':
- // "/x" -> "/x.ext", "/x"
- noext = path
- }
-
- // try extensions
- for _, ext := range pkgExts {
- filename = noext + ext
- if f, err := os.Stat(filename); err == nil && f.IsRegular() {
- return
- }
- }
-
- filename = "" // not found
- return
-}
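
findPkg probes one base path against a fixed list of object-file extensions and keeps the first candidate that stats as a regular file. The same loop under the current os API (findFile and its arguments are made up for illustration):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findFile mirrors findPkg's probe loop: build one base path, then
// stat each candidate extension until a regular file turns up.
func findFile(dir, base string, exts []string) string {
	noext := filepath.Join(dir, base)
	for _, ext := range exts {
		filename := noext + ext
		if fi, err := os.Stat(filename); err == nil && fi.Mode().IsRegular() {
			return filename
		}
	}
	return "" // not found
}

func main() {
	fmt.Printf("%q\n", findFile(os.TempDir(), "x", []string{".a", ".5", ".6", ".8"}))
}
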
-
-// gcParser parses the exports inside a gc compiler-produced
-// object/archive file and populates its scope with the results.
-type gcParser struct {
- scanner scanner.Scanner
- tok int // current token
- lit string // literal string; only valid for Ident, Int, String tokens
- id string // package id of imported package
- imports map[string]*ast.Object // package id -> package object
-}
-
-func (p *gcParser) init(filename, id string, src io.Reader, imports map[string]*ast.Object) {
- p.scanner.Init(src)
- p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
- p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
- p.scanner.Whitespace = 1<<'\t' | 1<<' '
- p.scanner.Filename = filename // for good error messages
- p.next()
- p.id = id
- p.imports = imports
-}
-
-func (p *gcParser) next() {
- p.tok = p.scanner.Scan()
- switch p.tok {
- case scanner.Ident, scanner.Int, scanner.String:
- p.lit = p.scanner.TokenText()
- default:
- p.lit = ""
- }
- if trace {
- fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
- }
-}
-
-// GcImporter implements the ast.Importer signature.
-func GcImporter(imports map[string]*ast.Object, path string) (pkg *ast.Object, err os.Error) {
- if path == "unsafe" {
- return Unsafe, nil
- }
-
- defer func() {
- if r := recover(); r != nil {
- err = r.(importError) // will re-panic if r is not an importError
- if trace {
- panic(err) // force a stack trace
- }
- }
- }()
-
- filename, id := findPkg(path)
- if filename == "" {
- err = os.NewError("can't find import: " + id)
- return
- }
-
- if pkg = imports[id]; pkg != nil {
- return // package was imported before
- }
-
- buf, err := ExportData(filename)
- if err != nil {
- return
- }
- defer buf.Close()
-
- if trace {
- fmt.Printf("importing %s (%s)\n", id, filename)
- }
-
- var p gcParser
- p.init(filename, id, buf, imports)
- pkg = p.parseExport()
- return
-}
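
GcImporter shows the panic/recover idiom for recursive-descent parsers: internal code panics with a private error type, and one deferred recover at the API boundary converts it back into an ordinary error, re-panicking (and so still crashing) on anything unexpected. A minimal sketch with Go 1 error spellings:

package main

import (
	"errors"
	"fmt"
)

// importError is the only panic value the parser throws; anything
// else re-panics in the deferred recover below.
type importError struct{ err error }

func parse(src string) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = r.(importError).err // re-panics if r is not an importError
		}
	}()
	if src == "" {
		panic(importError{errors.New("empty input")})
	}
	return nil
}

func main() {
	fmt.Println(parse(""), parse("ok")) // empty input <nil>
}
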
-
-// ----------------------------------------------------------------------------
-// Error handling
-
-// Internal errors are boxed as importErrors.
-type importError struct {
- pos scanner.Position
- err os.Error
-}
-
-func (e importError) String() string {
- return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
-}
-
-func (p *gcParser) error(err interface{}) {
- if s, ok := err.(string); ok {
- err = os.NewError(s)
- }
- // panic with a runtime.Error if err is not an os.Error
- panic(importError{p.scanner.Pos(), err.(os.Error)})
-}
-
-func (p *gcParser) errorf(format string, args ...interface{}) {
- p.error(fmt.Sprintf(format, args...))
-}
-
-func (p *gcParser) expect(tok int) string {
- lit := p.lit
- if p.tok != tok {
- p.errorf("expected %q, got %q (%q)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
- }
- p.next()
- return lit
-}
-
-func (p *gcParser) expectSpecial(tok string) {
- sep := 'x' // not white space
- i := 0
- for i < len(tok) && p.tok == int(tok[i]) && sep > ' ' {
- sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
- p.next()
- i++
- }
- if i < len(tok) {
- p.errorf("expected %q, got %q", tok, tok[0:i])
- }
-}
-
-func (p *gcParser) expectKeyword(keyword string) {
- lit := p.expect(scanner.Ident)
- if lit != keyword {
- p.errorf("expected keyword %s, got %q", keyword, lit)
- }
-}
-
-// ----------------------------------------------------------------------------
-// Import declarations
-
-// ImportPath = string_lit .
-//
-func (p *gcParser) parsePkgId() *ast.Object {
- id, err := strconv.Unquote(p.expect(scanner.String))
- if err != nil {
- p.error(err)
- }
-
- switch id {
- case "":
- // id == "" stands for the imported package id
- // (only known at time of package installation)
- id = p.id
- case "unsafe":
- // package unsafe is not in the imports map - handle explicitly
- return Unsafe
- }
-
- pkg := p.imports[id]
- if pkg == nil {
- scope := ast.NewScope(nil)
- pkg = ast.NewObj(ast.Pkg, "")
- pkg.Data = scope
- p.imports[id] = pkg
- }
-
- return pkg
-}
-
-// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
-func (p *gcParser) parseDotIdent() string {
- ident := ""
- if p.tok != scanner.Int {
- sep := 'x' // not white space
- for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
- ident += p.lit
- sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
- p.next()
- }
- }
- if ident == "" {
- p.expect(scanner.Ident) // use expect() for error handling
- }
- return ident
-}
-
-// ExportedName = ImportPath "." dotIdentifier .
-//
-func (p *gcParser) parseExportedName(kind ast.ObjKind) *ast.Object {
- pkg := p.parsePkgId()
- p.expect('.')
- name := p.parseDotIdent()
-
- // a type may have been declared before - if it exists
- // already in the respective package scope, return that
- // type
- scope := pkg.Data.(*ast.Scope)
- if kind == ast.Typ {
- if obj := scope.Lookup(name); obj != nil {
- assert(obj.Kind == ast.Typ)
- return obj
- }
- }
-
- // any other object must be a newly declared object -
- // create it and insert it into the package scope
- obj := ast.NewObj(kind, name)
- if scope.Insert(obj) != nil {
- p.errorf("already declared: %s", obj.Name)
- }
-
- // a new type object is a named type and may be referred
- // to before the underlying type is known - set it up
- if kind == ast.Typ {
- obj.Type = &Name{Obj: obj}
- }
-
- return obj
-}
-
-// ----------------------------------------------------------------------------
-// Types
-
-// BasicType = identifier .
-//
-func (p *gcParser) parseBasicType() Type {
-	id := p.expect(scanner.Ident)
-	obj := Universe.Lookup(id)
-	if obj == nil || obj.Kind != ast.Typ {
-		p.errorf("not a basic type: %s", id)
- }
- return obj.Type.(Type)
-}
-
-// ArrayType = "[" int_lit "]" Type .
-//
-func (p *gcParser) parseArrayType() Type {
- // "[" already consumed and lookahead known not to be "]"
- lit := p.expect(scanner.Int)
- p.expect(']')
- elt := p.parseType()
- n, err := strconv.Atoui64(lit)
- if err != nil {
- p.error(err)
- }
- return &Array{Len: n, Elt: elt}
-}
-
-// MapType = "map" "[" Type "]" Type .
-//
-func (p *gcParser) parseMapType() Type {
- p.expectKeyword("map")
- p.expect('[')
- key := p.parseType()
- p.expect(']')
- elt := p.parseType()
- return &Map{Key: key, Elt: elt}
-}
-
-// Name = identifier | "?" .
-//
-func (p *gcParser) parseName() (name string) {
- switch p.tok {
- case scanner.Ident:
- name = p.lit
- p.next()
- case '?':
- // anonymous
- p.next()
- default:
- p.error("name expected")
- }
- return
-}
-
-// Field = Name Type [ ":" string_lit ] .
-//
-func (p *gcParser) parseField() (fld *ast.Object, tag string) {
- name := p.parseName()
- ftyp := p.parseType()
- if name == "" {
- // anonymous field - ftyp must be T or *T and T must be a type name
- if _, ok := Deref(ftyp).(*Name); !ok {
- p.errorf("anonymous field expected")
- }
- }
- if p.tok == ':' {
- p.next()
- tag = p.expect(scanner.String)
- }
- fld = ast.NewObj(ast.Var, name)
- fld.Type = ftyp
- return
-}
-
-// StructType = "struct" "{" [ FieldList ] "}" .
-// FieldList = Field { ";" Field } .
-//
-func (p *gcParser) parseStructType() Type {
- var fields []*ast.Object
- var tags []string
-
- parseField := func() {
- fld, tag := p.parseField()
- fields = append(fields, fld)
- tags = append(tags, tag)
- }
-
- p.expectKeyword("struct")
- p.expect('{')
- if p.tok != '}' {
- parseField()
- for p.tok == ';' {
- p.next()
- parseField()
- }
- }
- p.expect('}')
-
- return &Struct{Fields: fields, Tags: tags}
-}
-
-// Parameter = ( identifier | "?" ) [ "..." ] Type [ ":" string_lit ] .
-//
-func (p *gcParser) parseParameter() (par *ast.Object, isVariadic bool) {
- name := p.parseName()
- if name == "" {
- name = "_" // cannot access unnamed identifiers
- }
- if p.tok == '.' {
- p.expectSpecial("...")
- isVariadic = true
- }
- ptyp := p.parseType()
- // ignore argument tag
- if p.tok == ':' {
- p.next()
- p.expect(scanner.String)
- }
- par = ast.NewObj(ast.Var, name)
- par.Type = ptyp
- return
-}
-
-// Parameters = "(" [ ParameterList ] ")" .
-// ParameterList = { Parameter "," } Parameter .
-//
-func (p *gcParser) parseParameters() (list []*ast.Object, isVariadic bool) {
- parseParameter := func() {
- par, variadic := p.parseParameter()
- list = append(list, par)
- if variadic {
- if isVariadic {
- p.error("... not on final argument")
- }
- isVariadic = true
- }
- }
-
- p.expect('(')
- if p.tok != ')' {
- parseParameter()
- for p.tok == ',' {
- p.next()
- parseParameter()
- }
- }
- p.expect(')')
-
- return
-}
-
-// Signature = Parameters [ Result ] .
-// Result = Type | Parameters .
-//
-func (p *gcParser) parseSignature() *Func {
- params, isVariadic := p.parseParameters()
-
- // optional result type
- var results []*ast.Object
- switch p.tok {
- case scanner.Ident, scanner.String, '[', '*', '<':
- // single, unnamed result
- result := ast.NewObj(ast.Var, "_")
- result.Type = p.parseType()
- results = []*ast.Object{result}
- case '(':
- // named or multiple result(s)
- var variadic bool
- results, variadic = p.parseParameters()
- if variadic {
- p.error("... not permitted on result type")
- }
- }
-
- return &Func{Params: params, Results: results, IsVariadic: isVariadic}
-}
-
-// MethodSpec = ( identifier | ImportPath "." dotIdentifier ) Signature .
-//
-func (p *gcParser) parseMethodSpec() *ast.Object {
- if p.tok == scanner.Ident {
- p.expect(scanner.Ident)
- } else {
- // TODO(gri) should this be parseExportedName here?
- p.parsePkgId()
- p.expect('.')
- p.parseDotIdent()
- }
- p.parseSignature()
-
- // TODO(gri) compute method object
- return ast.NewObj(ast.Fun, "_")
-}
-
-// InterfaceType = "interface" "{" [ MethodList ] "}" .
-// MethodList = MethodSpec { ";" MethodSpec } .
-//
-func (p *gcParser) parseInterfaceType() Type {
- var methods ObjList
-
- parseMethod := func() {
- meth := p.parseMethodSpec()
- methods = append(methods, meth)
- }
-
- p.expectKeyword("interface")
- p.expect('{')
- if p.tok != '}' {
- parseMethod()
- for p.tok == ';' {
- p.next()
- parseMethod()
- }
- }
- p.expect('}')
-
- methods.Sort()
- return &Interface{Methods: methods}
-}
-
-// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
-//
-func (p *gcParser) parseChanType() Type {
- dir := ast.SEND | ast.RECV
- if p.tok == scanner.Ident {
- p.expectKeyword("chan")
- if p.tok == '<' {
- p.expectSpecial("<-")
- dir = ast.SEND
- }
- } else {
- p.expectSpecial("<-")
- p.expectKeyword("chan")
- dir = ast.RECV
- }
- elt := p.parseType()
- return &Chan{Dir: dir, Elt: elt}
-}
-
-// Type =
-// BasicType | TypeName | ArrayType | SliceType | StructType |
-// PointerType | FuncType | InterfaceType | MapType | ChanType |
-// "(" Type ")" .
-// BasicType = ident .
-// TypeName = ExportedName .
-// SliceType = "[" "]" Type .
-// PointerType = "*" Type .
-// FuncType = "func" Signature .
-//
-func (p *gcParser) parseType() Type {
- switch p.tok {
- case scanner.Ident:
- switch p.lit {
- default:
- return p.parseBasicType()
- case "struct":
- return p.parseStructType()
- case "func":
- // FuncType
- p.next()
- return p.parseSignature()
- case "interface":
- return p.parseInterfaceType()
- case "map":
- return p.parseMapType()
- case "chan":
- return p.parseChanType()
- }
- case scanner.String:
- // TypeName
- return p.parseExportedName(ast.Typ).Type.(Type)
- case '[':
- p.next() // look ahead
- if p.tok == ']' {
- // SliceType
- p.next()
- return &Slice{Elt: p.parseType()}
- }
- return p.parseArrayType()
- case '*':
- // PointerType
- p.next()
- return &Pointer{Base: p.parseType()}
- case '<':
- return p.parseChanType()
- case '(':
- // "(" Type ")"
- p.next()
- typ := p.parseType()
- p.expect(')')
- return typ
- }
- p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
- return nil
-}
-
-// ----------------------------------------------------------------------------
-// Declarations
-
-// ImportDecl = "import" identifier string_lit .
-//
-func (p *gcParser) parseImportDecl() {
- p.expectKeyword("import")
- // The identifier has no semantic meaning in the import data.
- // It exists so that error messages can print the real package
- // name: binary.ByteOrder instead of "encoding/binary".ByteOrder.
- name := p.expect(scanner.Ident)
- pkg := p.parsePkgId()
- assert(pkg.Name == "" || pkg.Name == name)
- pkg.Name = name
-}
-
-// int_lit = [ "+" | "-" ] { "0" ... "9" } .
-//
-func (p *gcParser) parseInt() (sign, val string) {
- switch p.tok {
- case '-':
- p.next()
- sign = "-"
- case '+':
- p.next()
- }
- val = p.expect(scanner.Int)
- return
-}
-
-// number = int_lit [ "p" int_lit ] .
-//
-func (p *gcParser) parseNumber() Const {
- // mantissa
- sign, val := p.parseInt()
- mant, ok := new(big.Int).SetString(sign+val, 10)
- assert(ok)
-
- if p.lit == "p" {
- // exponent (base 2)
- p.next()
- sign, val = p.parseInt()
- exp, err := strconv.Atoui(val)
- if err != nil {
- p.error(err)
- }
- if sign == "-" {
- denom := big.NewInt(1)
- denom.Lsh(denom, exp)
- return Const{new(big.Rat).SetFrac(mant, denom)}
- }
- if exp > 0 {
- mant.Lsh(mant, exp)
- }
- return Const{new(big.Rat).SetInt(mant)}
- }
-
- return Const{mant}
-}
-
-// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
-// Literal = bool_lit | int_lit | float_lit | complex_lit | string_lit .
-// bool_lit = "true" | "false" .
-// complex_lit = "(" float_lit "+" float_lit ")" .
-// string_lit = `"` { unicode_char } `"` .
-//
-func (p *gcParser) parseConstDecl() {
- p.expectKeyword("const")
- obj := p.parseExportedName(ast.Con)
- var x Const
- var typ Type
- if p.tok != '=' {
- obj.Type = p.parseType()
- }
- p.expect('=')
- switch p.tok {
- case scanner.Ident:
- // bool_lit
- if p.lit != "true" && p.lit != "false" {
- p.error("expected true or false")
- }
- x = Const{p.lit == "true"}
- typ = Bool.Underlying
- p.next()
- case '-', scanner.Int:
- // int_lit
- x = p.parseNumber()
- typ = Int.Underlying
- if _, ok := x.val.(*big.Rat); ok {
- typ = Float64.Underlying
- }
- case '(':
- // complex_lit
- p.next()
- re := p.parseNumber()
- p.expect('+')
- im := p.parseNumber()
- p.expect(')')
- x = Const{cmplx{re.val.(*big.Rat), im.val.(*big.Rat)}}
- typ = Complex128.Underlying
- case scanner.String:
- // string_lit
- x = MakeConst(token.STRING, p.lit)
- p.next()
- typ = String.Underlying
- default:
- p.error("expected literal")
- }
- if obj.Type == nil {
- obj.Type = typ
- }
- obj.Data = x
-}
-
-// TypeDecl = "type" ExportedName Type .
-//
-func (p *gcParser) parseTypeDecl() {
- p.expectKeyword("type")
- obj := p.parseExportedName(ast.Typ)
-
- // The type object may have been imported before and thus already
- // have a type associated with it. We still need to parse the type
- // structure, but throw it away if the object already has a type.
- // This ensures that all imports refer to the same type object for
- // a given type declaration.
- typ := p.parseType()
-
- if name := obj.Type.(*Name); name.Underlying == nil {
- assert(Underlying(typ) == typ)
- name.Underlying = typ
- }
-}
-
-// VarDecl = "var" ExportedName Type .
-//
-func (p *gcParser) parseVarDecl() {
- p.expectKeyword("var")
- obj := p.parseExportedName(ast.Var)
- obj.Type = p.parseType()
-}
-
-// FuncDecl = "func" ExportedName Signature .
-//
-func (p *gcParser) parseFuncDecl() {
- // "func" already consumed
- obj := p.parseExportedName(ast.Fun)
- obj.Type = p.parseSignature()
-}
-
-// MethodDecl = "func" Receiver identifier Signature .
-// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
-//
-func (p *gcParser) parseMethodDecl() {
- // "func" already consumed
- p.expect('(')
- p.parseParameter() // receiver
- p.expect(')')
- p.expect(scanner.Ident)
- p.parseSignature()
-}
-
-// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
-//
-func (p *gcParser) parseDecl() {
- switch p.lit {
- case "import":
- p.parseImportDecl()
- case "const":
- p.parseConstDecl()
- case "type":
- p.parseTypeDecl()
- case "var":
- p.parseVarDecl()
- case "func":
- p.next() // look ahead
- if p.tok == '(' {
- p.parseMethodDecl()
- } else {
- p.parseFuncDecl()
- }
- }
- p.expect('\n')
-}
-
-// ----------------------------------------------------------------------------
-// Export
-
-// Export = PackageClause { Decl } "$$" .
-// PackageClause = "package" identifier [ "safe" ] "\n" .
-//
-func (p *gcParser) parseExport() *ast.Object {
- p.expectKeyword("package")
- name := p.expect(scanner.Ident)
- if p.tok != '\n' {
- // A package is safe if it was compiled with the -u flag,
- // which disables the unsafe package.
- // TODO(gri) remember "safe" package
- p.expectKeyword("safe")
- }
- p.expect('\n')
-
- assert(p.imports[p.id] == nil)
- pkg := ast.NewObj(ast.Pkg, name)
- pkg.Data = ast.NewScope(nil)
- p.imports[p.id] = pkg
-
- for p.tok != '$' && p.tok != scanner.EOF {
- p.parseDecl()
- }
-
- if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
- // don't call next()/expect() since reading past the
- // export data may cause scanner errors (e.g. NUL chars)
- p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
- }
-
- if n := p.scanner.ErrorCount; n != 0 {
- p.errorf("expected no scanner errors, got %d", n)
- }
-
- return pkg
-}
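
parseNumber above decodes the exporter's constant encoding, int_lit [ "p" int_lit ],
where the optional exponent is base 2: the literal 5p-2 denotes 5*2^-2 = 1.25.
A minimal standalone sketch of the same math/big arithmetic (decode is an
illustrative name, not part of the package):

	package main

	import (
		"fmt"
		"math/big"
	)

	// decode returns mant * 2^exp as an exact rational, mirroring the
	// Lsh/SetFrac steps in parseNumber.
	func decode(mant int64, exp int) *big.Rat {
		m := big.NewInt(mant)
		if exp >= 0 {
			m.Lsh(m, uint(exp)) // non-negative exponent: still an integer
			return new(big.Rat).SetInt(m)
		}
		// negative exponent: power-of-two denominator
		denom := new(big.Int).Lsh(big.NewInt(1), uint(-exp))
		return new(big.Rat).SetFrac(m, denom)
	}

	func main() {
		fmt.Println(decode(5, -2)) // 5/4, the literal "5p-2"
		fmt.Println(decode(3, 4))  // 48/1, the literal "3p4"
	}
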
diff --git a/src/pkg/go/types/gcimporter_test.go b/src/pkg/go/types/gcimporter_test.go
deleted file mode 100644
index ec87f5d51..000000000
--- a/src/pkg/go/types/gcimporter_test.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types
-
-import (
- "exec"
- "go/ast"
- "io/ioutil"
- "path/filepath"
- "runtime"
- "strings"
- "testing"
- "time"
-)
-
-var gcName, gcPath string // compiler name and path
-
-func init() {
- // determine compiler
- switch runtime.GOARCH {
- case "386":
- gcName = "8g"
- case "amd64":
- gcName = "6g"
- case "arm":
- gcName = "5g"
- default:
- gcName = "unknown-GOARCH-compiler"
- gcPath = gcName
- return
- }
- gcPath, _ = exec.LookPath(gcName)
-}
-
-func compile(t *testing.T, dirname, filename string) {
- cmd := exec.Command(gcPath, filename)
- cmd.Dir = dirname
- out, err := cmd.CombinedOutput()
- if err != nil {
- t.Errorf("%s %s failed: %s", gcName, filename, err)
- return
- }
- t.Logf("%s", string(out))
-}
-
-// Use the same global imports map for all tests. The effect is
-// as if all tested packages were imported into a single package.
-var imports = make(map[string]*ast.Object)
-
-func testPath(t *testing.T, path string) bool {
- _, err := GcImporter(imports, path)
- if err != nil {
- t.Errorf("testPath(%s): %s", path, err)
- return false
- }
- return true
-}
-
-const maxTime = 3e9 // maximum allotted testing time in ns
-
-func testDir(t *testing.T, dir string, endTime int64) (nimports int) {
- dirname := filepath.Join(pkgRoot, dir)
- list, err := ioutil.ReadDir(dirname)
- if err != nil {
- t.Errorf("testDir(%s): %s", dirname, err)
- }
- for _, f := range list {
- if time.Nanoseconds() >= endTime {
- t.Log("testing time used up")
- return
- }
- switch {
- case f.IsRegular():
- // try extensions
- for _, ext := range pkgExts {
- if strings.HasSuffix(f.Name, ext) {
- name := f.Name[0 : len(f.Name)-len(ext)] // remove extension
- if testPath(t, filepath.Join(dir, name)) {
- nimports++
- }
- }
- }
- case f.IsDirectory():
- nimports += testDir(t, filepath.Join(dir, f.Name), endTime)
- }
- }
- return
-}
-
-func TestGcImport(t *testing.T) {
- compile(t, "testdata", "exports.go")
-
- nimports := 0
- if testPath(t, "./testdata/exports") {
- nimports++
- }
- nimports += testDir(t, "", time.Nanoseconds()+maxTime) // installed packages
- t.Logf("tested %d imports", nimports)
-}
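
The test above is written against pre-Go 1 APIs: a top-level exec package,
time.Nanoseconds, and FileInfo's IsRegular/IsDirectory methods. Its time-budget
pattern carries over directly, though; a hedged post-Go 1 sketch of the same
deadline check (names here are illustrative):

	package main

	import (
		"fmt"
		"time"
	)

	const maxTime = 3 * time.Second // the original's 3e9 ns budget

	func main() {
		endTime := time.Now().Add(maxTime)
		nimports := 0
		for time.Now().Before(endTime) {
			// stand-in for importing one installed package
			time.Sleep(100 * time.Millisecond)
			nimports++
		}
		fmt.Printf("tested %d imports\n", nimports)
	}
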
diff --git a/src/pkg/go/types/testdata/exports.go b/src/pkg/go/types/testdata/exports.go
deleted file mode 100644
index ed63bf9ad..000000000
--- a/src/pkg/go/types/testdata/exports.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file is used to generate an object file which
-// serves as test file for gcimporter_test.go.
-
-package exports
-
-import (
- "go/ast"
-)
-
-const (
- C0 int = 0
- C1 = 3.14159265
- C2 = 2.718281828i
- C3 = -123.456e-789
- C4 = +123.456E+789
- C5 = 1234i
- C6 = "foo\n"
- C7 = `bar\n`
-)
-
-type (
- T1 int
- T2 [10]int
- T3 []int
- T4 *int
- T5 chan int
- T6a chan<- int
- T6b chan (<-chan int)
- T6c chan<- (chan int)
- T7 <-chan *ast.File
- T8 struct{}
- T9 struct {
- a int
- b, c float32
- d []string `go:"tag"`
- }
- T10 struct {
- T8
- T9
- _ *T10
- }
- T11 map[int]string
- T12 interface{}
- T13 interface {
- m1()
- m2(int) float32
- }
- T14 interface {
- T12
- T13
- m3(x ...struct{}) []T9
- }
- T15 func()
- T16 func(int)
- T17 func(x int)
- T18 func() float32
- T19 func() (x float32)
- T20 func(...interface{})
- T21 struct{ next *T21 }
- T22 struct{ link *T23 }
- T23 struct{ link *T22 }
- T24 *T24
- T25 *T26
- T26 *T27
- T27 *T25
- T28 func(T28) T28
-)
-
-var (
- V0 int
- V1 = -991.0
-)
-
-func F1() {}
-func F2(x int) {}
-func F3() int { return 0 }
-func F4() float32 { return 0 }
-func F5(a, b, c int, u, v, w struct{ x, y T1 }, more ...interface{}) (p, q, r chan<- T10)
-
-func (p *T1) M1()
diff --git a/src/pkg/go/types/testdata/test0.src b/src/pkg/go/types/testdata/test0.src
deleted file mode 100644
index 84a1abe27..000000000
--- a/src/pkg/go/types/testdata/test0.src
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// type declarations
-
-package test0
-
-import "unsafe"
-
-const pi = 3.1415
-
-type (
- N undeclared /* ERROR "undeclared" */
- B bool
- I int32
- A [10]P
- T struct {
- x, y P
- }
- P *T
- R (*R)
- F func(A) I
- Y interface {
- f(A) I
- }
- S [](((P)))
- M map[I]F
- C chan<- I
-)
-
-
-type (
- p1 pi /* ERROR "not a package" */ .foo
- p2 unsafe.Pointer
-)
-
-
-type (
- Pi pi /* ERROR "not a type" */
-
- a /* DISABLED "illegal cycle" */ a
- a /* ERROR "redeclared" */ int
-
- // where the cycle error appears depends on the
- // order in which declarations are processed
- // (which depends on the order in which a map
- // is iterated through)
- b c
- c /* DISABLED "illegal cycle" */ d
- d e
- e b
-
- t *t
-
- U V
- V *W
- W U
-
- P1 *S2
- P2 P1
-
- S0 struct {
- }
- S1 struct {
- a, b, c int
- u, v, a /* ERROR "redeclared" */ float32
- }
- S2 struct {
- U // anonymous field
- // TODO(gri) recognize double-declaration below
- // U /* ERROR "redeclared" */ int
- }
- S3 struct {
- x S2
- }
- S4/* DISABLED "illegal cycle" */ struct {
- S4
- }
- S5 struct {
- S6
- }
- S6 /* DISABLED "illegal cycle" */ struct {
- field S7
- }
- S7 struct {
- S5
- }
-
- L1 []L1
- L2 []int
-
- A1 [10]int
- A2 /* DISABLED "illegal cycle" */ [10]A2
- A3 /* DISABLED "illegal cycle" */ [10]struct {
- x A4
- }
- A4 [10]A3
-
- F1 func()
- F2 func(x, y, z float32)
- F3 func(x, y, x /* ERROR "redeclared" */ float32)
- F4 func() (x, y, x /* ERROR "redeclared" */ float32)
- F5 func(x int) (x /* ERROR "redeclared" */ float32)
- F6 func(x ...int)
-
- I1 interface{}
- I2 interface {
- m1()
- }
- I3 interface {
- m1()
- m1 /* ERROR "redeclared" */ ()
- }
- I4 interface {
- m1(x, y, x /* ERROR "redeclared" */ float32)
- m2() (x, y, x /* ERROR "redeclared" */ float32)
- m3(x int) (x /* ERROR "redeclared" */ float32)
- }
- I5 interface {
- m1(I5)
- }
- I6 interface {
- S0 /* ERROR "non-interface" */
- }
- I7 interface {
- I1
- I1
- }
- I8 /* DISABLED "illegal cycle" */ interface {
- I8
- }
- I9 /* DISABLED "illegal cycle" */ interface {
- I10
- }
- I10 interface {
- I11
- }
- I11 interface {
- I9
- }
-
- C1 chan int
- C2 <-chan int
- C3 chan<- C3
- C4 chan C5
- C5 chan C6
- C6 chan C4
-
- M1 map[Last]string
- M2 map[string]M2
-
- Last int
-)
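
test0.src drives the checker through comment annotations: a /* ERROR "msg" */
marker means the checker is expected to report an error matching msg at that
position, and DISABLED marks known-broken cases. A minimal sketch of harvesting
such markers from a source string (the real harness also resolved them to
token positions):

	package main

	import (
		"fmt"
		"regexp"
	)

	// errRx matches the /* ERROR "msg" */ annotations used in test0.src.
	var errRx = regexp.MustCompile(`/\* ERROR "([^"]*)" \*/`)

	func main() {
		src := `type (
			N undeclared /* ERROR "undeclared" */
			Pi pi /* ERROR "not a type" */
		)`
		for _, m := range errRx.FindAllStringSubmatch(src, -1) {
			fmt.Printf("expected error: %q\n", m[1])
		}
	}
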
diff --git a/src/pkg/go/types/types.go b/src/pkg/go/types/types.go
deleted file mode 100644
index 3aa896892..000000000
--- a/src/pkg/go/types/types.go
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// PACKAGE UNDER CONSTRUCTION. ANY AND ALL PARTS MAY CHANGE.
-// Package types declares the types used to represent Go types.
-//
-package types
-
-import (
- "go/ast"
- "sort"
-)
-
-// All types implement the Type interface.
-type Type interface {
- isType()
-}
-
-// All concrete types embed ImplementsType which
-// ensures that all types implement the Type interface.
-type ImplementsType struct{}
-
-func (t *ImplementsType) isType() {}
-
-// A Bad type is a non-nil placeholder type used when the correct type is not known.
-type Bad struct {
- ImplementsType
- Msg string // for better error reporting/debugging
-}
-
-// A Basic represents an (unnamed) basic type.
-type Basic struct {
- ImplementsType
- // TODO(gri) need a field specifying the exact basic type
-}
-
-// An Array represents an array type [Len]Elt.
-type Array struct {
- ImplementsType
- Len uint64
- Elt Type
-}
-
-// A Slice represents a slice type []Elt.
-type Slice struct {
- ImplementsType
- Elt Type
-}
-
-// A Struct represents a struct type struct{...}.
-// Anonymous fields are represented by objects with empty names.
-type Struct struct {
- ImplementsType
- Fields ObjList // struct fields; or nil
- Tags []string // corresponding tags; or nil
- // TODO(gri) This type needs some rethinking:
- // - at the moment anonymous fields are marked with "" object names,
- // and their names have to be reconstructed
- // - there is no scope for fast lookup (but the parser creates one)
-}
-
-// A Pointer represents a pointer type *Base.
-type Pointer struct {
- ImplementsType
- Base Type
-}
-
-// A Func represents a function type func(...) (...).
-// Unnamed parameters are represented by objects with empty names.
-type Func struct {
- ImplementsType
- Recv *ast.Object // nil if not a method
- Params ObjList // (incoming) parameters from left to right; or nil
- Results ObjList // (outgoing) results from left to right; or nil
- IsVariadic bool // true if the last parameter's type is of the form ...T
-}
-
-// An Interface represents an interface type interface{...}.
-type Interface struct {
- ImplementsType
- Methods ObjList // interface methods sorted by name; or nil
-}
-
-// A Map represents a map type map[Key]Elt.
-type Map struct {
- ImplementsType
- Key, Elt Type
-}
-
-// A Chan represents a channel type chan Elt, <-chan Elt, or chan<- Elt.
-type Chan struct {
- ImplementsType
- Dir ast.ChanDir
- Elt Type
-}
-
-// A Name represents a named type as declared in a type declaration.
-type Name struct {
- ImplementsType
- Underlying Type // nil if not fully declared
- Obj *ast.Object // corresponding declared object
- // TODO(gri) need to remember fields and methods.
-}
-
-// If typ is a pointer type, Deref returns the pointer's base type;
-// otherwise it returns typ.
-func Deref(typ Type) Type {
- if typ, ok := typ.(*Pointer); ok {
- return typ.Base
- }
- return typ
-}
-
-// Underlying returns the underlying type of a type.
-func Underlying(typ Type) Type {
- if typ, ok := typ.(*Name); ok {
- utyp := typ.Underlying
- if _, ok := utyp.(*Basic); !ok {
- return utyp
- }
- // the underlying type of a type name referring
- // to an (untyped) basic type is the basic type
- // name
- }
- return typ
-}
-
-// An ObjList represents an ordered list of objects; the order is
-// context-dependent (e.g., struct fields keep declaration order,
-// interface method lists are sorted by name).
-type ObjList []*ast.Object
-
-// ObjList implements sort.Interface.
-func (list ObjList) Len() int { return len(list) }
-func (list ObjList) Less(i, j int) bool { return list[i].Name < list[j].Name }
-func (list ObjList) Swap(i, j int) { list[i], list[j] = list[j], list[i] }
-
-// Sort sorts an object list by object name.
-func (list ObjList) Sort() { sort.Sort(list) }
-
-// identicalTypes returns true if both lists a and b have the
-// same length and corresponding objects have identical types.
-func identicalTypes(a, b ObjList) bool {
- if len(a) == len(b) {
- for i, x := range a {
- y := b[i]
- if !Identical(x.Type.(Type), y.Type.(Type)) {
- return false
- }
- }
- return true
- }
- return false
-}
-
-// Identical returns true if two types are identical.
-func Identical(x, y Type) bool {
- if x == y {
- return true
- }
-
- switch x := x.(type) {
- case *Bad:
- // A Bad type is always identical to any other type
- // (to avoid spurious follow-up errors).
- return true
-
- case *Basic:
-		if _, ok := y.(*Basic); ok {
-			panic("unimplemented")
- }
-
- case *Array:
- // Two array types are identical if they have identical element types
- // and the same array length.
- if y, ok := y.(*Array); ok {
- return x.Len == y.Len && Identical(x.Elt, y.Elt)
- }
-
- case *Slice:
- // Two slice types are identical if they have identical element types.
- if y, ok := y.(*Slice); ok {
- return Identical(x.Elt, y.Elt)
- }
-
- case *Struct:
- // Two struct types are identical if they have the same sequence of fields,
- // and if corresponding fields have the same names, and identical types,
- // and identical tags. Two anonymous fields are considered to have the same
- // name. Lower-case field names from different packages are always different.
- if y, ok := y.(*Struct); ok {
- // TODO(gri) handle structs from different packages
- if identicalTypes(x.Fields, y.Fields) {
- for i, f := range x.Fields {
- g := y.Fields[i]
- if f.Name != g.Name || x.Tags[i] != y.Tags[i] {
- return false
- }
- }
- return true
- }
- }
-
- case *Pointer:
- // Two pointer types are identical if they have identical base types.
- if y, ok := y.(*Pointer); ok {
- return Identical(x.Base, y.Base)
- }
-
- case *Func:
- // Two function types are identical if they have the same number of parameters
- // and result values, corresponding parameter and result types are identical,
- // and either both functions are variadic or neither is. Parameter and result
- // names are not required to match.
- if y, ok := y.(*Func); ok {
- return identicalTypes(x.Params, y.Params) &&
- identicalTypes(x.Results, y.Results) &&
- x.IsVariadic == y.IsVariadic
- }
-
- case *Interface:
- // Two interface types are identical if they have the same set of methods with
- // the same names and identical function types. Lower-case method names from
- // different packages are always different. The order of the methods is irrelevant.
- if y, ok := y.(*Interface); ok {
- return identicalTypes(x.Methods, y.Methods) // methods are sorted
- }
-
- case *Map:
- // Two map types are identical if they have identical key and value types.
- if y, ok := y.(*Map); ok {
- return Identical(x.Key, y.Key) && Identical(x.Elt, y.Elt)
- }
-
- case *Chan:
- // Two channel types are identical if they have identical value types
- // and the same direction.
- if y, ok := y.(*Chan); ok {
- return x.Dir == y.Dir && Identical(x.Elt, y.Elt)
- }
-
- case *Name:
- // Two named types are identical if their type names originate
- // in the same type declaration.
- if y, ok := y.(*Name); ok {
- return x.Obj == y.Obj ||
- // permit bad objects to be equal to avoid
- // follow up errors
- x.Obj != nil && x.Obj.Kind == ast.Bad ||
- y.Obj != nil && y.Obj.Kind == ast.Bad
- }
- }
-
- return false
-}
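
To see how this representation composes, here is a runnable sketch that trims
the declarations above down to Basic, Slice, and Pointer and exercises Deref
plus a slice-only Identical; everything else is elided, so this is an
illustration rather than the full package:

	package main

	import "fmt"

	// Trimmed re-declarations of the types above; the originals carry
	// more fields and more cases.
	type Type interface{ isType() }
	type ImplementsType struct{}

	func (*ImplementsType) isType() {}

	type Basic struct{ ImplementsType }
	type Slice struct {
		ImplementsType
		Elt Type
	}
	type Pointer struct {
		ImplementsType
		Base Type
	}

	// Deref unwraps one level of pointer, as above.
	func Deref(typ Type) Type {
		if p, ok := typ.(*Pointer); ok {
			return p.Base
		}
		return typ
	}

	// Identical, reduced to the *Slice case.
	func Identical(x, y Type) bool {
		if x == y {
			return true
		}
		if x, ok := x.(*Slice); ok {
			if y, ok := y.(*Slice); ok {
				return Identical(x.Elt, y.Elt)
			}
		}
		return false
	}

	func main() {
		elt := &Basic{}
		s1 := &Slice{Elt: elt}
		s2 := &Slice{Elt: elt}
		p := &Pointer{Base: s1}
		fmt.Println(Identical(s1, s2))    // true: identical element types
		fmt.Println(Deref(p) == Type(s1)) // true: Deref unwraps the pointer
	}
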
diff --git a/src/pkg/go/types/universe.go b/src/pkg/go/types/universe.go
deleted file mode 100644
index 6ae88e5f9..000000000
--- a/src/pkg/go/types/universe.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// FILE UNDER CONSTRUCTION. ANY AND ALL PARTS MAY CHANGE.
-// This file implements the universe and unsafe package scopes.
-
-package types
-
-import "go/ast"
-
-var (
- scope *ast.Scope // current scope to use for initialization
- Universe *ast.Scope
- Unsafe *ast.Object // package unsafe
-)
-
-func define(kind ast.ObjKind, name string) *ast.Object {
- obj := ast.NewObj(kind, name)
- if scope.Insert(obj) != nil {
- panic("types internal error: double declaration")
- }
- return obj
-}
-
-func defType(name string) *Name {
- obj := define(ast.Typ, name)
- typ := &Name{Underlying: &Basic{}, Obj: obj}
- obj.Type = typ
- return typ
-}
-
-func defConst(name string) {
- obj := define(ast.Con, name)
- _ = obj // TODO(gri) fill in other properties
-}
-
-func defFun(name string) {
- obj := define(ast.Fun, name)
- _ = obj // TODO(gri) fill in other properties
-}
-
-var (
- Bool,
- Int,
- Float64,
- Complex128,
- String *Name
-)
-
-func init() {
- scope = ast.NewScope(nil)
- Universe = scope
-
- Bool = defType("bool")
- defType("byte") // TODO(gri) should be an alias for uint8
- defType("complex64")
- Complex128 = defType("complex128")
- defType("float32")
- Float64 = defType("float64")
- defType("int8")
- defType("int16")
- defType("int32")
- defType("int64")
- String = defType("string")
- defType("uint8")
- defType("uint16")
- defType("uint32")
- defType("uint64")
- Int = defType("int")
- defType("uint")
- defType("uintptr")
-
- defConst("true")
- defConst("false")
- defConst("iota")
- defConst("nil")
-
- defFun("append")
- defFun("cap")
- defFun("close")
- defFun("complex")
- defFun("copy")
- defFun("imag")
- defFun("len")
- defFun("make")
- defFun("new")
- defFun("panic")
- defFun("print")
- defFun("println")
- defFun("real")
- defFun("recover")
-
- scope = ast.NewScope(nil)
- Unsafe = ast.NewObj(ast.Pkg, "unsafe")
- Unsafe.Data = scope
-
- defType("Pointer")
-
- defFun("Alignof")
- defFun("New")
- defFun("NewArray")
- defFun("Offsetof")
- defFun("Reflect")
- defFun("Sizeof")
- defFun("Typeof")
- defFun("Unreflect")
-}
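
The universe block here is an ordinary ast.Scope populated at init time, and
package unsafe is an ast.Object whose Data field holds its own scope. The old
go/ast object API is still present today (though long deprecated), so a small
sketch of the define/Lookup round trip remains runnable:

	package main

	import (
		"fmt"
		"go/ast"
	)

	func main() {
		// mirror define(): insert an object, rejecting double declarations
		universe := ast.NewScope(nil)
		obj := ast.NewObj(ast.Typ, "int")
		if universe.Insert(obj) != nil {
			panic("types internal error: double declaration")
		}

		// the kind of lookup parseBasicType performs against Universe
		if found := universe.Lookup("int"); found != nil {
			fmt.Println(found.Name, found.Kind) // prints: int type
		}
	}
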