summaryrefslogtreecommitdiff
path: root/src/pkg/go
diff options
context:
space:
mode:
authorMichael Stapelberg <stapelberg@debian.org>2013-03-04 21:27:36 +0100
committerMichael Stapelberg <michael@stapelberg.de>2013-03-04 21:27:36 +0100
commit04b08da9af0c450d645ab7389d1467308cfc2db8 (patch)
treedb247935fa4f2f94408edc3acd5d0d4f997aa0d8 /src/pkg/go
parent917c5fb8ec48e22459d77e3849e6d388f93d3260 (diff)
downloadgolang-04b08da9af0c450d645ab7389d1467308cfc2db8.tar.gz
Imported Upstream version 1.1~hg20130304upstream/1.1_hg20130304
Diffstat (limited to 'src/pkg/go')
-rw-r--r--src/pkg/go/ast/ast.go3
-rw-r--r--src/pkg/go/ast/commentmap.go332
-rw-r--r--src/pkg/go/ast/commentmap_test.go143
-rw-r--r--src/pkg/go/ast/filter.go2
-rw-r--r--src/pkg/go/ast/import.go2
-rw-r--r--src/pkg/go/ast/print.go79
-rw-r--r--src/pkg/go/ast/print_test.go21
-rw-r--r--src/pkg/go/ast/resolve.go4
-rw-r--r--src/pkg/go/ast/scope.go14
-rw-r--r--src/pkg/go/ast/walk.go7
-rw-r--r--src/pkg/go/build/build.go144
-rw-r--r--src/pkg/go/build/build_test.go42
-rw-r--r--src/pkg/go/build/deps_test.go28
-rw-r--r--src/pkg/go/build/doc.go28
-rw-r--r--src/pkg/go/build/read.go238
-rw-r--r--src/pkg/go/build/read_test.go226
-rw-r--r--src/pkg/go/doc/comment.go5
-rw-r--r--src/pkg/go/doc/doc.go12
-rw-r--r--src/pkg/go/doc/doc_test.go2
-rw-r--r--src/pkg/go/doc/example.go270
-rw-r--r--src/pkg/go/doc/example_test.go111
-rw-r--r--src/pkg/go/doc/exports.go2
-rw-r--r--src/pkg/go/doc/reader.go113
-rw-r--r--src/pkg/go/doc/synopsis.go41
-rw-r--r--src/pkg/go/doc/synopsis_test.go5
-rw-r--r--src/pkg/go/doc/testdata/a.0.golden11
-rw-r--r--src/pkg/go/doc/testdata/a.1.golden11
-rw-r--r--src/pkg/go/doc/testdata/a.2.golden11
-rw-r--r--src/pkg/go/doc/testdata/a0.go9
-rw-r--r--src/pkg/go/doc/testdata/a1.go4
-rw-r--r--src/pkg/go/doc/testdata/benchmark.go4
-rw-r--r--src/pkg/go/doc/testdata/e.go2
-rw-r--r--src/pkg/go/doc/testdata/error2.1.golden2
-rw-r--r--src/pkg/go/doc/testdata/error2.go2
-rw-r--r--src/pkg/go/doc/testdata/template.txt7
-rw-r--r--src/pkg/go/doc/testdata/testing.1.golden2
-rw-r--r--src/pkg/go/doc/testdata/testing.go4
-rw-r--r--src/pkg/go/format/format.go200
-rw-r--r--src/pkg/go/format/format_test.go125
-rw-r--r--src/pkg/go/parser/error_test.go10
-rw-r--r--src/pkg/go/parser/interface.go48
-rw-r--r--src/pkg/go/parser/parser.go249
-rw-r--r--src/pkg/go/parser/parser_test.go47
-rw-r--r--src/pkg/go/parser/performance_test.go30
-rw-r--r--src/pkg/go/parser/short_test.go13
-rw-r--r--src/pkg/go/printer/nodes.go164
-rw-r--r--src/pkg/go/printer/performance_test.go4
-rw-r--r--src/pkg/go/printer/printer.go92
-rw-r--r--src/pkg/go/printer/printer_test.go271
-rw-r--r--src/pkg/go/printer/testdata/comments.golden11
-rw-r--r--src/pkg/go/printer/testdata/comments.input10
-rw-r--r--src/pkg/go/printer/testdata/comments2.golden79
-rw-r--r--src/pkg/go/printer/testdata/comments2.input79
-rw-r--r--src/pkg/go/printer/testdata/declarations.golden54
-rw-r--r--src/pkg/go/printer/testdata/declarations.input54
-rw-r--r--src/pkg/go/printer/testdata/expressions.golden15
-rw-r--r--src/pkg/go/printer/testdata/expressions.input15
-rw-r--r--src/pkg/go/printer/testdata/expressions.raw15
-rw-r--r--src/pkg/go/printer/testdata/statements.golden108
-rw-r--r--src/pkg/go/printer/testdata/statements.input86
-rw-r--r--src/pkg/go/scanner/errors.go2
-rw-r--r--src/pkg/go/scanner/scanner.go39
-rw-r--r--src/pkg/go/scanner/scanner_test.go116
-rw-r--r--src/pkg/go/token/position.go22
-rw-r--r--src/pkg/go/token/position_test.go51
-rw-r--r--src/pkg/go/types/api.go105
-rw-r--r--src/pkg/go/types/builtins.go455
-rw-r--r--src/pkg/go/types/check.go507
-rw-r--r--src/pkg/go/types/check_test.go259
-rw-r--r--src/pkg/go/types/const.go718
-rw-r--r--src/pkg/go/types/conversions.go129
-rw-r--r--src/pkg/go/types/errors.go335
-rw-r--r--src/pkg/go/types/exportdata.go111
-rw-r--r--src/pkg/go/types/expr.go1520
-rw-r--r--src/pkg/go/types/gcimporter.go950
-rw-r--r--src/pkg/go/types/gcimporter_test.go180
-rw-r--r--src/pkg/go/types/objects.go186
-rw-r--r--src/pkg/go/types/operand.go411
-rw-r--r--src/pkg/go/types/predicates.go303
-rw-r--r--src/pkg/go/types/resolve.go197
-rw-r--r--src/pkg/go/types/resolver_test.go167
-rw-r--r--src/pkg/go/types/scope.go78
-rw-r--r--src/pkg/go/types/sizes.go162
-rw-r--r--src/pkg/go/types/stmt.go743
-rw-r--r--src/pkg/go/types/testdata/builtins.src401
-rw-r--r--src/pkg/go/types/testdata/const0.src215
-rw-r--r--src/pkg/go/types/testdata/conversions.src18
-rw-r--r--src/pkg/go/types/testdata/decls0.src187
-rw-r--r--src/pkg/go/types/testdata/decls1.src132
-rw-r--r--src/pkg/go/types/testdata/decls2a.src67
-rw-r--r--src/pkg/go/types/testdata/decls2b.src28
-rw-r--r--src/pkg/go/types/testdata/decls3.src253
-rw-r--r--src/pkg/go/types/testdata/exports.go89
-rw-r--r--src/pkg/go/types/testdata/expr0.src161
-rw-r--r--src/pkg/go/types/testdata/expr1.src7
-rw-r--r--src/pkg/go/types/testdata/expr2.src23
-rw-r--r--src/pkg/go/types/testdata/expr3.src463
-rw-r--r--src/pkg/go/types/testdata/stmt0.src288
-rw-r--r--src/pkg/go/types/types.go236
-rw-r--r--src/pkg/go/types/types_test.go171
-rw-r--r--src/pkg/go/types/universe.go146
101 files changed, 13855 insertions, 498 deletions
diff --git a/src/pkg/go/ast/ast.go b/src/pkg/go/ast/ast.go
index d2e75dc1c..bf533d1d2 100644
--- a/src/pkg/go/ast/ast.go
+++ b/src/pkg/go/ast/ast.go
@@ -407,6 +407,7 @@ type (
// A ChanType node represents a channel type.
ChanType struct {
Begin token.Pos // position of "chan" keyword or "<-" (whichever comes first)
+ Arrow token.Pos // position of "<-" (noPos if there is no "<-")
Dir ChanDir // channel direction
Value Expr // value type
}
@@ -554,7 +555,7 @@ type (
// A DeclStmt node represents a declaration in a statement list.
DeclStmt struct {
- Decl Decl
+ Decl Decl // *GenDecl with CONST, TYPE, or VAR token
}
// An EmptyStmt node represents an empty statement.
diff --git a/src/pkg/go/ast/commentmap.go b/src/pkg/go/ast/commentmap.go
new file mode 100644
index 000000000..252d460af
--- /dev/null
+++ b/src/pkg/go/ast/commentmap.go
@@ -0,0 +1,332 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+ "sort"
+)
+
+type byPos []*CommentGroup
+
+func (a byPos) Len() int { return len(a) }
+func (a byPos) Less(i, j int) bool { return a[i].Pos() < a[j].Pos() }
+func (a byPos) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// sortComments sorts the list of comment groups in source order.
+//
+func sortComments(list []*CommentGroup) {
+ // TODO(gri): Does it make sense to check for sorted-ness
+ // first (because we know that sorted-ness is
+ // very likely)?
+ if orderedList := byPos(list); !sort.IsSorted(orderedList) {
+ sort.Sort(orderedList)
+ }
+}
+
+// A CommentMap maps an AST node to a list of comment groups
+// associated with it. See NewCommentMap for a description of
+// the association.
+//
+type CommentMap map[Node][]*CommentGroup
+
+func (cmap CommentMap) addComment(n Node, c *CommentGroup) {
+ list := cmap[n]
+ if len(list) == 0 {
+ list = []*CommentGroup{c}
+ } else {
+ list = append(list, c)
+ }
+ cmap[n] = list
+}
+
+type byInterval []Node
+
+func (a byInterval) Len() int { return len(a) }
+func (a byInterval) Less(i, j int) bool {
+ pi, pj := a[i].Pos(), a[j].Pos()
+ return pi < pj || pi == pj && a[i].End() > a[j].End()
+}
+func (a byInterval) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// nodeList returns the list of nodes of the AST n in source order.
+//
+func nodeList(n Node) []Node {
+ var list []Node
+ Inspect(n, func(n Node) bool {
+ // don't collect comments
+ switch n.(type) {
+ case nil, *CommentGroup, *Comment:
+ return false
+ }
+ list = append(list, n)
+ return true
+ })
+ // Note: The current implementation assumes that Inspect traverses the
+ // AST in depth-first and thus _source_ order. If AST traversal
+ // does not follow source order, the sorting call below will be
+ // required.
+ // sort.Sort(byInterval(list))
+ return list
+}
+
+// A commentListReader helps iterating through a list of comment groups.
+//
+type commentListReader struct {
+ fset *token.FileSet
+ list []*CommentGroup
+ index int
+ comment *CommentGroup // comment group at current index
+ pos, end token.Position // source interval of comment group at current index
+}
+
+func (r *commentListReader) eol() bool {
+ return r.index >= len(r.list)
+}
+
+func (r *commentListReader) next() {
+ if !r.eol() {
+ r.comment = r.list[r.index]
+ r.pos = r.fset.Position(r.comment.Pos())
+ r.end = r.fset.Position(r.comment.End())
+ r.index++
+ }
+}
+
+// A nodeStack keeps track of nested nodes.
+// A node lower on the stack lexically contains the nodes higher on the stack.
+//
+type nodeStack []Node
+
+// push pops all nodes that appear lexically before n
+// and then pushes n on the stack.
+//
+func (s *nodeStack) push(n Node) {
+ s.pop(n.Pos())
+ *s = append((*s), n)
+}
+
+// pop pops all nodes that appear lexically before pos
+// (i.e., whose lexical extent has ended before or at pos).
+// It returns the last node popped.
+//
+func (s *nodeStack) pop(pos token.Pos) (top Node) {
+ i := len(*s)
+ for i > 0 && (*s)[i-1].End() <= pos {
+ top = (*s)[i-1]
+ i--
+ }
+ *s = (*s)[0:i]
+ return top
+}
+
+// NewCommentMap creates a new comment map by associating comment groups
+// of the comments list with the nodes of the AST specified by node.
+//
+// A comment group g is associated with a node n if:
+//
+// - g starts on the same line as n ends
+// - g starts on the line immediately following n, and there is
+// at least one empty line after g and before the next node
+// - g starts before n and is not associated to the node before n
+// via the previous rules
+//
+// NewCommentMap tries to associate a comment group to the "largest"
+// node possible: For instance, if the comment is a line comment
+// trailing an assignment, the comment is associated with the entire
+// assignment rather than just the last operand in the assignment.
+//
+func NewCommentMap(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap {
+ if len(comments) == 0 {
+ return nil // no comments to map
+ }
+
+ cmap := make(CommentMap)
+
+ // set up comment reader r
+ tmp := make([]*CommentGroup, len(comments))
+ copy(tmp, comments) // don't change incomming comments
+ sortComments(tmp)
+ r := commentListReader{fset: fset, list: tmp} // !r.eol() because len(comments) > 0
+ r.next()
+
+ // create node list in lexical order
+ nodes := nodeList(node)
+ nodes = append(nodes, nil) // append sentinel
+
+ // set up iteration variables
+ var (
+ p Node // previous node
+ pend token.Position // end of p
+ pg Node // previous node group (enclosing nodes of "importance")
+ pgend token.Position // end of pg
+ stack nodeStack // stack of node groups
+ )
+
+ for _, q := range nodes {
+ var qpos token.Position
+ if q != nil {
+ qpos = fset.Position(q.Pos()) // current node position
+ } else {
+ // set fake sentinel position to infinity so that
+ // all comments get processed before the sentinel
+ const infinity = 1 << 30
+ qpos.Offset = infinity
+ qpos.Line = infinity
+ }
+
+ // process comments before current node
+ for r.end.Offset <= qpos.Offset {
+ // determine recent node group
+ if top := stack.pop(r.comment.Pos()); top != nil {
+ pg = top
+ pgend = fset.Position(pg.End())
+ }
+ // Try to associate a comment first with a node group
+ // (i.e., a node of "importance" such as a declaration);
+ // if that fails, try to associate it with the most recent
+ // node.
+ // TODO(gri) try to simplify the logic below
+ var assoc Node
+ switch {
+ case pg != nil &&
+ (pgend.Line == r.pos.Line ||
+ pgend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line):
+ // 1) comment starts on same line as previous node group ends, or
+ // 2) comment starts on the line immediately after the
+ // previous node group and there is an empty line before
+ // the current node
+ // => associate comment with previous node group
+ assoc = pg
+ case p != nil &&
+ (pend.Line == r.pos.Line ||
+ pend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line ||
+ q == nil):
+ // same rules apply as above for p rather than pg,
+ // but also associate with p if we are at the end (q == nil)
+ assoc = p
+ default:
+ // otherwise, associate comment with current node
+ if q == nil {
+ // we can only reach here if there was no p
+ // which would imply that there were no nodes
+ panic("internal error: no comments should be associated with sentinel")
+ }
+ assoc = q
+ }
+ cmap.addComment(assoc, r.comment)
+ if r.eol() {
+ return cmap
+ }
+ r.next()
+ }
+
+ // update previous node
+ p = q
+ pend = fset.Position(p.End())
+
+ // update previous node group if we see an "important" node
+ switch q.(type) {
+ case *File, *Field, Decl, Spec, Stmt:
+ stack.push(q)
+ }
+ }
+
+ return cmap
+}
+
+// Update replaces an old node in the comment map with the new node
+// and returns the new node. Comments that were associated with the
+// old node are associated with the new node.
+//
+func (cmap CommentMap) Update(old, new Node) Node {
+ if list := cmap[old]; len(list) > 0 {
+ delete(cmap, old)
+ cmap[new] = append(cmap[new], list...)
+ }
+ return new
+}
+
+// Filter returns a new comment map consisting of only those
+// entries of cmap for which a corresponding node exists in
+// the AST specified by node.
+//
+func (cmap CommentMap) Filter(node Node) CommentMap {
+ umap := make(CommentMap)
+ Inspect(node, func(n Node) bool {
+ if g := cmap[n]; len(g) > 0 {
+ umap[n] = g
+ }
+ return true
+ })
+ return umap
+}
+
+// Comments returns the list of comment groups in the comment map.
+// The result is sorted is source order.
+//
+func (cmap CommentMap) Comments() []*CommentGroup {
+ list := make([]*CommentGroup, 0, len(cmap))
+ for _, e := range cmap {
+ list = append(list, e...)
+ }
+ sortComments(list)
+ return list
+}
+
+func summary(list []*CommentGroup) string {
+ const maxLen = 40
+ var buf bytes.Buffer
+
+ // collect comments text
+loop:
+ for _, group := range list {
+ // Note: CommentGroup.Text() does too much work for what we
+ // need and would only replace this innermost loop.
+ // Just do it explicitly.
+ for _, comment := range group.List {
+ if buf.Len() >= maxLen {
+ break loop
+ }
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ // truncate if too long
+ if buf.Len() > maxLen {
+ buf.Truncate(maxLen - 3)
+ buf.WriteString("...")
+ }
+
+ // replace any invisibles with blanks
+ bytes := buf.Bytes()
+ for i, b := range bytes {
+ switch b {
+ case '\t', '\n', '\r':
+ bytes[i] = ' '
+ }
+ }
+
+ return string(bytes)
+}
+
+func (cmap CommentMap) String() string {
+ var buf bytes.Buffer
+ fmt.Fprintln(&buf, "CommentMap {")
+ for node, comment := range cmap {
+ // print name of identifiers; print node type for other nodes
+ var s string
+ if ident, ok := node.(*Ident); ok {
+ s = ident.Name
+ } else {
+ s = fmt.Sprintf("%T", node)
+ }
+ fmt.Fprintf(&buf, "\t%p %20s: %s\n", node, s, summary(comment))
+ }
+ fmt.Fprintln(&buf, "}")
+ return buf.String()
+}
diff --git a/src/pkg/go/ast/commentmap_test.go b/src/pkg/go/ast/commentmap_test.go
new file mode 100644
index 000000000..e372eab74
--- /dev/null
+++ b/src/pkg/go/ast/commentmap_test.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// To avoid a cyclic dependency with go/parser, this file is in a separate package.
+
+package ast_test
+
+import (
+ "bytes"
+ "fmt"
+ . "go/ast"
+ "go/parser"
+ "go/token"
+ "sort"
+ "testing"
+)
+
+const src = `
+// the very first comment
+
+// package p
+package p /* the name is p */
+
+// imports
+import (
+ "bytes" // bytes
+ "fmt" // fmt
+ "go/ast"
+ "go/parser"
+)
+
+// T
+type T struct {
+ a, b, c int // associated with a, b, c
+ // associated with x, y
+ x, y float64 // float values
+ z complex128 // complex value
+}
+// also associated with T
+
+// x
+var x = 0 // x = 0
+// also associated with x
+
+// f1
+func f1() {
+ /* associated with s1 */
+ s1()
+ // also associated with s1
+
+ // associated with s2
+
+ // also associated with s2
+ s2() // line comment for s2
+}
+// associated with f1
+// also associated with f1
+
+// associated with f2
+
+// f2
+func f2() {
+}
+
+func f3() {
+ i := 1 /* 1 */ + 2 // addition
+ _ = i
+}
+
+// the very last comment
+`
+
+// res maps a key of the form "line number: node type"
+// to the associated comments' text.
+//
+var res = map[string]string{
+ " 5: *ast.File": "the very first comment\npackage p\n",
+ " 5: *ast.Ident": " the name is p\n",
+ " 8: *ast.GenDecl": "imports\n",
+ " 9: *ast.ImportSpec": "bytes\n",
+ "10: *ast.ImportSpec": "fmt\n",
+ "16: *ast.GenDecl": "T\nalso associated with T\n",
+ "17: *ast.Field": "associated with a, b, c\n",
+ "19: *ast.Field": "associated with x, y\nfloat values\n",
+ "20: *ast.Field": "complex value\n",
+ "25: *ast.GenDecl": "x\nx = 0\nalso associated with x\n",
+ "29: *ast.FuncDecl": "f1\nassociated with f1\nalso associated with f1\n",
+ "31: *ast.ExprStmt": " associated with s1\nalso associated with s1\n",
+ "37: *ast.ExprStmt": "associated with s2\nalso associated with s2\nline comment for s2\n",
+ "45: *ast.FuncDecl": "associated with f2\nf2\n",
+ "49: *ast.AssignStmt": "addition\n",
+ "49: *ast.BasicLit": " 1\n",
+ "50: *ast.Ident": "the very last comment\n",
+}
+
+func ctext(list []*CommentGroup) string {
+ var buf bytes.Buffer
+ for _, g := range list {
+ buf.WriteString(g.Text())
+ }
+ return buf.String()
+}
+
+func TestCommentMap(t *testing.T) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cmap := NewCommentMap(fset, f, f.Comments)
+
+ // very correct association of comments
+ for n, list := range cmap {
+ key := fmt.Sprintf("%2d: %T", fset.Position(n.Pos()).Line, n)
+ got := ctext(list)
+ want := res[key]
+ if got != want {
+ t.Errorf("%s: got %q; want %q", key, got, want)
+ }
+ }
+
+ // verify that no comments got lost
+ if n := len(cmap.Comments()); n != len(f.Comments) {
+ t.Errorf("got %d comment groups in map; want %d", n, len(f.Comments))
+ }
+
+ // support code to update test:
+ // set genMap to true to generate res map
+ const genMap = false
+ if genMap {
+ out := make([]string, 0, len(cmap))
+ for n, list := range cmap {
+ out = append(out, fmt.Sprintf("\t\"%2d: %T\":\t%q,", fset.Position(n.Pos()).Line, n, ctext(list)))
+ }
+ sort.Strings(out)
+ for _, s := range out {
+ fmt.Println(s)
+ }
+ }
+}
+
+// TODO(gri): add tests for Filter.
diff --git a/src/pkg/go/ast/filter.go b/src/pkg/go/ast/filter.go
index 4a89b8909..4db5814cb 100644
--- a/src/pkg/go/ast/filter.go
+++ b/src/pkg/go/ast/filter.go
@@ -414,7 +414,7 @@ func MergePackageFiles(pkg *Package, mode MergeMode) *File {
if path := imp.Path.Value; !seen[path] {
// TODO: consider handling cases where:
// - 2 imports exist with the same import path but
- // have different local names (one should probably
+ // have different local names (one should probably
// keep both of them)
// - 2 imports exist but only one has a comment
// - 2 imports exist and they both have (possibly
diff --git a/src/pkg/go/ast/import.go b/src/pkg/go/ast/import.go
index 2d4f69aae..a68a4840f 100644
--- a/src/pkg/go/ast/import.go
+++ b/src/pkg/go/ast/import.go
@@ -20,7 +20,7 @@ func SortImports(fset *token.FileSet, f *File) {
break
}
- if d.Lparen == token.NoPos {
+ if !d.Lparen.IsValid() {
// Not a block: sorted by default.
continue
}
diff --git a/src/pkg/go/ast/print.go b/src/pkg/go/ast/print.go
index 02cf9e022..4a1ce480f 100644
--- a/src/pkg/go/ast/print.go
+++ b/src/pkg/go/ast/print.go
@@ -34,7 +34,8 @@ func NotNilFilter(_ string, v reflect.Value) bool {
//
// A non-nil FieldFilter f may be provided to control the output:
// struct fields for which f(fieldname, fieldvalue) is true are
-// are printed; all others are filtered from the output.
+// are printed; all others are filtered from the output. Unexported
+// struct fields are never printed.
//
func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (err error) {
// setup printer
@@ -107,8 +108,10 @@ func (p *printer) Write(data []byte) (n int, err error) {
}
p.last = b
}
- m, err = p.output.Write(data[n:])
- n += m
+ if len(data) > n {
+ m, err = p.output.Write(data[n:])
+ n += m
+ }
return
}
@@ -145,15 +148,18 @@ func (p *printer) print(x reflect.Value) {
p.print(x.Elem())
case reflect.Map:
- p.printf("%s (len = %d) {\n", x.Type(), x.Len())
- p.indent++
- for _, key := range x.MapKeys() {
- p.print(key)
- p.printf(": ")
- p.print(x.MapIndex(key))
+ p.printf("%s (len = %d) {", x.Type(), x.Len())
+ if x.Len() > 0 {
+ p.indent++
p.printf("\n")
+ for _, key := range x.MapKeys() {
+ p.print(key)
+ p.printf(": ")
+ p.print(x.MapIndex(key))
+ p.printf("\n")
+ }
+ p.indent--
}
- p.indent--
p.printf("}")
case reflect.Ptr:
@@ -169,32 +175,57 @@ func (p *printer) print(x reflect.Value) {
p.print(x.Elem())
}
+ case reflect.Array:
+ p.printf("%s {", x.Type())
+ if x.Len() > 0 {
+ p.indent++
+ p.printf("\n")
+ for i, n := 0, x.Len(); i < n; i++ {
+ p.printf("%d: ", i)
+ p.print(x.Index(i))
+ p.printf("\n")
+ }
+ p.indent--
+ }
+ p.printf("}")
+
case reflect.Slice:
if s, ok := x.Interface().([]byte); ok {
p.printf("%#q", s)
return
}
- p.printf("%s (len = %d) {\n", x.Type(), x.Len())
- p.indent++
- for i, n := 0, x.Len(); i < n; i++ {
- p.printf("%d: ", i)
- p.print(x.Index(i))
+ p.printf("%s (len = %d) {", x.Type(), x.Len())
+ if x.Len() > 0 {
+ p.indent++
p.printf("\n")
+ for i, n := 0, x.Len(); i < n; i++ {
+ p.printf("%d: ", i)
+ p.print(x.Index(i))
+ p.printf("\n")
+ }
+ p.indent--
}
- p.indent--
p.printf("}")
case reflect.Struct:
- p.printf("%s {\n", x.Type())
- p.indent++
t := x.Type()
+ p.printf("%s {", t)
+ p.indent++
+ first := true
for i, n := 0, t.NumField(); i < n; i++ {
- name := t.Field(i).Name
- value := x.Field(i)
- if p.filter == nil || p.filter(name, value) {
- p.printf("%s: ", name)
- p.print(value)
- p.printf("\n")
+ // exclude non-exported fields because their
+ // values cannot be accessed via reflection
+ if name := t.Field(i).Name; IsExported(name) {
+ value := x.Field(i)
+ if p.filter == nil || p.filter(name, value) {
+ if first {
+ p.printf("\n")
+ first = false
+ }
+ p.printf("%s: ", name)
+ p.print(value)
+ p.printf("\n")
+ }
}
}
p.indent--
diff --git a/src/pkg/go/ast/print_test.go b/src/pkg/go/ast/print_test.go
index 71c028e75..210f16430 100644
--- a/src/pkg/go/ast/print_test.go
+++ b/src/pkg/go/ast/print_test.go
@@ -23,6 +23,7 @@ var tests = []struct {
{"foobar", "0 \"foobar\""},
// maps
+ {map[Expr]string{}, `0 map[ast.Expr]string (len = 0) {}`},
{map[string]int{"a": 1},
`0 map[string]int (len = 1) {
1 . "a": 1
@@ -31,7 +32,21 @@ var tests = []struct {
// pointers
{new(int), "0 *0"},
+ // arrays
+ {[0]int{}, `0 [0]int {}`},
+ {[3]int{1, 2, 3},
+ `0 [3]int {
+ 1 . 0: 1
+ 2 . 1: 2
+ 3 . 2: 3
+ 4 }`},
+ {[...]int{42},
+ `0 [1]int {
+ 1 . 0: 42
+ 2 }`},
+
// slices
+ {[]int{}, `0 []int (len = 0) {}`},
{[]int{1, 2, 3},
`0 []int (len = 3) {
1 . 0: 1
@@ -40,6 +55,12 @@ var tests = []struct {
4 }`},
// structs
+ {struct{}{}, `0 struct {} {}`},
+ {struct{ x int }{007}, `0 struct { x int } {}`},
+ {struct{ X, y int }{42, 991},
+ `0 struct { X int; y int } {
+ 1 . X: 42
+ 2 }`},
{struct{ X, Y int }{42, 991},
`0 struct { X int; Y int } {
1 . X: 42
diff --git a/src/pkg/go/ast/resolve.go b/src/pkg/go/ast/resolve.go
index 908e61c5d..0406bfc58 100644
--- a/src/pkg/go/ast/resolve.go
+++ b/src/pkg/go/ast/resolve.go
@@ -57,7 +57,7 @@ func resolve(scope *Scope, ident *Ident) bool {
// An Importer must determine the canonical import path and
// check the map to see if it is already present in the imports map.
// If so, the Importer can return the map entry. Otherwise, the
-// Importer should load the package data for the given path into
+// Importer should load the package data for the given path into
// a new *Object (pkg), record pkg in the imports map, and then
// return pkg.
type Importer func(imports map[string]*Object, path string) (pkg *Object, err error)
@@ -136,7 +136,7 @@ func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer,
for _, obj := range pkg.Data.(*Scope).Objects {
p.declare(fileScope, pkgScope, obj)
}
- } else {
+ } else if name != "_" {
// declare imported package object in file scope
// (do not re-use pkg in the file scope but create
// a new object instead; the Decl field is different
diff --git a/src/pkg/go/ast/scope.go b/src/pkg/go/ast/scope.go
index 11e6b13f1..8df5b2c65 100644
--- a/src/pkg/go/ast/scope.go
+++ b/src/pkg/go/ast/scope.go
@@ -64,18 +64,16 @@ func (s *Scope) String() string {
// ----------------------------------------------------------------------------
// Objects
-// TODO(gri) Consider replacing the Object struct with an interface
-// and a corresponding set of object implementations.
-
// An Object describes a named language entity such as a package,
// constant, type, variable, function (incl. methods), or label.
//
// The Data fields contains object-specific data:
//
-// Kind Data type Data value
-// Pkg *Scope package scope
-// Con int iota for the respective declaration
-// Con != nil constant value
+// Kind Data type Data value
+// Pkg *types.Package package scope
+// Con int iota for the respective declaration
+// Con != nil constant value
+// Typ *Scope (used as method scope during type checking - transient)
//
type Object struct {
Kind ObjKind
@@ -137,7 +135,7 @@ func (obj *Object) Pos() token.Pos {
return token.NoPos
}
-// ObKind describes what an object represents.
+// ObjKind describes what an object represents.
type ObjKind int
// The list of possible Object kinds.
diff --git a/src/pkg/go/ast/walk.go b/src/pkg/go/ast/walk.go
index 181cfd149..fef2503c3 100644
--- a/src/pkg/go/ast/walk.go
+++ b/src/pkg/go/ast/walk.go
@@ -158,7 +158,9 @@ func Walk(v Visitor, node Node) {
Walk(v, n.Fields)
case *FuncType:
- Walk(v, n.Params)
+ if n.Params != nil {
+ Walk(v, n.Params)
+ }
if n.Results != nil {
Walk(v, n.Results)
}
@@ -344,9 +346,6 @@ func Walk(v Visitor, node Node) {
}
Walk(v, n.Name)
walkDeclList(v, n.Decls)
- for _, g := range n.Comments {
- Walk(v, g)
- }
// don't walk n.Comments - they have been
// visited already through the individual
// nodes
diff --git a/src/pkg/go/build/build.go b/src/pkg/go/build/build.go
index 7a81d5030..16c3da458 100644
--- a/src/pkg/go/build/build.go
+++ b/src/pkg/go/build/build.go
@@ -33,6 +33,7 @@ type Context struct {
GOPATH string // Go path
CgoEnabled bool // whether cgo can be used
BuildTags []string // additional tags to recognize in +build lines
+ InstallTag string // package install directory suffix
UseAllFiles bool // use files regardless of +build lines, file names
Compiler string // compiler to assume when computing target paths
@@ -116,12 +117,27 @@ func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
return f(root, dir)
}
- if p, err := filepath.EvalSymlinks(root); err == nil {
- root = p
+ // Try using paths we received.
+ if rel, ok = hasSubdir(root, dir); ok {
+ return
}
- if p, err := filepath.EvalSymlinks(dir); err == nil {
- dir = p
+
+ // Try expanding symlinks and comparing
+ // expanded against unexpanded and
+ // expanded against expanded.
+ rootSym, _ := filepath.EvalSymlinks(root)
+ dirSym, _ := filepath.EvalSymlinks(dir)
+
+ if rel, ok = hasSubdir(rootSym, dir); ok {
+ return
+ }
+ if rel, ok = hasSubdir(root, dirSym); ok {
+ return
}
+ return hasSubdir(rootSym, dirSym)
+}
+
+func hasSubdir(root, dir string) (rel string, ok bool) {
const sep = string(filepath.Separator)
root = filepath.Clean(root)
if !strings.HasSuffix(root, sep) {
@@ -180,6 +196,21 @@ func (ctxt *Context) gopath() []string {
// Do not get confused by this common mistake.
continue
}
+ if strings.Contains(p, "~") && runtime.GOOS != "windows" {
+ // Path segments containing ~ on Unix are almost always
+ // users who have incorrectly quoted ~ while setting GOPATH,
+ // preventing it from expanding to $HOME.
+ // The situation is made more confusing by the fact that
+ // bash allows quoted ~ in $PATH (most shells do not).
+ // Do not get confused by this, and do not try to use the path.
+ // It does not exist, and printing errors about it confuses
+ // those users even more, because they think "sure ~ exists!".
+ // The go command diagnoses this situation and prints a
+ // useful error.
+ // On Windows, ~ is used in short names, such as c:\progra~1
+ // for c:\program files.
+ continue
+ }
all = append(all, p)
}
return all
@@ -213,10 +244,16 @@ var Default Context = defaultContext()
var cgoEnabled = map[string]bool{
"darwin/386": true,
"darwin/amd64": true,
- "linux/386": true,
- "linux/amd64": true,
"freebsd/386": true,
"freebsd/amd64": true,
+ "linux/386": true,
+ "linux/amd64": true,
+ "linux/arm": true,
+ "netbsd/386": true,
+ "netbsd/amd64": true,
+ "netbsd/arm": true,
+ "openbsd/386": true,
+ "openbsd/amd64": true,
"windows/386": true,
"windows/amd64": true,
}
@@ -278,12 +315,15 @@ type Package struct {
PkgObj string // installed .a file
// Source files
- GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
- CgoFiles []string // .go source files that import "C"
- CFiles []string // .c source files
- HFiles []string // .h source files
- SFiles []string // .s source files
- SysoFiles []string // .syso system object files to add to archive
+ GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
+ CgoFiles []string // .go source files that import "C"
+ IgnoredGoFiles []string // .go source files ignored for this build
+ CFiles []string // .c source files
+ HFiles []string // .h source files
+ SFiles []string // .s source files
+ SysoFiles []string // .syso system object files to add to archive
+ SwigFiles []string // .swig files
+ SwigCXXFiles []string // .swigcxx files
// Cgo directives
CgoPkgConfig []string // Cgo pkg-config directives
@@ -346,6 +386,9 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa
p := &Package{
ImportPath: path,
}
+ if path == "" {
+ return p, fmt.Errorf("import %q: invalid import path", path)
+ }
var pkga string
var pkgerr error
@@ -354,7 +397,11 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa
dir, elem := pathpkg.Split(p.ImportPath)
pkga = "pkg/gccgo/" + dir + "lib" + elem + ".a"
case "gc":
- pkga = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + "/" + p.ImportPath + ".a"
+ tag := ""
+ if ctxt.InstallTag != "" {
+ tag = "_" + ctxt.InstallTag
+ }
+ pkga = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + tag + "/" + p.ImportPath + ".a"
default:
// Save error for end of function.
pkgerr = fmt.Errorf("import %q: unknown compiler %q", path, ctxt.Compiler)
@@ -410,6 +457,13 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa
if strings.HasPrefix(path, "/") {
return p, fmt.Errorf("import %q: cannot import absolute path", path)
}
+
+ // tried records the location of unsucsessful package lookups
+ var tried struct {
+ goroot string
+ gopath []string
+ }
+
// Determine directory from import path.
if ctxt.GOROOT != "" {
dir := ctxt.joinPath(ctxt.GOROOT, "src", "pkg", path)
@@ -421,6 +475,7 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa
p.Root = ctxt.GOROOT
goto Found
}
+ tried.goroot = dir
}
for _, root := range ctxt.gopath() {
dir := ctxt.joinPath(root, "src", path)
@@ -431,8 +486,28 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa
p.Root = root
goto Found
}
+ tried.gopath = append(tried.gopath, dir)
+ }
+
+ // package was not found
+ var paths []string
+ if tried.goroot != "" {
+ paths = append(paths, fmt.Sprintf("\t%s (from $GOROOT)", tried.goroot))
+ } else {
+ paths = append(paths, "\t($GOROOT not set)")
}
- return p, fmt.Errorf("import %q: cannot find package", path)
+ var i int
+ var format = "\t%s (from $GOPATH)"
+ for ; i < len(tried.gopath); i++ {
+ if i > 0 {
+ format = "\t%s"
+ }
+ paths = append(paths, fmt.Sprintf(format, tried.gopath[i]))
+ }
+ if i == 0 {
+ paths = append(paths, "\t($GOPATH not set)")
+ }
+ return p, fmt.Errorf("cannot find package %q in any of:\n%s", path, strings.Join(paths, "\n"))
}
Found:
@@ -476,17 +551,22 @@ Found:
strings.HasPrefix(name, ".") {
continue
}
- if !ctxt.UseAllFiles && !ctxt.goodOSArchFile(name) {
- continue
- }
i := strings.LastIndex(name, ".")
if i < 0 {
i = len(name)
}
ext := name[i:]
+
+ if !ctxt.UseAllFiles && !ctxt.goodOSArchFile(name) {
+ if ext == ".go" {
+ p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+ }
+ continue
+ }
+
switch ext {
- case ".go", ".c", ".s", ".h", ".S":
+ case ".go", ".c", ".s", ".h", ".S", ".swig", ".swigcxx":
// tentatively okay - read to make sure
case ".syso":
// binary objects to add to package archive
@@ -504,7 +584,13 @@ Found:
if err != nil {
return p, err
}
- data, err := ioutil.ReadAll(f)
+
+ var data []byte
+ if strings.HasSuffix(filename, ".go") {
+ data, err = readImports(f, false)
+ } else {
+ data, err = readComments(f)
+ }
f.Close()
if err != nil {
return p, fmt.Errorf("read %s: %v", filename, err)
@@ -512,6 +598,9 @@ Found:
// Look for +build comments to accept or reject the file.
if !ctxt.UseAllFiles && !ctxt.shouldBuild(data) {
+ if ext == ".go" {
+ p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+ }
continue
}
@@ -529,6 +618,12 @@ Found:
case ".S":
Sfiles = append(Sfiles, name)
continue
+ case ".swig":
+ p.SwigFiles = append(p.SwigFiles, name)
+ continue
+ case ".swigcxx":
+ p.SwigCXXFiles = append(p.SwigCXXFiles, name)
+ continue
}
pf, err := parser.ParseFile(fset, filename, data, parser.ImportsOnly|parser.ParseComments)
@@ -536,8 +631,9 @@ Found:
return p, err
}
- pkg := string(pf.Name.Name)
+ pkg := pf.Name.Name
if pkg == "documentation" {
+ p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
continue
}
@@ -570,7 +666,7 @@ Found:
if !ok {
continue
}
- quoted := string(spec.Path.Value)
+ quoted := spec.Path.Value
path, err := strconv.Unquote(quoted)
if err != nil {
log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
@@ -678,7 +774,7 @@ func (ctxt *Context) shouldBuild(content []byte) bool {
}
line = bytes.TrimSpace(line)
if len(line) == 0 { // Blank line
- end = cap(content) - cap(line) // &line[0] - &content[0]
+ end = len(content) - len(p)
continue
}
if !bytes.HasPrefix(line, slashslash) { // Not comment line
@@ -872,6 +968,8 @@ func splitQuoted(s string) (r []string, err error) {
// $GOARCH
// cgo (if cgo is enabled)
// !cgo (if cgo is disabled)
+// ctxt.Compiler
+// !ctxt.Compiler
// tag (if tag is listed in ctxt.BuildTags)
// !tag (if tag is not listed in ctxt.BuildTags)
// a comma-separated list of any of these
@@ -903,7 +1001,7 @@ func (ctxt *Context) match(name string) bool {
if ctxt.CgoEnabled && name == "cgo" {
return true
}
- if name == ctxt.GOOS || name == ctxt.GOARCH {
+ if name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler {
return true
}
diff --git a/src/pkg/go/build/build_test.go b/src/pkg/go/build/build_test.go
index 560ebad5c..d8cf98840 100644
--- a/src/pkg/go/build/build_test.go
+++ b/src/pkg/go/build/build_test.go
@@ -61,6 +61,19 @@ func TestDotSlashImport(t *testing.T) {
}
}
+func TestEmptyImport(t *testing.T) {
+ p, err := Import("", Default.GOROOT, FindOnly)
+ if err == nil {
+ t.Fatal(`Import("") returned nil error.`)
+ }
+ if p == nil {
+ t.Fatal(`Import("") returned nil package.`)
+ }
+ if p.ImportPath != "" {
+ t.Fatalf("ImportPath=%q, want %q.", p.ImportPath, "")
+ }
+}
+
func TestLocalDirectory(t *testing.T) {
cwd, err := os.Getwd()
if err != nil {
@@ -75,3 +88,32 @@ func TestLocalDirectory(t *testing.T) {
t.Fatalf("ImportPath=%q, want %q", p.ImportPath, "go/build")
}
}
+
+func TestShouldBuild(t *testing.T) {
+ const file1 = "// +build tag1\n\n" +
+ "package main\n"
+
+ const file2 = "// +build cgo\n\n" +
+ "// This package implements parsing of tags like\n" +
+ "// +build tag1\n" +
+ "package build"
+
+ const file3 = "// Copyright The Go Authors.\n\n" +
+ "package build\n\n" +
+ "// shouldBuild checks tags given by lines of the form\n" +
+ "// +build tag\n" +
+ "func shouldBuild(content []byte)\n"
+
+ ctx := &Context{BuildTags: []string{"tag1"}}
+ if !ctx.shouldBuild([]byte(file1)) {
+ t.Errorf("should not build file1, expected the contrary")
+ }
+ if ctx.shouldBuild([]byte(file2)) {
+ t.Errorf("should build file2, expected the contrary")
+ }
+
+ ctx = &Context{BuildTags: nil}
+ if !ctx.shouldBuild([]byte(file3)) {
+ t.Errorf("should not build file3, expected the contrary")
+ }
+}
diff --git a/src/pkg/go/build/deps_test.go b/src/pkg/go/build/deps_test.go
index 4e9f32a03..9a715ba60 100644
--- a/src/pkg/go/build/deps_test.go
+++ b/src/pkg/go/build/deps_test.go
@@ -5,10 +5,9 @@
// This file exercises the import parser but also checks that
// some low-level packages do not have new dependencies added.
-package build_test
+package build
import (
- "go/build"
"sort"
"testing"
)
@@ -24,13 +23,13 @@ import (
// be used as dependencies by other rules.
//
// DO NOT CHANGE THIS DATA TO FIX BUILDS.
-//
+//
var pkgDeps = map[string][]string{
// L0 is the lowest level, core, nearly unavoidable packages.
"errors": {},
"io": {"errors", "sync"},
"runtime": {"unsafe"},
- "sync": {"sync/atomic"},
+ "sync": {"sync/atomic", "unsafe"},
"sync/atomic": {"unsafe"},
"unsafe": {},
@@ -142,7 +141,7 @@ var pkgDeps = map[string][]string{
// Packages used by testing must be low-level (L2+fmt).
"regexp": {"L2", "regexp/syntax"},
"regexp/syntax": {"L2"},
- "runtime/debug": {"L2", "fmt", "io/ioutil", "os"},
+ "runtime/debug": {"L2", "fmt", "io/ioutil", "os", "time"},
"runtime/pprof": {"L2", "fmt", "text/tabwriter"},
"text/tabwriter": {"L2"},
@@ -177,7 +176,7 @@ var pkgDeps = map[string][]string{
},
// One of a kind.
- "archive/tar": {"L4", "OS"},
+ "archive/tar": {"L4", "OS", "syscall"},
"archive/zip": {"L4", "OS", "compress/flate"},
"compress/bzip2": {"L4"},
"compress/flate": {"L4"},
@@ -249,18 +248,23 @@ var pkgDeps = map[string][]string{
"net/mail": {"L4", "NET", "OS"},
"net/textproto": {"L4", "OS", "net"},
+ // Support libraries for crypto that aren't L2.
+ "CRYPTO-SUPPORT": {
+ "crypto/subtle",
+ },
+
// Core crypto.
"crypto/aes": {"L3"},
"crypto/des": {"L3"},
- "crypto/hmac": {"L3"},
+ "crypto/hmac": {"L3", "CRYPTO-SUPPORT"},
"crypto/md5": {"L3"},
"crypto/rc4": {"L3"},
"crypto/sha1": {"L3"},
"crypto/sha256": {"L3"},
"crypto/sha512": {"L3"},
- "crypto/subtle": {"L3"},
"CRYPTO": {
+ "CRYPTO-SUPPORT",
"crypto/aes",
"crypto/des",
"crypto/hmac",
@@ -269,7 +273,6 @@ var pkgDeps = map[string][]string{
"crypto/sha1",
"crypto/sha256",
"crypto/sha512",
- "crypto/subtle",
},
// Random byte, number generation.
@@ -300,7 +303,10 @@ var pkgDeps = map[string][]string{
"L4", "CRYPTO-MATH", "CGO", "OS",
"crypto/x509", "encoding/pem", "net", "syscall",
},
- "crypto/x509": {"L4", "CRYPTO-MATH", "OS", "CGO", "crypto/x509/pkix", "encoding/pem", "syscall"},
+ "crypto/x509": {
+ "L4", "CRYPTO-MATH", "OS", "CGO",
+ "crypto/x509/pkix", "encoding/pem", "encoding/hex", "net", "syscall",
+ },
"crypto/x509/pkix": {"L4", "CRYPTO-MATH"},
// Simple net+crypto-aware packages.
@@ -375,7 +381,7 @@ func TestDependencies(t *testing.T) {
}
sort.Strings(all)
- ctxt := build.Default
+ ctxt := Default
test := func(mustImport bool) {
for _, pkg := range all {
if isMacro(pkg) {
diff --git a/src/pkg/go/build/doc.go b/src/pkg/go/build/doc.go
index 67c26ac7f..c562d05d0 100644
--- a/src/pkg/go/build/doc.go
+++ b/src/pkg/go/build/doc.go
@@ -23,12 +23,12 @@
// As in the Go tree, each target operating system and
// architecture pair has its own subdirectory of pkg
// (pkg/GOOS_GOARCH).
-//
+//
// If DIR is a directory listed in the Go path, a package with
// source in DIR/src/foo/bar can be imported as "foo/bar" and
// has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a"
// (or, for gccgo, "DIR/pkg/gccgo/foo/libbar.a").
-//
+//
// The bin/ directory holds compiled commands.
// Each command is named for its source directory, but only
// using the final element, not the entire path. That is, the
@@ -36,11 +36,11 @@
// DIR/bin/quux, not DIR/bin/foo/quux. The foo/ is stripped
// so that you can add DIR/bin to your PATH to get at the
// installed commands.
-//
+//
// Here's an example directory layout:
-//
+//
// GOPATH=/home/user/gocode
-//
+//
// /home/user/gocode/
// src/
// foo/
@@ -60,9 +60,12 @@
// A build constraint is a line comment beginning with the directive +build
// that lists the conditions under which a file should be included in the package.
// Constraints may appear in any kind of source file (not just Go), but
-// they must be appear near the top of the file, preceded
+// they must appear near the top of the file, preceded
// only by blank lines and other line comments.
//
+// To distinguish build constraints from package documentation, a series of
+// build constraints must be followed by a blank line.
+//
// A build constraint is evaluated as the OR of space-separated options;
// each option evaluates as the AND of its comma-separated terms;
// and each term is an alphanumeric word or, preceded by !, its negation.
@@ -74,10 +77,21 @@
//
// (linux AND 386) OR (darwin AND (NOT cgo))
//
+// A file may have multiple build constraints. The overall constraint is the AND
+// of the individual constraints. That is, the build constraints:
+//
+// // +build linux darwin
+// // +build 386
+//
+// corresponds to the boolean formula:
+//
+// (linux OR darwin) AND 386
+//
// During a particular build, the following words are satisfied:
//
// - the target operating system, as spelled by runtime.GOOS
// - the target architecture, as spelled by runtime.GOARCH
+// - the compiler being used, currently either "gc" or "gccgo"
// - "cgo", if ctxt.CgoEnabled is true
// - any additional words listed in ctxt.BuildTags
//
@@ -100,7 +114,7 @@
// default functionality for other systems, which in this case would
// carry the constraint:
//
-// // +build !linux !darwin !cgo
+// // +build !linux,!darwin !cgo
//
// Naming a file dns_windows.go will cause it to be included only when
// building the package for Windows; similarly, math_386.s will be included
diff --git a/src/pkg/go/build/read.go b/src/pkg/go/build/read.go
new file mode 100644
index 000000000..c8079dfd1
--- /dev/null
+++ b/src/pkg/go/build/read.go
@@ -0,0 +1,238 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+ "bufio"
+ "errors"
+ "io"
+)
+
+type importReader struct {
+ b *bufio.Reader
+ buf []byte
+ peek byte
+ err error
+ eof bool
+ nerr int
+}
+
+func isIdent(c byte) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= 0x80
+}
+
+var (
+ errSyntax = errors.New("syntax error")
+ errNUL = errors.New("unexpected NUL in input")
+)
+
+// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
+func (r *importReader) syntaxError() {
+ if r.err == nil {
+ r.err = errSyntax
+ }
+}
+
+// readByte reads the next byte from the input, saves it in buf, and returns it.
+// If an error occurs, readByte records the error in r.err and returns 0.
+func (r *importReader) readByte() byte {
+ c, err := r.b.ReadByte()
+ if err == nil {
+ r.buf = append(r.buf, c)
+ if c == 0 {
+ err = errNUL
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ r.eof = true
+ } else if r.err == nil {
+ r.err = err
+ }
+ c = 0
+ }
+ return c
+}
+
+// peekByte returns the next byte from the input reader but does not advance beyond it.
+// If skipSpace is set, peekByte skips leading spaces and comments.
+func (r *importReader) peekByte(skipSpace bool) byte {
+ if r.err != nil {
+ if r.nerr++; r.nerr > 10000 {
+ panic("go/build: import reader looping")
+ }
+ return 0
+ }
+
+ // Use r.peek as first input byte.
+ // Don't just return r.peek here: it might have been left by peekByte(false)
+ // and this might be peekByte(true).
+ c := r.peek
+ if c == 0 {
+ c = r.readByte()
+ }
+ for r.err == nil && !r.eof {
+ if skipSpace {
+ // For the purposes of this reader, semicolons are never necessary to
+ // understand the input and are treated as spaces.
+ switch c {
+ case ' ', '\f', '\t', '\r', '\n', ';':
+ c = r.readByte()
+ continue
+
+ case '/':
+ c = r.readByte()
+ if c == '/' {
+ for c != '\n' && r.err == nil && !r.eof {
+ c = r.readByte()
+ }
+ } else if c == '*' {
+ var c1 byte
+ for (c != '*' || c1 != '/') && r.err == nil {
+ if r.eof {
+ r.syntaxError()
+ }
+ c, c1 = c1, r.readByte()
+ }
+ } else {
+ r.syntaxError()
+ }
+ c = r.readByte()
+ continue
+ }
+ }
+ break
+ }
+ r.peek = c
+ return r.peek
+}
+
+// nextByte is like peekByte but advances beyond the returned byte.
+func (r *importReader) nextByte(skipSpace bool) byte {
+ c := r.peekByte(skipSpace)
+ r.peek = 0
+ return c
+}
+
+// readKeyword reads the given keyword from the input.
+// If the keyword is not present, readKeyword records a syntax error.
+func (r *importReader) readKeyword(kw string) {
+ r.peekByte(true)
+ for i := 0; i < len(kw); i++ {
+ if r.nextByte(false) != kw[i] {
+ r.syntaxError()
+ return
+ }
+ }
+ if isIdent(r.peekByte(false)) {
+ r.syntaxError()
+ }
+}
+
+// readIdent reads an identifier from the input.
+// If an identifier is not present, readIdent records a syntax error.
+func (r *importReader) readIdent() {
+ c := r.peekByte(true)
+ if !isIdent(c) {
+ r.syntaxError()
+ return
+ }
+ for isIdent(r.peekByte(false)) {
+ r.peek = 0
+ }
+}
+
+// readString reads a quoted string literal from the input.
+// If an identifier is not present, readString records a syntax error.
+func (r *importReader) readString() {
+ switch r.nextByte(true) {
+ case '`':
+ for r.err == nil {
+ if r.nextByte(false) == '`' {
+ break
+ }
+ if r.eof {
+ r.syntaxError()
+ }
+ }
+ case '"':
+ for r.err == nil {
+ c := r.nextByte(false)
+ if c == '"' {
+ break
+ }
+ if r.eof || c == '\n' {
+ r.syntaxError()
+ }
+ if c == '\\' {
+ r.nextByte(false)
+ }
+ }
+ default:
+ r.syntaxError()
+ }
+}
+
+// readImport reads an import clause - optional identifier followed by quoted string -
+// from the input.
+func (r *importReader) readImport() {
+ c := r.peekByte(true)
+ if c == '.' {
+ r.peek = 0
+ } else if isIdent(c) {
+ r.readIdent()
+ }
+ r.readString()
+}
+
+// readComments is like ioutil.ReadAll, except that it only reads the leading
+// block of comments in the file.
+func readComments(f io.Reader) ([]byte, error) {
+ r := &importReader{b: bufio.NewReader(f)}
+ r.peekByte(true)
+ if r.err == nil && !r.eof {
+ // Didn't reach EOF, so must have found a non-space byte. Remove it.
+ r.buf = r.buf[:len(r.buf)-1]
+ }
+ return r.buf, r.err
+}
+
+// readImports is like ioutil.ReadAll, except that it expects a Go file as input
+// and stops reading the input once the imports have completed.
+func readImports(f io.Reader, reportSyntaxError bool) ([]byte, error) {
+ r := &importReader{b: bufio.NewReader(f)}
+
+ r.readKeyword("package")
+ r.readIdent()
+ for r.peekByte(true) == 'i' {
+ r.readKeyword("import")
+ if r.peekByte(true) == '(' {
+ r.nextByte(false)
+ for r.peekByte(true) != ')' && r.err == nil {
+ r.readImport()
+ }
+ r.nextByte(false)
+ } else {
+ r.readImport()
+ }
+ }
+
+ // If we stopped successfully before EOF, we read a byte that told us we were done.
+ // Return all but that last byte, which would cause a syntax error if we let it through.
+ if r.err == nil && !r.eof {
+ return r.buf[:len(r.buf)-1], nil
+ }
+
+ // If we stopped for a syntax error, consume the whole file so that
+ // we are sure we don't change the errors that go/parser returns.
+ if r.err == errSyntax && !reportSyntaxError {
+ r.err = nil
+ for r.err == nil && !r.eof {
+ r.readByte()
+ }
+ }
+
+ return r.buf, r.err
+}
diff --git a/src/pkg/go/build/read_test.go b/src/pkg/go/build/read_test.go
new file mode 100644
index 000000000..2dcc1208f
--- /dev/null
+++ b/src/pkg/go/build/read_test.go
@@ -0,0 +1,226 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+ "io"
+ "strings"
+ "testing"
+)
+
+const quote = "`"
+
+type readTest struct {
+ // Test input contains ℙ where readImports should stop.
+ in string
+ err string
+}
+
+var readImportsTests = []readTest{
+ {
+ `package p`,
+ "",
+ },
+ {
+ `package p; import "x"`,
+ "",
+ },
+ {
+ `package p; import . "x"`,
+ "",
+ },
+ {
+ `package p; import "x";ℙvar x = 1`,
+ "",
+ },
+ {
+ `package p
+
+ // comment
+
+ import "x"
+ import _ "x"
+ import a "x"
+
+ /* comment */
+
+ import (
+ "x" /* comment */
+ _ "x"
+ a "x" // comment
+ ` + quote + `x` + quote + `
+ _ /*comment*/ ` + quote + `x` + quote + `
+ a ` + quote + `x` + quote + `
+ )
+ import (
+ )
+ import ()
+ import()import()import()
+ import();import();import()
+
+ ℙvar x = 1
+ `,
+ "",
+ },
+}
+
+var readCommentsTests = []readTest{
+ {
+ `ℙpackage p`,
+ "",
+ },
+ {
+ `ℙpackage p; import "x"`,
+ "",
+ },
+ {
+ `ℙpackage p; import . "x"`,
+ "",
+ },
+ {
+ `// foo
+
+ /* bar */
+
+ /* quux */ // baz
+
+ /*/ zot */
+
+ // asdf
+ ℙHello, world`,
+ "",
+ },
+}
+
+func testRead(t *testing.T, tests []readTest, read func(io.Reader) ([]byte, error)) {
+ for i, tt := range tests {
+ var in, testOut string
+ j := strings.Index(tt.in, "ℙ")
+ if j < 0 {
+ in = tt.in
+ testOut = tt.in
+ } else {
+ in = tt.in[:j] + tt.in[j+len("ℙ"):]
+ testOut = tt.in[:j]
+ }
+ r := strings.NewReader(in)
+ buf, err := read(r)
+ if err != nil {
+ if tt.err == "" {
+ t.Errorf("#%d: err=%q, expected success (%q)", i, err, string(buf))
+ continue
+ }
+ if !strings.Contains(err.Error(), tt.err) {
+ t.Errorf("#%d: err=%q, expected %q", i, err, tt.err)
+ continue
+ }
+ continue
+ }
+ if err == nil && tt.err != "" {
+ t.Errorf("#%d: success, expected %q", i, tt.err)
+ continue
+ }
+
+ out := string(buf)
+ if out != testOut {
+ t.Errorf("#%d: wrong output:\nhave %q\nwant %q\n", i, out, testOut)
+ }
+ }
+}
+
+func TestReadImports(t *testing.T) {
+ testRead(t, readImportsTests, func(r io.Reader) ([]byte, error) { return readImports(r, true) })
+}
+
+func TestReadComments(t *testing.T) {
+ testRead(t, readCommentsTests, readComments)
+}
+
+var readFailuresTests = []readTest{
+ {
+ `package`,
+ "syntax error",
+ },
+ {
+ "package p\n\x00\nimport `math`\n",
+ "unexpected NUL in input",
+ },
+ {
+ `package p; import`,
+ "syntax error",
+ },
+ {
+ `package p; import "`,
+ "syntax error",
+ },
+ {
+ "package p; import ` \n\n",
+ "syntax error",
+ },
+ {
+ `package p; import "x`,
+ "syntax error",
+ },
+ {
+ `package p; import _`,
+ "syntax error",
+ },
+ {
+ `package p; import _ "`,
+ "syntax error",
+ },
+ {
+ `package p; import _ "x`,
+ "syntax error",
+ },
+ {
+ `package p; import .`,
+ "syntax error",
+ },
+ {
+ `package p; import . "`,
+ "syntax error",
+ },
+ {
+ `package p; import . "x`,
+ "syntax error",
+ },
+ {
+ `package p; import (`,
+ "syntax error",
+ },
+ {
+ `package p; import ("`,
+ "syntax error",
+ },
+ {
+ `package p; import ("x`,
+ "syntax error",
+ },
+ {
+ `package p; import ("x"`,
+ "syntax error",
+ },
+}
+
+func TestReadFailures(t *testing.T) {
+ // Errors should be reported (true arg to readImports).
+ testRead(t, readFailuresTests, func(r io.Reader) ([]byte, error) { return readImports(r, true) })
+}
+
+func TestReadFailuresIgnored(t *testing.T) {
+ // Syntax errors should not be reported (false arg to readImports).
+ // Instead, entire file should be the output and no error.
+ // Convert tests not to return syntax errors.
+ tests := make([]readTest, len(readFailuresTests))
+ copy(tests, readFailuresTests)
+ for i := range tests {
+ tt := &tests[i]
+ if !strings.Contains(tt.err, "NUL") {
+ tt.err = ""
+ }
+ }
+ testRead(t, tests, func(r io.Reader) ([]byte, error) { return readImports(r, false) })
+}
diff --git a/src/pkg/go/doc/comment.go b/src/pkg/go/doc/comment.go
index 6f0edd4ba..c4b7e6ae6 100644
--- a/src/pkg/go/doc/comment.go
+++ b/src/pkg/go/doc/comment.go
@@ -174,7 +174,7 @@ func unindent(block []string) {
}
// heading returns the trimmed line if it passes as a section heading;
-// otherwise it returns the empty string.
+// otherwise it returns the empty string.
func heading(line string) string {
line = strings.TrimSpace(line)
if len(line) == 0 {
@@ -229,7 +229,8 @@ type block struct {
var nonAlphaNumRx = regexp.MustCompile(`[^a-zA-Z0-9]`)
func anchorID(line string) string {
- return nonAlphaNumRx.ReplaceAllString(line, "_")
+ // Add a "hdr-" prefix to avoid conflicting with IDs used for package symbols.
+ return "hdr-" + nonAlphaNumRx.ReplaceAllString(line, "_")
}
// ToHTML converts comment text to formatted HTML.
diff --git a/src/pkg/go/doc/doc.go b/src/pkg/go/doc/doc.go
index 9c606315d..65b1b83eb 100644
--- a/src/pkg/go/doc/doc.go
+++ b/src/pkg/go/doc/doc.go
@@ -17,7 +17,16 @@ type Package struct {
ImportPath string
Imports []string
Filenames []string
- Bugs []string
+ // DEPRECATED. For backward compatibility Bugs is still populated,
+ // but all new code should use Notes instead.
+ Bugs []string
+
+ // Notes such as TODO(userid): or SECURITY(userid):
+ // along the lines of BUG(userid). Any marker with 2 or more upper
+ // case [A-Z] letters is recognised.
+ // BUG is explicitly not included in these notes but will
+ // be in a subsequent change when the Bugs field above is removed.
+ Notes map[string][]string
// declarations
Consts []*Value
@@ -89,6 +98,7 @@ func New(pkg *ast.Package, importPath string, mode Mode) *Package {
Imports: sortedKeys(r.imports),
Filenames: r.filenames,
Bugs: r.bugs,
+ Notes: r.notes,
Consts: sortedValues(r.values, token.CONST),
Types: sortedTypes(r.types, mode&AllMethods != 0),
Vars: sortedValues(r.values, token.VAR),
diff --git a/src/pkg/go/doc/doc_test.go b/src/pkg/go/doc/doc_test.go
index f957ede4a..8043038b4 100644
--- a/src/pkg/go/doc/doc_test.go
+++ b/src/pkg/go/doc/doc_test.go
@@ -123,7 +123,7 @@ func test(t *testing.T, mode Mode) {
}
// compare
- if bytes.Compare(got, want) != 0 {
+ if !bytes.Equal(got, want) {
t.Errorf("package %s\n\tgot:\n%s\n\twant:\n%s", pkg.Name, got, want)
}
}
diff --git a/src/pkg/go/doc/example.go b/src/pkg/go/doc/example.go
index a7e0e250a..693ad5b94 100644
--- a/src/pkg/go/doc/example.go
+++ b/src/pkg/go/doc/example.go
@@ -9,21 +9,29 @@ package doc
import (
"go/ast"
"go/token"
+ "path"
"regexp"
"sort"
+ "strconv"
"strings"
"unicode"
"unicode/utf8"
)
+// An Example represents an example function found in a source file.
type Example struct {
- Name string // name of the item being exemplified
- Doc string // example function doc string
- Code ast.Node
- Comments []*ast.CommentGroup
- Output string // expected output
+ Name string // name of the item being exemplified
+ Doc string // example function doc string
+ Code ast.Node
+ Play *ast.File // a whole program version of the example
+ Comments []*ast.CommentGroup
+ Output string // expected output
+ EmptyOutput bool // expect empty output
+ Order int // original source code order
}
+// Examples returns the examples found in the files, sorted by Name field.
+// The Order fields record the order in which the examples were encountered.
func Examples(files ...*ast.File) []*Example {
var list []*Example
for _, file := range files {
@@ -52,12 +60,16 @@ func Examples(files ...*ast.File) []*Example {
if f.Doc != nil {
doc = f.Doc.Text()
}
+ output, hasOutput := exampleOutput(f.Body, file.Comments)
flist = append(flist, &Example{
- Name: name[len("Example"):],
- Doc: doc,
- Code: f.Body,
- Comments: file.Comments,
- Output: exampleOutput(f, file.Comments),
+ Name: name[len("Example"):],
+ Doc: doc,
+ Code: f.Body,
+ Play: playExample(file, f.Body),
+ Comments: file.Comments,
+ Output: output,
+ EmptyOutput: output == "" && hasOutput,
+ Order: len(flist),
})
}
if !hasTests && numDecl > 1 && len(flist) == 1 {
@@ -65,6 +77,7 @@ func Examples(files ...*ast.File) []*Example {
// other top-level declarations, and no tests or
// benchmarks, use the whole file as the example.
flist[0].Code = file
+ flist[0].Play = playExampleFile(file)
}
list = append(list, flist...)
}
@@ -74,26 +87,22 @@ func Examples(files ...*ast.File) []*Example {
var outputPrefix = regexp.MustCompile(`(?i)^[[:space:]]*output:`)
-func exampleOutput(fun *ast.FuncDecl, comments []*ast.CommentGroup) string {
- // find the last comment in the function
- var last *ast.CommentGroup
- for _, cg := range comments {
- if cg.Pos() < fun.Pos() {
- continue
- }
- if cg.End() > fun.End() {
- break
- }
- last = cg
- }
- if last != nil {
+// Extracts the expected output and whether there was a valid output comment
+func exampleOutput(b *ast.BlockStmt, comments []*ast.CommentGroup) (output string, ok bool) {
+ if _, last := lastComment(b, comments); last != nil {
// test that it begins with the correct prefix
text := last.Text()
if loc := outputPrefix.FindStringIndex(text); loc != nil {
- return strings.TrimSpace(text[loc[1]:])
+ text = text[loc[1]:]
+ // Strip zero or more spaces followed by \n or a single space.
+ text = strings.TrimLeft(text, " ")
+ if len(text) > 0 && text[0] == '\n' {
+ text = text[1:]
+ }
+ return text, true
}
}
- return "" // no suitable comment found
+ return "", false // no suitable comment found
}
// isTest tells whether name looks like a test, example, or benchmark.
@@ -115,3 +124,214 @@ type exampleByName []*Example
func (s exampleByName) Len() int { return len(s) }
func (s exampleByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s exampleByName) Less(i, j int) bool { return s[i].Name < s[j].Name }
+
+// playExample synthesizes a new *ast.File based on the provided
+// file with the provided function body as the body of main.
+func playExample(file *ast.File, body *ast.BlockStmt) *ast.File {
+ if !strings.HasSuffix(file.Name.Name, "_test") {
+ // We don't support examples that are part of the
+ // greater package (yet).
+ return nil
+ }
+
+ // Find top-level declarations in the file.
+ topDecls := make(map[*ast.Object]bool)
+ for _, decl := range file.Decls {
+ switch d := decl.(type) {
+ case *ast.FuncDecl:
+ topDecls[d.Name.Obj] = true
+ case *ast.GenDecl:
+ for _, spec := range d.Specs {
+ switch s := spec.(type) {
+ case *ast.TypeSpec:
+ topDecls[s.Name.Obj] = true
+ case *ast.ValueSpec:
+ for _, id := range s.Names {
+ topDecls[id.Obj] = true
+ }
+ }
+ }
+ }
+ }
+
+ // Find unresolved identifiers and uses of top-level declarations.
+ unresolved := make(map[string]bool)
+ usesTopDecl := false
+ var inspectFunc func(ast.Node) bool
+ inspectFunc = func(n ast.Node) bool {
+ // For selector expressions, only inspect the left hand side.
+ // (For an expression like fmt.Println, only add "fmt" to the
+ // set of unresolved names, not "Println".)
+ if e, ok := n.(*ast.SelectorExpr); ok {
+ ast.Inspect(e.X, inspectFunc)
+ return false
+ }
+ if id, ok := n.(*ast.Ident); ok {
+ if id.Obj == nil {
+ unresolved[id.Name] = true
+ } else if topDecls[id.Obj] {
+ usesTopDecl = true
+ }
+ }
+ return true
+ }
+ ast.Inspect(body, inspectFunc)
+ if usesTopDecl {
+ // We don't support examples that are not self-contained (yet).
+ return nil
+ }
+
+ // Remove predeclared identifiers from unresolved list.
+ for n := range unresolved {
+ if predeclaredTypes[n] || predeclaredConstants[n] || predeclaredFuncs[n] {
+ delete(unresolved, n)
+ }
+ }
+
+ // Use unresolved identifiers to determine the imports used by this
+ // example. The heuristic assumes package names match base import
+ // paths for imports w/o renames (should be good enough most of the time).
+ namedImports := make(map[string]string) // [name]path
+ var blankImports []ast.Spec // _ imports
+ for _, s := range file.Imports {
+ p, err := strconv.Unquote(s.Path.Value)
+ if err != nil {
+ continue
+ }
+ n := path.Base(p)
+ if s.Name != nil {
+ n = s.Name.Name
+ switch n {
+ case "_":
+ blankImports = append(blankImports, s)
+ continue
+ case ".":
+ // We can't resolve dot imports (yet).
+ return nil
+ }
+ }
+ if unresolved[n] {
+ namedImports[n] = p
+ delete(unresolved, n)
+ }
+ }
+
+ // If there are other unresolved identifiers, give up because this
+ // synthesized file is not going to build.
+ if len(unresolved) > 0 {
+ return nil
+ }
+
+ // Include documentation belonging to blank imports.
+ var comments []*ast.CommentGroup
+ for _, s := range blankImports {
+ if c := s.(*ast.ImportSpec).Doc; c != nil {
+ comments = append(comments, c)
+ }
+ }
+
+ // Include comments that are inside the function body.
+ for _, c := range file.Comments {
+ if body.Pos() <= c.Pos() && c.End() <= body.End() {
+ comments = append(comments, c)
+ }
+ }
+
+	// Strip "Output:" comment and adjust body end position.
+ body, comments = stripOutputComment(body, comments)
+
+ // Synthesize import declaration.
+ importDecl := &ast.GenDecl{
+ Tok: token.IMPORT,
+ Lparen: 1, // Need non-zero Lparen and Rparen so that printer
+ Rparen: 1, // treats this as a factored import.
+ }
+ for n, p := range namedImports {
+ s := &ast.ImportSpec{Path: &ast.BasicLit{Value: strconv.Quote(p)}}
+ if path.Base(p) != n {
+ s.Name = ast.NewIdent(n)
+ }
+ importDecl.Specs = append(importDecl.Specs, s)
+ }
+ importDecl.Specs = append(importDecl.Specs, blankImports...)
+
+ // Synthesize main function.
+ funcDecl := &ast.FuncDecl{
+ Name: ast.NewIdent("main"),
+ Type: &ast.FuncType{},
+ Body: body,
+ }
+
+ // Synthesize file.
+ return &ast.File{
+ Name: ast.NewIdent("main"),
+ Decls: []ast.Decl{importDecl, funcDecl},
+ Comments: comments,
+ }
+}
+
+// playExampleFile takes a whole file example and synthesizes a new *ast.File
+// such that the example is function main in package main.
+func playExampleFile(file *ast.File) *ast.File {
+ // Strip copyright comment if present.
+ comments := file.Comments
+ if len(comments) > 0 && strings.HasPrefix(comments[0].Text(), "Copyright") {
+ comments = comments[1:]
+ }
+
+ // Copy declaration slice, rewriting the ExampleX function to main.
+ var decls []ast.Decl
+ for _, d := range file.Decls {
+ if f, ok := d.(*ast.FuncDecl); ok && isTest(f.Name.Name, "Example") {
+ // Copy the FuncDecl, as it may be used elsewhere.
+ newF := *f
+ newF.Name = ast.NewIdent("main")
+ newF.Body, comments = stripOutputComment(f.Body, comments)
+ d = &newF
+ }
+ decls = append(decls, d)
+ }
+
+ // Copy the File, as it may be used elsewhere.
+ f := *file
+ f.Name = ast.NewIdent("main")
+ f.Decls = decls
+ f.Comments = comments
+ return &f
+}
+
+// stripOutputComment finds and removes an "Output:" comment from body
+// and comments, and adjusts the body block's end position.
+func stripOutputComment(body *ast.BlockStmt, comments []*ast.CommentGroup) (*ast.BlockStmt, []*ast.CommentGroup) {
+ // Do nothing if no "Output:" comment found.
+ i, last := lastComment(body, comments)
+ if last == nil || !outputPrefix.MatchString(last.Text()) {
+ return body, comments
+ }
+
+ // Copy body and comments, as the originals may be used elsewhere.
+ newBody := &ast.BlockStmt{
+ Lbrace: body.Lbrace,
+ List: body.List,
+ Rbrace: last.Pos(),
+ }
+ newComments := make([]*ast.CommentGroup, len(comments)-1)
+ copy(newComments, comments[:i])
+ copy(newComments[i:], comments[i+1:])
+ return newBody, newComments
+}
+
+// lastComment returns the last comment inside the provided block.
+func lastComment(b *ast.BlockStmt, c []*ast.CommentGroup) (i int, last *ast.CommentGroup) {
+ pos, end := b.Pos(), b.End()
+ for j, cg := range c {
+ if cg.Pos() < pos {
+ continue
+ }
+ if cg.End() > end {
+ break
+ }
+ i, last = j, cg
+ }
+ return
+}
diff --git a/src/pkg/go/doc/example_test.go b/src/pkg/go/doc/example_test.go
new file mode 100644
index 000000000..b70efd93d
--- /dev/null
+++ b/src/pkg/go/doc/example_test.go
@@ -0,0 +1,111 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc_test
+
+import (
+ "bytes"
+ "go/doc"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "strings"
+ "testing"
+)
+
+const exampleTestFile = `
+package foo_test
+
+import (
+ "fmt"
+ "log"
+ "os/exec"
+)
+
+func ExampleHello() {
+ fmt.Println("Hello, world!")
+ // Output: Hello, world!
+}
+
+func ExampleImport() {
+ out, err := exec.Command("date").Output()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("The date is %s\n", out)
+}
+`
+
+var exampleTestCases = []struct {
+ Name, Play, Output string
+}{
+ {
+ Name: "Hello",
+ Play: exampleHelloPlay,
+ Output: "Hello, world!\n",
+ },
+ {
+ Name: "Import",
+ Play: exampleImportPlay,
+ },
+}
+
+const exampleHelloPlay = `package main
+
+import (
+ "fmt"
+)
+
+func main() {
+ fmt.Println("Hello, world!")
+}
+`
+const exampleImportPlay = `package main
+
+import (
+ "fmt"
+ "log"
+ "os/exec"
+)
+
+func main() {
+ out, err := exec.Command("date").Output()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("The date is %s\n", out)
+}
+`
+
+func TestExamples(t *testing.T) {
+ fs := token.NewFileSet()
+ file, err := parser.ParseFile(fs, "test.go", strings.NewReader(exampleTestFile), parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, e := range doc.Examples(file) {
+ c := exampleTestCases[i]
+ if e.Name != c.Name {
+ t.Errorf("got Name == %q, want %q", e.Name, c.Name)
+ }
+ if w := c.Play; w != "" {
+ var g string // hah
+ if e.Play == nil {
+ g = "<nil>"
+ } else {
+ b := new(bytes.Buffer)
+ if err := format.Node(b, fs, e.Play); err != nil {
+ t.Fatal(err)
+ }
+ g = b.String()
+ }
+ if g != w {
+ t.Errorf("%s: got Play == %q, want %q", c.Name, g, w)
+ }
+ }
+ if g, w := e.Output, c.Output; g != w {
+ t.Errorf("%s: got Output == %q, want %q", c.Name, g, w)
+ }
+ }
+}
diff --git a/src/pkg/go/doc/exports.go b/src/pkg/go/doc/exports.go
index 146be5d87..ff01285d4 100644
--- a/src/pkg/go/doc/exports.go
+++ b/src/pkg/go/doc/exports.go
@@ -107,7 +107,7 @@ func (r *reader) filterParamList(fields *ast.FieldList) {
// filterType strips any unexported struct fields or method types from typ
// in place. If fields (or methods) have been removed, the corresponding
-// struct or interface type has the Incomplete field set to true.
+// struct or interface type has the Incomplete field set to true.
//
func (r *reader) filterType(parent *namedType, typ ast.Expr) {
switch t := typ.(type) {
diff --git a/src/pkg/go/doc/reader.go b/src/pkg/go/doc/reader.go
index 5eaae37b7..dd6a57299 100644
--- a/src/pkg/go/doc/reader.go
+++ b/src/pkg/go/doc/reader.go
@@ -46,7 +46,7 @@ func (mset methodSet) set(f *ast.FuncDecl) {
// since it has documentation, assume f is simply another
// implementation and ignore it. This does not happen if the
// caller is using go/build.ScanDir to determine the list of
- // files implementing a package.
+ // files implementing a package.
return
}
// function doesn't exist or has no documentation; use f
@@ -149,6 +149,7 @@ type reader struct {
doc string // package documentation, if any
filenames []string
bugs []string
+ notes map[string][]string
// declarations
imports map[string]int
@@ -400,10 +401,23 @@ func (r *reader) readFunc(fun *ast.FuncDecl) {
}
var (
- bug_markers = regexp.MustCompile("^/[/*][ \t]*BUG\\(.*\\):[ \t]*") // BUG(uid):
- bug_content = regexp.MustCompile("[^ \n\r\t]+") // at least one non-whitespace char
+ noteMarker = regexp.MustCompile(`^/[/*][ \t]*([A-Z][A-Z]+)\(.+\):[ \t]*(.*)`) // MARKER(uid)
+ noteContent = regexp.MustCompile(`[^ \n\r\t]+`) // at least one non-whitespace char
)
+func readNote(c *ast.CommentGroup) (marker, annotation string) {
+ text := c.List[0].Text
+ if m := noteMarker.FindStringSubmatch(text); m != nil {
+ if btxt := m[2]; noteContent.MatchString(btxt) {
+ // non-empty MARKER comment; collect comment without the MARKER prefix
+ list := append([]*ast.Comment(nil), c.List...) // make a copy
+ list[0].Text = m[2]
+ return m[1], (&ast.CommentGroup{List: list}).Text()
+ }
+ }
+ return "", ""
+}
+
// readFile adds the AST for a source file to the reader.
//
func (r *reader) readFile(src *ast.File) {
@@ -469,16 +483,12 @@ func (r *reader) readFile(src *ast.File) {
}
}
- // collect BUG(...) comments
+ // collect MARKER(...): annotations
for _, c := range src.Comments {
- text := c.List[0].Text
- if m := bug_markers.FindStringIndex(text); m != nil {
- // found a BUG comment; maybe empty
- if btxt := text[m[1]:]; bug_content.MatchString(btxt) {
- // non-empty BUG comment; collect comment without BUG prefix
- list := append([]*ast.Comment(nil), c.List...) // make a copy
- list[0].Text = text[m[1]:]
- r.bugs = append(r.bugs, (&ast.CommentGroup{List: list}).Text())
+ if marker, text := readNote(c); marker != "" {
+ r.notes[marker] = append(r.notes[marker], text)
+ if marker == "BUG" {
+ r.bugs = append(r.bugs, text)
}
}
}
@@ -492,9 +502,10 @@ func (r *reader) readPackage(pkg *ast.Package, mode Mode) {
r.mode = mode
r.types = make(map[string]*namedType)
r.funcs = make(methodSet)
+ r.notes = make(map[string][]string)
// sort package files before reading them so that the
- // result result does not depend on map iteration order
+ // result does not depend on map iteration order
i := 0
for filename := range pkg.Files {
r.filenames[i] = filename
@@ -515,29 +526,6 @@ func (r *reader) readPackage(pkg *ast.Package, mode Mode) {
// ----------------------------------------------------------------------------
// Types
-var predeclaredTypes = map[string]bool{
- "bool": true,
- "byte": true,
- "complex64": true,
- "complex128": true,
- "error": true,
- "float32": true,
- "float64": true,
- "int": true,
- "int8": true,
- "int16": true,
- "int32": true,
- "int64": true,
- "rune": true,
- "string": true,
- "uint": true,
- "uint8": true,
- "uint16": true,
- "uint32": true,
- "uint64": true,
- "uintptr": true,
-}
-
func customizeRecv(f *Func, recvTypeName string, embeddedIsPtr bool, level int) *Func {
if f == nil || f.Decl == nil || f.Decl.Recv == nil || len(f.Decl.Recv.List) != 1 {
return f // shouldn't happen, but be safe
@@ -620,7 +608,7 @@ func (r *reader) computeMethodSets() {
// types that have no declaration. Instead, these functions and methods
// are shown at the package level. It also removes types with missing
// declarations or which are not visible.
-//
+//
func (r *reader) cleanupTypes() {
for _, t := range r.types {
visible := r.isVisible(t.name)
@@ -772,3 +760,54 @@ func sortedFuncs(m methodSet, allMethods bool) []*Func {
)
return list
}
+
+// ----------------------------------------------------------------------------
+// Predeclared identifiers
+
+var predeclaredTypes = map[string]bool{
+ "bool": true,
+ "byte": true,
+ "complex64": true,
+ "complex128": true,
+ "error": true,
+ "float32": true,
+ "float64": true,
+ "int": true,
+ "int8": true,
+ "int16": true,
+ "int32": true,
+ "int64": true,
+ "rune": true,
+ "string": true,
+ "uint": true,
+ "uint8": true,
+ "uint16": true,
+ "uint32": true,
+ "uint64": true,
+ "uintptr": true,
+}
+
+var predeclaredFuncs = map[string]bool{
+ "append": true,
+ "cap": true,
+ "close": true,
+ "complex": true,
+ "copy": true,
+ "delete": true,
+ "imag": true,
+ "len": true,
+ "make": true,
+ "new": true,
+ "panic": true,
+ "print": true,
+ "println": true,
+ "real": true,
+ "recover": true,
+}
+
+var predeclaredConstants = map[string]bool{
+ "false": true,
+ "iota": true,
+ "nil": true,
+ "true": true,
+}
diff --git a/src/pkg/go/doc/synopsis.go b/src/pkg/go/doc/synopsis.go
index 2192d78c0..2d1817439 100644
--- a/src/pkg/go/doc/synopsis.go
+++ b/src/pkg/go/doc/synopsis.go
@@ -4,7 +4,10 @@
package doc
-import "unicode"
+import (
+ "strings"
+ "unicode"
+)
// firstSentenceLen returns the length of the first sentence in s.
// The sentence ends after the first period followed by space and
@@ -24,17 +27,12 @@ func firstSentenceLen(s string) int {
return len(s)
}
-// Synopsis returns a cleaned version of the first sentence in s.
-// That sentence ends after the first period followed by space and
-// not preceded by exactly one uppercase letter. The result string
-// has no \n, \r, or \t characters and uses only single spaces between
-// words.
-//
-func Synopsis(s string) string {
- n := firstSentenceLen(s)
+// clean replaces each sequence of space, \n, \r, or \t characters
+// with a single space and removes any trailing and leading spaces.
+func clean(s string) string {
var b []byte
p := byte(' ')
- for i := 0; i < n; i++ {
+ for i := 0; i < len(s); i++ {
q := s[i]
if q == '\n' || q == '\r' || q == '\t' {
q = ' '
@@ -50,3 +48,26 @@ func Synopsis(s string) string {
}
return string(b)
}
+
+// Synopsis returns a cleaned version of the first sentence in s.
+// That sentence ends after the first period followed by space and
+// not preceded by exactly one uppercase letter. The result string
+// has no \n, \r, or \t characters and uses only single spaces between
+// words. If s starts with any of the IllegalPrefixes, the result
+// is the empty string.
+//
+func Synopsis(s string) string {
+ s = clean(s[0:firstSentenceLen(s)])
+ for _, prefix := range IllegalPrefixes {
+ if strings.HasPrefix(strings.ToLower(s), prefix) {
+ return ""
+ }
+ }
+ return s
+}
+
+var IllegalPrefixes = []string{
+ "copyright",
+ "all rights",
+ "author",
+}
diff --git a/src/pkg/go/doc/synopsis_test.go b/src/pkg/go/doc/synopsis_test.go
index dfc6598af..fd7081a07 100644
--- a/src/pkg/go/doc/synopsis_test.go
+++ b/src/pkg/go/doc/synopsis_test.go
@@ -28,6 +28,11 @@ var tests = []struct {
{"P. Q. ", 8, "P. Q."},
{"Package Καλημέρα κόσμε.", 36, "Package Καλημέρα κόσμε."},
{"Package こんにちは 世界\n", 31, "Package こんにちは 世界"},
+ {"Package foo does bar.", 21, "Package foo does bar."},
+ {"Copyright 2012 Google, Inc. Package foo does bar.", 27, ""},
+ {"All Rights reserved. Package foo does bar.", 20, ""},
+ {"All rights reserved. Package foo does bar.", 20, ""},
+ {"Authors: foo@bar.com. Package foo does bar.", 21, ""},
}
func TestSynopsis(t *testing.T) {
diff --git a/src/pkg/go/doc/testdata/a.0.golden b/src/pkg/go/doc/testdata/a.0.golden
index 24db02d34..ae3756c84 100644
--- a/src/pkg/go/doc/testdata/a.0.golden
+++ b/src/pkg/go/doc/testdata/a.0.golden
@@ -8,6 +8,17 @@ FILENAMES
testdata/a0.go
testdata/a1.go
+BUGS .Bugs is now deprecated, please use .Notes instead
+ // bug0
+ // bug1
+
BUGS
// bug0
// bug1
+
+SECBUGS
+ // sec hole 0 need to fix asap
+
+TODOS
+ // todo0
+ // todo1
diff --git a/src/pkg/go/doc/testdata/a.1.golden b/src/pkg/go/doc/testdata/a.1.golden
index 24db02d34..ae3756c84 100644
--- a/src/pkg/go/doc/testdata/a.1.golden
+++ b/src/pkg/go/doc/testdata/a.1.golden
@@ -8,6 +8,17 @@ FILENAMES
testdata/a0.go
testdata/a1.go
+BUGS .Bugs is now deprecated, please use .Notes instead
+ // bug0
+ // bug1
+
BUGS
// bug0
// bug1
+
+SECBUGS
+ // sec hole 0 need to fix asap
+
+TODOS
+ // todo0
+ // todo1
diff --git a/src/pkg/go/doc/testdata/a.2.golden b/src/pkg/go/doc/testdata/a.2.golden
index 24db02d34..ae3756c84 100644
--- a/src/pkg/go/doc/testdata/a.2.golden
+++ b/src/pkg/go/doc/testdata/a.2.golden
@@ -8,6 +8,17 @@ FILENAMES
testdata/a0.go
testdata/a1.go
+BUGS .Bugs is now deprecated, please use .Notes instead
+ // bug0
+ // bug1
+
BUGS
// bug0
// bug1
+
+SECBUGS
+ // sec hole 0 need to fix asap
+
+TODOS
+ // todo0
+ // todo1
diff --git a/src/pkg/go/doc/testdata/a0.go b/src/pkg/go/doc/testdata/a0.go
index dc552989e..71af470ee 100644
--- a/src/pkg/go/doc/testdata/a0.go
+++ b/src/pkg/go/doc/testdata/a0.go
@@ -6,3 +6,12 @@
package a
//BUG(uid): bug0
+
+//TODO(uid): todo0
+
+// A note with some spaces after it, should be ignored (watch out for
+// emacs modes that remove trailing whitespace).
+//NOTE(uid):
+
+// SECBUG(uid): sec hole 0
+// need to fix asap
diff --git a/src/pkg/go/doc/testdata/a1.go b/src/pkg/go/doc/testdata/a1.go
index 098776c1b..9fad1e09b 100644
--- a/src/pkg/go/doc/testdata/a1.go
+++ b/src/pkg/go/doc/testdata/a1.go
@@ -6,3 +6,7 @@
package a
//BUG(uid): bug1
+
+//TODO(uid): todo1
+
+//TODO(): ignored
diff --git a/src/pkg/go/doc/testdata/benchmark.go b/src/pkg/go/doc/testdata/benchmark.go
index 0aded5bb4..905e49644 100644
--- a/src/pkg/go/doc/testdata/benchmark.go
+++ b/src/pkg/go/doc/testdata/benchmark.go
@@ -13,7 +13,7 @@ import (
)
var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
-var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
+var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
// An internal type but exported because it is cross-package; part of the implementation
// of go test.
@@ -151,7 +151,7 @@ func (b *B) launch() {
b.runN(n)
// Run the benchmark for at least the specified amount of time.
- d := time.Duration(*benchTime * float64(time.Second))
+ d := *benchTime
for !b.failed && b.duration < d && n < 1e9 {
last := n
// Predict iterations/sec.
diff --git a/src/pkg/go/doc/testdata/e.go b/src/pkg/go/doc/testdata/e.go
index 19dd138cf..ec432e3e5 100644
--- a/src/pkg/go/doc/testdata/e.go
+++ b/src/pkg/go/doc/testdata/e.go
@@ -106,7 +106,7 @@ type U4 struct {
*u5
}
-// U4.M should appear as method of U4.
+// U4.M should appear as method of U4.
func (*U4) M() {}
type u5 struct {
diff --git a/src/pkg/go/doc/testdata/error2.1.golden b/src/pkg/go/doc/testdata/error2.1.golden
index 776bd1b3e..dbcc1b03e 100644
--- a/src/pkg/go/doc/testdata/error2.1.golden
+++ b/src/pkg/go/doc/testdata/error2.1.golden
@@ -10,7 +10,7 @@ FILENAMES
TYPES
//
type I0 interface {
- // When embedded, the the locally declared error interface
+ // When embedded, the locally-declared error interface
// is only visible if all declarations are shown.
error
}
diff --git a/src/pkg/go/doc/testdata/error2.go b/src/pkg/go/doc/testdata/error2.go
index 6cc36feef..6ee96c245 100644
--- a/src/pkg/go/doc/testdata/error2.go
+++ b/src/pkg/go/doc/testdata/error2.go
@@ -5,7 +5,7 @@
package error2
type I0 interface {
- // When embedded, the the locally declared error interface
+ // When embedded, the locally-declared error interface
// is only visible if all declarations are shown.
error
}
diff --git a/src/pkg/go/doc/testdata/template.txt b/src/pkg/go/doc/testdata/template.txt
index 32e331cdd..d3882b6b9 100644
--- a/src/pkg/go/doc/testdata/template.txt
+++ b/src/pkg/go/doc/testdata/template.txt
@@ -60,6 +60,9 @@ TYPES
{{end}}{{end}}{{end}}{{/*
*/}}{{with .Bugs}}
-BUGS
+BUGS .Bugs is now deprecated, please use .Notes instead
{{range .}} {{synopsis .}}
-{{end}}{{end}} \ No newline at end of file
+{{end}}{{end}}{{with .Notes}}{{range $marker, $content := .}}
+{{$marker}}S
+{{range $content}} {{synopsis .}}
+{{end}}{{end}}{{end}} \ No newline at end of file
diff --git a/src/pkg/go/doc/testdata/testing.1.golden b/src/pkg/go/doc/testdata/testing.1.golden
index d26a4685c..ffdb5c3b5 100644
--- a/src/pkg/go/doc/testdata/testing.1.golden
+++ b/src/pkg/go/doc/testdata/testing.1.golden
@@ -45,7 +45,7 @@ VARIABLES
)
//
- var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds")
+ var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
//
var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
diff --git a/src/pkg/go/doc/testdata/testing.go b/src/pkg/go/doc/testdata/testing.go
index 71c1d1eaf..c2499ad77 100644
--- a/src/pkg/go/doc/testdata/testing.go
+++ b/src/pkg/go/doc/testdata/testing.go
@@ -197,7 +197,7 @@ func (c *common) Fatalf(format string, args ...interface{}) {
c.FailNow()
}
-// Parallel signals that this test is to be run in parallel with (and only with)
+// Parallel signals that this test is to be run in parallel with (and only with)
// other parallel tests in this CPU group.
func (t *T) Parallel() {
t.signal <- (*T)(nil) // Release main testing loop
@@ -215,7 +215,7 @@ func tRunner(t *T, test *InternalTest) {
t.start = time.Now()
// When this goroutine is done, either because test.F(t)
- // returned normally or because a test failure triggered
+ // returned normally or because a test failure triggered
// a call to runtime.Goexit, record the duration and send
// a signal saying that the test is done.
defer func() {
diff --git a/src/pkg/go/format/format.go b/src/pkg/go/format/format.go
new file mode 100644
index 000000000..65b0e4e4b
--- /dev/null
+++ b/src/pkg/go/format/format.go
@@ -0,0 +1,200 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package format implements standard formatting of Go source.
+package format
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io"
+ "strings"
+)
+
+var config = printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 8}
+
+// Node formats node in canonical gofmt style and writes the result to dst.
+//
+// The node type must be *ast.File, *printer.CommentedNode, []ast.Decl,
+// []ast.Stmt, or assignment-compatible to ast.Expr, ast.Decl, ast.Spec,
+// or ast.Stmt. Node does not modify node. Imports are not sorted for
+// nodes representing partial source files (i.e., if the node is not an
+// *ast.File or a *printer.CommentedNode not wrapping an *ast.File).
+//
+// The function may return early (before the entire result is written)
+// and return a formatting error, for instance due to an incorrect AST.
+//
+func Node(dst io.Writer, fset *token.FileSet, node interface{}) error {
+ // Determine if we have a complete source file (file != nil).
+ var file *ast.File
+ var cnode *printer.CommentedNode
+ switch n := node.(type) {
+ case *ast.File:
+ file = n
+ case *printer.CommentedNode:
+ if f, ok := n.Node.(*ast.File); ok {
+ file = f
+ cnode = n
+ }
+ }
+
+ // Sort imports if necessary.
+ if file != nil && hasUnsortedImports(file) {
+ // Make a copy of the AST because ast.SortImports is destructive.
+ // TODO(gri) Do this more efficiently.
+ var buf bytes.Buffer
+ err := config.Fprint(&buf, fset, file)
+ if err != nil {
+ return err
+ }
+ file, err = parser.ParseFile(fset, "", buf.Bytes(), parser.ParseComments)
+ if err != nil {
+ // We should never get here. If we do, provide good diagnostic.
+ return fmt.Errorf("format.Node internal error (%s)", err)
+ }
+ ast.SortImports(fset, file)
+
+ // Use new file with sorted imports.
+ node = file
+ if cnode != nil {
+ node = &printer.CommentedNode{Node: file, Comments: cnode.Comments}
+ }
+ }
+
+ return config.Fprint(dst, fset, node)
+}
+
+// Source formats src in canonical gofmt style and writes the result to dst
+// or returns an I/O or syntax error. src is expected to be a syntactically
+// correct Go source file, or a list of Go declarations or statements.
+//
+// If src is a partial source file, the leading and trailing space of src
+// is applied to the result (such that it has the same leading and trailing
+// space as src), and the formatted src is indented by the same amount as
+// the first line of src containing code. Imports are not sorted for partial
+// source files.
+//
+func Source(src []byte) ([]byte, error) {
+ fset := token.NewFileSet()
+ node, err := parse(fset, src)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ if file, ok := node.(*ast.File); ok {
+ // Complete source file.
+ ast.SortImports(fset, file)
+ err := config.Fprint(&buf, fset, file)
+ if err != nil {
+ return nil, err
+ }
+
+ } else {
+ // Partial source file.
+ // Determine and prepend leading space.
+ i, j := 0, 0
+ for j < len(src) && isSpace(src[j]) {
+ if src[j] == '\n' {
+ i = j + 1 // index of last line in leading space
+ }
+ j++
+ }
+ buf.Write(src[:i])
+
+ // Determine indentation of first code line.
+ // Spaces are ignored unless there are no tabs,
+ // in which case spaces count as one tab.
+ indent := 0
+ hasSpace := false
+ for _, b := range src[i:j] {
+ switch b {
+ case ' ':
+ hasSpace = true
+ case '\t':
+ indent++
+ }
+ }
+ if indent == 0 && hasSpace {
+ indent = 1
+ }
+
+ // Format the source.
+ cfg := config
+ cfg.Indent = indent
+ err := cfg.Fprint(&buf, fset, node)
+ if err != nil {
+ return nil, err
+ }
+
+ // Determine and append trailing space.
+ i = len(src)
+ for i > 0 && isSpace(src[i-1]) {
+ i--
+ }
+ buf.Write(src[i:])
+ }
+
+ return buf.Bytes(), nil
+}
+
+func hasUnsortedImports(file *ast.File) bool {
+ for _, d := range file.Decls {
+ d, ok := d.(*ast.GenDecl)
+ if !ok || d.Tok != token.IMPORT {
+ // Not an import declaration, so we're done.
+ // Imports are always first.
+ return false
+ }
+ if d.Lparen.IsValid() {
+ // For now assume all grouped imports are unsorted.
+ // TODO(gri) Should check if they are sorted already.
+ return true
+ }
+ // Ungrouped imports are sorted by default.
+ }
+ return false
+}
+
+func isSpace(b byte) bool {
+ return b == ' ' || b == '\t' || b == '\n' || b == '\r'
+}
+
+func parse(fset *token.FileSet, src []byte) (interface{}, error) {
+ // Try as a complete source file.
+ file, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err == nil {
+ return file, nil
+ }
+ // If the source is missing a package clause, try as a source fragment; otherwise fail.
+ if !strings.Contains(err.Error(), "expected 'package'") {
+ return nil, err
+ }
+
+ // Try as a declaration list by prepending a package clause in front of src.
+ // Use ';' not '\n' to keep line numbers intact.
+ psrc := append([]byte("package p;"), src...)
+ file, err = parser.ParseFile(fset, "", psrc, parser.ParseComments)
+ if err == nil {
+ return file.Decls, nil
+ }
+ // If the source is missing a declaration, try as a statement list; otherwise fail.
+ if !strings.Contains(err.Error(), "expected declaration") {
+ return nil, err
+ }
+
+ // Try as statement list by wrapping a function around src.
+ fsrc := append(append([]byte("package p; func _() {"), src...), '}')
+ file, err = parser.ParseFile(fset, "", fsrc, parser.ParseComments)
+ if err == nil {
+ return file.Decls[0].(*ast.FuncDecl).Body.List, nil
+ }
+
+ // Failed, and out of options.
+ return nil, err
+}
diff --git a/src/pkg/go/format/format_test.go b/src/pkg/go/format/format_test.go
new file mode 100644
index 000000000..7d7940bb5
--- /dev/null
+++ b/src/pkg/go/format/format_test.go
@@ -0,0 +1,125 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package format
+
+import (
+ "bytes"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "strings"
+ "testing"
+)
+
+const testfile = "format_test.go"
+
+func diff(t *testing.T, dst, src []byte) {
+ line := 1
+ offs := 0 // line offset
+ for i := 0; i < len(dst) && i < len(src); i++ {
+ d := dst[i]
+ s := src[i]
+ if d != s {
+ t.Errorf("dst:%d: %s\n", line, dst[offs:i+1])
+ t.Errorf("src:%d: %s\n", line, src[offs:i+1])
+ return
+ }
+ if s == '\n' {
+ line++
+ offs = i + 1
+ }
+ }
+ if len(dst) != len(src) {
+ t.Errorf("len(dst) = %d, len(src) = %d\nsrc = %q", len(dst), len(src), src)
+ }
+}
+
+func TestNode(t *testing.T) {
+ src, err := ioutil.ReadFile(testfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, testfile, src, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+
+ if err = Node(&buf, fset, file); err != nil {
+ t.Fatal("Node failed:", err)
+ }
+
+ diff(t, buf.Bytes(), src)
+}
+
+func TestSource(t *testing.T) {
+ src, err := ioutil.ReadFile(testfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := Source(src)
+ if err != nil {
+ t.Fatal("Source failed:", err)
+ }
+
+ diff(t, res, src)
+}
+
+// Test cases that are expected to fail are marked by the prefix "ERROR".
+var tests = []string{
+ // declaration lists
+ `import "go/format"`,
+ "var x int",
+ "var x int\n\ntype T struct{}",
+
+ // statement lists
+ "x := 0",
+ "f(a, b, c)\nvar x int = f(1, 2, 3)",
+
+ // indentation, leading and trailing space
+ "\tx := 0\n\tgo f()",
+ "\tx := 0\n\tgo f()\n\n\n",
+ "\n\t\t\n\n\tx := 0\n\tgo f()\n\n\n",
+ "\n\t\t\n\n\t\t\tx := 0\n\t\t\tgo f()\n\n\n",
+ "\n\t\t\n\n\t\t\tx := 0\n\t\t\tconst s = `\nfoo\n`\n\n\n", // no indentation inside raw strings
+
+ // erroneous programs
+ "ERRORvar x",
+ "ERROR1 + 2 +",
+ "ERRORx := 0",
+}
+
+func String(s string) (string, error) {
+ res, err := Source([]byte(s))
+ if err != nil {
+ return "", err
+ }
+ return string(res), nil
+}
+
+func TestPartial(t *testing.T) {
+ for _, src := range tests {
+ if strings.HasPrefix(src, "ERROR") {
+ // test expected to fail
+ src = src[5:] // remove ERROR prefix
+ res, err := String(src)
+ if err == nil && res == src {
+ t.Errorf("formatting succeeded but was expected to fail:\n%q", src)
+ }
+ } else {
+ // test expected to succeed
+ res, err := String(src)
+ if err != nil {
+ t.Errorf("formatting failed (%s):\n%q", err, src)
+ } else if res != src {
+ t.Errorf("formatting incorrect:\nsource: %q\nresult: %q", src, res)
+ }
+ }
+ }
+}
diff --git a/src/pkg/go/parser/error_test.go b/src/pkg/go/parser/error_test.go
index 377c8b80c..b59fda11a 100644
--- a/src/pkg/go/parser/error_test.go
+++ b/src/pkg/go/parser/error_test.go
@@ -34,9 +34,11 @@ import (
const testdata = "testdata"
+var fsetErrs *token.FileSet
+
// getFile assumes that each filename occurs at most once
func getFile(filename string) (file *token.File) {
- fset.Iterate(func(f *token.File) bool {
+ fsetErrs.Iterate(func(f *token.File) bool {
if f.Name() == filename {
if file != nil {
panic(filename + " used multiple times")
@@ -125,7 +127,7 @@ func compareErrors(t *testing.T, expected map[token.Pos]string, found scanner.Er
if len(expected) > 0 {
t.Errorf("%d errors not reported:", len(expected))
for pos, msg := range expected {
- t.Errorf("%s: %s\n", fset.Position(pos), msg)
+ t.Errorf("%s: %s\n", fsetErrs.Position(pos), msg)
}
}
}
@@ -137,12 +139,13 @@ func checkErrors(t *testing.T, filename string, input interface{}) {
return
}
- _, err = ParseFile(fset, filename, src, DeclarationErrors)
+ _, err = ParseFile(fsetErrs, filename, src, DeclarationErrors|AllErrors)
found, ok := err.(scanner.ErrorList)
if err != nil && !ok {
t.Error(err)
return
}
+ found.RemoveMultiples()
// we are expecting the following errors
// (collect these after parsing a file so that it is found in the file set)
@@ -153,6 +156,7 @@ func checkErrors(t *testing.T, filename string, input interface{}) {
}
func TestErrors(t *testing.T) {
+ fsetErrs = token.NewFileSet()
list, err := ioutil.ReadDir(testdata)
if err != nil {
t.Fatal(err)
diff --git a/src/pkg/go/parser/interface.go b/src/pkg/go/parser/interface.go
index 5c203a784..39affdd6b 100644
--- a/src/pkg/go/parser/interface.go
+++ b/src/pkg/go/parser/interface.go
@@ -52,12 +52,13 @@ func readSource(filename string, src interface{}) ([]byte, error) {
type Mode uint
const (
- PackageClauseOnly Mode = 1 << iota // parsing stops after package clause
- ImportsOnly // parsing stops after import declarations
- ParseComments // parse comments and add them to AST
- Trace // print a trace of parsed productions
- DeclarationErrors // report declaration errors
- SpuriousErrors // report all (not just the first) errors per line
+ PackageClauseOnly Mode = 1 << iota // stop parsing after package clause
+ ImportsOnly // stop parsing after import declarations
+ ParseComments // parse comments and add them to AST
+ Trace // print a trace of parsed productions
+ DeclarationErrors // report declaration errors
+ SpuriousErrors // same as AllErrors, for backward-compatibility
+ AllErrors = SpuriousErrors // report all errors (not just the first 10 on different lines)
)
// ParseFile parses the source code of a single Go source file and returns
@@ -79,26 +80,39 @@ const (
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
-func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode) (*ast.File, error) {
+func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode) (f *ast.File, err error) {
// get source
text, err := readSource(filename, src)
if err != nil {
return nil, err
}
- // parse source
var p parser
- p.init(fset, filename, text, mode)
- f := p.parseFile()
+ defer func() {
+ if e := recover(); e != nil {
+ _ = e.(bailout) // re-panics if it's not a bailout
+ }
+
+ // set result values
+ if f == nil {
+ // source is not a valid Go source file - satisfy
+ // ParseFile API and return a valid (but) empty
+ // *ast.File
+ f = &ast.File{
+ Name: new(ast.Ident),
+ Scope: ast.NewScope(nil),
+ }
+ }
- // sort errors
- if p.mode&SpuriousErrors == 0 {
- p.errors.RemoveMultiples()
- } else {
p.errors.Sort()
- }
+ err = p.errors.Err()
+ }()
+
+ // parse source
+ p.init(fset, filename, text, mode)
+ f = p.parseFile()
- return f, p.errors.Err()
+ return
}
// ParseDir calls ParseFile for the files in the directory specified by path and
@@ -149,7 +163,7 @@ func ParseDir(fset *token.FileSet, path string, filter func(os.FileInfo) bool, m
// ParseExpr is a convenience function for obtaining the AST of an expression x.
// The position information recorded in the AST is undefined.
-//
+//
func ParseExpr(x string) (ast.Expr, error) {
// parse x within the context of a complete package for correct scopes;
// use //line directive for correct positions in error messages and put
diff --git a/src/pkg/go/parser/parser.go b/src/pkg/go/parser/parser.go
index 20e505d97..a021a5abe 100644
--- a/src/pkg/go/parser/parser.go
+++ b/src/pkg/go/parser/parser.go
@@ -28,7 +28,7 @@ type parser struct {
// Tracing/debugging
mode Mode // parsing mode
trace bool // == (mode & Trace != 0)
- indent uint // indentation used for tracing output
+ indent int // indentation used for tracing output
// Comments
comments []*ast.CommentGroup
@@ -56,7 +56,7 @@ type parser struct {
unresolved []*ast.Ident // unresolved identifiers
imports []*ast.ImportSpec // list of imports
- // Label scope
+ // Label scopes
// (maintained by open/close LabelScope)
labelScope *ast.Scope // label scope for current function
targetStack [][]*ast.Ident // stack of unresolved labels
@@ -75,14 +75,6 @@ func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mod
p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
p.next()
-
- // set up the pkgScope here (as opposed to in parseFile) because
- // there are other parser entry points (ParseExpr, etc.)
- p.openScope()
- p.pkgScope = p.topScope
-
- // for the same reason, set up a label scope
- p.openLabelScope()
}
// ----------------------------------------------------------------------------
@@ -157,7 +149,7 @@ func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
}
}
} else {
- p.errorExpected(x.Pos(), "identifier")
+ p.errorExpected(x.Pos(), "identifier on left side of :=")
}
}
if n == 0 && p.mode&DeclarationErrors != 0 {
@@ -170,7 +162,12 @@ func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
// internal consistency.
var unresolved = new(ast.Object)
-func (p *parser) resolve(x ast.Expr) {
+// If x is an identifier, tryResolve attempts to resolve x by looking up
+// the object it denotes. If no object is found and collectUnresolved is
+// set, x is marked as unresolved and collected in the list of unresolved
+// identifiers.
+//
+func (p *parser) tryResolve(x ast.Expr, collectUnresolved bool) {
// nothing to do if x is not an identifier or the blank identifier
ident, _ := x.(*ast.Ident)
if ident == nil {
@@ -191,23 +188,30 @@ func (p *parser) resolve(x ast.Expr) {
// must be found either in the file scope, package scope
// (perhaps in another file), or universe scope --- collect
// them so that they can be resolved later
- ident.Obj = unresolved
- p.unresolved = append(p.unresolved, ident)
+ if collectUnresolved {
+ ident.Obj = unresolved
+ p.unresolved = append(p.unresolved, ident)
+ }
+}
+
+func (p *parser) resolve(x ast.Expr) {
+ p.tryResolve(x, true)
}
// ----------------------------------------------------------------------------
// Parsing support
func (p *parser) printTrace(a ...interface{}) {
- const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
- ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
- const n = uint(len(dots))
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
pos := p.file.Position(p.pos)
fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
i := 2 * p.indent
- for ; i > n; i -= n {
+ for i > n {
fmt.Print(dots)
+ i -= n
}
+ // i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
@@ -218,7 +222,7 @@ func trace(p *parser, msg string) *parser {
return p
}
-// Usage pattern: defer un(trace(p, "..."));
+// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
p.indent--
p.printTrace(")")
@@ -304,14 +308,14 @@ func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline
func (p *parser) next() {
p.leadComment = nil
p.lineComment = nil
- line := p.file.Line(p.pos) // current line
+ prev := p.pos
p.next0()
if p.tok == token.COMMENT {
var comment *ast.CommentGroup
var endline int
- if p.file.Line(p.pos) == line {
+ if p.file.Line(p.pos) == p.file.Line(prev) {
// The comment is on same line as the previous token; it
// cannot be a lead comment but may be a line comment.
comment, endline = p.consumeCommentGroup(0)
@@ -336,8 +340,26 @@ func (p *parser) next() {
}
}
+// A bailout panic is raised to indicate early termination.
+type bailout struct{}
+
func (p *parser) error(pos token.Pos, msg string) {
- p.errors.Add(p.file.Position(pos), msg)
+ epos := p.file.Position(pos)
+
+ // If AllErrors is not set, discard errors reported on the same line
+ // as the last recorded error and stop parsing if there are more than
+ // 10 errors.
+ if p.mode&AllErrors == 0 {
+ n := len(p.errors)
+ if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
+ return // discard - likely a spurious error
+ }
+ if n > 10 {
+ panic(bailout{})
+ }
+ }
+
+ p.errors.Add(epos, msg)
}
func (p *parser) errorExpected(pos token.Pos, msg string) {
@@ -585,14 +607,15 @@ func (p *parser) parseTypeName() ast.Expr {
return ident
}
-func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
+func (p *parser) parseArrayType() ast.Expr {
if p.trace {
defer un(trace(p, "ArrayType"))
}
lbrack := p.expect(token.LBRACK)
var len ast.Expr
- if ellipsisOk && p.tok == token.ELLIPSIS {
+ // always permit ellipsis for more fault-tolerant parsing
+ if p.tok == token.ELLIPSIS {
len = &ast.Ellipsis{Ellipsis: p.pos}
p.next()
} else if p.tok != token.RBRACK {
@@ -704,7 +727,7 @@ func (p *parser) tryVarType(isParam bool) ast.Expr {
if isParam && p.tok == token.ELLIPSIS {
pos := p.pos
p.next()
- typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
+ typ := p.tryIdentOrType() // don't use parseType so we can provide better error message
if typ != nil {
p.resolve(typ)
} else {
@@ -713,7 +736,7 @@ func (p *parser) tryVarType(isParam bool) ast.Expr {
}
return &ast.Ellipsis{Ellipsis: pos, Elt: typ}
}
- return p.tryIdentOrType(false)
+ return p.tryIdentOrType()
}
// If the result is an identifier, it is not resolved.
@@ -931,29 +954,31 @@ func (p *parser) parseChanType() *ast.ChanType {
pos := p.pos
dir := ast.SEND | ast.RECV
+ var arrow token.Pos
if p.tok == token.CHAN {
p.next()
if p.tok == token.ARROW {
+ arrow = p.pos
p.next()
dir = ast.SEND
}
} else {
- p.expect(token.ARROW)
+ arrow = p.expect(token.ARROW)
p.expect(token.CHAN)
dir = ast.RECV
}
value := p.parseType()
- return &ast.ChanType{Begin: pos, Dir: dir, Value: value}
+ return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
}
// If the result is an identifier, it is not resolved.
-func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
+func (p *parser) tryIdentOrType() ast.Expr {
switch p.tok {
case token.IDENT:
return p.parseTypeName()
case token.LBRACK:
- return p.parseArrayType(ellipsisOk)
+ return p.parseArrayType()
case token.STRUCT:
return p.parseStructType()
case token.MUL:
@@ -980,7 +1005,7 @@ func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
}
func (p *parser) tryType() ast.Expr {
- typ := p.tryIdentOrType(false)
+ typ := p.tryIdentOrType()
if typ != nil {
p.resolve(typ)
}
@@ -1088,7 +1113,7 @@ func (p *parser) parseOperand(lhs bool) ast.Expr {
return p.parseFuncTypeOrLit()
}
- if typ := p.tryIdentOrType(true); typ != nil {
+ if typ := p.tryIdentOrType(); typ != nil {
// could be type for composite literal or conversion
_, isIdent := typ.(*ast.Ident)
assert(!isIdent, "type cannot be identifier")
@@ -1193,14 +1218,35 @@ func (p *parser) parseElement(keyOk bool) ast.Expr {
return p.parseLiteralValue(nil)
}
- x := p.checkExpr(p.parseExpr(keyOk)) // don't resolve if map key
+ // Because the parser doesn't know the composite literal type, it cannot
+ // know if a key that's an identifier is a struct field name or a name
+ // denoting a value. The former is not resolved by the parser or the
+ // resolver.
+ //
+ // Instead, _try_ to resolve such a key if possible. If it resolves,
+ // it a) has correctly resolved, or b) incorrectly resolved because
+ // the key is a struct field with a name matching another identifier.
+ // In the former case we are done, and in the latter case we don't
+ // care because the type checker will do a separate field lookup.
+ //
+ // If the key does not resolve, it a) must be defined at the top
+ // level in another file of the same package, the universe scope, or be
+ // undeclared; or b) it is a struct field. In the former case, the type
+ // checker can do a top-level lookup, and in the latter case it will do
+ // a separate field lookup.
+ x := p.checkExpr(p.parseExpr(keyOk))
if keyOk {
if p.tok == token.COLON {
colon := p.pos
p.next()
+ // Try to resolve the key but don't collect it
+ // as unresolved identifier if it fails so that
+ // we don't get (possibly false) errors about
+ // undeclared names.
+ p.tryResolve(x, false)
return &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseElement(false)}
}
- p.resolve(x) // not a map key
+ p.resolve(x) // not a key
}
return x
@@ -1404,16 +1450,49 @@ func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
case token.ARROW:
// channel type or receive expression
- pos := p.pos
+ arrow := p.pos
p.next()
- if p.tok == token.CHAN {
- p.next()
- value := p.parseType()
- return &ast.ChanType{Begin: pos, Dir: ast.RECV, Value: value}
- }
+
+ // If the next token is token.CHAN we still don't know if it
+ // is a channel type or a receive operation - we only know
+ // once we have found the end of the unary expression. There
+ // are two cases:
+ //
+ // <- type => (<-type) must be channel type
+ // <- expr => <-(expr) is a receive from an expression
+ //
+ // In the first case, the arrow must be re-associated with
+ // the channel type parsed already:
+ //
+ // <- (chan type) => (<-chan type)
+ // <- (chan<- type) => (<-chan (<-type))
x := p.parseUnaryExpr(false)
- return &ast.UnaryExpr{OpPos: pos, Op: token.ARROW, X: p.checkExpr(x)}
+
+ // determine which case we have
+ if typ, ok := x.(*ast.ChanType); ok {
+ // (<-type)
+
+ // re-associate position info and <-
+ dir := ast.SEND
+ for ok && dir == ast.SEND {
+ if typ.Dir == ast.RECV {
+ // error: (<-type) is (<-(<-chan T))
+ p.errorExpected(typ.Arrow, "'chan'")
+ }
+ arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
+ dir, typ.Dir = typ.Dir, ast.RECV
+ typ, ok = typ.Value.(*ast.ChanType)
+ }
+ if dir == ast.SEND {
+ p.errorExpected(arrow, "channel type")
+ }
+
+ return x
+ }
+
+ // <-(expr)
+ return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: p.checkExpr(x)}
case token.MUL:
// pointer type or unary "*" expression
@@ -1774,7 +1853,7 @@ func (p *parser) parseSwitchStmt() ast.Stmt {
//
// switch t := 0; t := x.(T) { ... }
//
- // (this code is not valid Go because the first t will
+ // (this code is not valid Go because the first t
// cannot be accessed and thus is never used, the extra
// scope is needed for the correct error message).
//
@@ -2012,7 +2091,7 @@ func (p *parser) parseStmt() (s ast.Stmt) {
// ----------------------------------------------------------------------------
// Declarations
-type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
+type parseSpecFunction func(p *parser, doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec
func isValidImport(lit string) bool {
const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
@@ -2025,7 +2104,7 @@ func isValidImport(lit string) bool {
return s != ""
}
-func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
+func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "ImportSpec"))
}
@@ -2063,15 +2142,15 @@ func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
return spec
}
-func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
+func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec {
if p.trace {
- defer un(trace(p, "ConstSpec"))
+ defer un(trace(p, keyword.String()+"Spec"))
}
idents := p.parseIdentList()
typ := p.tryType()
var values []ast.Expr
- if typ != nil || p.tok == token.ASSIGN || iota == 0 {
+ if p.tok == token.ASSIGN || keyword == token.CONST && (typ != nil || iota == 0) || keyword == token.VAR && typ == nil {
p.expect(token.ASSIGN)
values = p.parseRhsList()
}
@@ -2088,12 +2167,16 @@ func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
Values: values,
Comment: p.lineComment,
}
- p.declare(spec, iota, p.topScope, ast.Con, idents...)
+ kind := ast.Con
+ if keyword == token.VAR {
+ kind = ast.Var
+ }
+ p.declare(spec, iota, p.topScope, kind, idents...)
return spec
}
-func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
+func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "TypeSpec"))
}
@@ -2114,36 +2197,6 @@ func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
return spec
}
-func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
- if p.trace {
- defer un(trace(p, "VarSpec"))
- }
-
- idents := p.parseIdentList()
- typ := p.tryType()
- var values []ast.Expr
- if typ == nil || p.tok == token.ASSIGN {
- p.expect(token.ASSIGN)
- values = p.parseRhsList()
- }
- p.expectSemi() // call before accessing p.linecomment
-
- // Go spec: The scope of a constant or variable identifier declared inside
- // a function begins at the end of the ConstSpec or VarSpec and ends at
- // the end of the innermost containing block.
- // (Global identifiers are resolved in a separate phase after parsing.)
- spec := &ast.ValueSpec{
- Doc: doc,
- Names: idents,
- Type: typ,
- Values: values,
- Comment: p.lineComment,
- }
- p.declare(spec, nil, p.topScope, ast.Var, idents...)
-
- return spec
-}
-
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
if p.trace {
defer un(trace(p, "GenDecl("+keyword.String()+")"))
@@ -2157,12 +2210,12 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.Gen
lparen = p.pos
p.next()
for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
- list = append(list, f(p, p.leadComment, iota))
+ list = append(list, f(p, p.leadComment, keyword, iota))
}
rparen = p.expect(token.RPAREN)
p.expectSemi()
} else {
- list = append(list, f(p, nil, 0))
+ list = append(list, f(p, nil, keyword, 0))
}
return &ast.GenDecl{
@@ -2262,14 +2315,11 @@ func (p *parser) parseDecl(sync func(*parser)) ast.Decl {
var f parseSpecFunction
switch p.tok {
- case token.CONST:
- f = parseConstSpec
+ case token.CONST, token.VAR:
+ f = (*parser).parseValueSpec
case token.TYPE:
- f = parseTypeSpec
-
- case token.VAR:
- f = parseVarSpec
+ f = (*parser).parseTypeSpec
case token.FUNC:
return p.parseFuncDecl()
@@ -2292,6 +2342,12 @@ func (p *parser) parseFile() *ast.File {
defer un(trace(p, "File"))
}
+ // Don't bother parsing the rest if we had errors scanning the first token.
+ // Likely not a Go source file at all.
+ if p.errors.Len() != 0 {
+ return nil
+ }
+
// package clause
doc := p.leadComment
pos := p.expect(token.PACKAGE)
@@ -2303,15 +2359,19 @@ func (p *parser) parseFile() *ast.File {
}
p.expectSemi()
- var decls []ast.Decl
-
- // Don't bother parsing the rest if we had errors already.
+ // Don't bother parsing the rest if we had errors parsing the package clause.
// Likely not a Go source file at all.
+ if p.errors.Len() != 0 {
+ return nil
+ }
- if p.errors.Len() == 0 && p.mode&PackageClauseOnly == 0 {
+ p.openScope()
+ p.pkgScope = p.topScope
+ var decls []ast.Decl
+ if p.mode&PackageClauseOnly == 0 {
// import decls
for p.tok == token.IMPORT {
- decls = append(decls, p.parseGenDecl(token.IMPORT, parseImportSpec))
+ decls = append(decls, p.parseGenDecl(token.IMPORT, (*parser).parseImportSpec))
}
if p.mode&ImportsOnly == 0 {
@@ -2321,8 +2381,9 @@ func (p *parser) parseFile() *ast.File {
}
}
}
-
- assert(p.topScope == p.pkgScope, "imbalanced scopes")
+ p.closeScope()
+ assert(p.topScope == nil, "unbalanced scopes")
+ assert(p.labelScope == nil, "unbalanced label scopes")
// resolve global identifiers within the same file
i := 0
diff --git a/src/pkg/go/parser/parser_test.go b/src/pkg/go/parser/parser_test.go
index 1b7a41b1b..1960377b0 100644
--- a/src/pkg/go/parser/parser_test.go
+++ b/src/pkg/go/parser/parser_test.go
@@ -135,6 +135,53 @@ func TestVarScope(t *testing.T) {
}
}
+func TestObjects(t *testing.T) {
+ const src = `
+package p
+import fmt "fmt"
+const pi = 3.14
+type T struct{}
+var x int
+func f() { L: }
+`
+
+ f, err := ParseFile(fset, "", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ objects := map[string]ast.ObjKind{
+ "p": ast.Bad, // not in a scope
+ "fmt": ast.Bad, // not resolved yet
+ "pi": ast.Con,
+ "T": ast.Typ,
+ "x": ast.Var,
+ "int": ast.Bad, // not resolved yet
+ "f": ast.Fun,
+ "L": ast.Lbl,
+ }
+
+ ast.Inspect(f, func(n ast.Node) bool {
+ if ident, ok := n.(*ast.Ident); ok {
+ obj := ident.Obj
+ if obj == nil {
+ if objects[ident.Name] != ast.Bad {
+ t.Errorf("no object for %s", ident.Name)
+ }
+ return true
+ }
+ if obj.Name != ident.Name {
+ t.Errorf("names don't match: obj.Name = %s, ident.Name = %s", obj.Name, ident.Name)
+ }
+ kind := objects[ident.Name]
+ if obj.Kind != kind {
+ t.Errorf("%s: obj.Kind = %s; want %s", ident.Name, obj.Kind, kind)
+ }
+ }
+ return true
+ })
+}
+
func TestUnresolved(t *testing.T) {
f, err := ParseFile(fset, "", `
package p
diff --git a/src/pkg/go/parser/performance_test.go b/src/pkg/go/parser/performance_test.go
new file mode 100644
index 000000000..f2732c0e2
--- /dev/null
+++ b/src/pkg/go/parser/performance_test.go
@@ -0,0 +1,30 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parser
+
+import (
+ "go/token"
+ "io/ioutil"
+ "testing"
+)
+
+var src = readFile("parser.go")
+
+func readFile(filename string) []byte {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ panic(err)
+ }
+ return data
+}
+
+func BenchmarkParse(b *testing.B) {
+ b.SetBytes(int64(len(src)))
+ for i := 0; i < b.N; i++ {
+ if _, err := ParseFile(token.NewFileSet(), "", src, ParseComments); err != nil {
+ b.Fatalf("benchmark failed due to parse error: %s", err)
+ }
+ }
+}
diff --git a/src/pkg/go/parser/short_test.go b/src/pkg/go/parser/short_test.go
index 238492bf3..c62f7e050 100644
--- a/src/pkg/go/parser/short_test.go
+++ b/src/pkg/go/parser/short_test.go
@@ -13,8 +13,10 @@ var valids = []string{
`package p;`,
`package p; import "fmt"; func f() { fmt.Println("Hello, World!") };`,
`package p; func f() { if f(T{}) {} };`,
- `package p; func f() { _ = (<-chan int)(x) };`,
- `package p; func f() { _ = (<-chan <-chan int)(x) };`,
+ `package p; func f() { _ = <-chan int(nil) };`,
+ `package p; func f() { _ = (<-chan int)(nil) };`,
+ `package p; func f() { _ = (<-chan <-chan int)(nil) };`,
+ `package p; func f() { _ = <-chan <-chan <-chan <-chan <-int(nil) };`,
`package p; func f(func() func() func());`,
`package p; func f(...T);`,
`package p; func f(float, ...int);`,
@@ -64,8 +66,11 @@ var invalids = []string{
`package p; var a = []int{[ /* ERROR "expected expression" */ ]int};`,
`package p; var a = ( /* ERROR "expected expression" */ []int);`,
`package p; var a = a[[ /* ERROR "expected expression" */ ]int:[]int];`,
- `package p; var a = <- /* ERROR "expected expression" */ chan int;`,
- `package p; func f() { select { case _ <- chan /* ERROR "expected expression" */ int: } };`,
+ `package p; var a = <- /* ERROR "expected expression" */ chan int;`,
+ `package p; func f() { select { case _ <- chan /* ERROR "expected expression" */ int: } };`,
+ `package p; func f() { _ = (<-<- /* ERROR "expected 'chan'" */ chan int)(nil) };`,
+ `package p; func f() { _ = (<-chan<-chan<-chan<-chan<-chan<- /* ERROR "expected channel type" */ int)(nil) };`,
+ `package p; func f() { var t []int; t /* ERROR "expected identifier on left side of :=" */ [0] := 0 };`,
}
func TestInvalid(t *testing.T) {
diff --git a/src/pkg/go/printer/nodes.go b/src/pkg/go/printer/nodes.go
index f13f9a5a8..ee0bbf1ed 100644
--- a/src/pkg/go/printer/nodes.go
+++ b/src/pkg/go/printer/nodes.go
@@ -83,7 +83,7 @@ func (p *printer) setComment(g *ast.CommentGroup) {
// don't overwrite any pending comment in the p.comment cache
// (there may be a pending comment when a line comment is
// immediately followed by a lead comment with no other
- // tokens inbetween)
+ // tokens between)
if p.commentOffset == infinity {
p.nextComment() // get comment ready for use
}
@@ -203,7 +203,7 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
} else {
const r = 4 // threshold
ratio := float64(size) / float64(prevSize)
- useFF = ratio <= 1/r || r <= ratio
+ useFF = ratio <= 1.0/r || r <= ratio
}
}
@@ -307,7 +307,7 @@ func (p *printer) parameters(fields *ast.FieldList) {
p.print(blank)
}
// parameter type
- p.expr(par.Type)
+ p.expr(stripParensAlways(par.Type))
prevLine = parLineEnd
}
// if the closing ")" is on a separate line from the last parameter,
@@ -325,13 +325,18 @@ func (p *printer) parameters(fields *ast.FieldList) {
}
func (p *printer) signature(params, result *ast.FieldList) {
- p.parameters(params)
+ if params != nil {
+ p.parameters(params)
+ } else {
+ p.print(token.LPAREN, token.RPAREN)
+ }
n := result.NumFields()
if n > 0 {
+ // result != nil
p.print(blank)
if n == 1 && result.List[0].Names == nil {
// single anonymous result; no ()'s
- p.expr(result.List[0].Type)
+ p.expr(stripParensAlways(result.List[0].Type))
return
}
p.parameters(result)
@@ -725,7 +730,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int) {
case *ast.FuncLit:
p.expr(x.Type)
- p.funcBody(x.Body, p.distance(x.Type.Pos(), p.pos), true)
+ p.adjBlock(p.distanceFrom(x.Type.Pos()), blank, x.Body)
case *ast.ParenExpr:
if _, hasParens := x.X.(*ast.ParenExpr); hasParens {
@@ -786,7 +791,14 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int) {
if len(x.Args) > 1 {
depth++
}
- p.expr1(x.Fun, token.HighestPrec, depth)
+ if _, ok := x.Fun.(*ast.FuncType); ok {
+ // conversions to literal function types require parentheses around the type
+ p.print(token.LPAREN)
+ p.expr1(x.Fun, token.HighestPrec, depth)
+ p.print(token.RPAREN)
+ } else {
+ p.expr1(x.Fun, token.HighestPrec, depth)
+ }
p.print(x.Lparen, token.LPAREN)
if x.Ellipsis.IsValid() {
p.exprList(x.Lparen, x.Args, depth, 0, x.Ellipsis)
@@ -848,9 +860,9 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int) {
case ast.SEND | ast.RECV:
p.print(token.CHAN)
case ast.RECV:
- p.print(token.ARROW, token.CHAN)
+ p.print(token.ARROW, token.CHAN) // x.Arrow and x.Pos() are the same
case ast.SEND:
- p.print(token.CHAN, token.ARROW)
+ p.print(token.CHAN, x.Arrow, token.ARROW)
}
p.print(blank)
p.expr(x.Value)
@@ -877,30 +889,38 @@ func (p *printer) expr(x ast.Expr) {
// Print the statement list indented, but without a newline after the last statement.
// Extra line breaks between statements in the source are respected but at most one
// empty line is printed between statements.
-func (p *printer) stmtList(list []ast.Stmt, _indent int, nextIsRBrace bool) {
- // TODO(gri): fix _indent code
- if _indent > 0 {
+func (p *printer) stmtList(list []ast.Stmt, nindent int, nextIsRBrace bool) {
+ if nindent > 0 {
p.print(indent)
}
multiLine := false
- for i, s := range list {
- // _indent == 0 only for lists of switch/select case clauses;
- // in those cases each clause is a new section
- p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || _indent == 0 || multiLine)
- p.stmt(s, nextIsRBrace && i == len(list)-1)
- multiLine = p.isMultiLine(s)
- }
- if _indent > 0 {
+ i := 0
+ for _, s := range list {
+ // ignore empty statements (was issue 3466)
+ if _, isEmpty := s.(*ast.EmptyStmt); !isEmpty {
+ // _indent == 0 only for lists of switch/select case clauses;
+ // in those cases each clause is a new section
+ if len(p.output) > 0 {
+ // only print line break if we are not at the beginning of the output
+ // (i.e., we are not printing only a partial program)
+ p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || nindent == 0 || multiLine)
+ }
+ p.stmt(s, nextIsRBrace && i == len(list)-1)
+ multiLine = p.isMultiLine(s)
+ i++
+ }
+ }
+ if nindent > 0 {
p.print(unindent)
}
}
// block prints an *ast.BlockStmt; it always spans at least two lines.
-func (p *printer) block(s *ast.BlockStmt, indent int) {
- p.print(s.Pos(), token.LBRACE)
- p.stmtList(s.List, indent, true)
- p.linebreak(p.lineFor(s.Rbrace), 1, ignore, true)
- p.print(s.Rbrace, token.RBRACE)
+func (p *printer) block(b *ast.BlockStmt, nindent int) {
+ p.print(b.Lbrace, token.LBRACE)
+ p.stmtList(b.List, nindent, true)
+ p.linebreak(p.lineFor(b.Rbrace), 1, ignore, true)
+ p.print(b.Rbrace, token.RBRACE)
}
func isTypeName(x ast.Expr) bool {
@@ -939,6 +959,13 @@ func stripParens(x ast.Expr) ast.Expr {
return x
}
+func stripParensAlways(x ast.Expr) ast.Expr {
+ if x, ok := x.(*ast.ParenExpr); ok {
+ return stripParensAlways(x.X)
+ }
+ return x
+}
+
func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, post ast.Stmt) {
p.print(blank)
needsBlank := false
@@ -1405,19 +1432,19 @@ func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
return
}
-func (p *printer) isOneLineFunc(b *ast.BlockStmt, headerSize int) bool {
+// bodySize is like nodeSize but it is specialized for *ast.BlockStmt's.
+func (p *printer) bodySize(b *ast.BlockStmt, maxSize int) int {
pos1 := b.Pos()
pos2 := b.Rbrace
if pos1.IsValid() && pos2.IsValid() && p.lineFor(pos1) != p.lineFor(pos2) {
// opening and closing brace are on different lines - don't make it a one-liner
- return false
+ return maxSize + 1
}
if len(b.List) > 5 || p.commentBefore(p.posFor(pos2)) {
// too many statements or there is a comment inside - don't make it a one-liner
- return false
+ return maxSize + 1
}
// otherwise, estimate body size
- const maxSize = 100
bodySize := 0
for i, s := range b.List {
if i > 0 {
@@ -1425,19 +1452,23 @@ func (p *printer) isOneLineFunc(b *ast.BlockStmt, headerSize int) bool {
}
bodySize += p.nodeSize(s, maxSize)
}
- return headerSize+bodySize <= maxSize
+ return bodySize
}
-func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool) {
+// adjBlock prints an "adjacent" block (e.g., a for-loop or function body) following
+// a header (e.g., a for-loop control clause or function signature) of given headerSize.
+// If the header's and block's size are "small enough" and the block is "simple enough",
+// the block is printed on the current line, without line breaks, spaced from the header
+// by sep. Otherwise the block's opening "{" is printed on the current line, followed by
+// lines for the block's statements and its closing "}".
+//
+func (p *printer) adjBlock(headerSize int, sep whiteSpace, b *ast.BlockStmt) {
if b == nil {
return
}
- if p.isOneLineFunc(b, headerSize) {
- sep := vtab
- if isLit {
- sep = blank
- }
+ const maxSize = 100
+ if headerSize+p.bodySize(b, maxSize) <= maxSize {
p.print(sep, b.Lbrace, token.LBRACE)
if len(b.List) > 0 {
p.print(blank)
@@ -1453,17 +1484,20 @@ func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool) {
return
}
- p.print(blank)
+ if sep != ignore {
+ p.print(blank) // always use blank
+ }
p.block(b, 1)
}
-// distance returns the column difference between from and to if both
-// are on the same line; if they are on different lines (or unknown)
-// the result is infinity.
-func (p *printer) distance(from0 token.Pos, to token.Position) int {
- from := p.posFor(from0)
- if from.IsValid() && to.IsValid() && from.Line == to.Line {
- return to.Column - from.Column
+// distanceFrom returns the column difference between from and p.pos (the current
+// estimated position) if both are on the same line; if they are on different lines
+// (or unknown) the result is infinity.
+func (p *printer) distanceFrom(from token.Pos) int {
+ if from.IsValid() && p.pos.IsValid() {
+ if f := p.posFor(from); f.Line == p.pos.Line {
+ return p.pos.Column - f.Column
+ }
}
return infinity
}
@@ -1477,7 +1511,7 @@ func (p *printer) funcDecl(d *ast.FuncDecl) {
}
p.expr(d.Name)
p.signature(d.Type.Params, d.Type.Results)
- p.funcBody(d.Body, p.distance(d.Pos(), p.pos), false)
+ p.adjBlock(p.distanceFrom(d.Pos()), vtab, d.Body)
}
func (p *printer) decl(decl ast.Decl) {
@@ -1507,31 +1541,35 @@ func declToken(decl ast.Decl) (tok token.Token) {
return
}
-func (p *printer) file(src *ast.File) {
- p.setComment(src.Doc)
- p.print(src.Pos(), token.PACKAGE, blank)
- p.expr(src.Name)
-
- if len(src.Decls) > 0 {
- tok := token.ILLEGAL
- for _, d := range src.Decls {
- prev := tok
- tok = declToken(d)
- // if the declaration token changed (e.g., from CONST to TYPE)
- // or the next declaration has documentation associated with it,
- // print an empty line between top-level declarations
- // (because p.linebreak is called with the position of d, which
- // is past any documentation, the minimum requirement is satisfied
- // even w/o the extra getDoc(d) nil-check - leave it in case the
- // linebreak logic improves - there's already a TODO).
+func (p *printer) declList(list []ast.Decl) {
+ tok := token.ILLEGAL
+ for _, d := range list {
+ prev := tok
+ tok = declToken(d)
+ // If the declaration token changed (e.g., from CONST to TYPE)
+ // or the next declaration has documentation associated with it,
+ // print an empty line between top-level declarations.
+ // (because p.linebreak is called with the position of d, which
+ // is past any documentation, the minimum requirement is satisfied
+ // even w/o the extra getDoc(d) nil-check - leave it in case the
+ // linebreak logic improves - there's already a TODO).
+ if len(p.output) > 0 {
+ // only print line break if we are not at the beginning of the output
+ // (i.e., we are not printing only a partial program)
min := 1
if prev != tok || getDoc(d) != nil {
min = 2
}
p.linebreak(p.lineFor(d.Pos()), min, ignore, false)
- p.decl(d)
}
+ p.decl(d)
}
+}
+func (p *printer) file(src *ast.File) {
+ p.setComment(src.Doc)
+ p.print(src.Pos(), token.PACKAGE, blank)
+ p.expr(src.Name)
+ p.declList(src.Decls)
p.print(newline)
}
diff --git a/src/pkg/go/printer/performance_test.go b/src/pkg/go/printer/performance_test.go
index 0c6a4e71f..5b29affcb 100644
--- a/src/pkg/go/printer/performance_test.go
+++ b/src/pkg/go/printer/performance_test.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// This file implements a simple printer performance benchmark:
-// go test -bench=BenchmarkPrint
+// go test -bench=BenchmarkPrint
package printer
@@ -20,7 +20,7 @@ import (
var testfile *ast.File
func testprint(out io.Writer, file *ast.File) {
- if err := (&Config{TabIndent | UseSpaces, 8}).Fprint(out, fset, file); err != nil {
+ if err := (&Config{TabIndent | UseSpaces, 8, 0}).Fprint(out, fset, file); err != nil {
log.Fatalf("print error: %s", err)
}
}
diff --git a/src/pkg/go/printer/printer.go b/src/pkg/go/printer/printer.go
index a027d32da..3c8d23e65 100644
--- a/src/pkg/go/printer/printer.go
+++ b/src/pkg/go/printer/printer.go
@@ -14,6 +14,7 @@ import (
"strconv"
"strings"
"text/tabwriter"
+ "unicode"
)
const (
@@ -164,15 +165,15 @@ func (p *printer) atLineBegin(pos token.Position) {
// write indentation
// use "hard" htabs - indentation columns
// must not be discarded by the tabwriter
- for i := 0; i < p.indent; i++ {
+ n := p.Config.Indent + p.indent // include base indentation
+ for i := 0; i < n; i++ {
p.output = append(p.output, '\t')
}
// update positions
- i := p.indent
- p.pos.Offset += i
- p.pos.Column += i
- p.out.Column += i
+ p.pos.Offset += n
+ p.pos.Column += n
+ p.out.Column += n
}
// writeByte writes ch n times to p.output and updates p.pos.
@@ -220,14 +221,6 @@ func (p *printer) writeString(pos token.Position, s string, isLit bool) {
// atLineBegin updates p.pos if there's indentation, but p.pos
// is the position of s.
p.pos = pos
- // reset state if the file changed
- // (used when printing merged ASTs of different files
- // e.g., the result of ast.MergePackageFiles)
- if p.last.IsValid() && p.last.Filename != pos.Filename {
- p.indent = 0
- p.mode = 0
- p.wsbuf = p.wsbuf[0:0]
- }
}
if isLit {
@@ -405,6 +398,7 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, prev, comment *as
// Split comment text into lines
// (using strings.Split(text, "\n") is significantly slower for
// this specific purpose, as measured with: go test -bench=Print)
+//
func split(text string) []string {
// count lines (comment text never ends in a newline)
n := 1
@@ -432,6 +426,7 @@ func split(text string) []string {
// Returns true if s contains only white space
// (only tabs and blanks can appear in the printer's context).
+//
func isBlank(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] > ' ' {
@@ -441,6 +436,7 @@ func isBlank(s string) bool {
return true
}
+// commonPrefix returns the common prefix of a and b.
func commonPrefix(a, b string) string {
i := 0
for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') {
@@ -449,11 +445,22 @@ func commonPrefix(a, b string) string {
return a[0:i]
}
+// trimRight returns s with trailing whitespace removed.
+func trimRight(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// stripCommonPrefix removes a common prefix from /*-style comment lines (unless no
+// comment line is indented, all but the first line have some form of space prefix).
+// The prefix is computed using heuristics such that is likely that the comment
+// contents are nicely laid out after re-printing each line using the printer's
+// current indentation.
+//
func stripCommonPrefix(lines []string) {
- if len(lines) < 2 {
+ if len(lines) <= 1 {
return // at most one line - nothing to do
}
- // len(lines) >= 2
+ // len(lines) > 1
// The heuristic in this function tries to handle a few
// common patterns of /*-style comments: Comments where
@@ -479,7 +486,7 @@ func stripCommonPrefix(lines []string) {
for i, line := range lines[1 : len(lines)-1] {
switch {
case isBlank(line):
- lines[1+i] = "" // range starts at line 1
+ lines[1+i] = "" // range starts with lines[1]
case first:
prefix = commonPrefix(line, line)
first = false
@@ -544,9 +551,7 @@ func stripCommonPrefix(lines []string) {
}
// Shorten the computed common prefix by the length of
// suffix, if it is found as suffix of the prefix.
- if strings.HasSuffix(prefix, string(suffix)) {
- prefix = prefix[0 : len(prefix)-len(suffix)]
- }
+ prefix = strings.TrimSuffix(prefix, string(suffix))
}
}
@@ -570,9 +575,9 @@ func stripCommonPrefix(lines []string) {
}
// Remove the common prefix from all but the first and empty lines.
- for i, line := range lines[1:] {
- if len(line) != 0 {
- lines[1+i] = line[len(prefix):] // range starts at line 1
+ for i, line := range lines {
+ if i > 0 && line != "" {
+ lines[i] = line[len(prefix):]
}
}
}
@@ -605,13 +610,26 @@ func (p *printer) writeComment(comment *ast.Comment) {
// shortcut common case of //-style comments
if text[1] == '/' {
- p.writeString(pos, text, true)
+ p.writeString(pos, trimRight(text), true)
return
}
// for /*-style comments, print line by line and let the
// write function take care of the proper indentation
lines := split(text)
+
+ // The comment started in the first column but is going
+ // to be indented. For an idempotent result, add indentation
+ // to all lines such that they look like they were indented
+ // before - this will make sure the common prefix computation
+ // is the same independent of how many times formatting is
+ // applied (was issue 1835).
+ if pos.IsValid() && pos.Column == 1 && p.indent > 0 {
+ for i, line := range lines[1:] {
+ lines[1+i] = " " + line
+ }
+ }
+
stripCommonPrefix(lines)
// write comment lines, separated by formfeed,
@@ -622,7 +640,7 @@ func (p *printer) writeComment(comment *ast.Comment) {
pos = p.pos
}
if len(line) > 0 {
- p.writeString(pos, line, true)
+ p.writeString(pos, trimRight(line), true)
}
}
}
@@ -1012,9 +1030,9 @@ func (p *printer) printNode(node interface{}) error {
case ast.Expr:
p.expr(n)
case ast.Stmt:
- // A labeled statement will un-indent to position the
- // label. Set indent to 1 so we don't get indent "underflow".
- if _, labeledStmt := n.(*ast.LabeledStmt); labeledStmt {
+ // A labeled statement will un-indent to position the label.
+ // Set p.indent to 1 so we don't get indent "underflow".
+ if _, ok := n.(*ast.LabeledStmt); ok {
p.indent = 1
}
p.stmt(n, false)
@@ -1022,6 +1040,17 @@ func (p *printer) printNode(node interface{}) error {
p.decl(n)
case ast.Spec:
p.spec(n, 1, false)
+ case []ast.Stmt:
+ // A labeled statement will un-indent to position the label.
+ // Set p.indent to 1 so we don't get indent "underflow".
+ for _, s := range n {
+ if _, ok := s.(*ast.LabeledStmt); ok {
+ p.indent = 1
+ }
+ }
+ p.stmtList(n, 0, false)
+ case []ast.Decl:
+ p.declList(n)
case *ast.File:
p.file(n)
default:
@@ -1140,7 +1169,7 @@ func (p *trimmer) Write(data []byte) (n int, err error) {
// ----------------------------------------------------------------------------
// Public interface
-// A Mode value is a set of flags (or 0). They coontrol printing.
+// A Mode value is a set of flags (or 0). They control printing.
type Mode uint
const (
@@ -1154,6 +1183,7 @@ const (
type Config struct {
Mode Mode // default: 0
Tabwidth int // default: 8
+ Indent int // default: 0 (all code is indented at least by this much)
}
// fprint implements Fprint and takes a nodesSizes map for setting up the printer state.
@@ -1198,7 +1228,7 @@ func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{
}
// flush tabwriter, if any
- if tw, _ := (output).(*tabwriter.Writer); tw != nil {
+ if tw, _ := output.(*tabwriter.Writer); tw != nil {
err = tw.Flush()
}
@@ -1215,8 +1245,8 @@ type CommentedNode struct {
// Fprint "pretty-prints" an AST node to output for a given configuration cfg.
// Position information is interpreted relative to the file set fset.
-// The node type must be *ast.File, *CommentedNode, or assignment-compatible
-// to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt.
+// The node type must be *ast.File, *CommentedNode, []ast.Decl, []ast.Stmt,
+// or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt.
//
func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
return cfg.fprint(output, fset, node, make(map[ast.Node]int))
diff --git a/src/pkg/go/printer/printer_test.go b/src/pkg/go/printer/printer_test.go
index 497d671f2..8454ac12b 100644
--- a/src/pkg/go/printer/printer_test.go
+++ b/src/pkg/go/printer/printer_test.go
@@ -6,7 +6,9 @@ package printer
import (
"bytes"
+ "errors"
"flag"
+ "fmt"
"go/ast"
"go/parser"
"go/token"
@@ -25,33 +27,28 @@ var update = flag.Bool("update", false, "update golden files")
var fset = token.NewFileSet()
-func lineString(text []byte, i int) string {
- i0 := i
- for i < len(text) && text[i] != '\n' {
- i++
- }
- return string(text[i0:i])
-}
-
type checkMode uint
const (
export checkMode = 1 << iota
rawFormat
+ idempotent
)
-func runcheck(t *testing.T, source, golden string, mode checkMode) {
- // parse source
- prog, err := parser.ParseFile(fset, source, nil, parser.ParseComments)
+// format parses src, prints the corresponding AST, verifies the resulting
+// src is syntactically correct, and returns the resulting src or an error
+// if any.
+func format(src []byte, mode checkMode) ([]byte, error) {
+ // parse src
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
if err != nil {
- t.Error(err)
- return
+ return nil, fmt.Errorf("parse: %s\n%s", err, src)
}
// filter exports if necessary
if mode&export != 0 {
- ast.FileExports(prog) // ignore result
- prog.Comments = nil // don't print comments that are not in AST
+ ast.FileExports(f) // ignore result
+ f.Comments = nil // don't print comments that are not in AST
}
// determine printer configuration
@@ -60,17 +57,72 @@ func runcheck(t *testing.T, source, golden string, mode checkMode) {
cfg.Mode |= RawFormat
}
- // format source
+ // print AST
var buf bytes.Buffer
- if err := cfg.Fprint(&buf, fset, prog); err != nil {
- t.Error(err)
+ if err := cfg.Fprint(&buf, fset, f); err != nil {
+ return nil, fmt.Errorf("print: %s", err)
}
- res := buf.Bytes()
- // formatted source must be valid
+ // make sure formated output is syntactically correct
+ res := buf.Bytes()
if _, err := parser.ParseFile(fset, "", res, 0); err != nil {
+ return nil, fmt.Errorf("re-parse: %s\n%s", err, buf.Bytes())
+ }
+
+ return res, nil
+}
+
+// lineAt returns the line in text starting at offset offs.
+func lineAt(text []byte, offs int) []byte {
+ i := offs
+ for i < len(text) && text[i] != '\n' {
+ i++
+ }
+ return text[offs:i]
+}
+
+// diff compares a and b.
+func diff(aname, bname string, a, b []byte) error {
+ var buf bytes.Buffer // holding long error message
+
+ // compare lengths
+ if len(a) != len(b) {
+ fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b))
+ }
+
+ // compare contents
+ line := 1
+ offs := 1
+ for i := 0; i < len(a) && i < len(b); i++ {
+ ch := a[i]
+ if ch != b[i] {
+ fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs))
+ fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs))
+ fmt.Fprintf(&buf, "\n\n")
+ break
+ }
+ if ch == '\n' {
+ line++
+ offs = i + 1
+ }
+ }
+
+ if buf.Len() > 0 {
+ return errors.New(buf.String())
+ }
+ return nil
+}
+
+func runcheck(t *testing.T, source, golden string, mode checkMode) {
+ src, err := ioutil.ReadFile(source)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ res, err := format(src, mode)
+ if err != nil {
t.Error(err)
- t.Logf("\n%s", res)
return
}
@@ -89,23 +141,19 @@ func runcheck(t *testing.T, source, golden string, mode checkMode) {
return
}
- // compare lengths
- if len(res) != len(gld) {
- t.Errorf("len = %d, expected %d (= len(%s))", len(res), len(gld), golden)
+ // formatted source and golden must be the same
+ if err := diff(source, golden, res, gld); err != nil {
+ t.Error(err)
+ return
}
- // compare contents
- for i, line, offs := 0, 1, 0; i < len(res) && i < len(gld); i++ {
- ch := res[i]
- if ch != gld[i] {
- t.Errorf("%s:%d:%d: %s", source, line, i-offs+1, lineString(res, offs))
- t.Errorf("%s:%d:%d: %s", golden, line, i-offs+1, lineString(gld, offs))
- t.Error()
- return
- }
- if ch == '\n' {
- line++
- offs = i + 1
+ if mode&idempotent != 0 {
+ // formatting golden must be idempotent
+ // (This is very difficult to achieve in general and for now
+ // it is only checked for files explicitly marked as such.)
+ res, err = format(gld, mode)
+ if err := diff(golden, fmt.Sprintf("format(%s)", golden), gld, res); err != nil {
+ t.Errorf("golden is not idempotent: %s", err)
}
}
}
@@ -142,15 +190,16 @@ type entry struct {
// Use go test -update to create/update the respective golden files.
var data = []entry{
- {"empty.input", "empty.golden", 0},
+ {"empty.input", "empty.golden", idempotent},
{"comments.input", "comments.golden", 0},
{"comments.input", "comments.x", export},
- {"linebreaks.input", "linebreaks.golden", 0},
- {"expressions.input", "expressions.golden", 0},
- {"expressions.input", "expressions.raw", rawFormat},
+ {"comments2.input", "comments2.golden", idempotent},
+ {"linebreaks.input", "linebreaks.golden", idempotent},
+ {"expressions.input", "expressions.golden", idempotent},
+ {"expressions.input", "expressions.raw", rawFormat | idempotent},
{"declarations.input", "declarations.golden", 0},
{"statements.input", "statements.golden", 0},
- {"slow.input", "slow.golden", 0},
+ {"slow.input", "slow.golden", idempotent},
}
func TestFiles(t *testing.T) {
@@ -248,7 +297,7 @@ func testComment(t *testing.T, f *ast.File, srclen int, comment *ast.Comment) {
}
}
-// Verify that the printer produces always produces a correct program
+// Verify that the printer produces a correct program
// even if the position information of comments introducing newlines
// is incorrect.
func TestBadComments(t *testing.T) {
@@ -385,28 +434,136 @@ func (t *t) foo(a, b, c int) int {
}
}
-// TextX is a skeleton test that can be filled in for debugging one-off cases.
-// Do not remove.
-func TestX(t *testing.T) {
- const src = `
-package p
-func _() {}
-`
- // parse original
- f, err := parser.ParseFile(fset, "src", src, parser.ParseComments)
+var decls = []string{
+ `import "fmt"`,
+ "const pi = 3.1415\nconst e = 2.71828\n\nvar x = pi",
+ "func sum(x, y int) int\t{ return x + y }",
+}
+
+func TestDeclLists(t *testing.T) {
+ for _, src := range decls {
+ file, err := parser.ParseFile(fset, "", "package p;"+src, parser.ParseComments)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ var buf bytes.Buffer
+ err = Fprint(&buf, fset, file.Decls) // only print declarations
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ out := buf.String()
+ if out != src {
+ t.Errorf("\ngot : %q\nwant: %q\n", out, src)
+ }
+ }
+}
+
+var stmts = []string{
+ "i := 0",
+ "select {}\nvar a, b = 1, 2\nreturn a + b",
+ "go f()\ndefer func() {}()",
+}
+
+func TestStmtLists(t *testing.T) {
+ for _, src := range stmts {
+ file, err := parser.ParseFile(fset, "", "package p; func _() {"+src+"}", parser.ParseComments)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ var buf bytes.Buffer
+ err = Fprint(&buf, fset, file.Decls[0].(*ast.FuncDecl).Body.List) // only print statements
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ out := buf.String()
+ if out != src {
+ t.Errorf("\ngot : %q\nwant: %q\n", out, src)
+ }
+ }
+}
+
+func TestBaseIndent(t *testing.T) {
+ // The testfile must not contain multi-line raw strings since those
+ // are not indented (because their values must not change) and make
+ // this test fail.
+ const filename = "printer.go"
+ src, err := ioutil.ReadFile(filename)
if err != nil {
- t.Fatal(err)
+ panic(err) // error in test
+ }
+
+ file, err := parser.ParseFile(fset, filename, src, 0)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ var buf bytes.Buffer
+ for indent := 0; indent < 4; indent++ {
+ buf.Reset()
+ (&Config{Tabwidth: tabwidth, Indent: indent}).Fprint(&buf, fset, file)
+ // all code must be indented by at least 'indent' tabs
+ lines := bytes.Split(buf.Bytes(), []byte{'\n'})
+ for i, line := range lines {
+ if len(line) == 0 {
+ continue // empty lines don't have indentation
+ }
+ n := 0
+ for j, b := range line {
+ if b != '\t' {
+ // end of indentation
+ n = j
+ break
+ }
+ }
+ if n < indent {
+ t.Errorf("line %d: got only %d tabs; want at least %d: %q", i, n, indent, line)
+ }
+ }
+ }
+}
+
+// TestFuncType tests that an ast.FuncType with a nil Params field
+// can be printed (per go/ast specification). Test case for issue 3870.
+func TestFuncType(t *testing.T) {
+ src := &ast.File{
+ Name: &ast.Ident{Name: "p"},
+ Decls: []ast.Decl{
+ &ast.FuncDecl{
+ Name: &ast.Ident{Name: "f"},
+ Type: &ast.FuncType{},
+ },
+ },
}
- // pretty-print original
var buf bytes.Buffer
- if err = (&Config{Mode: UseSpaces, Tabwidth: 8}).Fprint(&buf, fset, f); err != nil {
+ if err := Fprint(&buf, fset, src); err != nil {
t.Fatal(err)
}
+ got := buf.String()
- // parse pretty printed original
- if _, err := parser.ParseFile(fset, "", buf.Bytes(), 0); err != nil {
- t.Fatalf("%s\n%s", err, buf.Bytes())
+ const want = `package p
+
+func f()
+`
+
+ if got != want {
+ t.Fatalf("got:\n%s\nwant:\n%s\n", got, want)
}
+}
+// TextX is a skeleton test that can be filled in for debugging one-off cases.
+// Do not remove.
+func TestX(t *testing.T) {
+ const src = `
+package p
+func _() {}
+`
+ _, err := format([]byte(src), 0)
+ if err != nil {
+ t.Error(err)
+ }
}
diff --git a/src/pkg/go/printer/testdata/comments.golden b/src/pkg/go/printer/testdata/comments.golden
index d9aa2d82f..610a42a68 100644
--- a/src/pkg/go/printer/testdata/comments.golden
+++ b/src/pkg/go/printer/testdata/comments.golden
@@ -529,7 +529,7 @@ func _() {
}
func _() {
- var a = []int{1, 2}// jasldf
+ var a = []int{1, 2}// jasldf
_ = a
}
@@ -626,4 +626,13 @@ func _() {
var lflag bool // -l - disable line directives
}
+// Trailing white space in comments should be trimmed
+func _() {
+ // This comment has 4 blanks following that should be trimmed:
+ /* Each line of this comment has blanks or tabs following that should be trimmed:
+ line 2:
+ line 3:
+ */
+}
+
/* This comment is the last entry in this file. It must be printed and should be followed by a newline */
diff --git a/src/pkg/go/printer/testdata/comments.input b/src/pkg/go/printer/testdata/comments.input
index 6084b3fe4..d121dd4be 100644
--- a/src/pkg/go/printer/testdata/comments.input
+++ b/src/pkg/go/printer/testdata/comments.input
@@ -534,7 +534,7 @@ func _() {
}
func _() {
- var a = []int{1, 2, // jasldf
+ var a = []int{1, 2, // jasldf
}
_ = a
}
@@ -630,5 +630,13 @@ var vflag string // -v [y.output] - y.output file
var lflag bool // -l - disable line directives
}
+// Trailing white space in comments should be trimmed
+func _() {
+// This comment has 4 blanks following that should be trimmed:
+/* Each line of this comment has blanks or tabs following that should be trimmed:
+ line 2:
+ line 3:
+*/
+}
/* This comment is the last entry in this file. It must be printed and should be followed by a newline */
diff --git a/src/pkg/go/printer/testdata/comments2.golden b/src/pkg/go/printer/testdata/comments2.golden
new file mode 100644
index 000000000..d3b50bf3e
--- /dev/null
+++ b/src/pkg/go/printer/testdata/comments2.golden
@@ -0,0 +1,79 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+// Test cases for idempotent comment formatting (was issue 1835).
+/*
+c1a
+*/
+/*
+ c1b
+*/
+/* foo
+c1c
+*/
+/* foo
+ c1d
+*/
+/*
+c1e
+foo */
+/*
+ c1f
+ foo */
+
+func f() {
+ /*
+ c2a
+ */
+ /*
+ c2b
+ */
+ /* foo
+ c2c
+ */
+ /* foo
+ c2d
+ */
+ /*
+ c2e
+ foo */
+ /*
+ c2f
+ foo */
+}
+
+func g() {
+ /*
+ c3a
+ */
+ /*
+ c3b
+ */
+ /* foo
+ c3c
+ */
+ /* foo
+ c3d
+ */
+ /*
+ c3e
+ foo */
+ /*
+ c3f
+ foo */
+}
+
+// Test case taken literally from issue 1835.
+func main() {
+ /*
+ prints test 5 times
+ */
+ for i := 0; i < 5; i++ {
+ println("test")
+ }
+}
diff --git a/src/pkg/go/printer/testdata/comments2.input b/src/pkg/go/printer/testdata/comments2.input
new file mode 100644
index 000000000..6f8c85c94
--- /dev/null
+++ b/src/pkg/go/printer/testdata/comments2.input
@@ -0,0 +1,79 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+// Test cases for idempotent comment formatting (was issue 1835).
+/*
+c1a
+*/
+/*
+ c1b
+*/
+/* foo
+c1c
+*/
+/* foo
+ c1d
+*/
+/*
+c1e
+foo */
+/*
+ c1f
+ foo */
+
+func f() {
+/*
+c2a
+*/
+/*
+ c2b
+*/
+/* foo
+c2c
+*/
+/* foo
+ c2d
+*/
+/*
+c2e
+foo */
+/*
+ c2f
+ foo */
+}
+
+func g() {
+/*
+c3a
+*/
+/*
+ c3b
+*/
+/* foo
+c3c
+*/
+/* foo
+ c3d
+*/
+/*
+c3e
+foo */
+/*
+ c3f
+ foo */
+}
+
+// Test case taken literally from issue 1835.
+func main() {
+/*
+prints test 5 times
+*/
+ for i := 0; i < 5; i++ {
+ println("test")
+ }
+} \ No newline at end of file
diff --git a/src/pkg/go/printer/testdata/declarations.golden b/src/pkg/go/printer/testdata/declarations.golden
index 71ed32ed1..0ad72d349 100644
--- a/src/pkg/go/printer/testdata/declarations.golden
+++ b/src/pkg/go/printer/testdata/declarations.golden
@@ -654,6 +654,35 @@ var _ = map[int]int{
abcde: a, // align with previous line
}
+// alignment of map composite entries: test cases from issue 3965
+// aligned
+var _ = T1{
+ a: x,
+ b: y,
+ cccccccccccccccccccc: z,
+}
+
+// not aligned
+var _ = T2{
+ a: x,
+ b: y,
+ ccccccccccccccccccccc: z,
+}
+
+// aligned
+var _ = T3{
+ aaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
+// not aligned
+var _ = T4{
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
func _() {
var _ = T{
a, // must introduce trailing comma
@@ -858,3 +887,28 @@ type _ interface {
r string,
x ...int)
}
+
+// omit superfluous parentheses in parameter lists
+func _(int)
+func _(int)
+func _(x int)
+func _(x int)
+func _(x, y int)
+func _(x, y int)
+
+func _() int
+func _() int
+func _() int
+
+func _() (x int)
+func _() (x int)
+func _() (x int)
+
+// special cases: some channel types require parentheses
+func _(x chan (<-chan int))
+func _(x chan (<-chan int))
+func _(x chan (<-chan int))
+
+func _(x chan<- (chan int))
+func _(x chan<- (chan int))
+func _(x chan<- (chan int))
diff --git a/src/pkg/go/printer/testdata/declarations.input b/src/pkg/go/printer/testdata/declarations.input
index d74cff25d..455c0c6c1 100644
--- a/src/pkg/go/printer/testdata/declarations.input
+++ b/src/pkg/go/printer/testdata/declarations.input
@@ -667,6 +667,35 @@ var _ = map[int]int{
abcde: a, // align with previous line
}
+// alignment of map composite entries: test cases from issue 3965
+// aligned
+var _ = T1{
+ a: x,
+ b: y,
+ cccccccccccccccccccc: z,
+}
+
+// not aligned
+var _ = T2{
+ a: x,
+ b: y,
+ ccccccccccccccccccccc: z,
+}
+
+// aligned
+var _ = T3{
+ aaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
+// not aligned
+var _ = T4{
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
func _() {
var _ = T{
@@ -867,3 +896,28 @@ p, q,
r string,
x ...int)
}
+
+// omit superfluous parentheses in parameter lists
+func _((int))
+func _((((((int))))))
+func _(x (int))
+func _(x (((((int))))))
+func _(x, y (int))
+func _(x, y (((((int))))))
+
+func _() (int)
+func _() ((int))
+func _() ((((((int))))))
+
+func _() (x int)
+func _() (x (int))
+func _() (x (((((int))))))
+
+// special cases: some channel types require parentheses
+func _(x chan(<-chan int))
+func _(x (chan(<-chan int)))
+func _(x ((((chan(<-chan int))))))
+
+func _(x chan<-(chan int))
+func _(x (chan<-(chan int)))
+func _(x ((((chan<-(chan int))))))
diff --git a/src/pkg/go/printer/testdata/expressions.golden b/src/pkg/go/printer/testdata/expressions.golden
index 45fa4d97a..4291c557c 100644
--- a/src/pkg/go/printer/testdata/expressions.golden
+++ b/src/pkg/go/printer/testdata/expressions.golden
@@ -647,3 +647,18 @@ func _() {
a...,
)
}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+ // these conversions should be rewritten to look
+ // the same as the parenthesized conversions below
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+}
diff --git a/src/pkg/go/printer/testdata/expressions.input b/src/pkg/go/printer/testdata/expressions.input
index f545c6605..1ec12a050 100644
--- a/src/pkg/go/printer/testdata/expressions.input
+++ b/src/pkg/go/printer/testdata/expressions.input
@@ -676,3 +676,18 @@ func _() {
a...,
)
}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+ // these conversions should be rewritten to look
+ // the same as the parenthesized conversions below
+ _ = func()()(nil)
+ _ = func(x int)(float)(nil)
+ _ = func() func() func()()(nil)
+
+ _ = (func()())(nil)
+ _ = (func(x int)(float))(nil)
+ _ = (func() func() func()())(nil)
+}
diff --git a/src/pkg/go/printer/testdata/expressions.raw b/src/pkg/go/printer/testdata/expressions.raw
index 87a4b0083..062900e07 100644
--- a/src/pkg/go/printer/testdata/expressions.raw
+++ b/src/pkg/go/printer/testdata/expressions.raw
@@ -647,3 +647,18 @@ func _() {
a...,
)
}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+ // these conversions should be rewritten to look
+ // the same as the parenthesized conversions below
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+}
diff --git a/src/pkg/go/printer/testdata/statements.golden b/src/pkg/go/printer/testdata/statements.golden
index 4d70617bf..3b298f95e 100644
--- a/src/pkg/go/printer/testdata/statements.golden
+++ b/src/pkg/go/printer/testdata/statements.golden
@@ -241,7 +241,7 @@ func _() {
}
}
-// Formatting of for-statement headers.
+// Formatting of for-statement headers for single-line for-loops.
func _() {
for {
}
@@ -279,6 +279,86 @@ func _() {
} // no parens printed
}
+// Formatting of for-statement headers for multi-line for-loops.
+func _() {
+ for {
+ }
+ for expr {
+ }
+ for expr {
+ } // no parens printed
+ for {
+ } // no semicolons printed
+ for x := expr; ; {
+ use(x)
+ }
+ for expr {
+ } // no semicolons printed
+ for expr {
+ } // no semicolons and parens printed
+ for ; ; expr = false {
+ }
+ for x := expr; expr; {
+ use(x)
+ }
+ for x := expr; ; expr = false {
+ use(x)
+ }
+ for ; expr; expr = false {
+ }
+ for x := expr; expr; expr = false {
+ use(x)
+ }
+ for x := range []int{} {
+ use(x)
+ }
+ for x := range []int{} {
+ use(x)
+ } // no parens printed
+}
+
+// Formatting of selected short single- and multi-line statements.
+func _() {
+ if cond {
+ }
+ if cond {
+ } // multiple lines
+ if cond {
+ } else {
+ } // else clause always requires multiple lines
+
+ for {
+ }
+ for i := 0; i < len(a); 1++ {
+ }
+ for i := 0; i < len(a); 1++ {
+ a[i] = i
+ }
+ for i := 0; i < len(a); 1++ {
+ a[i] = i
+ } // multiple lines
+
+ for i := range a {
+ }
+ for i := range a {
+ a[i] = i
+ }
+ for i := range a {
+ a[i] = i
+ } // multiple lines
+
+ go func() {
+ for {
+ a <- <-b
+ }
+ }()
+ defer func() {
+ if x := recover(); x != nil {
+ err = fmt.Sprintf("error: %s", x.msg)
+ }
+ }()
+}
+
// Don't remove mandatory parentheses around composite literals in control clauses.
func _() {
// strip parentheses - no composite literals or composite literals don't start with a type name
@@ -527,3 +607,29 @@ AVeryLongLabelThatShouldNotAffectFormatting:
// There should be a single empty line before this comment.
MoreCode()
}
+
+// Formatting of empty statements.
+func _() {
+
+}
+
+func _() {
+}
+
+func _() {
+}
+
+func _() {
+ f()
+}
+
+func _() {
+L:
+ ;
+}
+
+func _() {
+L:
+ ;
+ f()
+}
diff --git a/src/pkg/go/printer/testdata/statements.input b/src/pkg/go/printer/testdata/statements.input
index bd03bc98b..e7fcc0e54 100644
--- a/src/pkg/go/printer/testdata/statements.input
+++ b/src/pkg/go/printer/testdata/statements.input
@@ -223,7 +223,7 @@ func _() {
}
-// Formatting of for-statement headers.
+// Formatting of for-statement headers for single-line for-loops.
func _() {
for{}
for expr {}
@@ -235,14 +235,70 @@ func _() {
for; ; expr = false {}
for x :=expr; expr; {use(x)}
for x := expr;; expr=false {use(x)}
- for;expr;expr =false {
- }
+ for;expr;expr =false {}
for x := expr;expr;expr = false { use(x) }
for x := range []int{} { use(x) }
for x := range (([]int{})) { use(x) } // no parens printed
}
+// Formatting of for-statement headers for multi-line for-loops.
+func _() {
+ for{
+ }
+ for expr {
+ }
+ for (expr) {
+ } // no parens printed
+ for;;{
+ } // no semicolons printed
+ for x :=expr;; {use( x)
+ }
+ for; expr;{
+ } // no semicolons printed
+ for; ((expr));{
+ } // no semicolons and parens printed
+ for; ; expr = false {
+ }
+ for x :=expr; expr; {use(x)
+ }
+ for x := expr;; expr=false {use(x)
+ }
+ for;expr;expr =false {
+ }
+ for x := expr;expr;expr = false {
+ use(x)
+ }
+ for x := range []int{} {
+ use(x) }
+ for x := range (([]int{})) {
+ use(x) } // no parens printed
+}
+
+
+// Formatting of selected short single- and multi-line statements.
+func _() {
+ if cond {}
+ if cond {
+ } // multiple lines
+ if cond {} else {} // else clause always requires multiple lines
+
+ for {}
+ for i := 0; i < len(a); 1++ {}
+ for i := 0; i < len(a); 1++ { a[i] = i }
+ for i := 0; i < len(a); 1++ { a[i] = i
+ } // multiple lines
+
+ for i := range a {}
+ for i := range a { a[i] = i }
+ for i := range a { a[i] = i
+ } // multiple lines
+
+ go func() { for { a <- <-b } }()
+ defer func() { if x := recover(); x != nil { err = fmt.Sprintf("error: %s", x.msg) } }()
+}
+
+
// Don't remove mandatory parentheses around composite literals in control clauses.
func _() {
// strip parentheses - no composite literals or composite literals don't start with a type name
@@ -468,3 +524,27 @@ AVeryLongLabelThatShouldNotAffectFormatting:
// There should be a single empty line before this comment.
MoreCode()
}
+
+
+// Formatting of empty statements.
+func _() {
+ ;;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {;;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {;;;;;;;;;;;;;;;;;;;;;;;;;}
+
+func _() {
+f();;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {
+L:;;;;;;;;;;;;
+}
+
+func _() {
+L:;;;;;;;;;;;;
+ f()
+}
diff --git a/src/pkg/go/scanner/errors.go b/src/pkg/go/scanner/errors.go
index 8a75a9650..22de69c3c 100644
--- a/src/pkg/go/scanner/errors.go
+++ b/src/pkg/go/scanner/errors.go
@@ -120,7 +120,7 @@ func PrintError(w io.Writer, err error) {
for _, e := range list {
fmt.Fprintf(w, "%s\n", e)
}
- } else {
+ } else if err != nil {
fmt.Fprintf(w, "%s\n", err)
}
}
diff --git a/src/pkg/go/scanner/scanner.go b/src/pkg/go/scanner/scanner.go
index da508747a..3322c58b3 100644
--- a/src/pkg/go/scanner/scanner.go
+++ b/src/pkg/go/scanner/scanner.go
@@ -81,7 +81,7 @@ func (s *Scanner) next() {
}
}
-// A mode value is set of flags (or 0).
+// A mode value is a set of flags (or 0).
// They control scanner behavior.
//
type Mode uint
@@ -125,6 +125,9 @@ func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode
s.ErrorCount = 0
s.next()
+ if s.ch == '\uFEFF' {
+ s.next() // ignore BOM
+ }
}
func (s *Scanner) error(offs int, msg string) {
@@ -157,11 +160,15 @@ func (s *Scanner) interpretLineComment(text []byte) {
func (s *Scanner) scanComment() string {
// initial '/' already consumed; s.ch == '/' || s.ch == '*'
offs := s.offset - 1 // position of initial '/'
+ hasCR := false
if s.ch == '/' {
//-style comment
s.next()
for s.ch != '\n' && s.ch >= 0 {
+ if s.ch == '\r' {
+ hasCR = true
+ }
s.next()
}
if offs == s.lineOffset {
@@ -175,6 +182,9 @@ func (s *Scanner) scanComment() string {
s.next()
for s.ch >= 0 {
ch := s.ch
+ if ch == '\r' {
+ hasCR = true
+ }
s.next()
if ch == '*' && s.ch == '/' {
s.next()
@@ -185,7 +195,12 @@ func (s *Scanner) scanComment() string {
s.error(offs, "comment not terminated")
exit:
- return string(s.src[offs:s.offset])
+ lit := s.src[offs:s.offset]
+ if hasCR {
+ lit = stripCR(lit)
+ }
+
+ return string(lit)
}
func (s *Scanner) findLineEnd() bool {
@@ -378,7 +393,7 @@ func (s *Scanner) scanEscape(quote rune) {
for ; i > 0 && s.ch != quote && s.ch >= 0; i-- {
s.next()
}
- if x > max || 0xd800 <= x && x < 0xe000 {
+ if x > max || 0xD800 <= x && x < 0xE000 {
s.error(offs, "escape sequence is invalid Unicode code point")
}
}
@@ -527,6 +542,8 @@ func (s *Scanner) switch4(tok0, tok1 token.Token, ch2 rune, tok2, tok3 token.Tok
// token.IMAG, token.CHAR, token.STRING) or token.COMMENT, the literal string
// has the corresponding value.
//
+// If the returned token is a keyword, the literal string is the keyword.
+//
// If the returned token is token.SEMICOLON, the corresponding
// literal string is ";" if the semicolon was present in the source,
// and "\n" if the semicolon was inserted because of a newline or
@@ -560,12 +577,18 @@ scanAgain:
switch ch := s.ch; {
case isLetter(ch):
lit = s.scanIdentifier()
- tok = token.Lookup(lit)
- switch tok {
- case token.IDENT, token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN:
+ if len(lit) > 1 {
+ // keywords are longer than one letter - avoid lookup otherwise
+ tok = token.Lookup(lit)
+ switch tok {
+ case token.IDENT, token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN:
+ insertSemi = true
+ }
+ } else {
insertSemi = true
+ tok = token.IDENT
}
- case digitVal(ch) < 10:
+ case '0' <= ch && ch <= '9':
insertSemi = true
tok, lit = s.scanNumber(false)
default:
@@ -598,7 +621,7 @@ scanAgain:
case ':':
tok = s.switch2(token.COLON, token.DEFINE)
case '.':
- if digitVal(s.ch) < 10 {
+ if '0' <= s.ch && s.ch <= '9' {
insertSemi = true
tok, lit = s.scanNumber(true)
} else if s.ch == '.' {
diff --git a/src/pkg/go/scanner/scanner_test.go b/src/pkg/go/scanner/scanner_test.go
index 06223e23b..1c19053e6 100644
--- a/src/pkg/go/scanner/scanner_test.go
+++ b/src/pkg/go/scanner/scanner_test.go
@@ -6,6 +6,7 @@ package scanner
import (
"go/token"
+ "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -43,12 +44,16 @@ var tokens = [...]elt{
// Special tokens
{token.COMMENT, "/* a comment */", special},
{token.COMMENT, "// a comment \n", special},
+ {token.COMMENT, "/*\r*/", special},
+ {token.COMMENT, "//\r\n", special},
// Identifiers and basic type literals
{token.IDENT, "foobar", literal},
{token.IDENT, "a۰۱۸", literal},
{token.IDENT, "foo६४", literal},
{token.IDENT, "bar9876", literal},
+ {token.IDENT, "ŝ", literal}, // was bug (issue 4000)
+ {token.IDENT, "ŝfoo", literal}, // was bug (issue 4000)
{token.INT, "0", literal},
{token.INT, "1", literal},
{token.INT, "123456789012345678890", literal},
@@ -214,8 +219,6 @@ func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
- // make source
- src_linecount := newlineCount(string(source))
whitespace_linecount := newlineCount(whitespace)
// error handler
@@ -226,59 +229,81 @@ func TestScan(t *testing.T) {
// verify scan
var s Scanner
s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments|dontInsertSemis)
- index := 0
- // epos is the expected position
+
+ // set up expected position
epos := token.Position{
Filename: "",
Offset: 0,
Line: 1,
Column: 1,
}
+
+ index := 0
for {
pos, tok, lit := s.Scan()
- if lit == "" {
- // no literal value for non-literal tokens
- lit = tok.String()
+
+ // check position
+ if tok == token.EOF {
+ // correction for EOF
+ epos.Line = newlineCount(string(source))
+ epos.Column = 2
}
+ checkPos(t, lit, pos, epos)
+
+ // check token
e := elt{token.EOF, "", special}
if index < len(tokens) {
e = tokens[index]
+ index++
}
- if tok == token.EOF {
- lit = "<EOF>"
- epos.Line = src_linecount
- epos.Column = 2
- }
- checkPos(t, lit, pos, epos)
if tok != e.tok {
t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
}
- if e.tok.IsLiteral() {
- // no CRs in raw string literals
- elit := e.lit
- if elit[0] == '`' {
- elit = string(stripCR([]byte(elit)))
- epos.Offset += len(e.lit) - len(lit) // correct position
- }
- if lit != elit {
- t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
- }
- }
+
+ // check token class
if tokenclass(tok) != e.class {
t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
}
- epos.Offset += len(lit) + len(whitespace)
- epos.Line += newlineCount(lit) + whitespace_linecount
- if tok == token.COMMENT && lit[1] == '/' {
- // correct for unaccounted '/n' in //-style comment
- epos.Offset++
- epos.Line++
+
+ // check literal
+ elit := ""
+ switch e.tok {
+ case token.COMMENT:
+ // no CRs in comments
+ elit = string(stripCR([]byte(e.lit)))
+ //-style comment literal doesn't contain newline
+ if elit[1] == '/' {
+ elit = elit[0 : len(elit)-1]
+ }
+ case token.IDENT:
+ elit = e.lit
+ case token.SEMICOLON:
+ elit = ";"
+ default:
+ if e.tok.IsLiteral() {
+ // no CRs in raw string literals
+ elit = e.lit
+ if elit[0] == '`' {
+ elit = string(stripCR([]byte(elit)))
+ }
+ } else if e.tok.IsKeyword() {
+ elit = e.lit
+ }
+ }
+ if lit != elit {
+ t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
}
- index++
+
if tok == token.EOF {
break
}
+
+ // update position
+ epos.Offset += len(e.lit) + len(whitespace)
+ epos.Line += newlineCount(e.lit) + whitespace_linecount
+
}
+
if s.ErrorCount != 0 {
t.Errorf("found %d errors", s.ErrorCount)
}
@@ -321,6 +346,7 @@ var lines = []string{
// # indicates a semicolon present in the source
// $ indicates an automatically inserted semicolon
"",
+ "\ufeff#;", // first BOM is ignored
"#;",
"foo$\n",
"123$\n",
@@ -521,7 +547,7 @@ func TestLineComments(t *testing.T) {
}
}
-// Verify that initializing the same scanner more then once works correctly.
+// Verify that initializing the same scanner more than once works correctly.
func TestInit(t *testing.T) {
var s Scanner
@@ -669,6 +695,7 @@ var errors = []struct {
{"0X", token.INT, 0, "illegal hexadecimal number"},
{"\"abc\x00def\"", token.STRING, 4, "illegal character NUL"},
{"\"abc\x80def\"", token.STRING, 4, "illegal UTF-8 encoding"},
+ {"\ufeff\ufeff", token.ILLEGAL, 3, "illegal character U+FEFF"}, // only first BOM is ignored
}
func TestScanErrors(t *testing.T) {
@@ -683,7 +710,7 @@ func BenchmarkScan(b *testing.B) {
file := fset.AddFile("", fset.Base(), len(source))
var s Scanner
b.StartTimer()
- for i := b.N - 1; i >= 0; i-- {
+ for i := 0; i < b.N; i++ {
s.Init(file, source, nil, ScanComments)
for {
_, tok, _ := s.Scan()
@@ -693,3 +720,26 @@ func BenchmarkScan(b *testing.B) {
}
}
}
+
+func BenchmarkScanFile(b *testing.B) {
+ b.StopTimer()
+ const filename = "scanner.go"
+ src, err := ioutil.ReadFile(filename)
+ if err != nil {
+ panic(err)
+ }
+ fset := token.NewFileSet()
+ file := fset.AddFile(filename, fset.Base(), len(src))
+ b.SetBytes(int64(len(src)))
+ var s Scanner
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ s.Init(file, src, nil, ScanComments)
+ for {
+ _, tok, _ := s.Scan()
+ if tok == token.EOF {
+ break
+ }
+ }
+ }
+}
diff --git a/src/pkg/go/token/position.go b/src/pkg/go/token/position.go
index 647d1b770..f5d999561 100644
--- a/src/pkg/go/token/position.go
+++ b/src/pkg/go/token/position.go
@@ -76,7 +76,7 @@ type Pos int
// associated with it, and NoPos().IsValid() is false. NoPos is always
// smaller than any other Pos value. The corresponding Position value
// for NoPos is the zero value for Position.
-//
+//
const NoPos Pos = 0
// IsValid returns true if the position is valid.
@@ -295,9 +295,9 @@ type FileSet struct {
// NewFileSet creates a new file set.
func NewFileSet() *FileSet {
- s := new(FileSet)
- s.base = 1 // 0 == NoPos
- return s
+ return &FileSet{
+ base: 1, // 0 == NoPos
+ }
}
// Base returns the minimum base offset that must be provided to
@@ -347,7 +347,7 @@ func (s *FileSet) AddFile(filename string, base, size int) *File {
// Iterate calls f for the files in the file set in the order they were added
// until f returns false.
-//
+//
func (s *FileSet) Iterate(f func(*File) bool) {
for i := 0; ; i++ {
var file *File
@@ -367,8 +367,10 @@ func searchFiles(a []*File, x int) int {
}
func (s *FileSet) file(p Pos) *File {
+ s.mutex.RLock()
// common case: p is in last file
if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
+ s.mutex.RUnlock()
return f
}
// p is not in last file - search all files
@@ -376,10 +378,14 @@ func (s *FileSet) file(p Pos) *File {
f := s.files[i]
// f.base <= int(p) by definition of searchFiles
if int(p) <= f.base+f.size {
- s.last = f
+ s.mutex.RUnlock()
+ s.mutex.Lock()
+ s.last = f // race is ok - s.last is only a cache
+ s.mutex.Unlock()
return f
}
}
+ s.mutex.RUnlock()
return nil
}
@@ -389,9 +395,7 @@ func (s *FileSet) file(p Pos) *File {
//
func (s *FileSet) File(p Pos) (f *File) {
if p != NoPos {
- s.mutex.RLock()
f = s.file(p)
- s.mutex.RUnlock()
}
return
}
@@ -399,11 +403,9 @@ func (s *FileSet) File(p Pos) (f *File) {
// Position converts a Pos in the fileset into a general Position.
func (s *FileSet) Position(p Pos) (pos Position) {
if p != NoPos {
- s.mutex.RLock()
if f := s.file(p); f != nil {
pos = f.position(p)
}
- s.mutex.RUnlock()
}
return
}
diff --git a/src/pkg/go/token/position_test.go b/src/pkg/go/token/position_test.go
index 160107df4..1d36c2226 100644
--- a/src/pkg/go/token/position_test.go
+++ b/src/pkg/go/token/position_test.go
@@ -6,6 +6,8 @@ package token
import (
"fmt"
+ "math/rand"
+ "sync"
"testing"
)
@@ -179,3 +181,52 @@ func TestFiles(t *testing.T) {
}
}
}
+
+// FileSet.File should return nil if Pos is past the end of the FileSet.
+func TestFileSetPastEnd(t *testing.T) {
+ fset := NewFileSet()
+ for _, test := range tests {
+ fset.AddFile(test.filename, fset.Base(), test.size)
+ }
+ if f := fset.File(Pos(fset.Base())); f != nil {
+ t.Errorf("expected nil, got %v", f)
+ }
+}
+
+func TestFileSetCacheUnlikely(t *testing.T) {
+ fset := NewFileSet()
+ offsets := make(map[string]int)
+ for _, test := range tests {
+ offsets[test.filename] = fset.Base()
+ fset.AddFile(test.filename, fset.Base(), test.size)
+ }
+ for file, pos := range offsets {
+ f := fset.File(Pos(pos))
+ if f.Name() != file {
+ t.Errorf("expecting %q at position %d, got %q", file, pos, f.Name())
+ }
+ }
+}
+
+// issue 4345. Test concurrent use of FileSet.Pos does not trigger a
+// race in the FileSet position cache.
+func TestFileSetRace(t *testing.T) {
+ fset := NewFileSet()
+ for i := 0; i < 100; i++ {
+ fset.AddFile(fmt.Sprintf("file-%d", i), fset.Base(), 1031)
+ }
+ max := int32(fset.Base())
+ var stop sync.WaitGroup
+ r := rand.New(rand.NewSource(7))
+ for i := 0; i < 2; i++ {
+ r := rand.New(rand.NewSource(r.Int63()))
+ stop.Add(1)
+ go func() {
+ for i := 0; i < 1000; i++ {
+ fset.Position(Pos(r.Int31n(max)))
+ }
+ stop.Done()
+ }()
+ }
+ stop.Wait()
+}
diff --git a/src/pkg/go/types/api.go b/src/pkg/go/types/api.go
new file mode 100644
index 000000000..13b453faa
--- /dev/null
+++ b/src/pkg/go/types/api.go
@@ -0,0 +1,105 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package types declares the data structures for representing
+// Go types and implements typechecking of package files.
+//
+// WARNING: THE TYPES API IS SUBJECT TO CHANGE.
+//
+package types
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+// A Context specifies the supporting context for type checking.
+// An empty Context is a ready-to-use default context.
+type Context struct {
+ // If Error != nil, it is called with each error found
+ // during type checking. The error strings of errors with
+ // detailed position information are formatted as follows:
+ // filename:line:column: message
+ Error func(err error)
+
+ // If Ident != nil, it is called for each identifier id
+ // denoting an Object in the files provided to Check, and
+ // obj is the denoted object.
+ // Ident is not called for fields and methods in struct or
+ // interface types or composite literals, or for blank (_)
+ // or dot (.) identifiers in dot-imports.
+ // TODO(gri) Consider making Fields and Methods ordinary
+ // Objects - than we could lift this restriction.
+ Ident func(id *ast.Ident, obj Object)
+
+ // If Expr != nil, it is called exactly once for each expression x
+ // that is type-checked: typ is the expression type, and val is the
+ // value if x is constant, val is nil otherwise.
+ //
+ // If x is a literal value (constant, composite literal), typ is always
+ // the dynamic type of x (never an interface type). Otherwise, typ is x's
+ // static type (possibly an interface type).
+ //
+ // Constants are represented as follows:
+ //
+ // bool -> bool
+ // numeric -> int64, *big.Int, *big.Rat, Complex
+ // string -> string
+ // nil -> NilType
+ //
+ // Constant values are normalized, that is, they are represented
+ // using the "smallest" possible type that can represent the value.
+ // For instance, 1.0 is represented as an int64 because it can be
+ // represented accurately as an int64.
+ Expr func(x ast.Expr, typ Type, val interface{})
+
+ // If Import != nil, it is called for each imported package.
+ // Otherwise, GcImporter is called.
+ Import Importer
+
+ // If Alignof != nil, it is called to determine the alignment
+ // of the given type. Otherwise DefaultAlignmentof is called.
+ // Alignof must implement the alignment guarantees required by
+ // the spec.
+ Alignof func(Type) int64
+
+ // If Offsetsof != nil, it is called to determine the offsets
+ // of the given struct fields, in bytes. Otherwise DefaultOffsetsof
+ // is called. Offsetsof must implement the offset guarantees
+ // required by the spec.
+ Offsetsof func(fields []*Field) []int64
+
+ // If Sizeof != nil, it is called to determine the size of the
+ // given type. Otherwise, DefaultSizeof is called. Sizeof must
+ // implement the size guarantees required by the spec.
+ Sizeof func(Type) int64
+}
+
+// An Importer resolves import paths to Package objects.
+// The imports map records the packages already imported,
+// indexed by package id (canonical import path).
+// An Importer must determine the canonical import path and
+// check the map to see if it is already present in the imports map.
+// If so, the Importer can return the map entry. Otherwise, the
+// Importer should load the package data for the given path into
+// a new *Package, record pkg in the imports map, and then
+// return pkg.
+type Importer func(imports map[string]*Package, path string) (pkg *Package, err error)
+
+// Check resolves and typechecks a set of package files within the given
+// context. It returns the package and the first error encountered, if
+// any. If the context's Error handler is nil, Check terminates as soon
+// as the first error is encountered; otherwise it continues until the
+// entire package is checked. If there are errors, the package may be
+// only partially type-checked, and the resulting package may be incomplete
+// (missing objects, imports, etc.).
+func (ctxt *Context) Check(fset *token.FileSet, files []*ast.File) (*Package, error) {
+ return check(ctxt, fset, files)
+}
+
+// Check is shorthand for ctxt.Check where ctxt is a default (empty) context.
+func Check(fset *token.FileSet, files []*ast.File) (*Package, error) {
+ var ctxt Context
+ return ctxt.Check(fset, files)
+}
diff --git a/src/pkg/go/types/builtins.go b/src/pkg/go/types/builtins.go
new file mode 100644
index 000000000..ad9259118
--- /dev/null
+++ b/src/pkg/go/types/builtins.go
@@ -0,0 +1,455 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of builtin function calls.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+// builtin typechecks a built-in call. The built-in type is bin, and iota is the current
+// value of iota or -1 if iota doesn't have a value in the current context. The result
+// of the call is returned via x. If the call has type errors, the returned x is marked
+// as invalid (x.mode == invalid).
+//
+func (check *checker) builtin(x *operand, call *ast.CallExpr, bin *builtin, iota int) {
+ args := call.Args
+ id := bin.id
+
+ // declare before goto's
+ var arg0 ast.Expr // first argument, if present
+
+ // check argument count
+ n := len(args)
+ msg := ""
+ if n < bin.nargs {
+ msg = "not enough"
+ } else if !bin.isVariadic && n > bin.nargs {
+ msg = "too many"
+ }
+ if msg != "" {
+ check.invalidOp(call.Pos(), msg+" arguments for %s (expected %d, found %d)", call, bin.nargs, n)
+ goto Error
+ }
+
+ // common case: evaluate first argument if present;
+ // if it is an expression, x has the expression value
+ if n > 0 {
+ arg0 = args[0]
+ switch id {
+ case _Make, _New, _Print, _Println, _Offsetof, _Trace:
+ // respective cases below do the work
+ default:
+ // argument must be an expression
+ check.expr(x, arg0, nil, iota)
+ if x.mode == invalid {
+ goto Error
+ }
+ }
+ }
+
+ switch id {
+ case _Append:
+ if _, ok := underlying(x.typ).(*Slice); !ok {
+ check.invalidArg(x.pos(), "%s is not a typed slice", x)
+ goto Error
+ }
+ resultTyp := x.typ
+ for _, arg := range args[1:] {
+ check.expr(x, arg, nil, iota)
+ if x.mode == invalid {
+ goto Error
+ }
+ // TODO(gri) check assignability
+ }
+ x.mode = value
+ x.typ = resultTyp
+
+ case _Cap, _Len:
+ mode := invalid
+ var val interface{}
+ switch typ := implicitArrayDeref(underlying(x.typ)).(type) {
+ case *Basic:
+ if isString(typ) && id == _Len {
+ if x.mode == constant {
+ mode = constant
+ val = int64(len(x.val.(string)))
+ } else {
+ mode = value
+ }
+ }
+
+ case *Array:
+ mode = value
+ // spec: "The expressions len(s) and cap(s) are constants
+ // if the type of s is an array or pointer to an array and
+ // the expression s does not contain channel receives or
+ // function calls; in this case s is not evaluated."
+ if !check.containsCallsOrReceives(arg0) {
+ mode = constant
+ val = typ.Len
+ }
+
+ case *Slice, *Chan:
+ mode = value
+
+ case *Map:
+ if id == _Len {
+ mode = value
+ }
+ }
+
+ if mode == invalid {
+ check.invalidArg(x.pos(), "%s for %s", x, bin.name)
+ goto Error
+ }
+ x.mode = mode
+ x.typ = Typ[Int]
+ x.val = val
+
+ case _Close:
+ ch, ok := underlying(x.typ).(*Chan)
+ if !ok {
+ check.invalidArg(x.pos(), "%s is not a channel", x)
+ goto Error
+ }
+ if ch.Dir&ast.SEND == 0 {
+ check.invalidArg(x.pos(), "%s must not be a receive-only channel", x)
+ goto Error
+ }
+ x.mode = novalue
+
+ case _Complex:
+ if !check.complexArg(x) {
+ goto Error
+ }
+
+ var y operand
+ check.expr(&y, args[1], nil, iota)
+ if y.mode == invalid {
+ goto Error
+ }
+ if !check.complexArg(&y) {
+ goto Error
+ }
+
+ check.convertUntyped(x, y.typ)
+ if x.mode == invalid {
+ goto Error
+ }
+ check.convertUntyped(&y, x.typ)
+ if y.mode == invalid {
+ goto Error
+ }
+
+ if !IsIdentical(x.typ, y.typ) {
+ check.invalidArg(x.pos(), "mismatched types %s and %s", x.typ, y.typ)
+ goto Error
+ }
+
+ typ := underlying(x.typ).(*Basic)
+ if x.mode == constant && y.mode == constant {
+ x.val = binaryOpConst(x.val, toImagConst(y.val), token.ADD, typ)
+ } else {
+ x.mode = value
+ }
+
+ switch typ.Kind {
+ case Float32:
+ x.typ = Typ[Complex64]
+ case Float64:
+ x.typ = Typ[Complex128]
+ case UntypedInt, UntypedRune, UntypedFloat:
+ x.typ = Typ[UntypedComplex]
+ default:
+ check.invalidArg(x.pos(), "float32 or float64 arguments expected")
+ goto Error
+ }
+
+ case _Copy:
+ var y operand
+ check.expr(&y, args[1], nil, iota)
+ if y.mode == invalid {
+ goto Error
+ }
+
+ var dst, src Type
+ if t, ok := underlying(x.typ).(*Slice); ok {
+ dst = t.Elt
+ }
+ switch t := underlying(y.typ).(type) {
+ case *Basic:
+ if isString(y.typ) {
+ src = Typ[Byte]
+ }
+ case *Slice:
+ src = t.Elt
+ }
+
+ if dst == nil || src == nil {
+ check.invalidArg(x.pos(), "copy expects slice arguments; found %s and %s", x, &y)
+ goto Error
+ }
+
+ if !IsIdentical(dst, src) {
+ check.invalidArg(x.pos(), "arguments to copy %s and %s have different element types %s and %s", x, &y, dst, src)
+ goto Error
+ }
+
+ x.mode = value
+ x.typ = Typ[Int]
+
+ case _Delete:
+ m, ok := underlying(x.typ).(*Map)
+ if !ok {
+ check.invalidArg(x.pos(), "%s is not a map", x)
+ goto Error
+ }
+ check.expr(x, args[1], nil, iota)
+ if x.mode == invalid {
+ goto Error
+ }
+ if !x.isAssignable(check.ctxt, m.Key) {
+ check.invalidArg(x.pos(), "%s is not assignable to %s", x, m.Key)
+ goto Error
+ }
+ x.mode = novalue
+
+ case _Imag, _Real:
+ if !isComplex(x.typ) {
+ check.invalidArg(x.pos(), "%s must be a complex number", x)
+ goto Error
+ }
+ if x.mode == constant {
+ // nothing to do for x.val == 0
+ if !isZeroConst(x.val) {
+ c := x.val.(Complex)
+ if id == _Real {
+ x.val = c.Re
+ } else {
+ x.val = c.Im
+ }
+ }
+ } else {
+ x.mode = value
+ }
+ k := Invalid
+ switch underlying(x.typ).(*Basic).Kind {
+ case Complex64:
+ k = Float32
+ case Complex128:
+ k = Float64
+ case UntypedComplex:
+ k = UntypedFloat
+ default:
+ unreachable()
+ }
+ x.typ = Typ[k]
+
+ case _Make:
+ resultTyp := check.typ(arg0, false)
+ if resultTyp == Typ[Invalid] {
+ goto Error
+ }
+ var min int // minimum number of arguments
+ switch underlying(resultTyp).(type) {
+ case *Slice:
+ min = 2
+ case *Map, *Chan:
+ min = 1
+ default:
+ check.invalidArg(arg0.Pos(), "cannot make %s; type must be slice, map, or channel", arg0)
+ goto Error
+ }
+ if n := len(args); n < min || min+1 < n {
+ check.errorf(call.Pos(), "%s expects %d or %d arguments; found %d", call, min, min+1, n)
+ goto Error
+ }
+ var sizes []interface{} // constant integer arguments, if any
+ for _, arg := range args[1:] {
+ check.expr(x, arg, nil, iota)
+ if x.isInteger(check.ctxt) {
+ if x.mode == constant {
+ if isNegConst(x.val) {
+ check.invalidArg(x.pos(), "%s must not be negative", x)
+ // safe to continue
+ } else {
+ sizes = append(sizes, x.val) // x.val >= 0
+ }
+ }
+ } else {
+ check.invalidArg(x.pos(), "%s must be an integer", x)
+ // safe to continue
+ }
+ }
+ if len(sizes) == 2 && compareConst(sizes[0], sizes[1], token.GTR) {
+ check.invalidArg(args[1].Pos(), "length and capacity swapped")
+ // safe to continue
+ }
+ x.mode = variable
+ x.typ = resultTyp
+
+ case _New:
+ resultTyp := check.typ(arg0, false)
+ if resultTyp == Typ[Invalid] {
+ goto Error
+ }
+ x.mode = variable
+ x.typ = &Pointer{Base: resultTyp}
+
+ case _Panic:
+ x.mode = novalue
+
+ case _Print, _Println:
+ for _, arg := range args {
+ check.expr(x, arg, nil, -1)
+ if x.mode == invalid {
+ goto Error
+ }
+ }
+ x.mode = novalue
+
+ case _Recover:
+ x.mode = value
+ x.typ = new(Interface)
+
+ case _Alignof:
+ x.mode = constant
+ x.val = check.ctxt.alignof(x.typ)
+ x.typ = Typ[Uintptr]
+
+ case _Offsetof:
+ arg, ok := unparen(arg0).(*ast.SelectorExpr)
+ if !ok {
+ check.invalidArg(arg0.Pos(), "%s is not a selector expression", arg0)
+ goto Error
+ }
+ check.expr(x, arg.X, nil, -1)
+ if x.mode == invalid {
+ goto Error
+ }
+ sel := arg.Sel.Name
+ res := lookupField(x.typ, QualifiedName{check.pkg, arg.Sel.Name})
+ if res.index == nil {
+ check.invalidArg(x.pos(), "%s has no single field %s", x, sel)
+ goto Error
+ }
+ offs := check.ctxt.offsetof(deref(x.typ), res.index)
+ if offs < 0 {
+ check.invalidArg(x.pos(), "field %s is embedded via a pointer in %s", sel, x)
+ goto Error
+ }
+ x.mode = constant
+ x.val = offs
+ x.typ = Typ[Uintptr]
+
+ case _Sizeof:
+ x.mode = constant
+ x.val = check.ctxt.sizeof(x.typ)
+ x.typ = Typ[Uintptr]
+
+ case _Assert:
+ // assert(pred) causes a typechecker error if pred is false.
+ // The result of assert is the value of pred if there is no error.
+ // Note: assert is only available in self-test mode.
+ if x.mode != constant || !isBoolean(x.typ) {
+ check.invalidArg(x.pos(), "%s is not a boolean constant", x)
+ goto Error
+ }
+ pred, ok := x.val.(bool)
+ if !ok {
+ check.errorf(x.pos(), "internal error: value of %s should be a boolean constant", x)
+ goto Error
+ }
+ if !pred {
+ check.errorf(call.Pos(), "%s failed", call)
+ // compile-time assertion failure - safe to continue
+ }
+
+ case _Trace:
+ // trace(x, y, z, ...) dumps the positions, expressions, and
+ // values of its arguments. The result of trace is the value
+ // of the first argument.
+ // Note: trace is only available in self-test mode.
+ if len(args) == 0 {
+ check.dump("%s: trace() without arguments", call.Pos())
+ x.mode = novalue
+ x.expr = call
+ return
+ }
+ var t operand
+ x1 := x
+ for _, arg := range args {
+ check.rawExpr(x1, arg, nil, iota, true) // permit trace for types, e.g.: new(trace(T))
+ check.dump("%s: %s", x1.pos(), x1)
+ x1 = &t // use incoming x only for first argument
+ }
+
+ default:
+ check.invalidAST(call.Pos(), "unknown builtin id %d", id)
+ goto Error
+ }
+
+ x.expr = call
+ return
+
+Error:
+ x.mode = invalid
+ x.expr = call
+}
+
+// implicitArrayDeref returns A if typ is of the form *A and A is an array;
+// otherwise it returns typ.
+//
+func implicitArrayDeref(typ Type) Type {
+ if p, ok := typ.(*Pointer); ok {
+ if a, ok := underlying(p.Base).(*Array); ok {
+ return a
+ }
+ }
+ return typ
+}
+
+// containsCallsOrReceives reports if x contains function calls or channel receives.
+// Expects that x was type-checked already.
+//
+func (check *checker) containsCallsOrReceives(x ast.Expr) (found bool) {
+ ast.Inspect(x, func(x ast.Node) bool {
+ switch x := x.(type) {
+ case *ast.CallExpr:
+ // calls and conversions look the same
+ if !check.conversions[x] {
+ found = true
+ }
+ case *ast.UnaryExpr:
+ if x.Op == token.ARROW {
+ found = true
+ }
+ }
+ return !found // no need to continue if found
+ })
+ return
+}
+
+// unparen removes any parentheses surrounding an expression and returns
+// the naked expression.
+//
+func unparen(x ast.Expr) ast.Expr {
+ if p, ok := x.(*ast.ParenExpr); ok {
+ return unparen(p.X)
+ }
+ return x
+}
+
+func (check *checker) complexArg(x *operand) bool {
+ t, _ := underlying(x.typ).(*Basic)
+ if t != nil && (t.Info&IsFloat != 0 || t.Kind == UntypedInt || t.Kind == UntypedRune) {
+ return true
+ }
+ check.invalidArg(x.pos(), "%s must be a float32, float64, or an untyped non-complex numeric constant", x)
+ return false
+}
diff --git a/src/pkg/go/types/check.go b/src/pkg/go/types/check.go
new file mode 100644
index 000000000..f7b87e30c
--- /dev/null
+++ b/src/pkg/go/types/check.go
@@ -0,0 +1,507 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the Check function, which typechecks a package.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+)
+
+// debugging support
+const (
+ debug = true // leave on during development
+ trace = false // turn on for detailed type resolution traces
+)
+
+type checker struct {
+ ctxt *Context
+ fset *token.FileSet
+ files []*ast.File
+
+ // lazily initialized
+ pkg *Package // current package
+ firsterr error // first error encountered
+ idents map[*ast.Ident]Object // maps identifiers to their unique object
+ objects map[*ast.Object]Object // maps *ast.Objects to their unique object
+ initspecs map[*ast.ValueSpec]*ast.ValueSpec // "inherited" type and initialization expressions for constant declarations
+ methods map[*TypeName]*Scope // maps type names to associated methods
+ conversions map[*ast.CallExpr]bool // set of type-checked conversions (to distinguish from calls)
+
+ // untyped expressions
+ // TODO(gri): Consider merging the untyped and constants map. Should measure
+ // the ratio between untyped non-constant and untyped constant expressions
+ // to make an informed decision.
+ untyped map[ast.Expr]*Basic // map of expressions of untyped type
+ constants map[ast.Expr]interface{} // map of untyped constant expressions; each key also appears in untyped
+ shiftOps map[ast.Expr]bool // map of lhs shift operands with delayed type-checking
+
+ // functions
+ funclist []function // list of functions/methods with correct signatures and non-empty bodies
+ funcsig *Signature // signature of currently typechecked function
+ pos []token.Pos // stack of expr positions; debugging support, used if trace is set
+}
+
+func (check *checker) register(id *ast.Ident, obj Object) {
+ // When an expression is evaluated more than once (happens
+ // in rare cases, e.g. for statement expressions, see
+ // comment in stmt.go), the object has been registered
+ // before. Don't do anything in that case.
+ if alt := check.idents[id]; alt != nil {
+ assert(alt == obj)
+ return
+ }
+ check.idents[id] = obj
+ if f := check.ctxt.Ident; f != nil {
+ f(id, obj)
+ }
+}
+
+// lookup returns the unique Object denoted by the identifier.
+// For identifiers without assigned *ast.Object, it uses the
+// checker.idents map; for identifiers with an *ast.Object it
+// uses the checker.objects map.
+//
+// TODO(gri) Once identifier resolution is done entirely by
+// the typechecker, only the idents map is needed.
+//
+func (check *checker) lookup(ident *ast.Ident) Object {
+ obj := check.idents[ident]
+ astObj := ident.Obj
+
+ if obj != nil {
+ assert(astObj == nil || check.objects[astObj] == nil || check.objects[astObj] == obj)
+ return obj
+ }
+
+ if astObj == nil {
+ return nil
+ }
+
+ if obj = check.objects[astObj]; obj == nil {
+ obj = newObj(check.pkg, astObj)
+ check.objects[astObj] = obj
+ }
+ check.register(ident, obj)
+
+ return obj
+}
+
+type function struct {
+ obj *Func // for debugging/tracing only
+ sig *Signature
+ body *ast.BlockStmt
+}
+
+// later adds a function with non-empty body to the list of functions
+// that need to be processed after all package-level declarations
+// are typechecked.
+//
+func (check *checker) later(f *Func, sig *Signature, body *ast.BlockStmt) {
+ // functions implemented elsewhere (say in assembly) have no body
+ if body != nil {
+ check.funclist = append(check.funclist, function{f, sig, body})
+ }
+}
+
+func (check *checker) declareIdent(scope *Scope, ident *ast.Ident, obj Object) {
+ assert(check.lookup(ident) == nil) // identifier already declared or resolved
+ check.register(ident, obj)
+ if ident.Name != "_" {
+ if alt := scope.Insert(obj); alt != nil {
+ prevDecl := ""
+ if pos := alt.GetPos(); pos.IsValid() {
+ prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", check.fset.Position(pos))
+ }
+ check.errorf(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
+ }
+ }
+}
+
+func (check *checker) valueSpec(pos token.Pos, obj Object, lhs []*ast.Ident, spec *ast.ValueSpec, iota int) {
+ if len(lhs) == 0 {
+ check.invalidAST(pos, "missing lhs in declaration")
+ return
+ }
+
+ // determine type for all of lhs, if any
+ // (but only set it for the object we typecheck!)
+ var typ Type
+ if spec.Type != nil {
+ typ = check.typ(spec.Type, false)
+ }
+
+ // len(lhs) > 0
+ rhs := spec.Values
+ if len(lhs) == len(rhs) {
+ // check only lhs and rhs corresponding to obj
+ var l, r ast.Expr
+ for i, name := range lhs {
+ if check.lookup(name) == obj {
+ l = lhs[i]
+ r = rhs[i]
+ break
+ }
+ }
+ assert(l != nil)
+ switch obj := obj.(type) {
+ case *Const:
+ obj.Type = typ
+ case *Var:
+ obj.Type = typ
+ default:
+ unreachable()
+ }
+ check.assign1to1(l, r, nil, true, iota)
+ return
+ }
+
+ // there must be a type or initialization expressions
+ if typ == nil && len(rhs) == 0 {
+ check.invalidAST(pos, "missing type or initialization expression")
+ typ = Typ[Invalid]
+ }
+
+ // if we have a type, mark all of lhs
+ if typ != nil {
+ for _, name := range lhs {
+ switch obj := check.lookup(name).(type) {
+ case *Const:
+ obj.Type = typ
+ case *Var:
+ obj.Type = typ
+ default:
+ unreachable()
+ }
+ }
+ }
+
+ // check initial values, if any
+ if len(rhs) > 0 {
+ // TODO(gri) should try to avoid this conversion
+ lhx := make([]ast.Expr, len(lhs))
+ for i, e := range lhs {
+ lhx[i] = e
+ }
+ check.assignNtoM(lhx, rhs, true, iota)
+ }
+}
+
+// object typechecks an object by assigning it a type.
+//
+func (check *checker) object(obj Object, cycleOk bool) {
+ switch obj := obj.(type) {
+ case *Package:
+ // nothing to do
+ case *Const:
+ if obj.Type != nil {
+ return // already checked
+ }
+ // The obj.Val field for constants is initialized to its respective
+ // iota value by the parser.
+ // The object's fields can be in one of the following states:
+ // Type != nil => the constant value is Val
+ // Type == nil => the constant is not typechecked yet, and Val can be:
+ // Val is int => Val is the value of iota for this declaration
+ // Val == nil => the object's expression is being evaluated
+ if obj.Val == nil {
+ check.errorf(obj.GetPos(), "illegal cycle in initialization of %s", obj.Name)
+ obj.Type = Typ[Invalid]
+ return
+ }
+ spec := obj.spec
+ iota := obj.Val.(int)
+ obj.Val = nil // mark obj as "visited" for cycle detection
+ // determine spec for type and initialization expressions
+ init := spec
+ if len(init.Values) == 0 {
+ init = check.initspecs[spec]
+ }
+ check.valueSpec(spec.Pos(), obj, spec.Names, init, iota)
+
+ case *Var:
+ if obj.Type != nil {
+ return // already checked
+ }
+ if obj.visited {
+ check.errorf(obj.GetPos(), "illegal cycle in initialization of %s", obj.Name)
+ obj.Type = Typ[Invalid]
+ return
+ }
+ switch d := obj.decl.(type) {
+ case *ast.Field:
+ unreachable() // function parameters are always typed when collected
+ case *ast.ValueSpec:
+ obj.visited = true
+ check.valueSpec(d.Pos(), obj, d.Names, d, 0)
+ case *ast.AssignStmt:
+ // If we reach here, we have a short variable declaration
+ // where the rhs didn't typecheck and thus the lhs has no
+ // types.
+ obj.visited = true
+ obj.Type = Typ[Invalid]
+ default:
+ unreachable() // see also function newObj
+ }
+
+ case *TypeName:
+ if obj.Type != nil {
+ return // already checked
+ }
+ typ := &NamedType{Obj: obj}
+ obj.Type = typ // "mark" object so recursion terminates
+ typ.Underlying = underlying(check.typ(obj.spec.Type, cycleOk))
+ // typecheck associated method signatures
+ if scope := check.methods[obj]; scope != nil {
+ switch t := typ.Underlying.(type) {
+ case *Struct:
+ // struct fields must not conflict with methods
+ for _, f := range t.Fields {
+ if m := scope.Lookup(f.Name); m != nil {
+ check.errorf(m.GetPos(), "type %s has both field and method named %s", obj.Name, f.Name)
+ // ok to continue
+ }
+ }
+ case *Interface:
+ // methods cannot be associated with an interface type
+ for _, m := range scope.Entries {
+ recv := m.(*Func).decl.Recv.List[0].Type
+ check.errorf(recv.Pos(), "invalid receiver type %s (%s is an interface type)", obj.Name, obj.Name)
+ // ok to continue
+ }
+ }
+ // typecheck method signatures
+ var methods []*Method
+ for _, obj := range scope.Entries {
+ m := obj.(*Func)
+ sig := check.typ(m.decl.Type, cycleOk).(*Signature)
+ params, _ := check.collectParams(m.decl.Recv, false)
+ sig.Recv = params[0] // the parser/assocMethod ensure there is exactly one parameter
+ m.Type = sig
+ methods = append(methods, &Method{QualifiedName{check.pkg, m.Name}, sig})
+ check.later(m, sig, m.decl.Body)
+ }
+ typ.Methods = methods
+ delete(check.methods, obj) // we don't need this scope anymore
+ }
+
+ case *Func:
+ if obj.Type != nil {
+ return // already checked
+ }
+ fdecl := obj.decl
+ // methods are typechecked when their receivers are typechecked
+ if fdecl.Recv == nil {
+ sig := check.typ(fdecl.Type, cycleOk).(*Signature)
+ if obj.Name == "init" && (len(sig.Params) != 0 || len(sig.Results) != 0) {
+ check.errorf(fdecl.Pos(), "func init must have no arguments and no return values")
+ // ok to continue
+ }
+ obj.Type = sig
+ check.later(obj, sig, fdecl.Body)
+ }
+
+ default:
+ unreachable()
+ }
+}
+
+// assocInitvals associates "inherited" initialization expressions
+// with the corresponding *ast.ValueSpec in the check.initspecs map
+// for constant declarations without explicit initialization expressions.
+//
+func (check *checker) assocInitvals(decl *ast.GenDecl) {
+	// last is the most recent ValueSpec in this declaration group that
+	// carries explicit initialization expressions; specs without values
+	// "inherit" the expressions of last (recorded in check.initspecs).
+	var last *ast.ValueSpec
+	for _, s := range decl.Specs {
+		if s, ok := s.(*ast.ValueSpec); ok {
+			if len(s.Values) > 0 {
+				last = s
+			} else {
+				check.initspecs[s] = last
+			}
+		}
+	}
+	// last == nil means no spec in the entire group carried init
+	// expressions - the parser should never produce such a declaration.
+	if last == nil {
+		check.invalidAST(decl.Pos(), "no initialization values provided")
+	}
+}
+
+// assocMethod associates a method declaration with the respective
+// receiver base type. meth.Recv must exist.
+//
+func (check *checker) assocMethod(meth *ast.FuncDecl) {
+	// The receiver type is one of the following (enforced by parser):
+	// - *ast.Ident
+	// - *ast.StarExpr{*ast.Ident}
+	// - *ast.BadExpr (parser error)
+	typ := meth.Recv.List[0].Type
+	if ptr, ok := typ.(*ast.StarExpr); ok {
+		// strip the pointer - methods are associated with the base type name
+		typ = ptr.X
+	}
+	// determine receiver base type name
+	ident, ok := typ.(*ast.Ident)
+	if !ok {
+		// not an identifier - parser reported error already
+		return // ignore this method
+	}
+	// determine receiver base type object
+	var tname *TypeName
+	if obj := check.lookup(ident); obj != nil {
+		obj, ok := obj.(*TypeName)
+		if !ok {
+			check.errorf(ident.Pos(), "%s is not a type", ident.Name)
+			return // ignore this method
+		}
+		// a nil spec marks an imported (non-local) type
+		if obj.spec == nil {
+			check.errorf(ident.Pos(), "cannot define method on non-local type %s", ident.Name)
+			return // ignore this method
+		}
+		tname = obj
+	} else {
+		// identifier not declared/resolved - parser reported error already
+		return // ignore this method
+	}
+	// declare method in receiver base type scope;
+	// the scope is allocated lazily, on the type's first method
+	scope := check.methods[tname]
+	if scope == nil {
+		scope = new(Scope)
+		check.methods[tname] = scope
+	}
+	check.declareIdent(scope, meth.Name, &Func{Pkg: check.pkg, Name: meth.Name.Name, decl: meth})
+}
+
+// decl typechecks the given declaration. Import specs are skipped
+// (they are handled by check.resolve) and methods are skipped (they
+// are checked together with their receiver base type).
+func (check *checker) decl(decl ast.Decl) {
+	switch d := decl.(type) {
+	case *ast.BadDecl:
+		// ignore
+	case *ast.GenDecl:
+		for _, spec := range d.Specs {
+			switch s := spec.(type) {
+			case *ast.ImportSpec:
+				// nothing to do (handled by check.resolve)
+			case *ast.ValueSpec:
+				// typecheck the object of each declared name
+				for _, name := range s.Names {
+					check.object(check.lookup(name), false)
+				}
+			case *ast.TypeSpec:
+				check.object(check.lookup(s.Name), false)
+			default:
+				check.invalidAST(s.Pos(), "unknown ast.Spec node %T", s)
+			}
+		}
+	case *ast.FuncDecl:
+		// methods are checked when their respective base types are checked
+		if d.Recv != nil {
+			return
+		}
+		obj := check.lookup(d.Name)
+		// Initialization functions don't have an object associated with them
+		// since they are not in any scope. Create a dummy object for them.
+		if d.Name.Name == "init" {
+			assert(obj == nil) // all other functions should have an object
+			obj = &Func{Pkg: check.pkg, Name: d.Name.Name, decl: d}
+			check.register(d.Name, obj)
+		}
+		check.object(obj, false)
+	default:
+		check.invalidAST(d.Pos(), "unknown ast.Decl node %T", d)
+	}
+}
+
+// A bailout panic is raised to indicate early termination.
+// It is recovered (and ignored) by the deferred handler in check.
+type bailout struct{}
+
+// check typechecks the given package files and returns the resulting
+// package. err is the first error encountered; if ctxt.Error is set,
+// all errors are reported to it and checking continues past the first
+// one. Internal panics are converted into an error rather than crashing
+// the client (unless the package is built with debug enabled).
+func check(ctxt *Context, fset *token.FileSet, files []*ast.File) (pkg *Package, err error) {
+	// initialize checker
+	check := checker{
+		ctxt:        ctxt,
+		fset:        fset,
+		files:       files,
+		idents:      make(map[*ast.Ident]Object),
+		objects:     make(map[*ast.Object]Object),
+		initspecs:   make(map[*ast.ValueSpec]*ast.ValueSpec),
+		methods:     make(map[*TypeName]*Scope),
+		conversions: make(map[*ast.CallExpr]bool),
+		untyped:     make(map[ast.Expr]*Basic),
+		constants:   make(map[ast.Expr]interface{}),
+		shiftOps:    make(map[ast.Expr]bool),
+	}
+
+	// set results and handle panics
+	defer func() {
+		pkg = check.pkg
+		switch p := recover().(type) {
+		case nil, bailout:
+			// normal return or early exit
+			err = check.firsterr
+		default:
+			// unexpected panic: don't crash clients
+			if debug {
+				check.dump("INTERNAL PANIC: %v", p)
+				panic(p)
+			}
+			// TODO(gri) add a test case for this scenario
+			err = fmt.Errorf("types internal error: %v", p)
+		}
+	}()
+
+	// resolve identifiers
+	imp := ctxt.Import
+	if imp == nil {
+		imp = GcImport
+	}
+	methods := check.resolve(imp)
+
+	// associate methods with types
+	for _, m := range methods {
+		check.assocMethod(m)
+	}
+
+	// typecheck all declarations
+	for _, f := range check.files {
+		for _, d := range f.Decls {
+			check.decl(d)
+		}
+	}
+
+	// typecheck all function/method bodies
+	// (funclist may grow when checking statements - do not use range clause!)
+	for i := 0; i < len(check.funclist); i++ {
+		f := check.funclist[i]
+		if trace {
+			s := "<function literal>"
+			if f.obj != nil {
+				s = f.obj.Name
+			}
+			fmt.Println("---", s)
+		}
+		check.funcsig = f.sig
+		check.stmtList(f.body.List)
+	}
+
+	// remaining untyped expressions must indeed be untyped
+	if debug {
+		for x, typ := range check.untyped {
+			if !isUntyped(typ) {
+				check.dump("%s: %s (type %s) is not untyped", x.Pos(), x, typ)
+				panic(0)
+			}
+		}
+	}
+
+	// notify client of any untyped types left
+	// TODO(gri) Consider doing this before and
+	// after function body checking for smaller
+	// map size and more immediate feedback.
+	if ctxt.Expr != nil {
+		for x, typ := range check.untyped {
+			ctxt.Expr(x, typ, check.constants[x])
+		}
+	}
+
+	return
+}
diff --git a/src/pkg/go/types/check_test.go b/src/pkg/go/types/check_test.go
new file mode 100644
index 000000000..470f3a1a9
--- /dev/null
+++ b/src/pkg/go/types/check_test.go
@@ -0,0 +1,259 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a typechecker test harness. The packages specified
+// in tests are typechecked. Error messages reported by the typechecker are
+// compared against the error messages expected in the test files.
+//
+// Expected errors are indicated in the test files by putting a comment
+// of the form /* ERROR "rx" */ immediately following an offending token.
+// The harness will verify that an error matching the regular expression
+// rx is reported at that source position. Consecutive comments may be
+// used to indicate multiple errors for the same token position.
+//
+// For instance, the following test file indicates that a "not declared"
+// error should be reported for the undeclared variable x:
+//
+// package p
+// func f() {
+// _ = x /* ERROR "not declared" */ + 1
+// }
+
+package types
+
+import (
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "testing"
+)
+
+var listErrors = flag.Bool("list", false, "list errors")
+
+// The test filenames do not end in .go so that they are invisible
+// to gofmt since they contain comments that must not change their
+// positions relative to surrounding tokens.
+
+var tests = []struct {
+ name string
+ files []string
+}{
+ {"decls0", []string{"testdata/decls0.src"}},
+ {"decls1", []string{"testdata/decls1.src"}},
+ {"decls2", []string{"testdata/decls2a.src", "testdata/decls2b.src"}},
+ {"decls3", []string{"testdata/decls3.src"}},
+ {"const0", []string{"testdata/const0.src"}},
+ {"expr0", []string{"testdata/expr0.src"}},
+ {"expr1", []string{"testdata/expr1.src"}},
+ {"expr2", []string{"testdata/expr2.src"}},
+ {"expr3", []string{"testdata/expr3.src"}},
+ {"builtins", []string{"testdata/builtins.src"}},
+ {"conversions", []string{"testdata/conversions.src"}},
+ {"stmt0", []string{"testdata/stmt0.src"}},
+}
+
+var fset = token.NewFileSet()
+
+// getFile returns the token.File with the given name if it is already
+// part of fset, or nil otherwise. Iteration over the file set stops as
+// soon as the file is found.
+func getFile(filename string) (file *token.File) {
+	fset.Iterate(func(f *token.File) bool {
+		if f.Name() == filename {
+			file = f
+			return false // end iteration
+		}
+		return true
+	})
+	return file
+}
+
+// Positioned errors are of the form filename:line:column: message .
+var posMsgRx = regexp.MustCompile(`^(.*:[0-9]+:[0-9]+): *(.*)`)
+
+// splitError splits an error's error message into a position string
+// and the actual error message. If there's no position information,
+// pos is the empty string, and msg is the entire error message.
+//
+func splitError(err error) (pos, msg string) {
+	msg = err.Error()
+	// m[1] is the "filename:line:column" prefix, m[2] the remaining message
+	if m := posMsgRx.FindStringSubmatch(msg); len(m) == 3 {
+		pos = m[1]
+		msg = m[2]
+	}
+	return
+}
+
+// parseFiles parses the given files (with comments, so ERROR markers
+// survive) and returns their ASTs together with the list of parse
+// errors encountered. A file that cannot be parsed at all aborts the
+// test via t.Fatalf.
+func parseFiles(t *testing.T, testname string, filenames []string) ([]*ast.File, []error) {
+	var files []*ast.File
+	var errlist []error
+	for _, filename := range filenames {
+		file, err := parser.ParseFile(fset, filename, nil, parser.DeclarationErrors|parser.AllErrors)
+		if file == nil {
+			t.Fatalf("%s: could not parse file %s", testname, filename)
+		}
+		files = append(files, file)
+		if err != nil {
+			// flatten a scanner.ErrorList into individual errors so each
+			// one can be matched against an expected-error pattern
+			if list, _ := err.(scanner.ErrorList); len(list) > 0 {
+				for _, err := range list {
+					errlist = append(errlist, err)
+				}
+			} else {
+				errlist = append(errlist, err)
+			}
+		}
+	}
+	return files, errlist
+}
+
+// ERROR comments must be of the form /* ERROR "rx" */ and rx is
+// a regular expression that matches the expected error message.
+//
+var errRx = regexp.MustCompile(`^/\* *ERROR *"([^"]*)" *\*/$`)
+
+// errMap collects the regular expressions of ERROR comments found
+// in files and returns them as a map of error positions to error messages.
+//
+func errMap(t *testing.T, testname string, files []*ast.File) map[string][]string {
+	errmap := make(map[string][]string)
+
+	for _, file := range files {
+		filename := fset.Position(file.Package).Filename
+		src, err := ioutil.ReadFile(filename)
+		if err != nil {
+			t.Fatalf("%s: could not read %s", testname, filename)
+		}
+
+		var s scanner.Scanner
+		// file was parsed already - do not add it again to the file
+		// set otherwise the position information returned here will
+		// not match the position information collected by the parser
+		s.Init(getFile(filename), src, nil, scanner.ScanComments)
+		var prev string // position string of last non-comment, non-semicolon token
+
+	scanFile:
+		for {
+			pos, tok, lit := s.Scan()
+			switch tok {
+			case token.EOF:
+				break scanFile
+			case token.COMMENT:
+				// an ERROR comment attaches its pattern to the position
+				// of the token it immediately follows (prev)
+				s := errRx.FindStringSubmatch(lit)
+				if len(s) == 2 {
+					// NOTE(review): s[1] is already a string; the
+					// string(...) conversion is redundant.
+					errmap[prev] = append(errmap[prev], string(s[1]))
+				}
+			case token.SEMICOLON:
+				// ignore automatically inserted semicolon
+				if lit == "\n" {
+					continue scanFile
+				}
+				fallthrough
+			default:
+				prev = fset.Position(pos).String()
+			}
+		}
+	}
+
+	return errmap
+}
+
+// eliminate matches each reported error in errlist against the expected
+// error patterns in errmap (keyed by position string). A matched
+// expectation is removed from errmap so the caller can report leftover
+// entries as "expected but not reported"; a reported error with no
+// matching expectation is reported via t.Errorf.
+func eliminate(t *testing.T, errmap map[string][]string, errlist []error) {
+	for _, err := range errlist {
+		pos, msg := splitError(err)
+		list := errmap[pos]
+		index := -1 // list index of matching message, if any
+		// we expect one of the messages in list to match the error at pos
+		for i, pattern := range list {
+			rx, err := regexp.Compile(pattern)
+			if err != nil {
+				t.Errorf("%s: %v", pos, err)
+				continue
+			}
+			// match the reported error message (msg), not the pattern's
+			// own source text (the previous loop variable shadowed msg,
+			// so every pattern was matched against itself)
+			if rx.MatchString(msg) {
+				index = i
+				break
+			}
+		}
+		if index >= 0 {
+			// eliminate from list
+			if n := len(list) - 1; n > 0 {
+				// not the last entry - swap in last element and shorten list by 1
+				list[index] = list[n]
+				errmap[pos] = list[:n]
+			} else {
+				// last entry - remove list from map
+				delete(errmap, pos)
+			}
+		} else {
+			t.Errorf("%s: no error expected: %q", pos, msg)
+		}
+	}
+}
+
+// checkFiles parses and typechecks the given test files, collects all
+// parser and typechecker errors, and verifies them against the ERROR
+// comments embedded in the sources. With -list, it only prints the
+// errors found instead of matching them.
+func checkFiles(t *testing.T, testname string, testfiles []string) {
+	// parse files and collect parser errors
+	files, errlist := parseFiles(t, testname, testfiles)
+
+	// typecheck and collect typechecker errors
+	var ctxt Context
+	ctxt.Error = func(err error) { errlist = append(errlist, err) }
+	ctxt.Check(fset, files)
+
+	if *listErrors {
+		t.Errorf("--- %s: %d errors found:", testname, len(errlist))
+		for _, err := range errlist {
+			t.Error(err)
+		}
+		return
+	}
+
+	// match and eliminate errors
+	// we are expecting the following errors
+	// (collect these after parsing the files so that
+	// they are found in the file set)
+	errmap := errMap(t, testname, files)
+	eliminate(t, errmap, errlist)
+
+	// there should be no expected errors left
+	if len(errmap) > 0 {
+		t.Errorf("--- %s: %d source positions with expected (but not reported) errors:", testname, len(errmap))
+		for pos, list := range errmap {
+			for _, rx := range list {
+				t.Errorf("%s: %q", pos, rx)
+			}
+		}
+	}
+}
+
+var testBuiltinsDeclared = false
+
+// TestCheck typechecks all test packages (or only testdata/test.go,
+// if that file exists) and verifies reported errors against the
+// expected ERROR comments.
+func TestCheck(t *testing.T) {
+	// Declare builtins for testing.
+	// Not done in an init func to avoid an init race with
+	// the construction of the Universe var.
+	if !testBuiltinsDeclared {
+		testBuiltinsDeclared = true
+		// Pkg == nil for Universe objects
+		def(&Func{Name: "assert", Type: &builtin{_Assert, "assert", 1, false, true}})
+		def(&Func{Name: "trace", Type: &builtin{_Trace, "trace", 0, true, true}})
+	}
+
+	// For easy debugging w/o changing the testing code,
+	// if there is a local test file, only test that file.
+	const testfile = "testdata/test.go"
+	if fi, err := os.Stat(testfile); err == nil && !fi.IsDir() {
+		fmt.Printf("WARNING: Testing only %s (remove it to run all tests)\n", testfile)
+		checkFiles(t, testfile, []string{testfile})
+		return
+	}
+
+	// Otherwise, run all the tests.
+	for _, test := range tests {
+		checkFiles(t, test.name, test.files)
+	}
+}
diff --git a/src/pkg/go/types/const.go b/src/pkg/go/types/const.go
new file mode 100644
index 000000000..503652e75
--- /dev/null
+++ b/src/pkg/go/types/const.go
@@ -0,0 +1,718 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements operations on constant values.
+
+package types
+
+import (
+ "fmt"
+ "go/token"
+ "math/big"
+ "strconv"
+)
+
+// TODO(gri) At the moment, constants are different types
+// passed around as interface{} values. Introduce a Const
+// interface and use methods instead of xConst functions.
+
+// Representation of constant values.
+//
+// bool -> bool (true, false)
+// numeric -> int64, *big.Int, *big.Rat, Complex (ordered by increasing data structure "size")
+// string -> string
+// nil -> NilType (nilConst)
+//
+// Numeric constants are normalized after each operation such
+// that they are represented by the "smallest" data structure
+// required to represent the constant, independent of actual
+// type. Non-numeric constants are always normalized.
+
+// Representation of complex numbers.
+type Complex struct {
+ Re, Im *big.Rat
+}
+
+func (c Complex) String() string {
+ if c.Re.Sign() == 0 {
+ return fmt.Sprintf("%si", c.Im)
+ }
+ // normalized complex values always have an imaginary part
+ return fmt.Sprintf("(%s + %si)", c.Re, c.Im)
+}
+
+// Representation of nil.
+type NilType struct{}
+
+func (NilType) String() string {
+ return "nil"
+}
+
+// Frequently used values.
+var (
+ nilConst = NilType{}
+ zeroConst = int64(0)
+)
+
+// int64 bounds
+var (
+ minInt64 = big.NewInt(-1 << 63)
+ maxInt64 = big.NewInt(1<<63 - 1)
+)
+
+// normalizeIntConst returns the smallest constant representation
+// for the specific value of x; either an int64 or a *big.Int value.
+//
+func normalizeIntConst(x *big.Int) interface{} {
+	if minInt64.Cmp(x) <= 0 && x.Cmp(maxInt64) <= 0 {
+		// value fits in an int64 - prefer the smaller representation
+		return x.Int64()
+	}
+	return x
+}
+
+// normalizeRatConst returns the smallest constant representation
+// for the specific value of x; either an int64, *big.Int,
+// or *big.Rat value.
+//
+func normalizeRatConst(x *big.Rat) interface{} {
+ if x.IsInt() {
+ return normalizeIntConst(x.Num())
+ }
+ return x
+}
+
+// newComplex returns the smallest constant representation
+// for the specific value re + im*i; either an int64, *big.Int,
+// *big.Rat, or complex value.
+//
+func newComplex(re, im *big.Rat) interface{} {
+ if im.Sign() == 0 {
+ return normalizeRatConst(re)
+ }
+ return Complex{re, im}
+}
+
+// makeRuneConst returns the int64 code point for the rune literal
+// lit. The result is nil if lit is not a correct rune literal.
+//
+func makeRuneConst(lit string) interface{} {
+ if n := len(lit); n >= 2 {
+ if code, _, _, err := strconv.UnquoteChar(lit[1:n-1], '\''); err == nil {
+ return int64(code)
+ }
+ }
+ return nil
+}
+
+// makeIntConst returns the smallest integer constant representation
+// (int64, *big.Int) for the integer literal lit. The result is nil if
+// lit is not a correct integer literal.
+//
+func makeIntConst(lit string) interface{} {
+	if x, err := strconv.ParseInt(lit, 0, 64); err == nil {
+		return x
+	}
+	// literal doesn't fit in an int64 (or uses a form ParseInt rejects) -
+	// fall back to arbitrary precision
+	if x, ok := new(big.Int).SetString(lit, 0); ok {
+		return x
+	}
+	return nil
+}
+
+// makeFloatConst returns the smallest floating-point constant representation
+// (int64, *big.Int, *big.Rat) for the floating-point literal lit. The result
+// is nil if lit is not a correct floating-point literal.
+//
+func makeFloatConst(lit string) interface{} {
+ if x, ok := new(big.Rat).SetString(lit); ok {
+ return normalizeRatConst(x)
+ }
+ return nil
+}
+
+// makeComplexConst returns the complex constant representation (Complex) for
+// the imaginary literal lit. The result is nil if lit is not a correct imaginary
+// literal.
+//
+func makeComplexConst(lit string) interface{} {
+ n := len(lit)
+ if n > 0 && lit[n-1] == 'i' {
+ if im, ok := new(big.Rat).SetString(lit[0 : n-1]); ok {
+ return newComplex(big.NewRat(0, 1), im)
+ }
+ }
+ return nil
+}
+
+// makeStringConst returns the string constant representation (string) for
+// the string literal lit. The result is nil if lit is not a correct string
+// literal.
+//
+func makeStringConst(lit string) interface{} {
+ if s, err := strconv.Unquote(lit); err == nil {
+ return s
+ }
+ return nil
+}
+
+// toImagConst returns the constant Complex(0, x) for a non-complex x.
+func toImagConst(x interface{}) interface{} {
+ var im *big.Rat
+ switch x := x.(type) {
+ case int64:
+ im = big.NewRat(x, 1)
+ case *big.Int:
+ im = new(big.Rat).SetFrac(x, int1)
+ case *big.Rat:
+ im = x
+ default:
+ unreachable()
+ }
+ return Complex{rat0, im}
+}
+
+// isZeroConst reports whether the value of constant x is 0.
+// x must be normalized.
+//
+func isZeroConst(x interface{}) bool {
+ i, ok := x.(int64) // good enough since constants are normalized
+ return ok && i == 0
+}
+
+// isNegConst reports whether the value of constant x is < 0.
+// x must be a non-complex numeric value.
+//
+func isNegConst(x interface{}) bool {
+ switch x := x.(type) {
+ case int64:
+ return x < 0
+ case *big.Int:
+ return x.Sign() < 0
+ case *big.Rat:
+ return x.Sign() < 0
+ }
+ unreachable()
+ return false
+}
+
+// isRepresentableConst reports whether the value of constant x can
+// be represented as a value of the basic type Typ[as] without loss
+// of precision.
+//
+func isRepresentableConst(x interface{}, ctxt *Context, as BasicKind) bool {
+ switch x := x.(type) {
+ case bool:
+ return as == Bool || as == UntypedBool
+
+ case int64:
+ switch as {
+ case Int:
+ var s = uint(ctxt.sizeof(Typ[as])) * 8
+ return int64(-1)<<(s-1) <= x && x <= int64(1)<<(s-1)-1
+ case Int8:
+ const s = 8
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int16:
+ const s = 16
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int32:
+ const s = 32
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int64:
+ return true
+ case Uint, Uintptr:
+ var s = uint(ctxt.sizeof(Typ[as])) * 8
+ return 0 <= x && x <= int64(1)<<(s-1)-1
+ case Uint8:
+ const s = 8
+ return 0 <= x && x <= 1<<s-1
+ case Uint16:
+ const s = 16
+ return 0 <= x && x <= 1<<s-1
+ case Uint32:
+ const s = 32
+ return 0 <= x && x <= 1<<s-1
+ case Uint64:
+ return 0 <= x
+ case Float32:
+ return true // TODO(gri) fix this
+ case Float64:
+ return true // TODO(gri) fix this
+ case Complex64:
+ return true // TODO(gri) fix this
+ case Complex128:
+ return true // TODO(gri) fix this
+ case UntypedInt, UntypedFloat, UntypedComplex:
+ return true
+ }
+
+ case *big.Int:
+ switch as {
+ case Uint, Uintptr:
+ var s = uint(ctxt.sizeof(Typ[as])) * 8
+ return x.Sign() >= 0 && x.BitLen() <= int(s)
+ case Uint64:
+ return x.Sign() >= 0 && x.BitLen() <= 64
+ case Float32:
+ return true // TODO(gri) fix this
+ case Float64:
+ return true // TODO(gri) fix this
+ case Complex64:
+ return true // TODO(gri) fix this
+ case Complex128:
+ return true // TODO(gri) fix this
+ case UntypedInt, UntypedFloat, UntypedComplex:
+ return true
+ }
+
+ case *big.Rat:
+ switch as {
+ case Float32:
+ return true // TODO(gri) fix this
+ case Float64:
+ return true // TODO(gri) fix this
+ case Complex64:
+ return true // TODO(gri) fix this
+ case Complex128:
+ return true // TODO(gri) fix this
+ case UntypedFloat, UntypedComplex:
+ return true
+ }
+
+ case Complex:
+ switch as {
+ case Complex64:
+ return true // TODO(gri) fix this
+ case Complex128:
+ return true // TODO(gri) fix this
+ case UntypedComplex:
+ return true
+ }
+
+ case string:
+ return as == String || as == UntypedString
+
+ case NilType:
+ return as == UntypedNil || as == UnsafePointer
+
+ default:
+ unreachable()
+ }
+
+ return false
+}
+
+var (
+ int1 = big.NewInt(1)
+ rat0 = big.NewRat(0, 1)
+)
+
+// complexity returns a measure of representation complexity for constant x.
+func complexity(x interface{}) int {
+ switch x.(type) {
+ case bool, string, NilType:
+ return 1
+ case int64:
+ return 2
+ case *big.Int:
+ return 3
+ case *big.Rat:
+ return 4
+ case Complex:
+ return 5
+ }
+ unreachable()
+ return 0
+}
+
+// matchConst returns the matching representation (same type) with the
+// smallest complexity for two constant values x and y. They must be
+// of the same "kind" (boolean, numeric, string, or NilType).
+//
+func matchConst(x, y interface{}) (_, _ interface{}) {
+ if complexity(x) > complexity(y) {
+ y, x = matchConst(y, x)
+ return x, y
+ }
+ // complexity(x) <= complexity(y)
+
+ switch x := x.(type) {
+ case bool, Complex, string, NilType:
+ return x, y
+
+ case int64:
+ switch y := y.(type) {
+ case int64:
+ return x, y
+ case *big.Int:
+ return big.NewInt(x), y
+ case *big.Rat:
+ return big.NewRat(x, 1), y
+ case Complex:
+ return Complex{big.NewRat(x, 1), rat0}, y
+ }
+
+ case *big.Int:
+ switch y := y.(type) {
+ case *big.Int:
+ return x, y
+ case *big.Rat:
+ return new(big.Rat).SetFrac(x, int1), y
+ case Complex:
+ return Complex{new(big.Rat).SetFrac(x, int1), rat0}, y
+ }
+
+ case *big.Rat:
+ switch y := y.(type) {
+ case *big.Rat:
+ return x, y
+ case Complex:
+ return Complex{x, rat0}, y
+ }
+ }
+
+ unreachable()
+ return nil, nil
+}
+
+// is32bit reports whether x can be represented using 32 bits
+// (i.e., x fits in an int32).
+func is32bit(x int64) bool {
+	return -1<<31 <= x && x <= 1<<31-1
+}
+
+// is63bit reports whether x can be represented using 63 bits
+// (used to guard int64 addition/subtraction against overflow).
+func is63bit(x int64) bool {
+	return -1<<62 <= x && x <= 1<<62-1
+}
+
+// unaryOpConst returns the result of the constant evaluation op x where x is of the given type.
+func unaryOpConst(x interface{}, ctxt *Context, op token.Token, typ *Basic) interface{} {
+ switch op {
+ case token.ADD:
+ return x // nothing to do
+ case token.SUB:
+ switch x := x.(type) {
+ case int64:
+ if z := -x; z != x {
+ return z // no overflow
+ }
+ // overflow - need to convert to big.Int
+ return normalizeIntConst(new(big.Int).Neg(big.NewInt(x)))
+ case *big.Int:
+ return normalizeIntConst(new(big.Int).Neg(x))
+ case *big.Rat:
+ return normalizeRatConst(new(big.Rat).Neg(x))
+ case Complex:
+ return newComplex(new(big.Rat).Neg(x.Re), new(big.Rat).Neg(x.Im))
+ }
+ case token.XOR:
+ var z big.Int
+ switch x := x.(type) {
+ case int64:
+ z.Not(big.NewInt(x))
+ case *big.Int:
+ z.Not(x)
+ default:
+ unreachable()
+ }
+ // For unsigned types, the result will be negative and
+ // thus "too large": We must limit the result size to
+ // the type's size.
+ if typ.Info&IsUnsigned != 0 {
+ s := uint(ctxt.sizeof(typ)) * 8
+ z.AndNot(&z, new(big.Int).Lsh(big.NewInt(-1), s)) // z &^= (-1)<<s
+ }
+ return normalizeIntConst(&z)
+ case token.NOT:
+ return !x.(bool)
+ }
+ unreachable()
+ return nil
+}
+
+// binaryOpConst returns the result of the constant evaluation x op y;
+// both operands must be of the same constant "kind" (boolean, numeric, or string).
+// If typ is an integer type, division (op == token.QUO) is using integer division
+// (and the result is guaranteed to be integer) rather than floating-point
+// division. Division by zero leads to a run-time panic.
+//
+func binaryOpConst(x, y interface{}, op token.Token, typ *Basic) interface{} {
+ x, y = matchConst(x, y)
+
+ switch x := x.(type) {
+ case bool:
+ y := y.(bool)
+ switch op {
+ case token.LAND:
+ return x && y
+ case token.LOR:
+ return x || y
+ }
+
+ case int64:
+ y := y.(int64)
+ switch op {
+ case token.ADD:
+ // TODO(gri) can do better than this
+ if is63bit(x) && is63bit(y) {
+ return x + y
+ }
+ return normalizeIntConst(new(big.Int).Add(big.NewInt(x), big.NewInt(y)))
+ case token.SUB:
+ // TODO(gri) can do better than this
+ if is63bit(x) && is63bit(y) {
+ return x - y
+ }
+ return normalizeIntConst(new(big.Int).Sub(big.NewInt(x), big.NewInt(y)))
+ case token.MUL:
+ // TODO(gri) can do better than this
+ if is32bit(x) && is32bit(y) {
+ return x * y
+ }
+ return normalizeIntConst(new(big.Int).Mul(big.NewInt(x), big.NewInt(y)))
+ case token.REM:
+ return x % y
+ case token.QUO:
+ if typ.Info&IsInteger != 0 {
+ return x / y
+ }
+ return normalizeRatConst(new(big.Rat).SetFrac(big.NewInt(x), big.NewInt(y)))
+ case token.AND:
+ return x & y
+ case token.OR:
+ return x | y
+ case token.XOR:
+ return x ^ y
+ case token.AND_NOT:
+ return x &^ y
+ }
+
+ case *big.Int:
+ y := y.(*big.Int)
+ var z big.Int
+ switch op {
+ case token.ADD:
+ z.Add(x, y)
+ case token.SUB:
+ z.Sub(x, y)
+ case token.MUL:
+ z.Mul(x, y)
+ case token.REM:
+ z.Rem(x, y)
+ case token.QUO:
+ if typ.Info&IsInteger != 0 {
+ z.Quo(x, y)
+ } else {
+ return normalizeRatConst(new(big.Rat).SetFrac(x, y))
+ }
+ case token.AND:
+ z.And(x, y)
+ case token.OR:
+ z.Or(x, y)
+ case token.XOR:
+ z.Xor(x, y)
+ case token.AND_NOT:
+ z.AndNot(x, y)
+ default:
+ unreachable()
+ }
+ return normalizeIntConst(&z)
+
+ case *big.Rat:
+ y := y.(*big.Rat)
+ var z big.Rat
+ switch op {
+ case token.ADD:
+ z.Add(x, y)
+ case token.SUB:
+ z.Sub(x, y)
+ case token.MUL:
+ z.Mul(x, y)
+ case token.QUO:
+ z.Quo(x, y)
+ default:
+ unreachable()
+ }
+ return normalizeRatConst(&z)
+
+ case Complex:
+ y := y.(Complex)
+ a, b := x.Re, x.Im
+ c, d := y.Re, y.Im
+ var re, im big.Rat
+ switch op {
+ case token.ADD:
+ // (a+c) + i(b+d)
+ re.Add(a, c)
+ im.Add(b, d)
+ case token.SUB:
+ // (a-c) + i(b-d)
+ re.Sub(a, c)
+ im.Sub(b, d)
+ case token.MUL:
+ // (ac-bd) + i(bc+ad)
+ var ac, bd, bc, ad big.Rat
+ ac.Mul(a, c)
+ bd.Mul(b, d)
+ bc.Mul(b, c)
+ ad.Mul(a, d)
+ re.Sub(&ac, &bd)
+ im.Add(&bc, &ad)
+ case token.QUO:
+ // (ac+bd)/s + i(bc-ad)/s, with s = cc + dd
+ var ac, bd, bc, ad, s big.Rat
+ ac.Mul(a, c)
+ bd.Mul(b, d)
+ bc.Mul(b, c)
+ ad.Mul(a, d)
+ s.Add(c.Mul(c, c), d.Mul(d, d))
+ re.Add(&ac, &bd)
+ re.Quo(&re, &s)
+ im.Sub(&bc, &ad)
+ im.Quo(&im, &s)
+ default:
+ unreachable()
+ }
+ return newComplex(&re, &im)
+
+ case string:
+ if op == token.ADD {
+ return x + y.(string)
+ }
+ }
+
+ unreachable()
+ return nil
+}
+
+// shiftConst returns the result of the constant evaluation x op s
+// where op is token.SHL or token.SHR (<< or >>). x must be an
+// integer constant.
+//
+func shiftConst(x interface{}, s uint, op token.Token) interface{} {
+ switch x := x.(type) {
+ case int64:
+ switch op {
+ case token.SHL:
+ z := big.NewInt(x)
+ return normalizeIntConst(z.Lsh(z, s))
+ case token.SHR:
+ return x >> s
+ }
+
+ case *big.Int:
+ var z big.Int
+ switch op {
+ case token.SHL:
+ return normalizeIntConst(z.Lsh(x, s))
+ case token.SHR:
+ return normalizeIntConst(z.Rsh(x, s))
+ }
+ }
+
+ unreachable()
+ return nil
+}
+
+// compareConst returns the result of the constant comparison x op y;
+// both operands must be of the same "kind" (boolean, numeric, string,
+// or NilType).
+//
+func compareConst(x, y interface{}, op token.Token) (z bool) {
+	x, y = matchConst(x, y)
+
+	// Reduce the six comparison operators to == and < by first
+	// rewriting > and >= in terms of their mirror operations:
+	// x == y => x == y
+	// x != y => x != y
+	// x > y => y < x
+	// x >= y => y <= x
+	swap := false
+	switch op {
+	case token.GTR:
+		swap = true
+		op = token.LSS
+	case token.GEQ:
+		swap = true
+		op = token.LEQ
+	}
+
+	// x == y => x == y
+	// x != y => !(x == y)
+	// x < y => x < y
+	// x <= y => !(y < x)
+	negate := false
+	switch op {
+	case token.NEQ:
+		negate = true
+		op = token.EQL
+	case token.LEQ:
+		swap = !swap
+		negate = true
+		op = token.LSS
+	}
+
+	// the deferred negation applies to the result of the switch below
+	if negate {
+		defer func() { z = !z }()
+	}
+
+	if swap {
+		x, y = y, x
+	}
+
+	// only == and < remain at this point
+	switch x := x.(type) {
+	case bool:
+		if op == token.EQL {
+			return x == y.(bool)
+		}
+
+	case int64:
+		y := y.(int64)
+		switch op {
+		case token.EQL:
+			return x == y
+		case token.LSS:
+			return x < y
+		}
+
+	case *big.Int:
+		s := x.Cmp(y.(*big.Int))
+		switch op {
+		case token.EQL:
+			return s == 0
+		case token.LSS:
+			return s < 0
+		}
+
+	case *big.Rat:
+		s := x.Cmp(y.(*big.Rat))
+		switch op {
+		case token.EQL:
+			return s == 0
+		case token.LSS:
+			return s < 0
+		}
+
+	case Complex:
+		y := y.(Complex)
+		if op == token.EQL {
+			return x.Re.Cmp(y.Re) == 0 && x.Im.Cmp(y.Im) == 0
+		}
+
+	case string:
+		y := y.(string)
+		switch op {
+		case token.EQL:
+			return x == y
+		case token.LSS:
+			return x < y
+		}
+
+	case NilType:
+		if op == token.EQL {
+			return x == y.(NilType)
+		}
+	}
+
+	// debug aid: show the unexpected operand combination before panicking
+	fmt.Printf("x = %s (%T), y = %s (%T)\n", x, x, y, y)
+	unreachable()
+	return
+}
diff --git a/src/pkg/go/types/conversions.go b/src/pkg/go/types/conversions.go
new file mode 100644
index 000000000..fcbaf7717
--- /dev/null
+++ b/src/pkg/go/types/conversions.go
@@ -0,0 +1,129 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of conversions.
+
+package types
+
+import (
+ "go/ast"
+)
+
+// conversion typechecks the type conversion conv to type typ. iota is the current
+// value of iota or -1 if iota doesn't have a value in the current context. The result
+// of the conversion is returned via x. If the conversion has type errors, the returned
+// x is marked as invalid (x.mode == invalid).
+//
+func (check *checker) conversion(x *operand, conv *ast.CallExpr, typ Type, iota int) {
+ // all conversions have one argument
+ if len(conv.Args) != 1 {
+ check.invalidOp(conv.Pos(), "%s conversion requires exactly one argument", conv)
+ goto Error
+ }
+
+ // evaluate argument
+ check.expr(x, conv.Args[0], nil, iota)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ if x.mode == constant && isConstType(typ) {
+ // constant conversion
+ // TODO(gri) implement this
+ } else {
+ // non-constant conversion
+ if !x.isConvertible(check.ctxt, typ) {
+ check.invalidOp(conv.Pos(), "cannot convert %s to %s", x, typ)
+ goto Error
+ }
+ x.mode = value
+ }
+
+ check.conversions[conv] = true // for cap/len checking
+ x.expr = conv
+ x.typ = typ
+ return
+
+Error:
+ x.mode = invalid
+}
+
+func (x *operand) isConvertible(ctxt *Context, T Type) bool {
+ // "x is assignable to T"
+ if x.isAssignable(ctxt, T) {
+ return true
+ }
+
+ // "x's type and T have identical underlying types"
+ V := x.typ
+ Vu := underlying(V)
+ Tu := underlying(T)
+ if IsIdentical(Vu, Tu) {
+ return true
+ }
+
+ // "x's type and T are unnamed pointer types and their pointer base types have identical underlying types"
+ if V, ok := V.(*Pointer); ok {
+ if T, ok := T.(*Pointer); ok {
+ if IsIdentical(underlying(V.Base), underlying(T.Base)) {
+ return true
+ }
+ }
+ }
+
+ // "x's type and T are both integer or floating point types"
+ if (isInteger(V) || isFloat(V)) && (isInteger(T) || isFloat(T)) {
+ return true
+ }
+
+ // "x's type and T are both complex types"
+ if isComplex(V) && isComplex(T) {
+ return true
+ }
+
+ // "x is an integer or a slice of bytes or runes and T is a string type"
+ if (isInteger(V) || isBytesOrRunes(Vu)) && isString(T) {
+ return true
+ }
+
+ // "x is a string and T is a slice of bytes or runes"
+ if isString(V) && isBytesOrRunes(Tu) {
+ return true
+ }
+
+ // package unsafe:
+ // "any pointer or value of underlying type uintptr can be converted into a unsafe.Pointer"
+ if (isPointer(Vu) || isUintptr(Vu)) && isUnsafePointer(T) {
+ return true
+ }
+ // "and vice versa"
+ if isUnsafePointer(V) && (isPointer(Tu) || isUintptr(Tu)) {
+ return true
+ }
+
+ return false
+}
+
+// isUintptr reports whether typ is the basic type uintptr.
+func isUintptr(typ Type) bool {
+	t, ok := typ.(*Basic)
+	return ok && t.Kind == Uintptr
+}
+
+// isUnsafePointer reports whether typ is the basic type unsafe.Pointer.
+func isUnsafePointer(typ Type) bool {
+	t, ok := typ.(*Basic)
+	return ok && t.Kind == UnsafePointer
+}
+
+// isPointer reports whether typ is a *Pointer type.
+func isPointer(typ Type) bool {
+	_, ok := typ.(*Pointer)
+	return ok
+}
+
+// isBytesOrRunes reports whether typ is a slice whose element type has
+// underlying type byte or rune (i.e., []byte or []rune up to naming).
+func isBytesOrRunes(typ Type) bool {
+	if s, ok := typ.(*Slice); ok {
+		t, ok := underlying(s.Elt).(*Basic)
+		return ok && (t.Kind == Byte || t.Kind == Rune)
+	}
+	return false
+}
diff --git a/src/pkg/go/types/errors.go b/src/pkg/go/types/errors.go
new file mode 100644
index 000000000..62ee54791
--- /dev/null
+++ b/src/pkg/go/types/errors.go
@@ -0,0 +1,335 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements various error reporters.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+)
+
+// TODO(gri) eventually assert and unimplemented should disappear.
+
+// assert panics if p is false; used to check internal invariants.
+func assert(p bool) {
+	if !p {
+		panic("assertion failed")
+	}
+}
+
+// unreachable marks code paths that must never be executed.
+func unreachable() {
+	panic("unreachable")
+}
+
+// printTrace prints a single trace line to stdout: the source position
+// of the innermost traced node, indentation dots proportional to the
+// tracing nesting depth, and the formatted message.
+func (check *checker) printTrace(format string, args []interface{}) {
+	const dots = ". . . . . . . . . . . . . . . . . . . . "
+	n := len(check.pos) - 1
+	i := 3 * n // 3 characters of indentation per nesting level
+	for i > len(dots) {
+		fmt.Print(dots)
+		i -= len(dots)
+	}
+	// i <= len(dots)
+	fmt.Printf("%s:\t", check.fset.Position(check.pos[n]))
+	fmt.Print(dots[0:i])
+	fmt.Println(check.formatMsg(format, args))
+}
+
+// trace pushes pos onto the tracing position stack and prints a trace
+// line. It is paired with a matching untrace call.
+func (check *checker) trace(pos token.Pos, format string, args ...interface{}) {
+	check.pos = append(check.pos, pos)
+	check.printTrace(format, args)
+}
+
+// untrace prints a final trace line (if format is non-empty) and pops
+// the position pushed by the matching trace call.
+func (check *checker) untrace(format string, args ...interface{}) {
+	if len(format) > 0 {
+		check.printTrace(format, args)
+	}
+	check.pos = check.pos[:len(check.pos)-1]
+}
+
+// formatMsg formats a message à la fmt.Sprintf, but first rewrites
+// arguments of selected types into human-readable form: positions are
+// resolved via the checker's file set, and expressions and types are
+// pretty-printed. Note that args is modified in place.
+func (check *checker) formatMsg(format string, args []interface{}) string {
+	for i, arg := range args {
+		switch a := arg.(type) {
+		case token.Pos:
+			args[i] = check.fset.Position(a).String()
+		case ast.Expr:
+			args[i] = exprString(a)
+		case Type:
+			args[i] = typeString(a)
+		case operand:
+			panic("internal error: should always pass *operand")
+		}
+	}
+	return fmt.Sprintf(format, args...)
+}
+
+// dump is only needed for debugging; it prints the formatted message
+// directly to stdout.
+func (check *checker) dump(format string, args ...interface{}) {
+	fmt.Println(check.formatMsg(format, args))
+}
+
+// err records the first error encountered and forwards err to the
+// user-provided error handler, if any. Without a handler it panics
+// with a bailout value after the first error (NOTE(review): the
+// bailout is presumably recovered by the checker's entry point —
+// not visible in this file).
+func (check *checker) err(err error) {
+	if check.firsterr == nil {
+		check.firsterr = err
+	}
+	f := check.ctxt.Error
+	if f == nil {
+		panic(bailout{}) // report only first error
+	}
+	f(err)
+}
+
+// errorf reports a formatted error at pos via check.err.
+func (check *checker) errorf(pos token.Pos, format string, args ...interface{}) {
+	check.err(fmt.Errorf("%s: %s", check.fset.Position(pos), check.formatMsg(format, args)))
+}
+
+// invalidAST reports an error caused by a malformed AST node.
+func (check *checker) invalidAST(pos token.Pos, format string, args ...interface{}) {
+	check.errorf(pos, "invalid AST: "+format, args...)
+}
+
+// invalidArg reports an invalid-argument error at pos.
+func (check *checker) invalidArg(pos token.Pos, format string, args ...interface{}) {
+	check.errorf(pos, "invalid argument: "+format, args...)
+}
+
+// invalidOp reports an invalid-operation error at pos.
+func (check *checker) invalidOp(pos token.Pos, format string, args ...interface{}) {
+	check.errorf(pos, "invalid operation: "+format, args...)
+}
+
+// exprString returns a (simplified) string representation for an expression.
+// It is implemented in terms of writeExpr.
+func exprString(expr ast.Expr) string {
+	var buf bytes.Buffer
+	writeExpr(&buf, expr)
+	return buf.String()
+}
+
+// writeExpr writes a (simplified) string representation of expr to buf.
+// Expressions with no useful compact rendering (function and composite
+// literals, type assertions) are abbreviated; unknown node types are
+// rendered as "<expr T>".
+// TODO(gri) Need to merge with typeString since some expressions are types (try: ([]int)(a))
+func writeExpr(buf *bytes.Buffer, expr ast.Expr) {
+	switch x := expr.(type) {
+	case *ast.Ident:
+		buf.WriteString(x.Name)
+
+	case *ast.BasicLit:
+		buf.WriteString(x.Value)
+
+	case *ast.FuncLit:
+		buf.WriteString("(func literal)")
+
+	case *ast.CompositeLit:
+		buf.WriteString("(composite literal)")
+
+	case *ast.ParenExpr:
+		buf.WriteByte('(')
+		writeExpr(buf, x.X)
+		buf.WriteByte(')')
+
+	case *ast.SelectorExpr:
+		writeExpr(buf, x.X)
+		buf.WriteByte('.')
+		buf.WriteString(x.Sel.Name)
+
+	case *ast.IndexExpr:
+		writeExpr(buf, x.X)
+		buf.WriteByte('[')
+		writeExpr(buf, x.Index)
+		buf.WriteByte(']')
+
+	case *ast.SliceExpr:
+		writeExpr(buf, x.X)
+		buf.WriteByte('[')
+		if x.Low != nil {
+			writeExpr(buf, x.Low)
+		}
+		buf.WriteByte(':')
+		if x.High != nil {
+			writeExpr(buf, x.High)
+		}
+		buf.WriteByte(']')
+
+	case *ast.TypeAssertExpr:
+		writeExpr(buf, x.X)
+		buf.WriteString(".(...)")
+
+	case *ast.CallExpr:
+		writeExpr(buf, x.Fun)
+		buf.WriteByte('(')
+		for i, arg := range x.Args {
+			if i > 0 {
+				buf.WriteString(", ")
+			}
+			writeExpr(buf, arg)
+		}
+		buf.WriteByte(')')
+
+	case *ast.StarExpr:
+		buf.WriteByte('*')
+		writeExpr(buf, x.X)
+
+	case *ast.UnaryExpr:
+		buf.WriteString(x.Op.String())
+		writeExpr(buf, x.X)
+
+	case *ast.BinaryExpr:
+		// The AST preserves source-level parentheses so there is
+		// no need to introduce parentheses here for correctness.
+		writeExpr(buf, x.X)
+		buf.WriteByte(' ')
+		buf.WriteString(x.Op.String())
+		buf.WriteByte(' ')
+		writeExpr(buf, x.Y)
+
+	default:
+		fmt.Fprintf(buf, "<expr %T>", x)
+	}
+}
+
+// typeString returns a string representation for typ.
+// It is implemented in terms of writeType.
+func typeString(typ Type) string {
+	var buf bytes.Buffer
+	writeType(&buf, typ)
+	return buf.String()
+}
+
+// writeParams writes the parenthesized parameter list params to buf.
+// If isVariadic is set, the last parameter's type is prefixed with "...".
+func writeParams(buf *bytes.Buffer, params []*Var, isVariadic bool) {
+	buf.WriteByte('(')
+	for i, par := range params {
+		if i > 0 {
+			buf.WriteString(", ")
+		}
+		if par.Name != "" {
+			buf.WriteString(par.Name)
+			buf.WriteByte(' ')
+		}
+		if isVariadic && i == len(params)-1 {
+			buf.WriteString("...")
+		}
+		writeType(buf, par.Type)
+	}
+	buf.WriteByte(')')
+}
+
+// writeSignature writes a function signature — parameter and result
+// lists, without a leading "func" keyword — to buf.
+func writeSignature(buf *bytes.Buffer, sig *Signature) {
+	writeParams(buf, sig.Params, sig.IsVariadic)
+	if len(sig.Results) == 0 {
+		// no result
+		return
+	}
+
+	buf.WriteByte(' ')
+	if len(sig.Results) == 1 && sig.Results[0].Name == "" {
+		// single unnamed result
+		writeType(buf, sig.Results[0].Type.(Type))
+		return
+	}
+
+	// multiple or named result(s)
+	writeParams(buf, sig.Results, false)
+}
+
+// writeType writes a string representation of typ to buf.
+// All concrete Type implementations are handled; unknown types are
+// rendered as "<type T>" and a nil type as "<nil>".
+func writeType(buf *bytes.Buffer, typ Type) {
+	switch t := typ.(type) {
+	case nil:
+		buf.WriteString("<nil>")
+
+	case *Basic:
+		buf.WriteString(t.Name)
+
+	case *Array:
+		fmt.Fprintf(buf, "[%d]", t.Len)
+		writeType(buf, t.Elt)
+
+	case *Slice:
+		buf.WriteString("[]")
+		writeType(buf, t.Elt)
+
+	case *Struct:
+		buf.WriteString("struct{")
+		for i, f := range t.Fields {
+			if i > 0 {
+				buf.WriteString("; ")
+			}
+			if !f.IsAnonymous {
+				buf.WriteString(f.Name)
+				buf.WriteByte(' ')
+			}
+			writeType(buf, f.Type)
+			if f.Tag != "" {
+				fmt.Fprintf(buf, " %q", f.Tag)
+			}
+		}
+		buf.WriteByte('}')
+
+	case *Pointer:
+		buf.WriteByte('*')
+		writeType(buf, t.Base)
+
+	case *Result:
+		writeParams(buf, t.Values, false)
+
+	case *Signature:
+		buf.WriteString("func")
+		writeSignature(buf, t)
+
+	case *builtin:
+		fmt.Fprintf(buf, "<type of %s>", t.name)
+
+	case *Interface:
+		buf.WriteString("interface{")
+		for i, m := range t.Methods {
+			if i > 0 {
+				buf.WriteString("; ")
+			}
+			buf.WriteString(m.Name)
+			writeSignature(buf, m.Type)
+		}
+		buf.WriteByte('}')
+
+	case *Map:
+		buf.WriteString("map[")
+		writeType(buf, t.Key)
+		buf.WriteByte(']')
+		writeType(buf, t.Elt)
+
+	case *Chan:
+		var s string
+		switch t.Dir {
+		case ast.SEND:
+			s = "chan<- "
+		case ast.RECV:
+			s = "<-chan "
+		default:
+			s = "chan "
+		}
+		buf.WriteString(s)
+		writeType(buf, t.Elt)
+
+	case *NamedType:
+		// Print the package-qualified type name if available.
+		s := "<NamedType w/o object>"
+		if obj := t.Obj; obj != nil {
+			if obj.Pkg != nil && obj.Pkg.Path != "" {
+				buf.WriteString(obj.Pkg.Path)
+				buf.WriteString(".")
+			}
+			s = t.Obj.GetName()
+		}
+		buf.WriteString(s)
+
+	default:
+		fmt.Fprintf(buf, "<type %T>", t)
+	}
+}
+
+// String implementations for all Type kinds, in terms of typeString.
+func (t *Array) String() string { return typeString(t) }
+func (t *Basic) String() string { return typeString(t) }
+func (t *Chan) String() string { return typeString(t) }
+func (t *Interface) String() string { return typeString(t) }
+func (t *Map) String() string { return typeString(t) }
+func (t *NamedType) String() string { return typeString(t) }
+func (t *Pointer) String() string { return typeString(t) }
+func (t *Result) String() string { return typeString(t) }
+func (t *Signature) String() string { return typeString(t) }
+func (t *Slice) String() string { return typeString(t) }
+func (t *Struct) String() string { return typeString(t) }
+func (t *builtin) String() string { return typeString(t) }
diff --git a/src/pkg/go/types/exportdata.go b/src/pkg/go/types/exportdata.go
new file mode 100644
index 000000000..1f6a3c725
--- /dev/null
+++ b/src/pkg/go/types/exportdata.go
@@ -0,0 +1,111 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements FindGcExportData.
+
+package types
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// readGopackHeader reads and parses a single archive (ar) entry header
+// from r, returning the entry's name and its size in bytes.
+func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
+	// See $GOROOT/include/ar.h.
+	hdr := make([]byte, 16+12+6+6+8+10+2)
+	_, err = io.ReadFull(r, hdr)
+	if err != nil {
+		return
+	}
+	// leave for debugging
+	if false {
+		fmt.Printf("header: %s", hdr)
+	}
+	// The size field occupies 10 bytes following the name, date,
+	// uid, gid, and mode fields.
+	s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
+	size, err = strconv.Atoi(s)
+	if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
+		err = errors.New("invalid archive header")
+		return
+	}
+	name = strings.TrimSpace(string(hdr[:16]))
+	return
+}
+
+// FindGcExportData positions the reader r at the beginning of the
+// export data section of an underlying GC-created object/archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function.
+//
+func FindGcExportData(r *bufio.Reader) (err error) {
+	// Read first line to make sure this is an object file.
+	line, err := r.ReadSlice('\n')
+	if err != nil {
+		return
+	}
+	if string(line) == "!<arch>\n" {
+		// Archive file. Scan to __.PKGDEF, which should
+		// be second archive entry.
+		var name string
+		var size int
+
+		// First entry should be __.GOSYMDEF.
+		// Older archives used __.SYMDEF, so allow that too.
+		// Read and discard.
+		if name, size, err = readGopackHeader(r); err != nil {
+			return
+		}
+		if name != "__.SYMDEF" && name != "__.GOSYMDEF" {
+			err = errors.New("go archive does not begin with __.SYMDEF or __.GOSYMDEF")
+			return
+		}
+		// Skip over the symbol table data in fixed-size chunks.
+		const block = 4096
+		tmp := make([]byte, block)
+		for size > 0 {
+			n := size
+			if n > block {
+				n = block
+			}
+			if _, err = io.ReadFull(r, tmp[:n]); err != nil {
+				return
+			}
+			size -= n
+		}
+
+		// Second entry should be __.PKGDEF.
+		if name, size, err = readGopackHeader(r); err != nil {
+			return
+		}
+		if name != "__.PKGDEF" {
+			err = errors.New("go archive is missing __.PKGDEF")
+			return
+		}
+
+		// Read first line of __.PKGDEF data, so that line
+		// is once again the first line of the input.
+		if line, err = r.ReadSlice('\n'); err != nil {
+			return
+		}
+	}
+
+	// Now at __.PKGDEF in archive or still at beginning of file.
+	// Either way, line should begin with "go object ".
+	if !strings.HasPrefix(string(line), "go object ") {
+		err = errors.New("not a go object file")
+		return
+	}
+
+	// Skip over object header to export data.
+	// Begins after first line with $$.
+	for line[0] != '$' {
+		if line, err = r.ReadSlice('\n'); err != nil {
+			return
+		}
+	}
+
+	// r is now positioned immediately after the first line
+	// starting with '$', i.e., at the export data.
+	return
+}
diff --git a/src/pkg/go/types/expr.go b/src/pkg/go/types/expr.go
new file mode 100644
index 000000000..86d782d48
--- /dev/null
+++ b/src/pkg/go/types/expr.go
@@ -0,0 +1,1520 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of expressions.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+ "strconv"
+)
+
+// TODO(gri) Cleanups
+// - don't print error messages referring to invalid types (they are likely spurious errors)
+// - simplify invalid handling: maybe just use Typ[Invalid] as marker, get rid of invalid Mode for values?
+// - rethink error handling: should all callers check if x.mode == valid after making a call?
+// - at the moment, iota is passed around almost everywhere - in many places we know it cannot be used
+// - use "" or "_" consistently for anonymous identifiers? (e.g. receivers that have no name)
+// - consider storing error messages in invalid operands for better error messages/debugging output
+
+// TODO(gri) API issues
+// - clients need access to builtins type information
+// - API tests are missing (e.g., identifiers should be handled as expressions in callbacks)
+
+// collectParams typechecks the parameter list and returns the list of
+// parameter variables. If variadicOk is set, the last parameter may be
+// of the form ...T; isVariadic reports whether such a parameter was seen.
+func (check *checker) collectParams(list *ast.FieldList, variadicOk bool) (params []*Var, isVariadic bool) {
+	if list == nil {
+		return
+	}
+	var last *Var
+	for i, field := range list.List {
+		ftype := field.Type
+		if t, _ := ftype.(*ast.Ellipsis); t != nil {
+			ftype = t.Elt
+			if variadicOk && i == len(list.List)-1 {
+				isVariadic = true
+			} else {
+				check.invalidAST(field.Pos(), "... not permitted")
+				// ok to continue
+			}
+		}
+		// the parser ensures that f.Tag is nil and we don't
+		// care if a constructed AST contains a non-nil tag
+		typ := check.typ(ftype, true)
+		if len(field.Names) > 0 {
+			// named parameter
+			for _, name := range field.Names {
+				par := check.lookup(name).(*Var)
+				par.Type = typ
+				last = par
+				// append a copy: the scope object (last) may be
+				// re-typed below without affecting the externally
+				// visible parameter list
+				copy := *par
+				params = append(params, &copy)
+			}
+		} else {
+			// anonymous parameter
+			par := &Var{Type: typ}
+			last = nil // not accessible inside function
+			params = append(params, par)
+		}
+	}
+	// For a variadic function, change the last parameter's object type
+	// from T to []T (this is the type used inside the function), but
+	// keep the params list unchanged (this is the externally visible type).
+	if isVariadic && last != nil {
+		last.Type = &Slice{Elt: last.Type}
+	}
+	return
+}
+
+// collectMethods typechecks an interface's method list and returns the
+// collected methods, including those contributed by embedded interfaces.
+// Duplicate method names (possible via embedding) are reported as errors.
+func (check *checker) collectMethods(list *ast.FieldList) (methods []*Method) {
+	if list == nil {
+		return
+	}
+	for _, f := range list.List {
+		typ := check.typ(f.Type, len(f.Names) > 0) // cycles are not ok for embedded interfaces
+		// the parser ensures that f.Tag is nil and we don't
+		// care if a constructed AST contains a non-nil tag
+		if len(f.Names) > 0 {
+			// methods (the parser ensures that there's only one
+			// and we don't care if a constructed AST has more)
+			sig, ok := typ.(*Signature)
+			if !ok {
+				check.invalidAST(f.Type.Pos(), "%s is not a method signature", typ)
+				continue
+			}
+			for _, name := range f.Names {
+				methods = append(methods, &Method{QualifiedName{check.pkg, name.Name}, sig})
+			}
+		} else {
+			// embedded interface
+			utyp := underlying(typ)
+			if ityp, ok := utyp.(*Interface); ok {
+				methods = append(methods, ityp.Methods...)
+			} else if utyp != Typ[Invalid] {
+				// if utyp is invalid, don't complain (the root cause was reported before)
+				check.errorf(f.Type.Pos(), "%s is not an interface type", typ)
+			}
+		}
+	}
+	// Check for double declarations.
+	// The parser inserts methods into an interface-local scope, so local
+	// double declarations are reported by the parser already. We need to
+	// check again for conflicts due to embedded interfaces. This will lead
+	// to a 2nd error message if the double declaration was reported before
+	// by the parser.
+	// TODO(gri) clean this up a bit
+	seen := make(map[string]bool)
+	for _, m := range methods {
+		if seen[m.Name] {
+			check.errorf(list.Pos(), "multiple methods named %s", m.Name)
+			return // keep multiple entries, lookup will only return the first entry
+		}
+		seen[m.Name] = true
+	}
+	return
+}
+
+// tag returns the unquoted value of struct field tag literal t,
+// or "" if t is nil or not a valid string literal (in which case
+// an error is reported).
+func (check *checker) tag(t *ast.BasicLit) string {
+	if t != nil {
+		if t.Kind == token.STRING {
+			if val, err := strconv.Unquote(t.Value); err == nil {
+				return val
+			}
+		}
+		check.invalidAST(t.Pos(), "incorrect tag syntax: %q", t.Value)
+	}
+	return ""
+}
+
+// collectFields typechecks a struct's field list and returns the
+// collected fields, including anonymous (embedded) fields. cycleOk is
+// passed through to the type checking of the field types.
+func (check *checker) collectFields(list *ast.FieldList, cycleOk bool) (fields []*Field) {
+	if list == nil {
+		return
+	}
+
+	var typ Type // current field typ
+	var tag string // current field tag
+	add := func(name string, isAnonymous bool) {
+		fields = append(fields, &Field{QualifiedName{check.pkg, name}, typ, tag, isAnonymous})
+	}
+
+	for _, f := range list.List {
+		typ = check.typ(f.Type, cycleOk)
+		tag = check.tag(f.Tag)
+		if len(f.Names) > 0 {
+			// named fields
+			for _, name := range f.Names {
+				add(name.Name, false)
+			}
+		} else {
+			// anonymous field
+			// the field name is the (dereferenced) type's name
+			switch t := deref(typ).(type) {
+			case *Basic:
+				add(t.Name, true)
+			case *NamedType:
+				add(t.Obj.GetName(), true)
+			default:
+				if typ != Typ[Invalid] {
+					check.invalidAST(f.Type.Pos(), "anonymous field type %s must be named", typ)
+				}
+			}
+		}
+	}
+
+	return
+}
+
+// opPredicates maps an operator to the predicate that its operand
+// type must satisfy.
+type opPredicates map[token.Token]func(Type) bool
+
+// unaryOpPredicates lists the permissible operand types for each unary
+// operator (& and <- are handled separately in unary).
+var unaryOpPredicates = opPredicates{
+	token.ADD: isNumeric,
+	token.SUB: isNumeric,
+	token.XOR: isInteger,
+	token.NOT: isBoolean,
+}
+
+// op checks that the type of operand x satisfies the predicate
+// registered for operator op in m. It reports an error and returns
+// false if it does not, or if op has no registered predicate.
+func (check *checker) op(m opPredicates, x *operand, op token.Token) bool {
+	if pred := m[op]; pred != nil {
+		if !pred(x.typ) {
+			check.invalidOp(x.pos(), "operator %s not defined for %s", op, x)
+			return false
+		}
+	} else {
+		check.invalidAST(x.pos(), "unknown operator %s", op)
+		return false
+	}
+	return true
+}
+
+// unary typechecks the unary expression op x and updates x with the
+// result. Address-of (&) and channel receive (<-) are handled
+// explicitly; the remaining operators go through the predicate table.
+// On error, x.mode is set to invalid.
+func (check *checker) unary(x *operand, op token.Token) {
+	switch op {
+	case token.AND:
+		// spec: "As an exception to the addressability
+		// requirement x may also be a composite literal."
+		if _, ok := unparen(x.expr).(*ast.CompositeLit); ok {
+			x.mode = variable
+		}
+		if x.mode != variable {
+			check.invalidOp(x.pos(), "cannot take address of %s", x)
+			goto Error
+		}
+		x.typ = &Pointer{Base: x.typ}
+		return
+
+	case token.ARROW:
+		typ, ok := underlying(x.typ).(*Chan)
+		if !ok {
+			check.invalidOp(x.pos(), "cannot receive from non-channel %s", x)
+			goto Error
+		}
+		if typ.Dir&ast.RECV == 0 {
+			check.invalidOp(x.pos(), "cannot receive from send-only channel %s", x)
+			goto Error
+		}
+		// a receive may be used in a v, ok = <-ch form
+		x.mode = valueok
+		x.typ = typ.Elt
+		return
+	}
+
+	if !check.op(unaryOpPredicates, x, op) {
+		goto Error
+	}
+
+	if x.mode == constant {
+		typ := underlying(x.typ).(*Basic)
+		x.val = unaryOpConst(x.val, check.ctxt, op, typ)
+		// Typed constants must be representable in
+		// their type after each constant operation.
+		check.isRepresentable(x, typ)
+		return
+	}
+
+	x.mode = value
+	// x.typ remains unchanged
+	return
+
+Error:
+	x.mode = invalid
+}
+
+// isShift reports whether op is a shift operator (<< or >>).
+func isShift(op token.Token) bool {
+	return op == token.SHL || op == token.SHR
+}
+
+// isComparison reports whether op is a comparison operator.
+func isComparison(op token.Token) bool {
+	// Note: the comparison token constants are not ordered such
+	// that a simple range check would work here.
+	switch op {
+	case token.EQL, token.NEQ, token.LSS, token.LEQ, token.GTR, token.GEQ:
+		return true
+	}
+	return false
+}
+
+// isRepresentable checks that a constant operand is representable in the
+// given type. If it is not, an error is reported and x.mode is set to
+// invalid. Non-constant operands and untyped target types are ignored.
+func (check *checker) isRepresentable(x *operand, typ *Basic) {
+	if x.mode != constant || isUntyped(typ) {
+		return
+	}
+
+	if !isRepresentableConst(x.val, check.ctxt, typ.Kind) {
+		var msg string
+		if isNumeric(x.typ) && isNumeric(typ) {
+			msg = "%s overflows %s"
+		} else {
+			msg = "cannot convert %s to %s"
+		}
+		check.errorf(x.pos(), msg, x, typ)
+		x.mode = invalid
+	}
+}
+
+// updateExprType updates the type of all untyped nodes in the
+// expression tree of x to typ. If shiftOp is set, x is the lhs
+// of a shift expression. In that case, and if x is in the set
+// of shift operands with delayed type checking, and typ is not
+// an untyped type, updateExprType will check if typ is an
+// integer type.
+// If Context.Expr != nil, it is called for all nodes that are
+// now assigned their final (not untyped) type.
+func (check *checker) updateExprType(x ast.Expr, typ Type, shiftOp bool) {
+	switch x := x.(type) {
+	case *ast.BadExpr,
+		*ast.FuncLit,
+		*ast.CompositeLit,
+		*ast.SelectorExpr,
+		*ast.IndexExpr,
+		*ast.SliceExpr,
+		*ast.TypeAssertExpr,
+		*ast.CallExpr,
+		*ast.StarExpr,
+		*ast.KeyValueExpr,
+		*ast.ArrayType,
+		*ast.StructType,
+		*ast.FuncType,
+		*ast.InterfaceType,
+		*ast.MapType,
+		*ast.ChanType:
+		// these expressions are never untyped - nothing to do
+		return
+
+	case *ast.Ident, *ast.BasicLit:
+		// update type
+
+	case *ast.ParenExpr:
+		check.updateExprType(x.X, typ, false)
+
+	case *ast.UnaryExpr:
+		check.updateExprType(x.X, typ, false)
+
+	case *ast.BinaryExpr:
+		if isComparison(x.Op) {
+			// result type is independent of operand types
+		} else if isShift(x.Op) {
+			// result type depends only on lhs operand
+			check.updateExprType(x.X, typ, true)
+		} else {
+			// operand types match result type
+			check.updateExprType(x.X, typ, false)
+			check.updateExprType(x.Y, typ, false)
+		}
+
+	case *ast.Ellipsis:
+		unreachable()
+	default:
+		unreachable()
+	}
+
+	// Update the recorded (untyped) type of x, or finalize it.
+	// TODO(gri) t should always exist, shouldn't it?
+	if t := check.untyped[x]; t != nil {
+		if isUntyped(typ) {
+			check.untyped[x] = typ.(*Basic)
+		} else {
+			// notify clients of final type for x
+			if f := check.ctxt.Expr; f != nil {
+				f(x, typ, check.constants[x])
+			}
+			delete(check.untyped, x)
+			delete(check.constants, x)
+			// check delayed shift
+			// Note: Using shiftOp is an optimization: it prevents
+			// map lookups when we know x is not a shiftOp in the
+			// first place.
+			if shiftOp && check.shiftOps[x] {
+				if !isInteger(typ) {
+					check.invalidOp(x.Pos(), "shifted operand %s (type %s) must be integer", x, typ)
+				}
+				delete(check.shiftOps, x)
+			}
+		}
+	}
+}
+
+// convertUntyped attempts to set the type of an untyped value to the target type.
+// If the conversion is not possible, an error is reported and x.mode is
+// set to invalid.
+func (check *checker) convertUntyped(x *operand, target Type) {
+	if x.mode == invalid || !isUntyped(x.typ) {
+		return
+	}
+
+	// TODO(gri) Sloppy code - clean up. This function is central
+	// to assignment and expression checking.
+
+	if isUntyped(target) {
+		// both x and target are untyped
+		xkind := x.typ.(*Basic).Kind
+		tkind := target.(*Basic).Kind
+		if isNumeric(x.typ) && isNumeric(target) {
+			if xkind < tkind {
+				x.typ = target
+				check.updateExprType(x.expr, target, false)
+			}
+		} else if xkind != tkind {
+			goto Error
+		}
+		return
+	}
+
+	// typed target
+	switch t := underlying(target).(type) {
+	case nil:
+		// We may reach here due to previous type errors.
+		// Be conservative and don't crash.
+		x.mode = invalid
+		return
+	case *Basic:
+		check.isRepresentable(x, t)
+		if x.mode == invalid {
+			return // error already reported
+		}
+	case *Interface:
+		if !x.isNil() && len(t.Methods) > 0 /* empty interfaces are ok */ {
+			goto Error
+		}
+		// Update operand types to the default type rather than
+		// the target (interface) type: values must have concrete
+		// dynamic types. If the value is nil, keep it untyped
+		// (this is important for tools such as go vet which need
+		// the dynamic type for argument checking of say, print
+		// functions)
+		if x.isNil() {
+			target = Typ[UntypedNil]
+		} else {
+			// cannot assign untyped values to non-empty interfaces
+			if len(t.Methods) > 0 {
+				goto Error
+			}
+			target = defaultType(x.typ)
+		}
+	case *Pointer, *Signature, *Slice, *Map, *Chan:
+		if !x.isNil() {
+			goto Error
+		}
+		// keep nil untyped - see comment for interfaces, above
+		target = Typ[UntypedNil]
+	default:
+		if debug {
+			check.dump("convertUntyped(x = %v, target = %v)", x, target)
+		}
+		unreachable()
+	}
+
+	x.typ = target
+	check.updateExprType(x.expr, target, false)
+	return
+
+Error:
+	check.errorf(x.pos(), "cannot convert %s to %s", x, target)
+	x.mode = invalid
+}
+
+// comparison typechecks the comparison x op y and updates x with the
+// result, which is always of type untyped bool (constant-folded if
+// both operands are constant). On error, x.mode is set to invalid.
+func (check *checker) comparison(x, y *operand, op token.Token) {
+	// TODO(gri) deal with interface vs non-interface comparison
+
+	valid := false
+	if x.isAssignable(check.ctxt, y.typ) || y.isAssignable(check.ctxt, x.typ) {
+		switch op {
+		case token.EQL, token.NEQ:
+			valid = isComparable(x.typ) ||
+				x.isNil() && hasNil(y.typ) ||
+				y.isNil() && hasNil(x.typ)
+		case token.LSS, token.LEQ, token.GTR, token.GEQ:
+			valid = isOrdered(x.typ)
+		default:
+			unreachable()
+		}
+	}
+
+	if !valid {
+		check.invalidOp(x.pos(), "cannot compare %s %s %s", x, op, y)
+		x.mode = invalid
+		return
+	}
+
+	if x.mode == constant && y.mode == constant {
+		x.val = compareConst(x.val, y.val, op)
+	} else {
+		x.mode = value
+	}
+
+	x.typ = Typ[UntypedBool]
+}
+
+// shift typechecks the shift expression x op y (op is << or >>) and
+// updates x with the result. Constant shifts are folded; untyped
+// constant left operands of non-constant shifts get delayed type
+// checking (see updateExprType). On error, x.mode is set to invalid.
+func (check *checker) shift(x, y *operand, op token.Token) {
+	// spec: "The right operand in a shift expression must have unsigned
+	// integer type or be an untyped constant that can be converted to
+	// unsigned integer type."
+	switch {
+	case isInteger(y.typ) && isUnsigned(y.typ):
+		// nothing to do
+	case y.mode == constant && isUntyped(y.typ):
+		check.convertUntyped(x, Typ[UntypedInt])
+	default:
+		check.invalidOp(y.pos(), "shift count %s must be unsigned integer", y)
+		x.mode = invalid
+		return
+	}
+
+	if x.mode == constant {
+		if y.mode == constant {
+			// constant shift - lhs must be (representable as) an integer
+			if isUntyped(x.typ) {
+				if !isRepresentableConst(x.val, check.ctxt, UntypedInt) {
+					check.invalidOp(x.pos(), "shifted operand %s must be integer", x)
+					x.mode = invalid
+					return
+				}
+				x.typ = Typ[UntypedInt]
+			}
+			assert(x.isInteger(check.ctxt))
+
+			// rhs must be within reasonable bounds
+			const stupidShift = 1024
+			s, ok := y.val.(int64)
+			if !ok || s < 0 || s >= stupidShift {
+				check.invalidOp(y.pos(), "%s: stupid shift", y)
+				x.mode = invalid
+				return
+			}
+
+			// everything's ok
+			x.val = shiftConst(x.val, uint(s), op)
+			return
+		}
+
+		// non-constant shift with constant lhs
+		if isUntyped(x.typ) {
+			// spec: "If the left operand of a non-constant shift expression is
+			// an untyped constant, the type of the constant is what it would be
+			// if the shift expression were replaced by its left operand alone;
+			// the type is int if it cannot be determined from the context (for
+			// instance, if the shift expression is an operand in a comparison
+			// against an untyped constant)".
+
+			// delay operand checking until we know the type
+			check.shiftOps[x.expr] = true
+			x.mode = value
+			return
+		}
+	}
+
+	// non-constant shift - lhs must be an integer
+	if !isInteger(x.typ) {
+		check.invalidOp(x.pos(), "shifted operand %s must be integer", x)
+		x.mode = invalid
+		return
+	}
+
+	// non-constant shift
+	x.mode = value
+}
+
+// binaryOpPredicates lists the permissible operand types for each
+// binary operator (shifts and comparisons are handled separately in
+// shift and comparison).
+var binaryOpPredicates = opPredicates{
+	token.ADD: func(typ Type) bool { return isNumeric(typ) || isString(typ) },
+	token.SUB: isNumeric,
+	token.MUL: isNumeric,
+	token.QUO: isNumeric,
+	token.REM: isInteger,
+
+	token.AND: isInteger,
+	token.OR: isInteger,
+	token.XOR: isInteger,
+	token.AND_NOT: isInteger,
+
+	token.LAND: isBoolean,
+	token.LOR: isBoolean,
+}
+
+// binary typechecks the binary expression lhs op rhs and updates x with
+// the result. iota >= 0 indicates that the expression is part of a
+// constant declaration. Constant operations are folded. On error,
+// x.mode is set to invalid.
+func (check *checker) binary(x *operand, lhs, rhs ast.Expr, op token.Token, iota int) {
+	var y operand
+
+	check.expr(x, lhs, nil, iota)
+	check.expr(&y, rhs, nil, iota)
+
+	if x.mode == invalid {
+		return
+	}
+	if y.mode == invalid {
+		x.mode = invalid
+		x.expr = y.expr
+		return
+	}
+
+	if isShift(op) {
+		check.shift(x, &y, op)
+		return
+	}
+
+	// convert the untyped operand (if any) to the other operand's type
+	check.convertUntyped(x, y.typ)
+	if x.mode == invalid {
+		return
+	}
+	check.convertUntyped(&y, x.typ)
+	if y.mode == invalid {
+		x.mode = invalid
+		return
+	}
+
+	if isComparison(op) {
+		check.comparison(x, &y, op)
+		return
+	}
+
+	if !IsIdentical(x.typ, y.typ) {
+		check.invalidOp(x.pos(), "mismatched types %s and %s", x.typ, y.typ)
+		x.mode = invalid
+		return
+	}
+
+	if !check.op(binaryOpPredicates, x, op) {
+		x.mode = invalid
+		return
+	}
+
+	if (op == token.QUO || op == token.REM) && y.mode == constant && isZeroConst(y.val) {
+		check.invalidOp(y.pos(), "division by zero")
+		x.mode = invalid
+		return
+	}
+
+	if x.mode == constant && y.mode == constant {
+		typ := underlying(x.typ).(*Basic)
+		x.val = binaryOpConst(x.val, y.val, op, typ)
+		// Typed constants must be representable in
+		// their type after each constant operation.
+		check.isRepresentable(x, typ)
+		return
+	}
+
+	x.mode = value
+	// x.typ is unchanged
+}
+
+// index checks an index expression for validity. If length >= 0, it is the upper
+// bound for the index. The result is a valid index >= 0, or a negative value.
+// Errors are reported as a side effect; a negative result means the index
+// could not be fully validated (non-constant indices are not errors).
+//
+func (check *checker) index(index ast.Expr, length int64, iota int) int64 {
+	var x operand
+
+	check.expr(&x, index, nil, iota)
+	if !x.isInteger(check.ctxt) {
+		check.errorf(x.pos(), "index %s must be integer", &x)
+		return -1
+	}
+	if x.mode != constant {
+		return -1 // we cannot check more
+	}
+	// The spec doesn't require int64 indices, but perhaps it should.
+	i, ok := x.val.(int64)
+	if !ok {
+		check.errorf(x.pos(), "stupid index %s", &x)
+		return -1
+	}
+	if i < 0 {
+		check.errorf(x.pos(), "index %s must not be negative", &x)
+		return -1
+	}
+	if length >= 0 && i >= length {
+		check.errorf(x.pos(), "index %s is out of bounds (>= %d)", &x, length)
+		return -1
+	}
+
+	return i
+}
+
+// compositeLitKey resolves unresolved composite literal keys.
+// For details, see comment in go/parser/parser.go, method parseElement.
+// Keys that resolve in neither the package scope nor the universe
+// scope are reported as undeclared names.
+func (check *checker) compositeLitKey(key ast.Expr) {
+	if ident, ok := key.(*ast.Ident); ok && ident.Obj == nil {
+		if obj := check.pkg.Scope.Lookup(ident.Name); obj != nil {
+			check.register(ident, obj)
+		} else if obj := Universe.Lookup(ident.Name); obj != nil {
+			check.register(ident, obj)
+		} else {
+			check.errorf(ident.Pos(), "undeclared name: %s", ident.Name)
+		}
+	}
+}
+
+// indexedElts checks the elements (elts) of an array or slice composite literal
+// against the literal's element type (typ), and the element indices against
+// the literal length if known (length >= 0). It returns the length of the
+// literal (maximum index value + 1).
+//
+func (check *checker) indexedElts(elts []ast.Expr, typ Type, length int64, iota int) int64 {
+	visited := make(map[int64]bool, len(elts))
+	// index is the running element index; max tracks the resulting
+	// literal length (maximum index + 1)
+	var index, max int64
+	for _, e := range elts {
+		// determine and check index
+		validIndex := false
+		eval := e
+		if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
+			check.compositeLitKey(kv.Key)
+			if i := check.index(kv.Key, length, iota); i >= 0 {
+				index = i
+				validIndex = true
+			}
+			eval = kv.Value
+		} else if length >= 0 && index >= length {
+			check.errorf(e.Pos(), "index %d is out of bounds (>= %d)", index, length)
+		} else {
+			validIndex = true
+		}
+
+		// if we have a valid index, check for duplicate entries
+		if validIndex {
+			if visited[index] {
+				check.errorf(e.Pos(), "duplicate index %d in array or slice literal", index)
+			}
+			visited[index] = true
+		}
+		index++
+		if index > max {
+			max = index
+		}
+
+		// check element against composite literal element type
+		var x operand
+		check.expr(&x, eval, typ, iota)
+		if !check.assignment(&x, typ) && x.mode != invalid {
+			check.errorf(x.pos(), "cannot use %s as %s value in array or slice literal", &x, typ)
+		}
+	}
+	return max
+}
+
+// argument typechecks passing an argument arg (if arg != nil) or
+// x (if arg == nil) to the i'th parameter of the given signature.
+// If passSlice is set, the argument is followed by ... in the call.
+//
+func (check *checker) argument(sig *Signature, i int, arg ast.Expr, x *operand, passSlice bool) {
+	// determine parameter
+	var par *Var
+	n := len(sig.Params)
+	if i < n {
+		par = sig.Params[i]
+	} else if sig.IsVariadic {
+		// extra arguments all match the final ...T parameter
+		par = sig.Params[n-1]
+	} else {
+		check.errorf(arg.Pos(), "too many arguments")
+		return
+	}
+
+	// determine argument
+	// z stands in for the assignment target (the parameter),
+	// used for assignability checking and error reporting
+	var z operand
+	z.mode = variable
+	z.expr = nil // TODO(gri) can we do better here? (for good error messages)
+	z.typ = par.Type
+
+	if arg != nil {
+		check.expr(x, arg, z.typ, -1)
+	}
+	if x.mode == invalid {
+		return // ignore this argument
+	}
+
+	// check last argument of the form x...
+	if passSlice {
+		if i+1 != n {
+			check.errorf(x.pos(), "can only use ... with matching parameter")
+			return // ignore this argument
+		}
+		// spec: "If the final argument is assignable to a slice type []T,
+		// it may be passed unchanged as the value for a ...T parameter if
+		// the argument is followed by ..."
+		z.typ = &Slice{Elt: z.typ} // change final parameter type to []T
+	}
+
+	if !check.assignment(x, z.typ) && x.mode != invalid {
+		check.errorf(x.pos(), "cannot pass argument %s to %s", x, &z)
+	}
+}
+
+// emptyResult is the singleton type used for expressions that yield no value.
+var emptyResult Result
+
+// callExpr notifies the client (via ctxt.Expr) of the type and constant
+// value of the checked expression x. Untyped expressions are recorded
+// and their notification is delayed until they receive a final type
+// (see updateExprType) or until the end of type checking.
+func (check *checker) callExpr(x *operand) {
+	var typ Type
+	var val interface{}
+	switch x.mode {
+	case invalid:
+		return // nothing to do
+	case novalue:
+		typ = &emptyResult
+	case constant:
+		typ = x.typ
+		val = x.val
+	default:
+		typ = x.typ
+	}
+
+	// if the operand is untyped, delay notification
+	// until it becomes typed or until the end of
+	// type checking
+	if isUntyped(typ) {
+		check.untyped[x.expr] = typ.(*Basic)
+		if val != nil {
+			check.constants[x.expr] = val
+		}
+		return
+	}
+
+	// TODO(gri) ensure that literals always report
+	// their dynamic (never interface) type.
+	// This is not the case yet.
+
+	if check.ctxt.Expr != nil {
+		check.ctxt.Expr(x.expr, typ, val)
+	}
+}
+
+// rawExpr typechecks expression e and initializes x with the expression
+// value or type. If an error occurred, x.mode is set to invalid.
+// If hint != nil, it is the type of a composite literal element.
+// iota >= 0 indicates that the expression is part of a constant declaration.
+// cycleOk indicates whether it is ok for a type expression to refer to itself.
+//
+func (check *checker) rawExpr(x *operand, e ast.Expr, hint Type, iota int, cycleOk bool) {
+ if trace {
+ c := ""
+ if cycleOk {
+ c = " ⨁"
+ }
+ check.trace(e.Pos(), "%s (%s, %d%s)", e, typeString(hint), iota, c)
+ defer check.untrace("=> %s", x)
+ }
+
+ defer check.callExpr(x)
+
+ switch e := e.(type) {
+ case *ast.BadExpr:
+ goto Error // error was reported before
+
+ case *ast.Ident:
+ if e.Name == "_" {
+ check.invalidOp(e.Pos(), "cannot use _ as value or type")
+ goto Error
+ }
+ obj := check.lookup(e)
+ if obj == nil {
+ goto Error // error was reported before
+ }
+ check.object(obj, cycleOk)
+ switch obj := obj.(type) {
+ case *Package:
+ check.errorf(e.Pos(), "use of package %s not in selector", obj.Name)
+ goto Error
+ case *Const:
+ if obj.Val == nil {
+ goto Error // cycle detected
+ }
+ x.mode = constant
+ if obj == universeIota {
+ if iota < 0 {
+ check.invalidAST(e.Pos(), "cannot use iota outside constant declaration")
+ goto Error
+ }
+ x.val = int64(iota)
+ } else {
+ x.val = obj.Val
+ }
+ case *TypeName:
+ x.mode = typexpr
+ if !cycleOk && underlying(obj.Type) == nil {
+ check.errorf(obj.spec.Pos(), "illegal cycle in declaration of %s", obj.Name)
+ x.expr = e
+ x.typ = Typ[Invalid]
+ return // don't goto Error - need x.mode == typexpr
+ }
+ case *Var:
+ x.mode = variable
+ case *Func:
+ x.mode = value
+ default:
+ unreachable()
+ }
+ x.typ = obj.GetType()
+
+ case *ast.Ellipsis:
+ // ellipses are handled explicitly where they are legal
+ // (array composite literals and parameter lists)
+ check.errorf(e.Pos(), "invalid use of '...'")
+ goto Error
+
+ case *ast.BasicLit:
+ x.setConst(e.Kind, e.Value)
+ if x.mode == invalid {
+ check.invalidAST(e.Pos(), "invalid literal %v", e.Value)
+ goto Error
+ }
+
+ case *ast.FuncLit:
+ if sig, ok := check.typ(e.Type, false).(*Signature); ok {
+ x.mode = value
+ x.typ = sig
+ check.later(nil, sig, e.Body)
+ } else {
+ check.invalidAST(e.Pos(), "invalid function literal %s", e)
+ goto Error
+ }
+
+ case *ast.CompositeLit:
+ typ := hint
+ openArray := false
+ if e.Type != nil {
+ // [...]T array types may only appear with composite literals.
+ // Check for them here so we don't have to handle ... in general.
+ typ = nil
+ if atyp, _ := e.Type.(*ast.ArrayType); atyp != nil && atyp.Len != nil {
+ if ellip, _ := atyp.Len.(*ast.Ellipsis); ellip != nil && ellip.Elt == nil {
+ // We have an "open" [...]T array type.
+ // Create a new ArrayType with unknown length (-1)
+ // and finish setting it up after analyzing the literal.
+ typ = &Array{Len: -1, Elt: check.typ(atyp.Elt, cycleOk)}
+ openArray = true
+ }
+ }
+ if typ == nil {
+ typ = check.typ(e.Type, false)
+ }
+ }
+ if typ == nil {
+ check.errorf(e.Pos(), "missing type in composite literal")
+ goto Error
+ }
+
+ switch utyp := underlying(deref(typ)).(type) {
+ case *Struct:
+ if len(e.Elts) == 0 {
+ break
+ }
+ fields := utyp.Fields
+ if _, ok := e.Elts[0].(*ast.KeyValueExpr); ok {
+ // all elements must have keys
+ visited := make([]bool, len(fields))
+ for _, e := range e.Elts {
+ kv, _ := e.(*ast.KeyValueExpr)
+ if kv == nil {
+ check.errorf(e.Pos(), "mixture of field:value and value elements in struct literal")
+ continue
+ }
+ key, _ := kv.Key.(*ast.Ident)
+ if key == nil {
+ check.errorf(kv.Pos(), "invalid field name %s in struct literal", kv.Key)
+ continue
+ }
+ i := utyp.fieldIndex(QualifiedName{check.pkg, key.Name})
+ if i < 0 {
+ check.errorf(kv.Pos(), "unknown field %s in struct literal", key.Name)
+ continue
+ }
+ // 0 <= i < len(fields)
+ if visited[i] {
+ check.errorf(kv.Pos(), "duplicate field name %s in struct literal", key.Name)
+ continue
+ }
+ visited[i] = true
+ check.expr(x, kv.Value, nil, iota)
+ etyp := fields[i].Type
+ if !check.assignment(x, etyp) {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "cannot use %s as %s value in struct literal", x, etyp)
+ }
+ continue
+ }
+ }
+ } else {
+ // no element must have a key
+ for i, e := range e.Elts {
+ if kv, _ := e.(*ast.KeyValueExpr); kv != nil {
+ check.errorf(kv.Pos(), "mixture of field:value and value elements in struct literal")
+ continue
+ }
+ check.expr(x, e, nil, iota)
+ if i >= len(fields) {
+ check.errorf(x.pos(), "too many values in struct literal")
+ break // cannot continue
+ }
+ // i < len(fields)
+ etyp := fields[i].Type
+ if !check.assignment(x, etyp) {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "cannot use %s as %s value in struct literal", x, etyp)
+ }
+ continue
+ }
+ }
+ if len(e.Elts) < len(fields) {
+ check.errorf(e.Rbrace, "too few values in struct literal")
+ // ok to continue
+ }
+ }
+
+ case *Array:
+ n := check.indexedElts(e.Elts, utyp.Elt, utyp.Len, iota)
+ // if we have an "open" [...]T array, set the length now that we know it
+ if openArray {
+ utyp.Len = n
+ }
+
+ case *Slice:
+ check.indexedElts(e.Elts, utyp.Elt, -1, iota)
+
+ case *Map:
+ visited := make(map[interface{}]bool, len(e.Elts))
+ for _, e := range e.Elts {
+ kv, _ := e.(*ast.KeyValueExpr)
+ if kv == nil {
+ check.errorf(e.Pos(), "missing key in map literal")
+ continue
+ }
+ check.compositeLitKey(kv.Key)
+ check.expr(x, kv.Key, nil, iota)
+ if !check.assignment(x, utyp.Key) {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "cannot use %s as %s key in map literal", x, utyp.Key)
+ }
+ continue
+ }
+ if x.mode == constant {
+ if visited[x.val] {
+ check.errorf(x.pos(), "duplicate key %s in map literal", x.val)
+ continue
+ }
+ visited[x.val] = true
+ }
+ check.expr(x, kv.Value, utyp.Elt, iota)
+ if !check.assignment(x, utyp.Elt) {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "cannot use %s as %s value in map literal", x, utyp.Elt)
+ }
+ continue
+ }
+ }
+
+ default:
+ check.errorf(e.Pos(), "%s is not a valid composite literal type", typ)
+ goto Error
+ }
+
+ x.mode = value
+ x.typ = typ
+
+ case *ast.ParenExpr:
+ check.rawExpr(x, e.X, nil, iota, cycleOk)
+
+ case *ast.SelectorExpr:
+ sel := e.Sel.Name
+ // If the identifier refers to a package, handle everything here
+ // so we don't need a "package" mode for operands: package names
+ // can only appear in qualified identifiers which are mapped to
+ // selector expressions.
+ if ident, ok := e.X.(*ast.Ident); ok {
+ if pkg, ok := check.lookup(ident).(*Package); ok {
+ exp := pkg.Scope.Lookup(sel)
+ // gcimported package scopes contain non-exported
+ // objects such as types used in partially exported
+ // objects - do not accept them
+ if exp == nil || !ast.IsExported(exp.GetName()) {
+ check.errorf(e.Pos(), "cannot refer to unexported %s", e)
+ goto Error
+ }
+ check.register(e.Sel, exp)
+ // Simplified version of the code for *ast.Idents:
+ // - imported packages use types.Scope and types.Objects
+ // - imported objects are always fully initialized
+ switch exp := exp.(type) {
+ case *Const:
+ assert(exp.Val != nil)
+ x.mode = constant
+ x.typ = exp.Type
+ x.val = exp.Val
+ case *TypeName:
+ x.mode = typexpr
+ x.typ = exp.Type
+ case *Var:
+ x.mode = variable
+ x.typ = exp.Type
+ case *Func:
+ x.mode = value
+ x.typ = exp.Type
+ default:
+ unreachable()
+ }
+ x.expr = e
+ return
+ }
+ }
+
+ check.exprOrType(x, e.X, iota, false)
+ if x.mode == invalid {
+ goto Error
+ }
+ res := lookupField(x.typ, QualifiedName{check.pkg, sel})
+ if res.mode == invalid {
+ check.invalidOp(e.Pos(), "%s has no single field or method %s", x, sel)
+ goto Error
+ }
+ if x.mode == typexpr {
+ // method expression
+ sig, ok := res.typ.(*Signature)
+ if !ok {
+ check.invalidOp(e.Pos(), "%s has no method %s", x, sel)
+ goto Error
+ }
+ // the receiver type becomes the type of the first function
+ // argument of the method expression's function type
+ // TODO(gri) at the moment, method sets don't correctly track
+ // pointer vs non-pointer receivers => typechecker is too lenient
+ x.mode = value
+ x.typ = &Signature{
+ Params: append([]*Var{{Type: x.typ}}, sig.Params...),
+ Results: sig.Results,
+ IsVariadic: sig.IsVariadic,
+ }
+ } else {
+ // regular selector
+ x.mode = res.mode
+ x.typ = res.typ
+ }
+
+ case *ast.IndexExpr:
+ check.expr(x, e.X, nil, iota)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ valid := false
+ length := int64(-1) // valid if >= 0
+ switch typ := underlying(x.typ).(type) {
+ case *Basic:
+ if isString(typ) {
+ valid = true
+ if x.mode == constant {
+ length = int64(len(x.val.(string)))
+ }
+ // an indexed string always yields a byte value
+ // (not a constant) even if the string and the
+ // index are constant
+ x.mode = value
+ x.typ = Typ[Byte]
+ }
+
+ case *Array:
+ valid = true
+ length = typ.Len
+ if x.mode != variable {
+ x.mode = value
+ }
+ x.typ = typ.Elt
+
+ case *Pointer:
+ if typ, _ := underlying(typ.Base).(*Array); typ != nil {
+ valid = true
+ length = typ.Len
+ x.mode = variable
+ x.typ = typ.Elt
+ }
+
+ case *Slice:
+ valid = true
+ x.mode = variable
+ x.typ = typ.Elt
+
+ case *Map:
+ var key operand
+ check.expr(&key, e.Index, nil, iota)
+ if !check.assignment(&key, typ.Key) {
+ if key.mode != invalid {
+ check.invalidOp(key.pos(), "cannot use %s as map index of type %s", &key, typ.Key)
+ }
+ goto Error
+ }
+ x.mode = valueok
+ x.typ = typ.Elt
+ x.expr = e
+ return
+ }
+
+ if !valid {
+ check.invalidOp(x.pos(), "cannot index %s", x)
+ goto Error
+ }
+
+ if e.Index == nil {
+ check.invalidAST(e.Pos(), "missing index expression for %s", x)
+ return
+ }
+
+ check.index(e.Index, length, iota)
+ // ok to continue
+
+ case *ast.SliceExpr:
+ check.expr(x, e.X, nil, iota)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ valid := false
+ length := int64(-1) // valid if >= 0
+ switch typ := underlying(x.typ).(type) {
+ case *Basic:
+ if isString(typ) {
+ valid = true
+ if x.mode == constant {
+ length = int64(len(x.val.(string))) + 1 // +1 for slice
+ }
+ // a sliced string always yields a string value
+ // of the same type as the original string (not
+ // a constant) even if the string and the indices
+ // are constant
+ x.mode = value
+ // x.typ doesn't change, but if it is an untyped
+ // string it becomes string (see also issue 4913).
+ if typ.Kind == UntypedString {
+ x.typ = Typ[String]
+ }
+ }
+
+ case *Array:
+ valid = true
+ length = typ.Len + 1 // +1 for slice
+ if x.mode != variable {
+ check.invalidOp(x.pos(), "cannot slice %s (value not addressable)", x)
+ goto Error
+ }
+ x.typ = &Slice{Elt: typ.Elt}
+
+ case *Pointer:
+ if typ, _ := underlying(typ.Base).(*Array); typ != nil {
+ valid = true
+ length = typ.Len + 1 // +1 for slice
+ x.mode = variable
+ x.typ = &Slice{Elt: typ.Elt}
+ }
+
+ case *Slice:
+ valid = true
+ x.mode = variable
+ // x.typ doesn't change
+ }
+
+ if !valid {
+ check.invalidOp(x.pos(), "cannot slice %s", x)
+ goto Error
+ }
+
+ lo := int64(0)
+ if e.Low != nil {
+ lo = check.index(e.Low, length, iota)
+ }
+
+ hi := int64(-1)
+ if e.High != nil {
+ hi = check.index(e.High, length, iota)
+ } else if length >= 0 {
+ hi = length
+ }
+
+ if lo >= 0 && hi >= 0 && lo > hi {
+ check.errorf(e.Low.Pos(), "inverted slice range: %d > %d", lo, hi)
+ // ok to continue
+ }
+
+ case *ast.TypeAssertExpr:
+ check.expr(x, e.X, nil, iota)
+ if x.mode == invalid {
+ goto Error
+ }
+ var T *Interface
+ if T, _ = underlying(x.typ).(*Interface); T == nil {
+ check.invalidOp(x.pos(), "%s is not an interface", x)
+ goto Error
+ }
+ // x.(type) expressions are handled explicitly in type switches
+ if e.Type == nil {
+ check.errorf(e.Pos(), "use of .(type) outside type switch")
+ goto Error
+ }
+ typ := check.typ(e.Type, false)
+ if typ == Typ[Invalid] {
+ goto Error
+ }
+ if method, wrongType := missingMethod(typ, T); method != nil {
+ var msg string
+ if wrongType {
+ msg = "%s cannot have dynamic type %s (wrong type for method %s)"
+ } else {
+ msg = "%s cannot have dynamic type %s (missing method %s)"
+ }
+ check.errorf(e.Type.Pos(), msg, x, typ, method.Name)
+ // ok to continue
+ }
+ x.mode = valueok
+ x.expr = e
+ x.typ = typ
+
+ case *ast.CallExpr:
+ check.exprOrType(x, e.Fun, iota, false)
+ if x.mode == invalid {
+ goto Error
+ } else if x.mode == typexpr {
+ check.conversion(x, e, x.typ, iota)
+ } else if sig, ok := underlying(x.typ).(*Signature); ok {
+ // check parameters
+
+ // If we have a trailing ... at the end of the parameter
+ // list, the last argument must match the parameter type
+ // []T of a variadic function parameter x ...T.
+ passSlice := false
+ if e.Ellipsis.IsValid() {
+ if sig.IsVariadic {
+ passSlice = true
+ } else {
+ check.errorf(e.Ellipsis, "cannot use ... in call to %s", e.Fun)
+ // ok to continue
+ }
+ }
+
+ // If we have a single argument that is a function call
+ // we need to handle it separately. Determine if this
+ // is the case without checking the argument.
+ var call *ast.CallExpr
+ if len(e.Args) == 1 {
+ call, _ = unparen(e.Args[0]).(*ast.CallExpr)
+ }
+
+ n := 0 // parameter count
+ if call != nil {
+ // We have a single argument that is a function call.
+ check.expr(x, call, nil, -1)
+ if x.mode == invalid {
+ goto Error // TODO(gri): we can do better
+ }
+ if t, _ := x.typ.(*Result); t != nil {
+ // multiple result values
+ n = len(t.Values)
+ for i, obj := range t.Values {
+ x.mode = value
+ x.expr = nil // TODO(gri) can we do better here? (for good error messages)
+ x.typ = obj.Type
+ check.argument(sig, i, nil, x, passSlice && i+1 == n)
+ }
+ } else {
+ // single result value
+ n = 1
+ check.argument(sig, 0, nil, x, passSlice)
+ }
+
+ } else {
+ // We don't have a single argument or it is not a function call.
+ n = len(e.Args)
+ for i, arg := range e.Args {
+ check.argument(sig, i, arg, x, passSlice && i+1 == n)
+ }
+ }
+
+ // determine if we have enough arguments
+ if sig.IsVariadic {
+ // a variadic function accepts an "empty"
+ // last argument: count one extra
+ n++
+ }
+ if n < len(sig.Params) {
+ check.errorf(e.Fun.Pos(), "too few arguments in call to %s", e.Fun)
+ // ok to continue
+ }
+
+ // determine result
+ switch len(sig.Results) {
+ case 0:
+ x.mode = novalue
+ case 1:
+ x.mode = value
+ x.typ = sig.Results[0].Type
+ default:
+ x.mode = value
+ x.typ = &Result{Values: sig.Results}
+ }
+
+ } else if bin, ok := x.typ.(*builtin); ok {
+ check.builtin(x, e, bin, iota)
+
+ } else {
+ check.invalidOp(x.pos(), "cannot call non-function %s", x)
+ goto Error
+ }
+
+ case *ast.StarExpr:
+ check.exprOrType(x, e.X, iota, true)
+ switch x.mode {
+ case invalid:
+ goto Error
+ case typexpr:
+ x.typ = &Pointer{Base: x.typ}
+ default:
+ if typ, ok := underlying(x.typ).(*Pointer); ok {
+ x.mode = variable
+ x.typ = typ.Base
+ } else {
+ check.invalidOp(x.pos(), "cannot indirect %s", x)
+ goto Error
+ }
+ }
+
+ case *ast.UnaryExpr:
+ check.expr(x, e.X, nil, iota)
+ if x.mode == invalid {
+ goto Error
+ }
+ check.unary(x, e.Op)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ case *ast.BinaryExpr:
+ check.binary(x, e.X, e.Y, e.Op, iota)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ case *ast.KeyValueExpr:
+ // key:value expressions are handled in composite literals
+ check.invalidAST(e.Pos(), "no key:value expected")
+ goto Error
+
+ case *ast.ArrayType:
+ if e.Len != nil {
+ check.expr(x, e.Len, nil, iota)
+ if x.mode == invalid {
+ goto Error
+ }
+ if x.mode != constant {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "array length %s must be constant", x)
+ }
+ goto Error
+ }
+ n, ok := x.val.(int64)
+ if !ok || n < 0 {
+ check.errorf(x.pos(), "invalid array length %s", x)
+ goto Error
+ }
+ x.typ = &Array{Len: n, Elt: check.typ(e.Elt, cycleOk)}
+ } else {
+ x.typ = &Slice{Elt: check.typ(e.Elt, true)}
+ }
+ x.mode = typexpr
+
+ case *ast.StructType:
+ x.mode = typexpr
+ x.typ = &Struct{Fields: check.collectFields(e.Fields, cycleOk)}
+
+ case *ast.FuncType:
+ params, isVariadic := check.collectParams(e.Params, true)
+ results, _ := check.collectParams(e.Results, false)
+ x.mode = typexpr
+ x.typ = &Signature{Recv: nil, Params: params, Results: results, IsVariadic: isVariadic}
+
+ case *ast.InterfaceType:
+ x.mode = typexpr
+ x.typ = &Interface{Methods: check.collectMethods(e.Methods)}
+
+ case *ast.MapType:
+ x.mode = typexpr
+ x.typ = &Map{Key: check.typ(e.Key, true), Elt: check.typ(e.Value, true)}
+
+ case *ast.ChanType:
+ x.mode = typexpr
+ x.typ = &Chan{Dir: e.Dir, Elt: check.typ(e.Value, true)}
+
+ default:
+ if debug {
+ check.dump("expr = %v (%T)", e, e)
+ }
+ unreachable()
+ }
+
+ // everything went well
+ x.expr = e
+ return
+
+Error:
+ x.mode = invalid
+ x.expr = e
+}
+
+// exprOrType is like rawExpr but reports an error if e doesn't represent a value or type.
+func (check *checker) exprOrType(x *operand, e ast.Expr, iota int, cycleOk bool) {
+ check.rawExpr(x, e, nil, iota, cycleOk)
+ if x.mode == novalue {
+ check.errorf(x.pos(), "%s used as value or type", x)
+ x.mode = invalid
+ }
+}
+
+// expr is like rawExpr but reports an error if e doesn't represent a value.
+func (check *checker) expr(x *operand, e ast.Expr, hint Type, iota int) {
+ check.rawExpr(x, e, hint, iota, false)
+ switch x.mode {
+ case novalue:
+ check.errorf(x.pos(), "%s used as value", x)
+ x.mode = invalid
+ case typexpr:
+ check.errorf(x.pos(), "%s is not an expression", x)
+ x.mode = invalid
+ }
+}
+
+func (check *checker) rawTyp(e ast.Expr, cycleOk, nilOk bool) Type {
+ var x operand
+ check.rawExpr(&x, e, nil, -1, cycleOk)
+ switch x.mode {
+ case invalid:
+ // ignore - error reported before
+ case novalue:
+ check.errorf(x.pos(), "%s used as type", &x)
+ case typexpr:
+ return x.typ
+ case constant:
+ if nilOk && x.isNil() {
+ return nil
+ }
+ fallthrough
+ default:
+ check.errorf(x.pos(), "%s is not a type", &x)
+ }
+ return Typ[Invalid]
+}
+
+// typOrNil is like rawExpr but reports an error if e doesn't represent a type or the predeclared value nil.
+// It returns e's type, nil, or Typ[Invalid] if an error occurred.
+//
+func (check *checker) typOrNil(e ast.Expr, cycleOk bool) Type {
+ return check.rawTyp(e, cycleOk, true)
+}
+
+// typ is like rawExpr but reports an error if e doesn't represent a type.
+// It returns e's type, or Typ[Invalid] if an error occurred.
+//
+func (check *checker) typ(e ast.Expr, cycleOk bool) Type {
+ return check.rawTyp(e, cycleOk, false)
+}
diff --git a/src/pkg/go/types/gcimporter.go b/src/pkg/go/types/gcimporter.go
new file mode 100644
index 000000000..7f968eb8d
--- /dev/null
+++ b/src/pkg/go/types/gcimporter.go
@@ -0,0 +1,950 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements an Importer for gc-generated object files.
+
+package types
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/token"
+ "io"
+ "math/big"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/scanner"
+)
+
+var pkgExts = [...]string{".a", ".5", ".6", ".8"}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context).
+// If no file was found, an empty filename is returned.
+//
+func FindPkg(path, srcDir string) (filename, id string) {
+ if len(path) == 0 {
+ return
+ }
+
+ id = path
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ return
+ }
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+ return
+ }
+ }
+
+ filename = "" // not found
+ return
+}
+
+// GcImportData imports a package by reading the gc-generated export data,
+// adds the corresponding package object to the imports map indexed by id,
+// and returns the object.
+//
+// The imports map must contain all packages already imported. The data
+// reader position must be the beginning of the export data section. The
+// filename is only used in error messages.
+//
+// If imports[id] contains the completely imported package, that package
+// can be used directly, and there is no need to call this function (but
+// there is also no harm but for extra time used).
+//
+func GcImportData(imports map[string]*Package, filename, id string, data *bufio.Reader) (pkg *Package, err error) {
+ // support for gcParser error handling
+ defer func() {
+ if r := recover(); r != nil {
+ err = r.(importError) // will re-panic if r is not an importError
+ }
+ }()
+
+ var p gcParser
+ p.init(filename, id, data, imports)
+ pkg = p.parseExport()
+
+ return
+}
+
+// GcImport imports a gc-generated package given its import path, adds the
+// corresponding package object to the imports map, and returns the object.
+// Local import paths are interpreted relative to the current working directory.
+// The imports map must contain all packages already imported.
+// GcImport satisfies the ast.Importer signature.
+//
+func GcImport(imports map[string]*Package, path string) (pkg *Package, err error) {
+ if path == "unsafe" {
+ return Unsafe, nil
+ }
+
+ srcDir := "."
+ if build.IsLocalImport(path) {
+ srcDir, err = os.Getwd()
+ if err != nil {
+ return
+ }
+ }
+
+ filename, id := FindPkg(path, srcDir)
+ if filename == "" {
+ err = errors.New("can't find import: " + id)
+ return
+ }
+
+ // no need to re-import if the package was imported completely before
+ if pkg = imports[id]; pkg != nil && pkg.Complete {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return
+ }
+ defer func() {
+ f.Close()
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("reading export data: %s: %v", filename, err)
+ }
+ }()
+
+ buf := bufio.NewReader(f)
+ if err = FindGcExportData(buf); err != nil {
+ return
+ }
+
+ pkg, err = GcImportData(imports, filename, id, buf)
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// gcParser
+
+// gcParser parses the exports inside a gc compiler-produced
+// object/archive file and populates its scope with the results.
+type gcParser struct {
+ scanner scanner.Scanner
+ tok rune // current token
+ lit string // literal string; only valid for Ident, Int, String tokens
+ id string // package id of imported package
+ imports map[string]*Package // package id -> package object
+}
+
+func (p *gcParser) init(filename, id string, src io.Reader, imports map[string]*Package) {
+ p.scanner.Init(src)
+ p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
+ p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+ p.scanner.Whitespace = 1<<'\t' | 1<<' '
+ p.scanner.Filename = filename // for good error messages
+ p.next()
+ p.id = id
+ p.imports = imports
+ // leave for debugging
+ if false {
+ // check consistency of imports map
+ for _, pkg := range imports {
+ if pkg.Name == "" {
+ fmt.Printf("no package name for %s\n", pkg.Path)
+ }
+ }
+ }
+}
+
+func (p *gcParser) next() {
+ p.tok = p.scanner.Scan()
+ switch p.tok {
+ case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·':
+ p.lit = p.scanner.TokenText()
+ default:
+ p.lit = ""
+ }
+ // leave for debugging
+ if false {
+ fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
+ }
+}
+
+func declConst(pkg *Package, name string) *Const {
+ // the constant may have been imported before - if it exists
+ // already in the respective scope, return that constant
+ scope := pkg.Scope
+ if obj := scope.Lookup(name); obj != nil {
+ return obj.(*Const)
+ }
+ // otherwise create a new constant and insert it into the scope
+ obj := &Const{Pkg: pkg, Name: name}
+ scope.Insert(obj)
+ return obj
+}
+
+func declTypeName(pkg *Package, name string) *TypeName {
+ scope := pkg.Scope
+ if obj := scope.Lookup(name); obj != nil {
+ return obj.(*TypeName)
+ }
+ obj := &TypeName{Pkg: pkg, Name: name}
+ // a named type may be referred to before the underlying type
+ // is known - set it up
+ obj.Type = &NamedType{Obj: obj}
+ scope.Insert(obj)
+ return obj
+}
+
+func declVar(pkg *Package, name string) *Var {
+ scope := pkg.Scope
+ if obj := scope.Lookup(name); obj != nil {
+ return obj.(*Var)
+ }
+ obj := &Var{Pkg: pkg, Name: name}
+ scope.Insert(obj)
+ return obj
+}
+
+func declFunc(pkg *Package, name string) *Func {
+ scope := pkg.Scope
+ if obj := scope.Lookup(name); obj != nil {
+ return obj.(*Func)
+ }
+ obj := &Func{Pkg: pkg, Name: name}
+ scope.Insert(obj)
+ return obj
+}
+
+// ----------------------------------------------------------------------------
+// Error handling
+
+// Internal errors are boxed as importErrors.
+type importError struct {
+ pos scanner.Position
+ err error
+}
+
+func (e importError) Error() string {
+ return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
+}
+
+func (p *gcParser) error(err interface{}) {
+ if s, ok := err.(string); ok {
+ err = errors.New(s)
+ }
+ // panic with a runtime.Error if err is not an error
+ panic(importError{p.scanner.Pos(), err.(error)})
+}
+
+func (p *gcParser) errorf(format string, args ...interface{}) {
+ p.error(fmt.Sprintf(format, args...))
+}
+
+func (p *gcParser) expect(tok rune) string {
+ lit := p.lit
+ if p.tok != tok {
+ p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
+ }
+ p.next()
+ return lit
+}
+
+func (p *gcParser) expectSpecial(tok string) {
+ sep := 'x' // not white space
+ i := 0
+ for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' {
+ sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+ p.next()
+ i++
+ }
+ if i < len(tok) {
+ p.errorf("expected %q, got %q", tok, tok[0:i])
+ }
+}
+
+func (p *gcParser) expectKeyword(keyword string) {
+ lit := p.expect(scanner.Ident)
+ if lit != keyword {
+ p.errorf("expected keyword %s, got %q", keyword, lit)
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Qualified and unqualified names
+
+// PackageId = string_lit .
+//
+func (p *gcParser) parsePackageId() string {
+ id, err := strconv.Unquote(p.expect(scanner.String))
+ if err != nil {
+ p.error(err)
+ }
+ // id == "" stands for the imported package id
+ // (only known at time of package installation)
+ if id == "" {
+ id = p.id
+ }
+ return id
+}
+
+// PackageName = ident .
+//
+func (p *gcParser) parsePackageName() string {
+ return p.expect(scanner.Ident)
+}
+
+// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
+func (p *gcParser) parseDotIdent() string {
+ ident := ""
+ if p.tok != scanner.Int {
+ sep := 'x' // not white space
+ for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
+ ident += p.lit
+ sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+ p.next()
+ }
+ }
+ if ident == "" {
+ p.expect(scanner.Ident) // use expect() for error handling
+ }
+ return ident
+}
+
+// QualifiedName = "@" PackageId "." dotIdentifier .
+//
+func (p *gcParser) parseQualifiedName() (id, name string) {
+ p.expect('@')
+ id = p.parsePackageId()
+ p.expect('.')
+ name = p.parseDotIdent()
+ return
+}
+
+// getPkg returns the package for a given id. If the package is
+// not found but we have a package name, create the package and
+// add it to the p.imports map.
+//
+func (p *gcParser) getPkg(id, name string) *Package {
+ // package unsafe is not in the imports map - handle explicitly
+ if id == "unsafe" {
+ return Unsafe
+ }
+ pkg := p.imports[id]
+ if pkg == nil && name != "" {
+ pkg = &Package{Name: name, Path: id, Scope: new(Scope)}
+ p.imports[id] = pkg
+ }
+ return pkg
+}
+
+// parseExportedName is like parseQualifiedName, but
+// the package id is resolved to an imported *Package.
+//
+func (p *gcParser) parseExportedName() (pkg *Package, name string) {
+ id, name := p.parseQualifiedName()
+ pkg = p.getPkg(id, "")
+ if pkg == nil {
+ p.errorf("%s package not found", id)
+ }
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+// BasicType = identifier .
+//
+func (p *gcParser) parseBasicType() Type {
+ id := p.expect(scanner.Ident)
+ obj := Universe.Lookup(id)
+ if obj, ok := obj.(*TypeName); ok {
+ return obj.Type
+ }
+ p.errorf("not a basic type: %s", id)
+ return nil
+}
+
+// ArrayType = "[" int_lit "]" Type .
+//
+func (p *gcParser) parseArrayType() Type {
+ // "[" already consumed and lookahead known not to be "]"
+ lit := p.expect(scanner.Int)
+ p.expect(']')
+ elt := p.parseType()
+ n, err := strconv.ParseInt(lit, 10, 64)
+ if err != nil {
+ p.error(err)
+ }
+ return &Array{Len: n, Elt: elt}
+}
+
+// MapType = "map" "[" Type "]" Type .
+//
+func (p *gcParser) parseMapType() Type {
+ p.expectKeyword("map")
+ p.expect('[')
+ key := p.parseType()
+ p.expect(']')
+ elt := p.parseType()
+ return &Map{Key: key, Elt: elt}
+}
+
+// Name = identifier | "?" | QualifiedName .
+//
+// If materializePkg is set, a package is returned for fully qualified names.
+// That package may be a fake package (without name, scope, and not in the
+// p.imports map), created for the sole purpose of providing a package path
+// for QualifiedNames. Fake packages are created when the package id is not
+// found in the p.imports map; we cannot create a real package in that case
+// because we don't have a package name.
+//
+// TODO(gri): consider changing QualifiedIdents to (path, name) pairs to
+// simplify this code.
+//
+func (p *gcParser) parseName(materializePkg bool) (pkg *Package, name string) {
+ switch p.tok {
+ case scanner.Ident:
+ name = p.lit
+ p.next()
+ case '?':
+ // anonymous
+ p.next()
+ case '@':
+ // exported name prefixed with package path
+ var id string
+ id, name = p.parseQualifiedName()
+ if materializePkg {
+ // we don't have a package name - if the package
+ // doesn't exist yet, create a fake package instead
+ pkg = p.getPkg(id, "")
+ if pkg == nil {
+ pkg = &Package{Path: id}
+ }
+ }
+ default:
+ p.error("name expected")
+ }
+ return
+}
+
+// Field = Name Type [ string_lit ] .
+//
+func (p *gcParser) parseField() *Field {
+ var f Field
+ f.Pkg, f.Name = p.parseName(true)
+ f.Type = p.parseType()
+ if p.tok == scanner.String {
+ f.Tag = p.expect(scanner.String)
+ }
+ if f.Name == "" {
+ // anonymous field - typ must be T or *T and T must be a type name
+ if typ, ok := deref(f.Type).(*NamedType); ok && typ.Obj != nil {
+ f.Name = typ.Obj.GetName()
+ f.IsAnonymous = true
+ } else {
+ p.errorf("anonymous field expected")
+ }
+ }
+ return &f
+}
+
+// StructType = "struct" "{" [ FieldList ] "}" .
+// FieldList = Field { ";" Field } .
+//
+func (p *gcParser) parseStructType() Type {
+ var fields []*Field
+
+ p.expectKeyword("struct")
+ p.expect('{')
+ for p.tok != '}' {
+ if len(fields) > 0 {
+ p.expect(';')
+ }
+ fields = append(fields, p.parseField())
+ }
+ p.expect('}')
+
+ return &Struct{Fields: fields}
+}
+
+// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
+//
+func (p *gcParser) parseParameter() (par *Var, isVariadic bool) {
+ _, name := p.parseName(false)
+ if name == "" {
+ name = "_" // cannot access unnamed identifiers
+ }
+ if p.tok == '.' {
+ p.expectSpecial("...")
+ isVariadic = true
+ }
+ typ := p.parseType()
+ // ignore argument tag (e.g. "noescape")
+ if p.tok == scanner.String {
+ p.next()
+ }
+ par = &Var{Name: name, Type: typ} // Pkg == nil
+ return
+}
+
+// Parameters = "(" [ ParameterList ] ")" .
+// ParameterList = { Parameter "," } Parameter .
+//
+func (p *gcParser) parseParameters() (list []*Var, isVariadic bool) {
+ p.expect('(')
+ for p.tok != ')' {
+ if len(list) > 0 {
+ p.expect(',')
+ }
+ par, variadic := p.parseParameter()
+ list = append(list, par)
+ if variadic {
+ if isVariadic {
+ p.error("... not on final argument")
+ }
+ isVariadic = true
+ }
+ }
+ p.expect(')')
+
+ return
+}
+
+// Signature = Parameters [ Result ] .
+// Result = Type | Parameters .
+//
+func (p *gcParser) parseSignature() *Signature {
+ params, isVariadic := p.parseParameters()
+
+ // optional result type
+ var results []*Var
+ if p.tok == '(' {
+ var variadic bool
+ results, variadic = p.parseParameters()
+ if variadic {
+ p.error("... not permitted on result type")
+ }
+ }
+
+ return &Signature{Params: params, Results: results, IsVariadic: isVariadic}
+}
+
// InterfaceType = "interface" "{" [ MethodList ] "}" .
// MethodList    = Method { ";" Method } .
// Method        = Name Signature .
//
// The methods of embedded interfaces are always "inlined"
// by the compiler and thus embedded interfaces are never
// visible in the export data.
//
// parseInterfaceType parses an interface type; methods are separated
// by ';' and may carry a package qualifier for unexported names.
func (p *gcParser) parseInterfaceType() Type {
	var methods []*Method

	p.expectKeyword("interface")
	p.expect('{')
	for p.tok != '}' {
		if len(methods) > 0 {
			p.expect(';')
		}
		pkg, name := p.parseName(true)
		typ := p.parseSignature()
		methods = append(methods, &Method{QualifiedName{pkg, name}, typ})
	}
	p.expect('}')

	return &Interface{Methods: methods}
}
+
+// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
+//
+func (p *gcParser) parseChanType() Type {
+ dir := ast.SEND | ast.RECV
+ if p.tok == scanner.Ident {
+ p.expectKeyword("chan")
+ if p.tok == '<' {
+ p.expectSpecial("<-")
+ dir = ast.SEND
+ }
+ } else {
+ p.expectSpecial("<-")
+ p.expectKeyword("chan")
+ dir = ast.RECV
+ }
+ elt := p.parseType()
+ return &Chan{Dir: dir, Elt: elt}
+}
+
// Type =
//	BasicType | TypeName | ArrayType | SliceType | StructType |
//      PointerType | FuncType | InterfaceType | MapType | ChanType |
//      "(" Type ")" .
//
// BasicType   = ident .
// TypeName    = ExportedName .
// SliceType   = "[" "]" Type .
// PointerType = "*" Type .
// FuncType    = "func" Signature .
//
// parseType dispatches on the first token of a type: identifiers are
// keywords or basic types, '@' starts a qualified type name, and the
// remaining cases are determined by their leading punctuation.
func (p *gcParser) parseType() Type {
	switch p.tok {
	case scanner.Ident:
		switch p.lit {
		default:
			return p.parseBasicType()
		case "struct":
			return p.parseStructType()
		case "func":
			// FuncType
			p.next()
			return p.parseSignature()
		case "interface":
			return p.parseInterfaceType()
		case "map":
			return p.parseMapType()
		case "chan":
			return p.parseChanType()
		}
	case '@':
		// TypeName
		pkg, name := p.parseExportedName()
		return declTypeName(pkg, name).Type
	case '[':
		p.next() // look ahead
		if p.tok == ']' {
			// SliceType
			p.next()
			return &Slice{Elt: p.parseType()}
		}
		// '[' followed by a length: ArrayType
		return p.parseArrayType()
	case '*':
		// PointerType
		p.next()
		return &Pointer{Base: p.parseType()}
	case '<':
		return p.parseChanType()
	case '(':
		// "(" Type ")"
		p.next()
		typ := p.parseType()
		p.expect(')')
		return typ
	}
	p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
	return nil
}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// ImportDecl = "import" PackageName PackageId .
+//
+func (p *gcParser) parseImportDecl() {
+ p.expectKeyword("import")
+ name := p.parsePackageName()
+ p.getPkg(p.parsePackageId(), name)
+}
+
+// int_lit = [ "+" | "-" ] { "0" ... "9" } .
+//
+func (p *gcParser) parseInt() (neg bool, val string) {
+ switch p.tok {
+ case '-':
+ neg = true
+ fallthrough
+ case '+':
+ p.next()
+ }
+ val = p.expect(scanner.Int)
+ return
+}
+
// number = int_lit [ "p" int_lit ] .
//
// parseNumber parses a numeric constant: a plain integer yields an
// untyped int; a mantissa with a base-2 exponent ("p" notation, as
// written by gc) yields an untyped float.
func (p *gcParser) parseNumber() (x operand) {
	x.mode = constant

	// mantissa
	neg, val := p.parseInt()
	mant, ok := new(big.Int).SetString(val, 0)
	assert(ok)
	if neg {
		mant.Neg(mant)
	}

	if p.lit == "p" {
		// exponent (base 2)
		p.next()
		neg, val = p.parseInt()
		exp64, err := strconv.ParseUint(val, 10, 0)
		if err != nil {
			p.error(err)
		}
		exp := uint(exp64)
		if neg {
			// negative exponent: value is mant / 2**exp
			denom := big.NewInt(1)
			denom.Lsh(denom, exp)
			x.typ = Typ[UntypedFloat]
			x.val = normalizeRatConst(new(big.Rat).SetFrac(mant, denom))
			return
		}
		// non-negative exponent: value is mant * 2**exp, still a float
		if exp > 0 {
			mant.Lsh(mant, exp)
		}
		x.typ = Typ[UntypedFloat]
		x.val = normalizeIntConst(mant)
		return
	}

	// no exponent: untyped integer constant
	x.typ = Typ[UntypedInt]
	x.val = normalizeIntConst(mant)
	return
}
+
// ConstDecl   = "const" ExportedName [ Type ] "=" Literal .
// Literal     = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit .
// bool_lit    = "true" | "false" .
// complex_lit = "(" float_lit "+" float_lit "i" ")" .
// rune_lit    = "(" int_lit "+" int_lit ")" .
// string_lit  = `"` { unicode_char } `"` .
//
// parseConstDecl parses a constant declaration and records the
// constant's (possibly inferred) type and value on the declared object.
func (p *gcParser) parseConstDecl() {
	p.expectKeyword("const")
	pkg, name := p.parseExportedName()
	obj := declConst(pkg, name)
	var x operand
	// an explicit type precedes '='; otherwise the type is inferred
	// from the literal below
	if p.tok != '=' {
		obj.Type = p.parseType()
	}
	p.expect('=')
	switch p.tok {
	case scanner.Ident:
		// bool_lit
		if p.lit != "true" && p.lit != "false" {
			p.error("expected true or false")
		}
		x.typ = Typ[UntypedBool]
		x.val = p.lit == "true"
		p.next()

	case '-', scanner.Int:
		// int_lit
		x = p.parseNumber()

	case '(':
		// complex_lit or rune_lit
		p.next()
		if p.tok == scanner.Char {
			// rune_lit: the char part is discarded; the numeric
			// value follows after '+'
			p.next()
			p.expect('+')
			x = p.parseNumber()
			x.typ = Typ[UntypedRune]
			p.expect(')')
			break
		}
		re := p.parseNumber()
		p.expect('+')
		im := p.parseNumber()
		p.expectKeyword("i")
		p.expect(')')
		x.typ = Typ[UntypedComplex]
		// TODO(gri) fix this
		_, _ = re, im
		x.val = zeroConst

	case scanner.Char:
		// rune_lit
		x.setConst(token.CHAR, p.lit)
		p.next()

	case scanner.String:
		// string_lit
		x.setConst(token.STRING, p.lit)
		p.next()

	default:
		p.errorf("expected literal got %s", scanner.TokenString(p.tok))
	}
	if obj.Type == nil {
		obj.Type = x.typ
	}
	assert(x.val != nil)
	obj.Val = x.val
}
+
// TypeDecl = "type" ExportedName Type .
//
// parseTypeDecl parses a type declaration and associates the parsed
// underlying type with the declared type name (unless already set).
func (p *gcParser) parseTypeDecl() {
	p.expectKeyword("type")
	pkg, name := p.parseExportedName()
	obj := declTypeName(pkg, name)

	// The type object may have been imported before and thus already
	// have a type associated with it. We still need to parse the type
	// structure, but throw it away if the object already has a type.
	// This ensures that all imports refer to the same type object for
	// a given type declaration.
	typ := p.parseType()

	if name := obj.Type.(*NamedType); name.Underlying == nil {
		name.Underlying = typ
	}
}
+
+// VarDecl = "var" ExportedName Type .
+//
+func (p *gcParser) parseVarDecl() {
+ p.expectKeyword("var")
+ pkg, name := p.parseExportedName()
+ obj := declVar(pkg, name)
+ obj.Type = p.parseType()
+}
+
// Func = Signature [ Body ] .
// Body = "{" ... "}" .
//
// parseFunc parses a function signature and skips over an (optional)
// inlined function body; the body content itself is not interpreted.
func (p *gcParser) parseFunc() *Signature {
	sig := p.parseSignature()
	if p.tok == '{' {
		p.next()
		// i tracks the brace nesting depth; the opening '{' is depth 1.
		// The loop consumes tokens until the matching '}' is reached.
		for i := 1; i > 0; p.next() {
			switch p.tok {
			case '{':
				i++
			case '}':
				i--
			}
		}
	}
	return sig
}
+
// MethodDecl = "func" Receiver Name Func .
// Receiver   = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
//
// parseMethodDecl parses a method declaration and attaches the method
// to the receiver's named base type (skipping duplicates from repeated
// imports).
func (p *gcParser) parseMethodDecl() {
	// "func" already consumed
	p.expect('(')
	recv, _ := p.parseParameter() // receiver
	p.expect(')')

	// determine receiver base type object
	typ := recv.Type
	if ptr, ok := typ.(*Pointer); ok {
		typ = ptr.Base
	}
	base := typ.(*NamedType)

	// parse method name, signature, and possibly inlined body
	pkg, name := p.parseName(true) // unexported method names in imports are qualified with their package.
	sig := p.parseFunc()
	sig.Recv = recv

	// add method to type unless type was imported before
	// and method exists already
	// TODO(gri) investigate if this can be avoided
	for _, m := range base.Methods {
		if m.Name == name {
			return // method was added before
		}
	}
	base.Methods = append(base.Methods, &Method{QualifiedName{pkg, name}, sig})
}
+
+// FuncDecl = "func" ExportedName Func .
+//
+func (p *gcParser) parseFuncDecl() {
+ // "func" already consumed
+ pkg, name := p.parseExportedName()
+ typ := p.parseFunc()
+ declFunc(pkg, name).Type = typ
+}
+
// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
//
// parseDecl dispatches on the leading keyword. A method declaration is
// distinguished from a function declaration by looking ahead for the
// '(' of the receiver.
func (p *gcParser) parseDecl() {
	switch p.lit {
	case "import":
		p.parseImportDecl()
	case "const":
		p.parseConstDecl()
	case "type":
		p.parseTypeDecl()
	case "var":
		p.parseVarDecl()
	case "func":
		p.next() // look ahead
		if p.tok == '(' {
			p.parseMethodDecl()
		} else {
			p.parseFuncDecl()
		}
	}
	p.expect('\n')
}
+
+// ----------------------------------------------------------------------------
+// Export
+
// Export        = PackageClause { Decl } "$$" .
// PackageClause = "package" PackageName [ "safe" ] "\n" .
//
// parseExport parses the complete export data for one package and
// returns the package, marked complete on success.
func (p *gcParser) parseExport() *Package {
	p.expectKeyword("package")
	name := p.parsePackageName()
	if p.tok != '\n' {
		// A package is safe if it was compiled with the -u flag,
		// which disables the unsafe package.
		// TODO(gri) remember "safe" package
		p.expectKeyword("safe")
	}
	p.expect('\n')

	pkg := p.getPkg(p.id, name)

	// declarations run until the closing "$$" marker
	for p.tok != '$' && p.tok != scanner.EOF {
		p.parseDecl()
	}

	if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
		// don't call next()/expect() since reading past the
		// export data may cause scanner errors (e.g. NUL chars)
		p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
	}

	if n := p.scanner.ErrorCount; n != 0 {
		p.errorf("expected no scanner errors, got %d", n)
	}

	// package was imported completely and without errors
	pkg.Complete = true

	return pkg
}
diff --git a/src/pkg/go/types/gcimporter_test.go b/src/pkg/go/types/gcimporter_test.go
new file mode 100644
index 000000000..b793eb4cb
--- /dev/null
+++ b/src/pkg/go/types/gcimporter_test.go
@@ -0,0 +1,180 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/ast"
+ "go/build"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+)
+
var gcPath string // Go compiler path

// init determines the gc compiler binary path for the host
// architecture. For an unknown GOARCH, gcPath is set to a
// non-existent path so that TestGcImport skips.
func init() {
	// determine compiler
	var gc string
	switch runtime.GOARCH {
	case "386":
		gc = "8g"
	case "amd64":
		gc = "6g"
	case "arm":
		gc = "5g"
	default:
		gcPath = "unknown-GOARCH-compiler"
		return
	}
	gcPath = filepath.Join(build.ToolDir, gc)
}
+
// compile runs the gc compiler on dirname/filename and returns the
// path of the produced object file (e.g. "x.go" -> "x.6" on amd64).
// It fails the test on any compiler error.
func compile(t *testing.T, dirname, filename string) string {
	cmd := exec.Command(gcPath, filename)
	cmd.Dir = dirname
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Logf("%s", out)
		t.Fatalf("%s %s failed: %s", gcPath, filename, err)
	}
	archCh, _ := build.ArchChar(runtime.GOARCH)
	// filename should end with ".go"
	return filepath.Join(dirname, filename[:len(filename)-2]+archCh)
}
+
+// Use the same global imports map for all tests. The effect is
+// as if all tested packages were imported into a single package.
+var imports = make(map[string]*Package)
+
+func testPath(t *testing.T, path string) bool {
+ t0 := time.Now()
+ _, err := GcImport(imports, path)
+ if err != nil {
+ t.Errorf("testPath(%s): %s", path, err)
+ return false
+ }
+ t.Logf("testPath(%s): %v", path, time.Since(t0))
+ return true
+}
+
// maxTime bounds the total time spent importing installed packages.
const maxTime = 30 * time.Second

// testDir recursively imports all compiled packages found under
// GOROOT/pkg/GOOS_GOARCH/dir, stopping when endTime is reached, and
// returns the number of packages imported.
func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
	dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir)
	list, err := ioutil.ReadDir(dirname)
	if err != nil {
		t.Fatalf("testDir(%s): %s", dirname, err)
	}
	for _, f := range list {
		if time.Now().After(endTime) {
			t.Log("testing time used up")
			return
		}
		switch {
		case !f.IsDir():
			// try extensions
			for _, ext := range pkgExts {
				if strings.HasSuffix(f.Name(), ext) {
					name := f.Name()[0 : len(f.Name())-len(ext)] // remove extension
					if testPath(t, filepath.Join(dir, name)) {
						nimports++
					}
				}
			}
		case f.IsDir():
			// recurse into subdirectories
			nimports += testDir(t, filepath.Join(dir, f.Name()), endTime)
		}
	}
	return
}
+
// TestGcImport compiles and imports a local test package and then
// imports as many installed packages as fit into the time budget.
func TestGcImport(t *testing.T) {
	// On cross-compile builds, the path will not exist.
	// Need to use GOHOSTOS, which is not available.
	if _, err := os.Stat(gcPath); err != nil {
		t.Skipf("skipping test: %v", err)
	}

	// compile the local fixture and remove the object file afterwards
	if outFn := compile(t, "testdata", "exports.go"); outFn != "" {
		defer os.Remove(outFn)
	}

	nimports := 0
	if testPath(t, "./testdata/exports") {
		nimports++
	}
	nimports += testDir(t, "", time.Now().Add(maxTime)) // installed packages
	t.Logf("tested %d imports", nimports)
}
+
// importedObjectTests lists dotted names ("pkg.Obj") together with the
// expected object kind and the string form of the underlying type.
var importedObjectTests = []struct {
	name string
	kind ast.ObjKind
	typ  string
}{
	{"unsafe.Pointer", ast.Typ, "Pointer"},
	{"math.Pi", ast.Con, "untyped float"},
	{"io.Reader", ast.Typ, "interface{Read(p []byte) (n int, err error)}"},
	{"io.ReadWriter", ast.Typ, "interface{Read(p []byte) (n int, err error); Write(p []byte) (n int, err error)}"},
	{"math.Sin", ast.Fun, "func(x·2 float64) (_ float64)"},
	// TODO(gri) add more tests
}
+
+func TestGcImportedTypes(t *testing.T) {
+ // This package does not yet know how to read gccgo export data.
+ if runtime.Compiler == "gccgo" {
+ return
+ }
+ for _, test := range importedObjectTests {
+ s := strings.Split(test.name, ".")
+ if len(s) != 2 {
+ t.Fatal("inconsistent test data")
+ }
+ importPath := s[0]
+ objName := s[1]
+
+ pkg, err := GcImport(imports, importPath)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ obj := pkg.Scope.Lookup(objName)
+
+ // TODO(gri) should define an accessor on Object
+ var kind ast.ObjKind
+ var typ Type
+ switch obj := obj.(type) {
+ case *Const:
+ kind = ast.Con
+ typ = obj.Type
+ case *TypeName:
+ kind = ast.Typ
+ typ = obj.Type
+ case *Var:
+ kind = ast.Var
+ typ = obj.Type
+ case *Func:
+ kind = ast.Fun
+ typ = obj.Type
+ default:
+ unreachable()
+ }
+
+ if kind != test.kind {
+ t.Errorf("%s: got kind = %q; want %q", test.name, kind, test.kind)
+ }
+
+ str := typeString(underlying(typ))
+ if str != test.typ {
+ t.Errorf("%s: got type = %q; want %q", test.name, typ, test.typ)
+ }
+ }
+}
diff --git a/src/pkg/go/types/objects.go b/src/pkg/go/types/objects.go
new file mode 100644
index 000000000..02291d34c
--- /dev/null
+++ b/src/pkg/go/types/objects.go
@@ -0,0 +1,186 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+)
+
// An Object describes a named language entity such as a package,
// constant, type, variable, function (incl. methods), or label.
// All objects implement the Object interface.
//
// The Get prefix (unusual for Go accessors) avoids collisions with
// the identically named fields of the implementing types.
type Object interface {
	GetPkg() *Package
	GetName() string
	GetType() Type
	GetPos() token.Pos

	// anObject is unexported so only types in this package implement Object.
	anObject()
}
+
// A Package represents the contents (objects) of a Go package.
type Package struct {
	Name     string
	Path     string              // import path, "" for current (non-imported) package
	Scope    *Scope              // package-level scope
	Imports  map[string]*Package // map of import paths to imported packages
	Complete bool                // if set, this package was imported completely

	spec *ast.ImportSpec // import spec that introduced this package, if any
}
+
// A Const represents a declared constant.
type Const struct {
	Pkg  *Package
	Name string
	Type Type
	Val  interface{} // constant value; representation depends on Type

	spec *ast.ValueSpec // declaring spec, if any
}
+
// A TypeName represents a declared type.
type TypeName struct {
	Pkg  *Package
	Name string
	Type Type // *NamedType or *Basic

	spec *ast.TypeSpec // declaring spec, if any
}
+
// A Var represents a declared variable (including function parameters and results).
type Var struct {
	Pkg  *Package // nil for parameters
	Name string
	Type Type

	visited bool        // for initialization cycle detection
	decl    interface{} // declaring node: *ast.Field, *ast.ValueSpec, or *ast.AssignStmt
}
+
// A Func represents a declared function.
type Func struct {
	Pkg  *Package
	Name string
	Type Type // *Signature or *Builtin

	decl *ast.FuncDecl // declaring node, if any
}
+
// Object interface accessors. A Package acts as its own package and
// has no meaningful type, hence GetPkg returns the receiver and
// GetType returns Typ[Invalid].

func (obj *Package) GetPkg() *Package  { return obj }
func (obj *Const) GetPkg() *Package    { return obj.Pkg }
func (obj *TypeName) GetPkg() *Package { return obj.Pkg }
func (obj *Var) GetPkg() *Package      { return obj.Pkg }
func (obj *Func) GetPkg() *Package     { return obj.Pkg }

func (obj *Package) GetName() string  { return obj.Name }
func (obj *Const) GetName() string    { return obj.Name }
func (obj *TypeName) GetName() string { return obj.Name }
func (obj *Var) GetName() string      { return obj.Name }
func (obj *Func) GetName() string     { return obj.Name }

func (obj *Package) GetType() Type  { return Typ[Invalid] }
func (obj *Const) GetType() Type    { return obj.Type }
func (obj *TypeName) GetType() Type { return obj.Type }
func (obj *Var) GetType() Type      { return obj.Type }
func (obj *Func) GetType() Type     { return obj.Type }
+
// GetPos implementations return the source position of the declaring
// identifier, or token.NoPos if the object was not created from a
// source declaration (e.g. imported objects).

func (obj *Package) GetPos() token.Pos {
	if obj.spec != nil {
		return obj.spec.Pos()
	}
	return token.NoPos
}

// GetPos finds the name matching obj.Name in the declaring ValueSpec,
// since one spec may declare several constants.
func (obj *Const) GetPos() token.Pos {
	for _, n := range obj.spec.Names {
		if n.Name == obj.Name {
			return n.Pos()
		}
	}
	return token.NoPos
}
func (obj *TypeName) GetPos() token.Pos {
	if obj.spec != nil {
		return obj.spec.Pos()
	}
	return token.NoPos
}

// GetPos handles the three ways a Var can be declared: as a field
// (parameter/result), in a var spec, or by a short variable declaration.
func (obj *Var) GetPos() token.Pos {
	switch d := obj.decl.(type) {
	case *ast.Field:
		for _, n := range d.Names {
			if n.Name == obj.Name {
				return n.Pos()
			}
		}
	case *ast.ValueSpec:
		for _, n := range d.Names {
			if n.Name == obj.Name {
				return n.Pos()
			}
		}
	case *ast.AssignStmt:
		for _, x := range d.Lhs {
			if ident, isIdent := x.(*ast.Ident); isIdent && ident.Name == obj.Name {
				return ident.Pos()
			}
		}
	}
	return token.NoPos
}
func (obj *Func) GetPos() token.Pos {
	if obj.decl != nil && obj.decl.Name != nil {
		return obj.decl.Name.Pos()
	}
	return token.NoPos
}
+
// anObject marker implementations restrict Object to types in this package.
func (*Package) anObject()  {}
func (*Const) anObject()    {}
func (*TypeName) anObject() {}
func (*Var) anObject()      {}
func (*Func) anObject()     {}
+
// newObj returns a new Object for a given *ast.Object.
// It does not canonicalize them (it always returns a new one).
// For canonicalization, see check.lookup.
//
// It returns nil for ast.Bad objects; ast.Pkg and ast.Lbl kinds are
// not expected here and trigger unreachable().
//
// TODO(gri) Once we do identifier resolution completely in
// in the typechecker, this functionality can go.
//
func newObj(pkg *Package, astObj *ast.Object) Object {
	assert(pkg != nil)
	name := astObj.Name
	typ, _ := astObj.Type.(Type)
	switch astObj.Kind {
	case ast.Bad:
		// ignore
	case ast.Pkg:
		unreachable()
	case ast.Con:
		return &Const{Pkg: pkg, Name: name, Type: typ, Val: astObj.Data, spec: astObj.Decl.(*ast.ValueSpec)}
	case ast.Typ:
		return &TypeName{Pkg: pkg, Name: name, Type: typ, spec: astObj.Decl.(*ast.TypeSpec)}
	case ast.Var:
		switch astObj.Decl.(type) {
		case *ast.Field: // function parameters
		case *ast.ValueSpec: // proper variable declarations
		case *ast.AssignStmt: // short variable declarations
		default:
			unreachable() // everything else is not ok
		}
		return &Var{Pkg: pkg, Name: name, Type: typ, decl: astObj.Decl}
	case ast.Fun:
		return &Func{Pkg: pkg, Name: name, Type: typ, decl: astObj.Decl.(*ast.FuncDecl)}
	case ast.Lbl:
		unreachable() // for now
	}
	unreachable()
	return nil
}
diff --git a/src/pkg/go/types/operand.go b/src/pkg/go/types/operand.go
new file mode 100644
index 000000000..982ffef8d
--- /dev/null
+++ b/src/pkg/go/types/operand.go
@@ -0,0 +1,411 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines operands and associated operations.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+)
+
// An operandMode specifies the (addressing) mode of an operand.
type operandMode int

const (
	invalid  operandMode = iota // operand is invalid (due to an earlier error) - ignore
	novalue                     // operand represents no value (result of a function call w/o result)
	typexpr                     // operand is a type
	constant                    // operand is a constant; the operand's typ is a Basic type
	variable                    // operand is an addressable variable
	value                       // operand is a computed value
	valueok                     // like mode == value, but operand may be used in a comma,ok expression
)

// operandModeString maps an operandMode to a human-readable string
// for use in diagnostics (see operand.String).
var operandModeString = [...]string{
	invalid:  "invalid",
	novalue:  "no value",
	typexpr:  "type",
	constant: "constant",
	variable: "variable",
	value:    "value",
	valueok:  "value,ok",
}
+
// An operand represents an intermediate value during type checking.
// Operands have an (addressing) mode, the expression evaluating to
// the operand, the operand's type, and for constants a constant value.
//
type operand struct {
	mode operandMode
	expr ast.Expr    // expression that produced the operand; may be nil
	typ  Type        // operand type
	val  interface{} // constant value if mode == constant; nil otherwise
}
+
+// pos returns the position of the expression corresponding to x.
+// If x is invalid the position is token.NoPos.
+//
+func (x *operand) pos() token.Pos {
+ // x.expr may not be set if x is invalid
+ if x.expr == nil {
+ return token.NoPos
+ }
+ return x.expr.Pos()
+}
+
// String returns a human-readable description of x of the form
// "expr (mode value of type typ)", omitting parts that do not apply.
func (x *operand) String() string {
	if x.mode == invalid {
		return "invalid operand"
	}
	var buf bytes.Buffer
	if x.expr != nil {
		buf.WriteString(exprString(x.expr))
		buf.WriteString(" (")
	}
	buf.WriteString(operandModeString[x.mode])
	if x.mode == constant {
		// quote string constants so empty/whitespace values are visible
		format := " %v"
		if isString(x.typ) {
			format = " %q"
		}
		fmt.Fprintf(&buf, format, x.val)
	}
	// untyped constants carry their type in the constant description already
	if x.mode != novalue && (x.mode != constant || !isUntyped(x.typ)) {
		fmt.Fprintf(&buf, " of type %s", typeString(x.typ))
	}
	if x.expr != nil {
		buf.WriteByte(')')
	}
	return buf.String()
}
+
// setConst sets x to the untyped constant for literal lit.
// If lit cannot be converted (make*Const returned nil), x is left
// in invalid mode.
func (x *operand) setConst(tok token.Token, lit string) {
	x.mode = invalid

	var kind BasicKind
	var val interface{}
	switch tok {
	case token.INT:
		kind = UntypedInt
		val = makeIntConst(lit)

	case token.FLOAT:
		kind = UntypedFloat
		val = makeFloatConst(lit)

	case token.IMAG:
		kind = UntypedComplex
		val = makeComplexConst(lit)

	case token.CHAR:
		kind = UntypedRune
		val = makeRuneConst(lit)

	case token.STRING:
		kind = UntypedString
		val = makeStringConst(lit)
	}

	if val != nil {
		x.mode = constant
		x.typ = Typ[kind]
		x.val = val
	}
}
+
+// isNil reports whether x is the predeclared nil constant.
+func (x *operand) isNil() bool {
+ return x.mode == constant && x.val == nilConst
+}
+
// TODO(gri) The functions operand.isAssignable, checker.convertUntyped,
//           checker.isRepresentable, and checker.assignOperand are
//           overlapping in functionality. Need to simplify and clean up.

// isAssignable reports whether x is assignable to a variable of type T,
// following the assignability rules of the Go spec (identity, identical
// underlying types, interface satisfaction, channel assignability, nil,
// and untyped-constant representability).
func (x *operand) isAssignable(ctxt *Context, T Type) bool {
	if x.mode == invalid || T == Typ[Invalid] {
		return true // avoid spurious errors
	}

	V := x.typ

	// x's type is identical to T
	if IsIdentical(V, T) {
		return true
	}

	Vu := underlying(V)
	Tu := underlying(T)

	// x's type V and T have identical underlying types
	// and at least one of V or T is not a named type
	if IsIdentical(Vu, Tu) {
		return !isNamed(V) || !isNamed(T)
	}

	// T is an interface type and x implements T
	if Ti, ok := Tu.(*Interface); ok {
		if m, _ := missingMethod(x.typ, Ti); m == nil {
			return true
		}
	}

	// x is a bidirectional channel value, T is a channel
	// type, x's type V and T have identical element types,
	// and at least one of V or T is not a named type
	if Vc, ok := Vu.(*Chan); ok && Vc.Dir == ast.SEND|ast.RECV {
		if Tc, ok := Tu.(*Chan); ok && IsIdentical(Vc.Elt, Tc.Elt) {
			return !isNamed(V) || !isNamed(T)
		}
	}

	// x is the predeclared identifier nil and T is a pointer,
	// function, slice, map, channel, or interface type
	if x.isNil() {
		switch t := Tu.(type) {
		case *Basic:
			// unsafe.Pointer also accepts nil
			if t.Kind == UnsafePointer {
				return true
			}
		case *Pointer, *Signature, *Slice, *Map, *Chan, *Interface:
			return true
		}
		return false
	}

	// x is an untyped constant representable by a value of type T
	// TODO(gri) This is borrowing from checker.convertUntyped and
	//           checker.isRepresentable. Need to clean up.
	if isUntyped(Vu) {
		switch t := Tu.(type) {
		case *Basic:
			if x.mode == constant {
				return isRepresentableConst(x.val, ctxt, t.Kind)
			}
			// The result of a comparison is an untyped boolean,
			// but may not be a constant.
			if Vb, _ := Vu.(*Basic); Vb != nil {
				return Vb.Kind == UntypedBool && isBoolean(Tu)
			}
		case *Interface:
			return x.isNil() || len(t.Methods) == 0
		case *Pointer, *Signature, *Slice, *Map, *Chan:
			return x.isNil()
		}
	}

	return false
}
+
+// isInteger reports whether x is a (typed or untyped) integer value.
+func (x *operand) isInteger(ctxt *Context) bool {
+ return x.mode == invalid ||
+ isInteger(x.typ) ||
+ x.mode == constant && isRepresentableConst(x.val, ctxt, UntypedInt)
+}
+
// lookupResult represents the result of a struct field/method lookup.
type lookupResult struct {
	mode  operandMode // variable for fields, value for methods, invalid if not found
	typ   Type
	index []int // field index sequence; nil for methods
}

// embeddedType is a work item for the breadth-first field/method search.
type embeddedType struct {
	typ       *NamedType
	index     []int // field index sequence
	multiples bool  // if set, typ is embedded multiple times at the same level
}
+
// lookupFieldBreadthFirst searches all types in list for a single entry (field
// or method) of the given name from the given package. If such a field is found,
// the result describes the field mode and type; otherwise the result mode is invalid.
// (This function is similar in structure to FieldByNameFunc in reflect/type.go)
//
// The search proceeds level by level: a unique match at a shallower
// embedding depth wins; two matches at the same depth annihilate each
// other (name collision) and the lookup fails.
//
func lookupFieldBreadthFirst(list []embeddedType, name QualifiedName) (res lookupResult) {
	// visited records the types that have been searched already.
	visited := make(map[*NamedType]bool)

	// embedded types of the next lower level
	var next []embeddedType

	// potentialMatch is invoked every time a match is found.
	potentialMatch := func(multiples bool, mode operandMode, typ Type) bool {
		if multiples || res.mode != invalid {
			// name appeared already at this level - annihilate
			res.mode = invalid
			return false
		}
		// first appearance of name
		res.mode = mode
		res.typ = typ
		res.index = nil
		return true
	}

	// Search the current level if there is any work to do and collect
	// embedded types of the next lower level in the next list.
	for len(list) > 0 {
		// The res.mode indicates whether we have found a match already
		// on this level (mode != invalid), or not (mode == invalid).
		assert(res.mode == invalid)

		// start with empty next list (don't waste underlying array)
		next = next[:0]

		// look for name in all types at this level
		for _, e := range list {
			typ := e.typ
			if visited[typ] {
				continue
			}
			visited[typ] = true

			// look for a matching attached method
			for _, m := range typ.Methods {
				if name.IsSame(m.QualifiedName) {
					assert(m.Type != nil)
					if !potentialMatch(e.multiples, value, m.Type) {
						return // name collision
					}
				}
			}

			switch t := typ.Underlying.(type) {
			case *Struct:
				// look for a matching field and collect embedded types
				for i, f := range t.Fields {
					if name.IsSame(f.QualifiedName) {
						assert(f.Type != nil)
						if !potentialMatch(e.multiples, variable, f.Type) {
							return // name collision
						}
						var index []int
						index = append(index, e.index...) // copy e.index
						index = append(index, i)
						res.index = index
						continue
					}
					// Collect embedded struct fields for searching the next
					// lower level, but only if we have not seen a match yet
					// (if we have a match it is either the desired field or
					// we have a name collision on the same level; in either
					// case we don't need to look further).
					// Embedded fields are always of the form T or *T where
					// T is a named type. If typ appeared multiple times at
					// this level, f.Type appears multiple times at the next
					// level.
					if f.IsAnonymous && res.mode == invalid {
						// Ignore embedded basic types - only user-defined
						// named types can have methods or have struct fields.
						if t, _ := deref(f.Type).(*NamedType); t != nil {
							var index []int
							index = append(index, e.index...) // copy e.index
							index = append(index, i)
							next = append(next, embeddedType{t, index, e.multiples})
						}
					}
				}

			case *Interface:
				// look for a matching method
				for _, m := range t.Methods {
					if name.IsSame(m.QualifiedName) {
						assert(m.Type != nil)
						if !potentialMatch(e.multiples, value, m.Type) {
							return // name collision
						}
					}
				}
			}
		}

		if res.mode != invalid {
			// we found a single match on this level
			return
		}

		// No match and no collision so far.
		// Compute the list to search for the next level.
		list = list[:0] // don't waste underlying array
		for _, e := range next {
			// Instead of adding the same type multiple times, look for
			// it in the list and mark it as multiple if it was added
			// before.
			// We use a sequential search (instead of a map for next)
			// because the lists tend to be small, can easily be reused,
			// and explicit search appears to be faster in this case.
			if alt := findType(list, e.typ); alt != nil {
				alt.multiples = true
			} else {
				list = append(list, e)
			}
		}

	}

	return
}
+
+func findType(list []embeddedType, typ *NamedType) *embeddedType {
+ for i := range list {
+ if p := &list[i]; p.typ == typ {
+ return p
+ }
+ }
+ return nil
+}
+
// lookupField looks up the field or method name in typ. Directly
// declared methods and fields take precedence; embedded fields are
// searched breadth-first via lookupFieldBreadthFirst. The result mode
// is invalid if nothing was found.
func lookupField(typ Type, name QualifiedName) lookupResult {
	typ = deref(typ)

	if t, ok := typ.(*NamedType); ok {
		// methods declared directly on the named type win
		for _, m := range t.Methods {
			if name.IsSame(m.QualifiedName) {
				assert(m.Type != nil)
				return lookupResult{value, m.Type, nil}
			}
		}
		typ = t.Underlying
	}

	switch t := typ.(type) {
	case *Struct:
		var next []embeddedType
		for i, f := range t.Fields {
			if name.IsSame(f.QualifiedName) {
				return lookupResult{variable, f.Type, []int{i}}
			}
			if f.IsAnonymous {
				// Possible optimization: If the embedded type
				// is a pointer to the current type we could
				// ignore it.
				// Ignore embedded basic types - only user-defined
				// named types can have methods or have struct fields.
				if t, _ := deref(f.Type).(*NamedType); t != nil {
					next = append(next, embeddedType{t, []int{i}, false})
				}
			}
		}
		if len(next) > 0 {
			return lookupFieldBreadthFirst(next, name)
		}

	case *Interface:
		for _, m := range t.Methods {
			if name.IsSame(m.QualifiedName) {
				return lookupResult{value, m.Type, nil}
			}
		}
	}

	// not found
	return lookupResult{mode: invalid}
}
diff --git a/src/pkg/go/types/predicates.go b/src/pkg/go/types/predicates.go
new file mode 100644
index 000000000..a99c91a4e
--- /dev/null
+++ b/src/pkg/go/types/predicates.go
@@ -0,0 +1,303 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements commonly used type predicates.
+
+package types
+
+func isNamed(typ Type) bool {
+ if _, ok := typ.(*Basic); ok {
+ return ok
+ }
+ _, ok := typ.(*NamedType)
+ return ok
+}
+
+func isBoolean(typ Type) bool {
+ t, ok := underlying(typ).(*Basic)
+ return ok && t.Info&IsBoolean != 0
+}
+
+func isInteger(typ Type) bool {
+ t, ok := underlying(typ).(*Basic)
+ return ok && t.Info&IsInteger != 0
+}
+
+func isUnsigned(typ Type) bool {
+ t, ok := underlying(typ).(*Basic)
+ return ok && t.Info&IsUnsigned != 0
+}
+
+func isFloat(typ Type) bool {
+ t, ok := underlying(typ).(*Basic)
+ return ok && t.Info&IsFloat != 0
+}
+
+func isComplex(typ Type) bool {
+ t, ok := underlying(typ).(*Basic)
+ return ok && t.Info&IsComplex != 0
+}
+
+func isNumeric(typ Type) bool {
+ t, ok := underlying(typ).(*Basic)
+ return ok && t.Info&IsNumeric != 0
+}
+
+func isString(typ Type) bool {
+ t, ok := underlying(typ).(*Basic)
+ return ok && t.Info&IsString != 0
+}
+
+func isUntyped(typ Type) bool {
+ t, ok := underlying(typ).(*Basic)
+ return ok && t.Info&IsUntyped != 0
+}
+
+func isOrdered(typ Type) bool {
+ t, ok := underlying(typ).(*Basic)
+ return ok && t.Info&IsOrdered != 0
+}
+
+func isConstType(typ Type) bool {
+ t, ok := underlying(typ).(*Basic)
+ return ok && t.Info&IsConstType != 0
+}
+
+func isComparable(typ Type) bool {
+ switch t := underlying(typ).(type) {
+ case *Basic:
+ return t.Kind != Invalid && t.Kind != UntypedNil
+ case *Pointer, *Interface, *Chan:
+ // assumes types are equal for pointers and channels
+ return true
+ case *Struct:
+ for _, f := range t.Fields {
+ if !isComparable(f.Type) {
+ return false
+ }
+ }
+ return true
+ case *Array:
+ return isComparable(t.Elt)
+ }
+ return false
+}
+
+func hasNil(typ Type) bool {
+ switch underlying(typ).(type) {
+ case *Slice, *Pointer, *Signature, *Interface, *Map, *Chan:
+ return true
+ }
+ return false
+}
+
// IsIdentical returns true if x and y are identical,
// following the type identity rules of the Go spec.
func IsIdentical(x, y Type) bool {
	if x == y {
		return true
	}

	switch x := x.(type) {
	case *Basic:
		// Basic types are singletons except for the rune and byte
		// aliases, thus we cannot solely rely on the x == y check
		// above.
		if y, ok := y.(*Basic); ok {
			return x.Kind == y.Kind
		}

	case *Array:
		// Two array types are identical if they have identical element types
		// and the same array length.
		if y, ok := y.(*Array); ok {
			return x.Len == y.Len && IsIdentical(x.Elt, y.Elt)
		}

	case *Slice:
		// Two slice types are identical if they have identical element types.
		if y, ok := y.(*Slice); ok {
			return IsIdentical(x.Elt, y.Elt)
		}

	case *Struct:
		// Two struct types are identical if they have the same sequence of fields,
		// and if corresponding fields have the same names, and identical types,
		// and identical tags. Two anonymous fields are considered to have the same
		// name. Lower-case field names from different packages are always different.
		if y, ok := y.(*Struct); ok {
			if len(x.Fields) == len(y.Fields) {
				for i, f := range x.Fields {
					g := y.Fields[i]
					if !f.QualifiedName.IsSame(g.QualifiedName) ||
						!IsIdentical(f.Type, g.Type) ||
						f.Tag != g.Tag ||
						f.IsAnonymous != g.IsAnonymous {
						return false
					}
				}
				return true
			}
		}

	case *Pointer:
		// Two pointer types are identical if they have identical base types.
		if y, ok := y.(*Pointer); ok {
			return IsIdentical(x.Base, y.Base)
		}

	case *Signature:
		// Two function types are identical if they have the same number of parameters
		// and result values, corresponding parameter and result types are identical,
		// and either both functions are variadic or neither is. Parameter and result
		// names are not required to match.
		if y, ok := y.(*Signature); ok {
			return identicalTypes(x.Params, y.Params) &&
				identicalTypes(x.Results, y.Results) &&
				x.IsVariadic == y.IsVariadic
		}

	case *Interface:
		// Two interface types are identical if they have the same set of methods with
		// the same names and identical function types. Lower-case method names from
		// different packages are always different. The order of the methods is irrelevant.
		if y, ok := y.(*Interface); ok {
			return identicalMethods(x.Methods, y.Methods) // methods are sorted
		}

	case *Map:
		// Two map types are identical if they have identical key and value types.
		if y, ok := y.(*Map); ok {
			return IsIdentical(x.Key, y.Key) && IsIdentical(x.Elt, y.Elt)
		}

	case *Chan:
		// Two channel types are identical if they have identical value types
		// and the same direction.
		if y, ok := y.(*Chan); ok {
			return x.Dir == y.Dir && IsIdentical(x.Elt, y.Elt)
		}

	case *NamedType:
		// Two named types are identical if their type names originate
		// in the same type declaration.
		if y, ok := y.(*NamedType); ok {
			return x.Obj == y.Obj
		}
	}

	// mismatched type structure (or an unknown Type implementation)
	return false
}
+
+// identicalTypes returns true if both lists a and b have the
+// same length and corresponding objects have identical types.
+func identicalTypes(a, b []*Var) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, x := range a {
+ y := b[i]
+ if !IsIdentical(x.Type, y.Type) {
+ return false
+ }
+ }
+ return true
+}
+
+// identicalMethods returns true if both lists a and b have the
+// same length and corresponding methods have identical types.
+// TODO(gri) make this more efficient
+func identicalMethods(a, b []*Method) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ m := make(map[QualifiedName]*Method)
+ for _, x := range a {
+ assert(m[x.QualifiedName] == nil) // method list must not have duplicate entries
+ m[x.QualifiedName] = x
+ }
+ for _, y := range b {
+ if x := m[y.QualifiedName]; x == nil || !IsIdentical(x.Type, y.Type) {
+ return false
+ }
+ }
+ return true
+}
+
+// underlying returns the underlying type of typ.
+func underlying(typ Type) Type {
+ // Basic types are representing themselves directly even though they are named.
+ if typ, ok := typ.(*NamedType); ok {
+ return typ.Underlying // underlying types are never NamedTypes
+ }
+ return typ
+}
+
+// deref returns a pointer's base type; otherwise it returns typ.
+func deref(typ Type) Type {
+ if typ, ok := underlying(typ).(*Pointer); ok {
+ return typ.Base
+ }
+ return typ
+}
+
+// defaultType returns the default "typed" type for an "untyped" type;
+// it returns the incoming type for all other types. If there is no
+// corresponding untyped type, the result is Typ[Invalid].
+//
+func defaultType(typ Type) Type {
+ if t, ok := typ.(*Basic); ok {
+ k := Invalid
+ switch t.Kind {
+ // case UntypedNil:
+ // There is no default type for nil. For a good error message,
+ // catch this case before calling this function.
+ case UntypedBool:
+ k = Bool
+ case UntypedInt:
+ k = Int
+ case UntypedRune:
+ k = Rune
+ case UntypedFloat:
+ k = Float64
+ case UntypedComplex:
+ k = Complex128
+ case UntypedString:
+ k = String
+ }
+ typ = Typ[k]
+ }
+ return typ
+}
+
// missingMethod returns (nil, false) if typ implements T, otherwise
// it returns the first missing method required by T and whether it
// is missing or simply has the wrong type.
//
func missingMethod(typ Type, T *Interface) (method *Method, wrongType bool) {
	// TODO(gri): this needs to correctly compare method names (taking package into account)
	// TODO(gri): distinguish pointer and non-pointer receivers
	// an interface type implements T if it has no methods with conflicting signatures
	// Note: This is stronger than the current spec. Should the spec require this?
	if ityp, _ := underlying(typ).(*Interface); ityp != nil {
		for _, m := range T.Methods {
			res := lookupField(ityp, m.QualifiedName) // TODO(gri) no need to go via lookupField
			if res.mode != invalid && !IsIdentical(res.typ, m.Type) {
				// method present but with a different signature
				return m, true
			}
		}
		return
	}

	// a concrete type implements T if it implements all methods of T.
	for _, m := range T.Methods {
		res := lookupField(typ, m.QualifiedName)
		if res.mode == invalid {
			// method not found at all
			return m, false
		}
		if !IsIdentical(res.typ, m.Type) {
			// method found, but with the wrong signature
			return m, true
		}
	}
	return
}
diff --git a/src/pkg/go/types/resolve.go b/src/pkg/go/types/resolve.go
new file mode 100644
index 000000000..43db60708
--- /dev/null
+++ b/src/pkg/go/types/resolve.go
@@ -0,0 +1,197 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "strconv"
+)
+
// declareObj declares obj in scope and reports redeclaration errors.
// If altScope != nil it is additionally consulted for a conflicting
// declaration (used for dot-imports, where file-scope objects must not
// conflict with package-scope declarations). A valid dotImport position
// marks obj as originating from a dot-import, which swaps the error
// message direction.
func (check *checker) declareObj(scope, altScope *Scope, obj Object, dotImport token.Pos) {
	alt := scope.Insert(obj)
	if alt == nil && altScope != nil {
		// see if there is a conflicting declaration in altScope
		alt = altScope.Lookup(obj.GetName())
	}
	if alt != nil {
		prevDecl := ""

		// for dot-imports, local declarations are declared first - swap messages
		if dotImport.IsValid() {
			if pos := alt.GetPos(); pos.IsValid() {
				check.errorf(pos, fmt.Sprintf("%s redeclared in this block by dot-import at %s",
					obj.GetName(), check.fset.Position(dotImport)))
				return
			}

			// alt carries no position information; report at the dot-import instead
			check.errorf(dotImport, fmt.Sprintf("dot-import redeclares %s", obj.GetName()))
			return
		}

		if pos := alt.GetPos(); pos.IsValid() {
			prevDecl = fmt.Sprintf("\n\tother declaration at %s", check.fset.Position(pos))
		}
		check.errorf(obj.GetPos(), fmt.Sprintf("%s redeclared in this block%s", obj.GetName(), prevDecl))
	}
}
+
+func (check *checker) resolveIdent(scope *Scope, ident *ast.Ident) bool {
+ for ; scope != nil; scope = scope.Outer {
+ if obj := scope.Lookup(ident.Name); obj != nil {
+ check.register(ident, obj)
+ return true
+ }
+ }
+ return false
+}
+
// resolve builds the package scope from all files, collects method
// declarations for later processing, constructs per-file scopes from
// the files' imports, and resolves all unresolved identifiers. Files
// with a mismatched package name are dropped from check.files.
func (check *checker) resolve(importer Importer) (methods []*ast.FuncDecl) {
	pkg := &Package{Scope: &Scope{Outer: Universe}, Imports: make(map[string]*Package)}
	check.pkg = pkg

	// complete package scope
	i := 0
	for _, file := range check.files {
		// package names must match
		switch name := file.Name.Name; {
		case pkg.Name == "":
			// first file determines the package name
			pkg.Name = name
		case name != pkg.Name:
			check.errorf(file.Package, "package %s; expected %s", name, pkg.Name)
			continue // ignore this file
		}

		// keep this file
		check.files[i] = file
		i++

		// the package identifier denotes the current package
		check.register(file.Name, pkg)

		// insert top-level file objects in package scope
		// (the parser took care of declaration errors)
		for _, decl := range file.Decls {
			switch d := decl.(type) {
			case *ast.BadDecl:
				// ignore
			case *ast.GenDecl:
				if d.Tok == token.CONST {
					check.assocInitvals(d)
				}
				for _, spec := range d.Specs {
					switch s := spec.(type) {
					case *ast.ImportSpec:
						// handled separately below
					case *ast.ValueSpec:
						for _, name := range s.Names {
							if name.Name == "_" {
								continue
							}
							pkg.Scope.Insert(check.lookup(name))
						}
					case *ast.TypeSpec:
						if s.Name.Name == "_" {
							continue
						}
						pkg.Scope.Insert(check.lookup(s.Name))
					default:
						check.invalidAST(s.Pos(), "unknown ast.Spec node %T", s)
					}
				}
			case *ast.FuncDecl:
				if d.Recv != nil {
					// collect method
					methods = append(methods, d)
					continue
				}
				if d.Name.Name == "_" || d.Name.Name == "init" {
					continue // blank (_) and init functions are inaccessible
				}
				pkg.Scope.Insert(check.lookup(d.Name))
			default:
				check.invalidAST(d.Pos(), "unknown ast.Decl node %T", d)
			}
		}
	}
	check.files = check.files[0:i]

	// complete file scopes with imports and resolve identifiers
	for _, file := range check.files {
		// build file scope by processing all imports
		importErrors := false
		fileScope := &Scope{Outer: pkg.Scope}
		for _, spec := range file.Imports {
			if importer == nil {
				importErrors = true
				continue
			}
			path, _ := strconv.Unquote(spec.Path.Value)
			imp, err := importer(pkg.Imports, path)
			if err != nil {
				check.errorf(spec.Path.Pos(), "could not import %s (%s)", path, err)
				importErrors = true
				continue
			}
			// TODO(gri) If a local package name != "." is provided,
			// global identifier resolution could proceed even if the
			// import failed. Consider adjusting the logic here a bit.

			// local name overrides imported package name
			name := imp.Name
			if spec.Name != nil {
				name = spec.Name.Name
			}

			// add import to file scope
			if name == "." {
				// merge imported scope with file scope
				for _, obj := range imp.Scope.Entries {
					// gcimported package scopes contain non-exported
					// objects such as types used in partially exported
					// objects - do not accept them
					if ast.IsExported(obj.GetName()) {
						check.declareObj(fileScope, pkg.Scope, obj, spec.Pos())
					}
				}
				// TODO(gri) consider registering the "." identifier
				// if we have Context.Ident callbacks for say blank
				// (_) identifiers
				// check.register(spec.Name, pkg)
			} else if name != "_" {
				// declare imported package object in file scope
				// (do not re-use imp in the file scope but create
				// a new object instead; the Decl field is different
				// for different files)
				obj := &Package{Name: name, Scope: imp.Scope, spec: spec}
				check.declareObj(fileScope, pkg.Scope, obj, token.NoPos)
			}
		}

		// resolve identifiers
		if importErrors {
			// don't use the universe scope without correct imports
			// (objects in the universe may be shadowed by imports;
			// with missing imports, identifiers might get resolved
			// incorrectly to universe objects)
			pkg.Scope.Outer = nil
		}
		i := 0
		for _, ident := range file.Unresolved {
			if !check.resolveIdent(fileScope, ident) {
				check.errorf(ident.Pos(), "undeclared name: %s", ident.Name)
				// keep still-unresolved identifiers in file.Unresolved
				file.Unresolved[i] = ident
				i++
			}

		}
		file.Unresolved = file.Unresolved[0:i]
		pkg.Scope.Outer = Universe // reset outer scope (is nil if there were importErrors)
	}

	return
}
diff --git a/src/pkg/go/types/resolver_test.go b/src/pkg/go/types/resolver_test.go
new file mode 100644
index 000000000..d4e364451
--- /dev/null
+++ b/src/pkg/go/types/resolver_test.go
@@ -0,0 +1,167 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "testing"
+)
+
// sources holds the files of a single test package p used by
// TestResolveQualifiedIdents; they exercise plain, renamed, and
// dot-imports as well as methods, fields, and composite literal keys.
var sources = []string{
	`
	package p
	import "fmt"
	import "math"
	const pi = math.Pi
	func sin(x float64) float64 {
		return math.Sin(x)
	}
	var Println = fmt.Println
	`,
	`
	package p
	import "fmt"
	func f() string {
		_ = "foo"
		return fmt.Sprintf("%d", g())
	}
	func g() (x int) { return }
	`,
	`
	package p
	import . "go/parser"
	import "sync"
	func g() Mode { return ImportsOnly }
	var _, x int = 1, 2
	func init() {}
	type T struct{ sync.Mutex; a, b, c int}
	type I interface{ m() }
	var _ = T{a: 1, b: 2, c: 3}
	func (_ T) m() {}
	`,
}

// pkgnames lists the regularly (non dot-) imported packages that are
// expected to appear in pkg.Imports after checking.
var pkgnames = []string{
	"fmt",
	"math",
}
+
// TestResolveQualifiedIdents type-checks the files in sources and
// verifies that imports, qualified identifiers, and selectors are all
// resolved via the Context.Ident callback.
func TestResolveQualifiedIdents(t *testing.T) {
	// parse package files
	fset := token.NewFileSet()
	var files []*ast.File
	for _, src := range sources {
		f, err := parser.ParseFile(fset, "", src, parser.DeclarationErrors)
		if err != nil {
			t.Fatal(err)
		}
		files = append(files, f)
	}

	// resolve and type-check package AST
	idents := make(map[*ast.Ident]Object)
	var ctxt Context
	ctxt.Ident = func(id *ast.Ident, obj Object) { idents[id] = obj }
	pkg, err := ctxt.Check(fset, files)
	if err != nil {
		t.Fatal(err)
	}

	// check that all packages were imported
	for _, name := range pkgnames {
		if pkg.Imports[name] == nil {
			t.Errorf("package %s not imported", name)
		}
	}

	// check that there are no top-level unresolved identifiers
	for _, f := range files {
		for _, x := range f.Unresolved {
			t.Errorf("%s: unresolved global identifier %s", fset.Position(x.Pos()), x.Name)
		}
	}

	// check that qualified identifiers are resolved
	for _, f := range files {
		ast.Inspect(f, func(n ast.Node) bool {
			if s, ok := n.(*ast.SelectorExpr); ok {
				if x, ok := s.X.(*ast.Ident); ok {
					obj := idents[x]
					if obj == nil {
						t.Errorf("%s: unresolved qualified identifier %s", fset.Position(x.Pos()), x.Name)
						return false
					}
					if _, ok := obj.(*Package); ok && idents[s.Sel] == nil {
						t.Errorf("%s: unresolved selector %s", fset.Position(s.Sel.Pos()), s.Sel.Name)
						return false
					}
					return false
				}
				return false
			}
			return true
		})
	}

	// Currently, the Check API doesn't call Ident for fields, methods, and composite literal keys.
	// Introduce them artificially so that we can run the check below.
	for _, f := range files {
		ast.Inspect(f, func(n ast.Node) bool {
			switch x := n.(type) {
			case *ast.StructType:
				for _, list := range x.Fields.List {
					for _, f := range list.Names {
						assert(idents[f] == nil)
						idents[f] = &Var{Pkg: pkg, Name: f.Name}
					}
				}
			case *ast.InterfaceType:
				for _, list := range x.Methods.List {
					for _, f := range list.Names {
						assert(idents[f] == nil)
						idents[f] = &Func{Pkg: pkg, Name: f.Name}
					}
				}
			case *ast.CompositeLit:
				for _, e := range x.Elts {
					if kv, ok := e.(*ast.KeyValueExpr); ok {
						if k, ok := kv.Key.(*ast.Ident); ok {
							assert(idents[k] == nil)
							idents[k] = &Var{Pkg: pkg, Name: k.Name}
						}
					}
				}
			}
			return true
		})
	}

	// check that each identifier in the source is enumerated by the Context.Ident callback
	for _, f := range files {
		ast.Inspect(f, func(n ast.Node) bool {
			if x, ok := n.(*ast.Ident); ok && x.Name != "_" && x.Name != "." {
				obj := idents[x]
				if obj == nil {
					t.Errorf("%s: unresolved identifier %s", fset.Position(x.Pos()), x.Name)
				} else {
					delete(idents, x)
				}
				return false
			}
			return true
		})
	}

	// TODO(gri) enable code below
	// At the moment, the type checker introduces artificial identifiers which are not
	// present in the source. Once it doesn't do that anymore, enable the checks below.
	/*
		for x := range idents {
			t.Errorf("%s: identifier %s not present in source", fset.Position(x.Pos()), x.Name)
		}
	*/
}
diff --git a/src/pkg/go/types/scope.go b/src/pkg/go/types/scope.go
new file mode 100644
index 000000000..463ee40c5
--- /dev/null
+++ b/src/pkg/go/types/scope.go
@@ -0,0 +1,78 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+)
+
// A Scope maintains the set of named language entities declared
// in the scope and a link to the immediately surrounding (outer)
// scope.
//
type Scope struct {
	Outer   *Scope            // surrounding scope, or nil for the outermost scope
	Entries []Object          // scope entries in insertion order
	large   map[string]Object // for fast lookup - only used for larger scopes
}
+
+// Lookup returns the object with the given name if it is
+// found in scope s, otherwise it returns nil. Outer scopes
+// are ignored.
+//
+func (s *Scope) Lookup(name string) Object {
+ if s.large != nil {
+ return s.large[name]
+ }
+ for _, obj := range s.Entries {
+ if obj.GetName() == name {
+ return obj
+ }
+ }
+ return nil
+}
+
+// Insert attempts to insert an object obj into scope s.
+// If s already contains an object with the same name,
+// Insert leaves s unchanged and returns that object.
+// Otherwise it inserts obj and returns nil.
+//
+func (s *Scope) Insert(obj Object) Object {
+ name := obj.GetName()
+ if alt := s.Lookup(name); alt != nil {
+ return alt
+ }
+ s.Entries = append(s.Entries, obj)
+
+ // If the scope size reaches a threshold, use a map for faster lookups.
+ const threshold = 20
+ if len(s.Entries) > threshold {
+ if s.large == nil {
+ m := make(map[string]Object, len(s.Entries))
+ for _, obj := range s.Entries {
+ m[obj.GetName()] = obj
+ }
+ s.large = m
+ }
+ s.large[name] = obj
+ }
+
+ return nil
+}
+
+// Debugging support
+func (s *Scope) String() string {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "scope %p {", s)
+ if s != nil && len(s.Entries) > 0 {
+ fmt.Fprintln(&buf)
+ for _, obj := range s.Entries {
+ fmt.Fprintf(&buf, "\t%s\t%T\n", obj.GetName(), obj)
+ }
+ }
+ fmt.Fprintf(&buf, "}\n")
+ return buf.String()
+}
diff --git a/src/pkg/go/types/sizes.go b/src/pkg/go/types/sizes.go
new file mode 100644
index 000000000..ef6499ba4
--- /dev/null
+++ b/src/pkg/go/types/sizes.go
@@ -0,0 +1,162 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements support for (unsafe) Alignof, Offsetof, and Sizeof.
+
+package types
+
+func (ctxt *Context) alignof(typ Type) int64 {
+ if f := ctxt.Alignof; f != nil {
+ if a := f(typ); a >= 1 {
+ return a
+ }
+ panic("Context.Alignof returned an alignment < 1")
+ }
+ return DefaultAlignof(typ)
+}
+
// offsetsof returns the field offsets of struct s, computing and
// caching them in s.offsets on first use. A user-installed
// Context.Offsetsof function is used (and sanity-checked) if present;
// otherwise DefaultOffsetsof applies.
func (ctxt *Context) offsetsof(s *Struct) []int64 {
	offsets := s.offsets
	if offsets == nil {
		// compute offsets on demand
		if f := ctxt.Offsetsof; f != nil {
			offsets = f(s.Fields)
			// sanity checks
			if len(offsets) != len(s.Fields) {
				panic("Context.Offsetsof returned the wrong number of offsets")
			}
			for _, o := range offsets {
				if o < 0 {
					panic("Context.Offsetsof returned an offset < 0")
				}
			}
		} else {
			offsets = DefaultOffsetsof(s.Fields)
		}
		s.offsets = offsets
	}
	return offsets
}
+
+// offsetof returns the offset of the field specified via
+// the index sequence relative to typ. It returns a value
+// < 0 if the field is in an embedded pointer type.
+func (ctxt *Context) offsetof(typ Type, index []int) int64 {
+ var o int64
+ for _, i := range index {
+ s, _ := underlying(typ).(*Struct)
+ if s == nil {
+ return -1
+ }
+ o += ctxt.offsetsof(s)[i]
+ typ = s.Fields[i].Type
+ }
+ return o
+}
+
+func (ctxt *Context) sizeof(typ Type) int64 {
+ if f := ctxt.Sizeof; f != nil {
+ if s := f(typ); s >= 0 {
+ return s
+ }
+ panic("Context.Sizeof returned a size < 0")
+ }
+ return DefaultSizeof(typ)
+}
+
+// DefaultMaxAlign is the default maximum alignment, in bytes,
+// used by DefaultAlignof.
+const DefaultMaxAlign = 8
+
+// DefaultAlignof implements the default alignment computation
+// for unsafe.Alignof. It is used if Context.Alignof == nil.
+func DefaultAlignof(typ Type) int64 {
+ // For arrays and structs, alignment is defined in terms
+ // of alignment of the elements and fields, respectively.
+ switch t := underlying(typ).(type) {
+ case *Array:
+ // spec: "For a variable x of array type: unsafe.Alignof(x)
+ // is the same as unsafe.Alignof(x[0]), but at least 1."
+ return DefaultAlignof(t.Elt)
+ case *Struct:
+ // spec: "For a variable x of struct type: unsafe.Alignof(x)
+ // is the largest of the values unsafe.Alignof(x.f) for each
+ // field f of x, but at least 1."
+ max := int64(1)
+ for _, f := range t.Fields {
+ if a := DefaultAlignof(f.Type); a > max {
+ max = a
+ }
+ }
+ return max
+ }
+ a := DefaultSizeof(typ) // may be 0
+ // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
+ if a < 1 {
+ return 1
+ }
+ if a > DefaultMaxAlign {
+ return DefaultMaxAlign
+ }
+ return a
+}
+
// align returns the smallest y >= x such that y % a == 0.
func align(x, a int64) int64 {
	n := x + a - 1
	return n - n%a
}
+
+// DefaultOffsetsof implements the default field offset computation
+// for unsafe.Offsetof. It is used if Context.Offsetsof == nil.
+func DefaultOffsetsof(fields []*Field) []int64 {
+ offsets := make([]int64, len(fields))
+ var o int64
+ for i, f := range fields {
+ a := DefaultAlignof(f.Type)
+ o = align(o, a)
+ offsets[i] = o
+ o += DefaultSizeof(f.Type)
+ }
+ return offsets
+}
+
// DefaultPtrSize is the default size of ints, uint, and pointers, in bytes,
// used by DefaultSizeof.
const DefaultPtrSize = 8

// DefaultSizeof implements the default size computation
// for unsafe.Sizeof. It is used if Context.Sizeof == nil.
func DefaultSizeof(typ Type) int64 {
	switch t := underlying(typ).(type) {
	case *Basic:
		if s := t.size; s > 0 {
			return s
		}
		// a string header is a pointer plus a length
		if t.Kind == String {
			return DefaultPtrSize * 2
		}
	case *Array:
		a := DefaultAlignof(t.Elt)
		s := DefaultSizeof(t.Elt)
		return align(s, a) * t.Len // may be 0
	case *Slice:
		// a slice header is a pointer, a length, and a capacity
		return DefaultPtrSize * 3
	case *Struct:
		n := len(t.Fields)
		if n == 0 {
			return 0
		}
		offsets := t.offsets
		if t.offsets == nil {
			// compute offsets on demand
			offsets = DefaultOffsetsof(t.Fields)
			t.offsets = offsets
		}
		// the struct ends where its last field ends
		return offsets[n-1] + DefaultSizeof(t.Fields[n-1].Type)
	case *Signature:
		return DefaultPtrSize * 2
	}
	return DefaultPtrSize // catch-all
}
diff --git a/src/pkg/go/types/stmt.go b/src/pkg/go/types/stmt.go
new file mode 100644
index 000000000..53c46a167
--- /dev/null
+++ b/src/pkg/go/types/stmt.go
@@ -0,0 +1,743 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of statements.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+)
+
// assignment reports whether x can be assigned to a variable of type 'to',
// if necessary by attempting to convert untyped values to the appropriate
// type. If x.mode == invalid upon return, then assignment has already
// issued an error message and the caller doesn't have to report another.
// TODO(gri) This latter behavior is for historic reasons and complicates
// callers. Needs to be cleaned up.
func (check *checker) assignment(x *operand, to Type) bool {
	if x.mode == invalid {
		return false
	}

	if t, ok := x.typ.(*Result); ok {
		// a multi-valued expression cannot be assigned to a single variable
		// TODO(gri) elsewhere we use "assignment count mismatch" (consolidate)
		check.errorf(x.pos(), "%d-valued expression %s used as single value", len(t.Values), x)
		x.mode = invalid
		return false
	}

	// convert an untyped x to the target type, if possible
	check.convertUntyped(x, to)

	return x.mode != invalid && x.isAssignable(check.ctxt, to)
}
+
+// assign1to1 typechecks a single assignment of the form lhs = rhs (if rhs != nil),
+// or lhs = x (if rhs == nil). If decl is set, the lhs operand must be an identifier.
+// If its type is not set, it is deduced from the type or value of x. If lhs has a
+// type it is used as a hint when evaluating rhs, if present.
+//
+func (check *checker) assign1to1(lhs, rhs ast.Expr, x *operand, decl bool, iota int) {
+ ident, _ := lhs.(*ast.Ident)
+ if x == nil {
+ assert(rhs != nil)
+ x = new(operand)
+ }
+
+ if ident != nil && ident.Name == "_" {
+ // anything can be assigned to a blank identifier - check rhs only, if present
+ if rhs != nil {
+ check.expr(x, rhs, nil, iota)
+ }
+ return
+ }
+
+ if !decl {
+ // regular assignment - start with lhs to obtain a type hint
+ // TODO(gri) clean this up - we don't need type hints anymore
+ var z operand
+ check.expr(&z, lhs, nil, -1)
+ if z.mode == invalid {
+ z.typ = nil // so we can proceed with rhs
+ }
+
+ if rhs != nil {
+ check.expr(x, rhs, z.typ, -1)
+ if x.mode == invalid {
+ return
+ }
+ }
+
+ if x.mode == invalid || z.mode == invalid {
+ return
+ }
+
+ if !check.assignment(x, z.typ) {
+ if x.mode != invalid {
+ check.errorf(x.pos(), "cannot assign %s to %s", x, &z)
+ }
+ return
+ }
+ if z.mode == constant {
+ check.errorf(x.pos(), "cannot assign %s to %s", x, &z)
+ }
+ return
+ }
+
+ // declaration - lhs must be an identifier
+ if ident == nil {
+ check.errorf(lhs.Pos(), "cannot declare %s", lhs)
+ return
+ }
+
+ // lhs may or may not be typed yet
+ obj := check.lookup(ident)
+ var typ Type
+ if t := obj.GetType(); t != nil {
+ typ = t
+ }
+
+ if rhs != nil {
+ check.expr(x, rhs, typ, iota)
+ // continue even if x.mode == invalid
+ }
+
+ if typ == nil {
+ // determine lhs type from rhs expression;
+ // for variables, convert untyped types to
+ // default types
+ typ = Typ[Invalid]
+ if x.mode != invalid {
+ typ = x.typ
+ if _, ok := obj.(*Var); ok && isUntyped(typ) {
+ if x.isNil() {
+ check.errorf(x.pos(), "use of untyped nil")
+ x.mode = invalid
+ } else {
+ typ = defaultType(typ)
+ }
+ }
+ }
+ switch obj := obj.(type) {
+ case *Const:
+ obj.Type = typ
+ case *Var:
+ obj.Type = typ
+ default:
+ unreachable()
+ }
+ }
+
+ if x.mode != invalid {
+ if !check.assignment(x, typ) {
+ if x.mode != invalid {
+ switch obj.(type) {
+ case *Const:
+ check.errorf(x.pos(), "cannot assign %s to variable of type %s", x, typ)
+ case *Var:
+ check.errorf(x.pos(), "cannot initialize constant of type %s with %s", typ, x)
+ default:
+ unreachable()
+ }
+ x.mode = invalid
+ }
+ }
+ }
+
+ // for constants, set their value
+ if obj, ok := obj.(*Const); ok {
+ assert(obj.Val == nil)
+ if x.mode != invalid {
+ if x.mode == constant {
+ if isConstType(x.typ) {
+ obj.Val = x.val
+ } else {
+ check.errorf(x.pos(), "%s has invalid constant type", x)
+ }
+ } else {
+ check.errorf(x.pos(), "%s is not constant", x)
+ }
+ }
+ if obj.Val == nil {
+ // set the constant to its type's zero value to reduce spurious errors
+ switch typ := underlying(obj.Type); {
+ case typ == Typ[Invalid]:
+ // ignore
+ case isBoolean(typ):
+ obj.Val = false
+ case isNumeric(typ):
+ obj.Val = int64(0)
+ case isString(typ):
+ obj.Val = ""
+ case hasNil(typ):
+ obj.Val = nilConst
+ default:
+ // in all other cases just prevent use of the constant
+ // TODO(gri) re-evaluate this code
+ obj.Val = nilConst
+ }
+ }
+ }
+}
+
// assignNtoM typechecks a general assignment. If decl is set, the lhs operands
// must be identifiers. If their types are not set, they are deduced from the
// types of the corresponding rhs expressions. iota >= 0 indicates that the
// "assignment" is part of a constant/variable declaration.
// Precondition: len(lhs) > 0 .
//
func (check *checker) assignNtoM(lhs, rhs []ast.Expr, decl bool, iota int) {
	assert(len(lhs) > 0)

	// n = n: pairwise single assignments
	if len(lhs) == len(rhs) {
		for i, e := range rhs {
			check.assign1to1(lhs[i], e, nil, decl, iota)
		}
		return
	}

	// n = 1: multi-valued function call or comma-ok expression
	if len(rhs) == 1 {
		// len(lhs) > 1, therefore a correct rhs expression
		// cannot be a shift and we don't need a type hint;
		// ok to evaluate rhs first
		var x operand
		check.expr(&x, rhs[0], nil, iota)
		if x.mode == invalid {
			// If decl is set, this leaves the lhs identifiers
			// untyped. We catch this when looking up the respective
			// object.
			return
		}

		if t, ok := x.typ.(*Result); ok && len(lhs) == len(t.Values) {
			// function result
			x.mode = value
			for i, obj := range t.Values {
				x.expr = nil // TODO(gri) should do better here
				x.typ = obj.Type
				check.assign1to1(lhs[i], nil, &x, decl, iota)
			}
			return
		}

		if x.mode == valueok && len(lhs) == 2 {
			// comma-ok expression; the second value is an untyped bool
			x.mode = value
			check.assign1to1(lhs[0], nil, &x, decl, iota)

			x.mode = value
			x.typ = Typ[UntypedBool]
			check.assign1to1(lhs[1], nil, &x, decl, iota)
			return
		}
	}

	check.errorf(lhs[0].Pos(), "assignment count mismatch: %d = %d", len(lhs), len(rhs))

	// avoid checking the same declaration over and over
	// again for each lhs identifier that has no type yet
	if iota >= 0 {
		// declaration
		for _, e := range lhs {
			if name, ok := e.(*ast.Ident); ok {
				switch obj := check.lookup(name).(type) {
				case *Const:
					obj.Type = Typ[Invalid]
				case *Var:
					obj.Type = Typ[Invalid]
				default:
					unreachable()
				}
			}
		}
	}
}
+
+func (check *checker) optionalStmt(s ast.Stmt) {
+ if s != nil {
+ check.stmt(s)
+ }
+}
+
+func (check *checker) stmtList(list []ast.Stmt) {
+ for _, s := range list {
+ check.stmt(s)
+ }
+}
+
// call typechecks a call expression appearing in statement position.
func (check *checker) call(call *ast.CallExpr) {
	var x operand
	check.rawExpr(&x, call, nil, -1, false) // don't check if value is used
	// TODO(gri) If a builtin is called, the builtin must be valid in statement context.
}
+
+func (check *checker) multipleDefaults(list []ast.Stmt) {
+ var first ast.Stmt
+ for _, s := range list {
+ var d ast.Stmt
+ switch c := s.(type) {
+ case *ast.CaseClause:
+ if len(c.List) == 0 {
+ d = s
+ }
+ case *ast.CommClause:
+ if c.Comm == nil {
+ d = s
+ }
+ default:
+ check.invalidAST(s.Pos(), "case/communication clause expected")
+ }
+ if d != nil {
+ if first != nil {
+ check.errorf(d.Pos(), "multiple defaults (first at %s)", first.Pos())
+ } else {
+ first = d
+ }
+ }
+ }
+}
+
+// stmt typechecks statement s.
+func (check *checker) stmt(s ast.Stmt) {
+ switch s := s.(type) {
+ case *ast.BadStmt, *ast.EmptyStmt:
+ // ignore
+
+ case *ast.DeclStmt:
+ d, _ := s.Decl.(*ast.GenDecl)
+ if d == nil || (d.Tok != token.CONST && d.Tok != token.TYPE && d.Tok != token.VAR) {
+ check.invalidAST(token.NoPos, "const, type, or var declaration expected")
+ return
+ }
+ if d.Tok == token.CONST {
+ check.assocInitvals(d)
+ }
+ check.decl(d)
+
+ case *ast.LabeledStmt:
+ // TODO(gri) anything to do with label itself?
+ check.stmt(s.Stmt)
+
+ case *ast.ExprStmt:
+ var x operand
+ used := false
+ switch e := unparen(s.X).(type) {
+ case *ast.CallExpr:
+ // function calls are permitted
+ used = true
+ // but some builtins are excluded
+ // (Caution: This evaluates e.Fun twice, once here and once
+ // below as part of s.X. This has consequences for
+ // check.register. Perhaps this can be avoided.)
+ check.expr(&x, e.Fun, nil, -1)
+ if x.mode != invalid {
+ if b, ok := x.typ.(*builtin); ok && !b.isStatement {
+ used = false
+ }
+ }
+ case *ast.UnaryExpr:
+ // receive operations are permitted
+ if e.Op == token.ARROW {
+ used = true
+ }
+ }
+ if !used {
+ check.errorf(s.Pos(), "%s not used", s.X)
+ // ok to continue
+ }
+ check.rawExpr(&x, s.X, nil, -1, false)
+ if x.mode == typexpr {
+ check.errorf(x.pos(), "%s is not an expression", &x)
+ }
+
+ case *ast.SendStmt:
+ var ch, x operand
+ check.expr(&ch, s.Chan, nil, -1)
+ check.expr(&x, s.Value, nil, -1)
+ if ch.mode == invalid || x.mode == invalid {
+ return
+ }
+ if tch, ok := underlying(ch.typ).(*Chan); !ok || tch.Dir&ast.SEND == 0 || !check.assignment(&x, tch.Elt) {
+ if x.mode != invalid {
+ check.invalidOp(ch.pos(), "cannot send %s to channel %s", &x, &ch)
+ }
+ }
+
+ case *ast.IncDecStmt:
+ var op token.Token
+ switch s.Tok {
+ case token.INC:
+ op = token.ADD
+ case token.DEC:
+ op = token.SUB
+ default:
+ check.invalidAST(s.TokPos, "unknown inc/dec operation %s", s.Tok)
+ return
+ }
+ var x operand
+ Y := &ast.BasicLit{ValuePos: s.X.Pos(), Kind: token.INT, Value: "1"} // use x's position
+ check.binary(&x, s.X, Y, op, -1)
+ if x.mode == invalid {
+ return
+ }
+ check.assign1to1(s.X, nil, &x, false, -1)
+
+ case *ast.AssignStmt:
+ switch s.Tok {
+ case token.ASSIGN, token.DEFINE:
+ if len(s.Lhs) == 0 {
+ check.invalidAST(s.Pos(), "missing lhs in assignment")
+ return
+ }
+ check.assignNtoM(s.Lhs, s.Rhs, s.Tok == token.DEFINE, -1)
+ default:
+ // assignment operations
+ if len(s.Lhs) != 1 || len(s.Rhs) != 1 {
+ check.errorf(s.TokPos, "assignment operation %s requires single-valued expressions", s.Tok)
+ return
+ }
+ // TODO(gri) make this conversion more efficient
+ var op token.Token
+ switch s.Tok {
+ case token.ADD_ASSIGN:
+ op = token.ADD
+ case token.SUB_ASSIGN:
+ op = token.SUB
+ case token.MUL_ASSIGN:
+ op = token.MUL
+ case token.QUO_ASSIGN:
+ op = token.QUO
+ case token.REM_ASSIGN:
+ op = token.REM
+ case token.AND_ASSIGN:
+ op = token.AND
+ case token.OR_ASSIGN:
+ op = token.OR
+ case token.XOR_ASSIGN:
+ op = token.XOR
+ case token.SHL_ASSIGN:
+ op = token.SHL
+ case token.SHR_ASSIGN:
+ op = token.SHR
+ case token.AND_NOT_ASSIGN:
+ op = token.AND_NOT
+ default:
+ check.invalidAST(s.TokPos, "unknown assignment operation %s", s.Tok)
+ return
+ }
+ var x operand
+ check.binary(&x, s.Lhs[0], s.Rhs[0], op, -1)
+ if x.mode == invalid {
+ return
+ }
+ check.assign1to1(s.Lhs[0], nil, &x, false, -1)
+ }
+
+ case *ast.GoStmt:
+ check.call(s.Call)
+
+ case *ast.DeferStmt:
+ check.call(s.Call)
+
+ case *ast.ReturnStmt:
+ sig := check.funcsig
+ if n := len(sig.Results); n > 0 {
+ // TODO(gri) should not have to compute lhs, named every single time - clean this up
+ lhs := make([]ast.Expr, n)
+ named := false // if set, function has named results
+ for i, res := range sig.Results {
+ if len(res.Name) > 0 {
+ // a blank (_) result parameter is a named result
+ named = true
+ }
+ name := ast.NewIdent(res.Name)
+ name.NamePos = s.Pos()
+ check.register(name, &Var{Name: res.Name, Type: res.Type}) // Pkg == nil
+ lhs[i] = name
+ }
+ if len(s.Results) > 0 || !named {
+ // TODO(gri) assignNtoM should perhaps not require len(lhs) > 0
+ check.assignNtoM(lhs, s.Results, false, -1)
+ }
+ } else if len(s.Results) > 0 {
+ check.errorf(s.Pos(), "no result values expected")
+ }
+
+ case *ast.BranchStmt:
+ // TODO(gri) implement this
+
+ case *ast.BlockStmt:
+ check.stmtList(s.List)
+
+ case *ast.IfStmt:
+ check.optionalStmt(s.Init)
+ var x operand
+ check.expr(&x, s.Cond, nil, -1)
+ if x.mode != invalid && !isBoolean(x.typ) {
+ check.errorf(s.Cond.Pos(), "non-boolean condition in if statement")
+ }
+ check.stmt(s.Body)
+ check.optionalStmt(s.Else)
+
+ case *ast.SwitchStmt:
+ check.optionalStmt(s.Init)
+ var x operand
+ tag := s.Tag
+ if tag == nil {
+ // use fake true tag value and position it at the opening { of the switch
+ ident := &ast.Ident{NamePos: s.Body.Lbrace, Name: "true"}
+ check.register(ident, Universe.Lookup("true"))
+ tag = ident
+ }
+ check.expr(&x, tag, nil, -1)
+
+ check.multipleDefaults(s.Body.List)
+ seen := make(map[interface{}]token.Pos)
+ for _, s := range s.Body.List {
+ clause, _ := s.(*ast.CaseClause)
+ if clause == nil {
+ continue // error reported before
+ }
+ if x.mode != invalid {
+ for _, expr := range clause.List {
+ x := x // copy of x (don't modify original)
+ var y operand
+ check.expr(&y, expr, nil, -1)
+ if y.mode == invalid {
+ continue // error reported before
+ }
+ // If we have a constant case value, it must appear only
+ // once in the switch statement. Determine if there is a
+ // duplicate entry, but only report an error if there are
+ // no other errors.
+ var dupl token.Pos
+ var yy operand
+ if y.mode == constant {
+ // TODO(gri) This code doesn't work correctly for
+ // large integer, floating point, or
+ // complex values - the respective struct
+ // comparisons are shallow. Need to use a
+ // hash function to index the map.
+ dupl = seen[y.val]
+ seen[y.val] = y.pos()
+ yy = y // remember y
+ }
+ // TODO(gri) The convertUntyped call pair below appears in other places. Factor!
+ // Order matters: By comparing y against x, error positions are at the case values.
+ check.convertUntyped(&y, x.typ)
+ if y.mode == invalid {
+ continue // error reported before
+ }
+ check.convertUntyped(&x, y.typ)
+ if x.mode == invalid {
+ continue // error reported before
+ }
+ check.comparison(&y, &x, token.EQL)
+ if y.mode != invalid && dupl.IsValid() {
+ check.errorf(yy.pos(), "%s is duplicate case (previous at %s)",
+ &yy, check.fset.Position(dupl))
+ }
+ }
+ }
+ check.stmtList(clause.Body)
+ }
+
+ case *ast.TypeSwitchStmt:
+ check.optionalStmt(s.Init)
+
+ // A type switch guard must be of the form:
+ //
+ // TypeSwitchGuard = [ identifier ":=" ] PrimaryExpr "." "(" "type" ")" .
+ //
+ // The parser is checking syntactic correctness;
+ // remaining syntactic errors are considered AST errors here.
+ // TODO(gri) better factoring of error handling (invalid ASTs)
+ //
+ var lhs *Var // lhs variable or nil
+ var rhs ast.Expr
+ switch guard := s.Assign.(type) {
+ case *ast.ExprStmt:
+ rhs = guard.X
+ case *ast.AssignStmt:
+ if len(guard.Lhs) != 1 || guard.Tok != token.DEFINE || len(guard.Rhs) != 1 {
+ check.invalidAST(s.Pos(), "incorrect form of type switch guard")
+ return
+ }
+ ident, _ := guard.Lhs[0].(*ast.Ident)
+ if ident == nil {
+ check.invalidAST(s.Pos(), "incorrect form of type switch guard")
+ return
+ }
+ lhs = check.lookup(ident).(*Var)
+ rhs = guard.Rhs[0]
+ default:
+ check.invalidAST(s.Pos(), "incorrect form of type switch guard")
+ return
+ }
+
+ // rhs must be of the form: expr.(type) and expr must be an interface
+ expr, _ := rhs.(*ast.TypeAssertExpr)
+ if expr == nil || expr.Type != nil {
+ check.invalidAST(s.Pos(), "incorrect form of type switch guard")
+ return
+ }
+ var x operand
+ check.expr(&x, expr.X, nil, -1)
+ if x.mode == invalid {
+ return
+ }
+ var T *Interface
+ if T, _ = underlying(x.typ).(*Interface); T == nil {
+ check.errorf(x.pos(), "%s is not an interface", &x)
+ return
+ }
+
+ check.multipleDefaults(s.Body.List)
+ for _, s := range s.Body.List {
+ clause, _ := s.(*ast.CaseClause)
+ if clause == nil {
+ continue // error reported before
+ }
+ // Check each type in this type switch case.
+ var typ Type
+ for _, expr := range clause.List {
+ typ = check.typOrNil(expr, false)
+ if typ != nil && typ != Typ[Invalid] {
+ if method, wrongType := missingMethod(typ, T); method != nil {
+ var msg string
+ if wrongType {
+ msg = "%s cannot have dynamic type %s (wrong type for method %s)"
+ } else {
+ msg = "%s cannot have dynamic type %s (missing method %s)"
+ }
+ check.errorf(expr.Pos(), msg, &x, typ, method.Name)
+ // ok to continue
+ }
+ }
+ }
+ // If lhs exists, set its type for each clause.
+ if lhs != nil {
+ // In clauses with a case listing exactly one type, the variable has that type;
+ // otherwise, the variable has the type of the expression in the TypeSwitchGuard.
+ if len(clause.List) != 1 || typ == nil {
+ typ = x.typ
+ }
+ lhs.Type = typ
+ }
+ check.stmtList(clause.Body)
+ }
+
+ // There is only one object (lhs) associated with a lhs identifier, but that object
+ // assumes different types for different clauses. Set it back to the type of the
+ // TypeSwitchGuard expression so that that variable always has a valid type.
+ if lhs != nil {
+ lhs.Type = x.typ
+ }
+
+ case *ast.SelectStmt:
+ check.multipleDefaults(s.Body.List)
+ for _, s := range s.Body.List {
+ clause, _ := s.(*ast.CommClause)
+ if clause == nil {
+ continue // error reported before
+ }
+ check.optionalStmt(clause.Comm) // TODO(gri) check correctness of c.Comm (must be Send/RecvStmt)
+ check.stmtList(clause.Body)
+ }
+
+ case *ast.ForStmt:
+ check.optionalStmt(s.Init)
+ if s.Cond != nil {
+ var x operand
+ check.expr(&x, s.Cond, nil, -1)
+ if x.mode != invalid && !isBoolean(x.typ) {
+ check.errorf(s.Cond.Pos(), "non-boolean condition in for statement")
+ }
+ }
+ check.optionalStmt(s.Post)
+ check.stmt(s.Body)
+
+ case *ast.RangeStmt:
+ // check expression to iterate over
+ decl := s.Tok == token.DEFINE
+ var x operand
+ check.expr(&x, s.X, nil, -1)
+ if x.mode == invalid {
+ // if we don't have a declaration, we can still check the loop's body
+ if !decl {
+ check.stmt(s.Body)
+ }
+ return
+ }
+
+ // determine key/value types
+ var key, val Type
+ switch typ := underlying(x.typ).(type) {
+ case *Basic:
+ if isString(typ) {
+ key = Typ[UntypedInt]
+ val = Typ[UntypedRune]
+ }
+ case *Array:
+ key = Typ[UntypedInt]
+ val = typ.Elt
+ case *Slice:
+ key = Typ[UntypedInt]
+ val = typ.Elt
+ case *Pointer:
+ if typ, _ := underlying(typ.Base).(*Array); typ != nil {
+ key = Typ[UntypedInt]
+ val = typ.Elt
+ }
+ case *Map:
+ key = typ.Key
+ val = typ.Elt
+ case *Chan:
+ key = typ.Elt
+ if typ.Dir&ast.RECV == 0 {
+ check.errorf(x.pos(), "cannot range over send-only channel %s", &x)
+ // ok to continue
+ }
+ if s.Value != nil {
+ check.errorf(s.Value.Pos(), "iteration over %s permits only one iteration variable", &x)
+ // ok to continue
+ }
+ }
+
+ if key == nil {
+ check.errorf(x.pos(), "cannot range over %s", &x)
+ // if we don't have a declaration, we can still check the loop's body
+ if !decl {
+ check.stmt(s.Body)
+ }
+ return
+ }
+
+ // check assignment to/declaration of iteration variables
+ // TODO(gri) The error messages/positions are not great here,
+ // they refer to the expression in the range clause.
+ // Should give better messages w/o too much code
+ // duplication (assignment checking).
+ x.mode = value
+ if s.Key != nil {
+ x.typ = key
+ x.expr = s.Key
+ check.assign1to1(s.Key, nil, &x, decl, -1)
+ } else {
+ check.invalidAST(s.Pos(), "range clause requires index iteration variable")
+ // ok to continue
+ }
+ if s.Value != nil {
+ x.typ = val
+ x.expr = s.Value
+ check.assign1to1(s.Value, nil, &x, decl, -1)
+ }
+
+ check.stmt(s.Body)
+
+ default:
+ check.errorf(s.Pos(), "invalid statement")
+ }
+}
diff --git a/src/pkg/go/types/testdata/builtins.src b/src/pkg/go/types/testdata/builtins.src
new file mode 100644
index 000000000..c08c442ce
--- /dev/null
+++ b/src/pkg/go/types/testdata/builtins.src
@@ -0,0 +1,401 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// builtin calls
+
+package builtins
+
+import "unsafe"
+
+func _append() {
+ var x int
+ var s []byte
+ _0 := append /* ERROR "argument" */ ()
+ _1 := append("foo" /* ERROR "not a typed slice" */)
+ _2 := append(nil /* ERROR "not a typed slice" */, s)
+ _3 := append(x /* ERROR "not a typed slice" */, s)
+ _4 := append(s)
+ append /* ERROR "not used" */ (s)
+}
+
+func _cap() {
+ var a [10]bool
+ var p *[20]int
+ var s []int
+ var c chan string
+ _0 := cap /* ERROR "argument" */ ()
+ _1 := cap /* ERROR "argument" */ (1, 2)
+ _2 := cap(42 /* ERROR "invalid" */)
+ const _3 = cap(a)
+ assert(_3 == 10)
+ const _4 = cap(p)
+ assert(_4 == 20)
+ _5 := cap(c)
+ cap /* ERROR "not used" */ (c)
+
+ // issue 4744
+ type T struct{ a [10]int }
+ const _ = cap(((*T)(nil)).a)
+}
+
+func _close() {
+ var c chan int
+ var r <-chan int
+ close /* ERROR "argument" */ ()
+ close /* ERROR "argument" */ (1, 2)
+ close(42 /* ERROR "not a channel" */)
+ close(r /* ERROR "receive-only channel" */)
+ close(c)
+}
+
+func _complex() {
+ var i32 int32
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ _ = complex /* ERROR "argument" */ ()
+ _ = complex /* ERROR "argument" */ (1)
+ _ = complex(true /* ERROR "invalid argument" */ , 0)
+ _ = complex(i32 /* ERROR "invalid argument" */ , 0)
+ _ = complex("foo" /* ERROR "invalid argument" */ , 0)
+ _ = complex(c64 /* ERROR "invalid argument" */ , 0)
+ _ = complex(0, true /* ERROR "invalid argument" */ )
+ _ = complex(0, i32 /* ERROR "invalid argument" */ )
+ _ = complex(0, "foo" /* ERROR "invalid argument" */ )
+ _ = complex(0, c64 /* ERROR "invalid argument" */ )
+ _ = complex(f32, f32)
+ _ = complex(f32, 1)
+ _ = complex(f32, 1.0)
+ _ = complex(f32, 'a')
+ _ = complex(f64, f64)
+ _ = complex(f64, 1)
+ _ = complex(f64, 1.0)
+ _ = complex(f64, 'a')
+ _ = complex(f32 /* ERROR "mismatched types" */, f64)
+ _ = complex(f64 /* ERROR "mismatched types" */, f32)
+ _ = complex(1, 1)
+ _ = complex(1, 1.1)
+ _ = complex(1, 'a')
+ complex /* ERROR "not used" */ (1, 2)
+}
+
+func _copy() {
+ copy /* ERROR "not enough arguments" */ ()
+ copy /* ERROR "not enough arguments" */ ("foo")
+ copy([ /* ERROR "copy expects slice arguments" */ ...]int{}, []int{})
+ copy([ /* ERROR "copy expects slice arguments" */ ]int{}, [...]int{})
+ copy([ /* ERROR "different element types" */ ]int8{}, "foo")
+
+ // spec examples
+ var a = [...]int{0, 1, 2, 3, 4, 5, 6, 7}
+ var s = make([]int, 6)
+ var b = make([]byte, 5)
+ n1 := copy(s, a[0:]) // n1 == 6, s == []int{0, 1, 2, 3, 4, 5}
+ n2 := copy(s, s[2:]) // n2 == 4, s == []int{2, 3, 4, 5, 4, 5}
+ n3 := copy(b, "Hello, World!") // n3 == 5, b == []byte("Hello")
+}
+
+func _delete() {
+ var m map[string]int
+ var s string
+ delete /* ERROR "argument" */ ()
+ delete /* ERROR "argument" */ (1)
+ delete /* ERROR "argument" */ (1, 2, 3)
+ delete(m, 0 /* ERROR "not assignable" */)
+ delete(m, s)
+}
+
+func _imag() {
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ var c128 complex128
+ _ = imag /* ERROR "argument" */ ()
+ _ = imag /* ERROR "argument" */ (1, 2)
+ _ = imag(10 /* ERROR "must be a complex number" */)
+ _ = imag(2.7182818 /* ERROR "must be a complex number" */)
+ _ = imag("foo" /* ERROR "must be a complex number" */)
+ const _5 = imag(1 + 2i)
+ assert(_5 == 2)
+ f32 = _5
+ f64 = _5
+ const _6 = imag(0i)
+ assert(_6 == 0)
+ f32 = imag(c64)
+ f64 = imag(c128)
+ f32 = imag /* ERROR "cannot assign" */ (c128)
+ f64 = imag /* ERROR "cannot assign" */ (c64)
+ imag /* ERROR "not used" */ (c64)
+}
+
+func _len() {
+ const c = "foobar"
+ var a [10]bool
+ var p *[20]int
+ var s []int
+ var m map[string]complex128
+ _ = len /* ERROR "argument" */ ()
+ _ = len /* ERROR "argument" */ (1, 2)
+ _ = len(42 /* ERROR "invalid" */)
+ const _3 = len(c)
+ assert(_3 == 6)
+ const _4 = len(a)
+ assert(_4 == 10)
+ const _5 = len(p)
+ assert(_5 == 20)
+ _ = len(m)
+ len /* ERROR "not used" */ (c)
+
+ // esoteric case
+ var t string
+ var hash map[interface{}][]*[10]int
+ const n = len /* ERROR "not constant" */ (hash[recover()][len(t)])
+ assert /* ERROR "failed" */ (n == 10)
+ var ch <-chan int
+ const nn = len /* ERROR "not constant" */ (hash[<-ch][len(t)])
+ _ = nn // TODO(gri) remove this once unused constants get type-checked
+
+ // issue 4744
+ type T struct{ a [10]int }
+ const _ = len(((*T)(nil)).a)
+}
+
+func _make() {
+ n := 0
+
+ _ = make /* ERROR "argument" */ ()
+ _ = make(1 /* ERROR "not a type" */)
+ _ = make(int /* ERROR "cannot make" */)
+
+ // slices
+ _ = make/* ERROR "arguments" */ ([]int)
+ _ = make/* ERROR "arguments" */ ([]int, 2, 3, 4)
+ _ = make([]int, int /* ERROR "not an expression" */)
+ _ = make([]int, 10, float32 /* ERROR "not an expression" */)
+ _ = make([]int, "foo" /* ERROR "must be an integer" */)
+ _ = make([]int, 10, 2.3 /* ERROR "must be an integer" */)
+ _ = make([]int, 5, 10.0)
+ _ = make([]int, 0i)
+ _ = make([]int, - /* ERROR "must not be negative" */ 1, 10)
+ _ = make([]int, 0, - /* ERROR "must not be negative" */ 1)
+ _ = make([]int, - /* ERROR "must not be negative" */ 1, - /* ERROR "must not be negative" */ 1)
+ _ = make([]int, 1<<100, 1<<100) // run-time panic
+ _ = make([]int, 1 /* ERROR "length and capacity swapped" */ <<100 + 1, 1<<100)
+ _ = make([]int, 1 /* ERROR "length and capacity swapped" */ <<100, 12345)
+
+ // maps
+ _ = make /* ERROR "arguments" */ (map[int]string, 10, 20)
+ _ = make(map[int]float32, int /* ERROR "not an expression" */)
+ _ = make(map[int]float32, "foo" /* ERROR "must be an integer" */)
+ _ = make(map[int]float32, 10)
+ _ = make(map[int]float32, n)
+ _ = make(map[int]float32, int64(n))
+
+ // channels
+ _ = make /* ERROR "arguments" */ (chan int, 10, 20)
+ _ = make(chan int, int /* ERROR "not an expression" */)
+ _ = make(chan<- int, "foo" /* ERROR "must be an integer" */)
+ _ = make(<-chan float64, 10)
+ _ = make(chan chan int, n)
+ _ = make(chan string, int64(n))
+
+ make /* ERROR "not used" */ ([]int, 10)
+}
+
+func _new() {
+ _ = new /* ERROR "argument" */ ()
+ _ = new /* ERROR "argument" */ (1, 2)
+ _ = new("foo" /* ERROR "not a type" */)
+ p := new(float64)
+ _ = new(struct{ x, y int })
+ q := new(*float64)
+ _ = *p == **q
+ new /* ERROR "not used" */ (int)
+}
+
+func _panic() {
+ panic /* ERROR "arguments" */ ()
+ panic /* ERROR "arguments" */ (1, 2)
+ panic(0)
+ panic("foo")
+ panic(false)
+}
+
+func _print() {
+ print()
+ print(1)
+ print(1, 2)
+ print("foo")
+ print(2.718281828)
+ print(false)
+}
+
+func _println() {
+ println()
+ println(1)
+ println(1, 2)
+ println("foo")
+ println(2.718281828)
+ println(false)
+}
+
+func _real() {
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ var c128 complex128
+ _ = real /* ERROR "argument" */ ()
+ _ = real /* ERROR "argument" */ (1, 2)
+ _ = real(10 /* ERROR "must be a complex number" */)
+ _ = real(2.7182818 /* ERROR "must be a complex number" */)
+ _ = real("foo" /* ERROR "must be a complex number" */)
+ const _5 = real(1 + 2i)
+ assert(_5 == 1)
+ f32 = _5
+ f64 = _5
+ const _6 = real(0i)
+ assert(_6 == 0)
+ f32 = real(c64)
+ f64 = real(c128)
+ f32 = real /* ERROR "cannot assign" */ (c128)
+ f64 = real /* ERROR "cannot assign" */ (c64)
+ real /* ERROR "not used" */ (c64)
+}
+
+func _recover() {
+ _ = recover()
+ _ = recover /* ERROR "argument" */ (10)
+ recover()
+}
+
+// assuming types.DefaultPtrSize == 8
+type S0 struct{ // offset
+ a bool // 0
+ b rune // 4
+ c *int // 8
+ d bool // 16
+ e complex128 // 24
+} // 40
+
+type S1 struct{ // offset
+ x float32 // 0
+ y string // 8
+ z *S1 // 24
+ S0 // 32
+} // 72
+
+type S2 struct{ // offset
+ *S1 // 0
+} // 8
+
+func _Alignof() {
+ var x int
+ _ = unsafe /* ERROR "argument" */ .Alignof()
+ _ = unsafe /* ERROR "argument" */ .Alignof(1, 2)
+ _ = unsafe.Alignof(int /* ERROR "not an expression" */)
+ _ = unsafe.Alignof(42)
+ _ = unsafe.Alignof(new(struct{}))
+ unsafe /* ERROR "not used" */ .Alignof(x)
+
+ var y S0
+ assert(unsafe.Alignof(y.a) == 1)
+ assert(unsafe.Alignof(y.b) == 4)
+ assert(unsafe.Alignof(y.c) == 8)
+ assert(unsafe.Alignof(y.d) == 1)
+ assert(unsafe.Alignof(y.e) == 8)
+}
+
+func _Offsetof() {
+ var x struct{ f int }
+ _ = unsafe /* ERROR "argument" */ .Offsetof()
+ _ = unsafe /* ERROR "argument" */ .Offsetof(1, 2)
+ _ = unsafe.Offsetof(int /* ERROR "not an expression" */)
+ _ = unsafe.Offsetof(x /* ERROR "not a selector" */)
+ _ = unsafe.Offsetof(x.f)
+ _ = unsafe.Offsetof((x.f))
+ _ = unsafe.Offsetof((((((((x))).f)))))
+ unsafe /* ERROR "not used" */ .Offsetof(x.f)
+
+ var y0 S0
+ assert(unsafe.Offsetof(y0.a) == 0)
+ assert(unsafe.Offsetof(y0.b) == 4)
+ assert(unsafe.Offsetof(y0.c) == 8)
+ assert(unsafe.Offsetof(y0.d) == 16)
+ assert(unsafe.Offsetof(y0.e) == 24)
+
+ var y1 S1
+ assert(unsafe.Offsetof(y1.x) == 0)
+ assert(unsafe.Offsetof(y1.y) == 8)
+ assert(unsafe.Offsetof(y1.z) == 24)
+ assert(unsafe.Offsetof(y1.S0) == 32)
+
+ assert(unsafe.Offsetof(y1.S0.a) == 0) // relative to S0
+ assert(unsafe.Offsetof(y1.a) == 32) // relative to S1
+ assert(unsafe.Offsetof(y1.b) == 36) // relative to S1
+ assert(unsafe.Offsetof(y1.c) == 40) // relative to S1
+ assert(unsafe.Offsetof(y1.d) == 48) // relative to S1
+ assert(unsafe.Offsetof(y1.e) == 56) // relative to S1
+
+ var y2 S2
+ assert(unsafe.Offsetof(y2.S1) == 0)
+ _ = unsafe.Offsetof(y2 /* ERROR "embedded via pointer" */ .x)
+}
+
+func _Sizeof() {
+ var x int
+ _ = unsafe /* ERROR "argument" */ .Sizeof()
+ _ = unsafe /* ERROR "argument" */ .Sizeof(1, 2)
+ _ = unsafe.Sizeof(int /* ERROR "not an expression" */)
+ _ = unsafe.Sizeof(42)
+ _ = unsafe.Sizeof(new(complex128))
+ unsafe /* ERROR "not used" */ .Sizeof(x)
+
+ // basic types have size guarantees
+ assert(unsafe.Sizeof(byte(0)) == 1)
+ assert(unsafe.Sizeof(uint8(0)) == 1)
+ assert(unsafe.Sizeof(int8(0)) == 1)
+ assert(unsafe.Sizeof(uint16(0)) == 2)
+ assert(unsafe.Sizeof(int16(0)) == 2)
+ assert(unsafe.Sizeof(uint32(0)) == 4)
+ assert(unsafe.Sizeof(int32(0)) == 4)
+ assert(unsafe.Sizeof(float32(0)) == 4)
+ assert(unsafe.Sizeof(uint64(0)) == 8)
+ assert(unsafe.Sizeof(int64(0)) == 8)
+ assert(unsafe.Sizeof(float64(0)) == 8)
+ assert(unsafe.Sizeof(complex64(0)) == 8)
+ assert(unsafe.Sizeof(complex128(0)) == 16)
+
+ var y0 S0
+ assert(unsafe.Sizeof(y0.a) == 1)
+ assert(unsafe.Sizeof(y0.b) == 4)
+ assert(unsafe.Sizeof(y0.c) == 8)
+ assert(unsafe.Sizeof(y0.d) == 1)
+ assert(unsafe.Sizeof(y0.e) == 16)
+ assert(unsafe.Sizeof(y0) == 40)
+
+ var y1 S1
+ assert(unsafe.Sizeof(y1) == 72)
+
+ var y2 S2
+ assert(unsafe.Sizeof(y2) == 8)
+}
+
+// self-testing only
+func _assert() {
+ var x int
+ assert /* ERROR "argument" */ ()
+ assert /* ERROR "argument" */ (1, 2)
+ assert("foo" /* ERROR "boolean constant" */ )
+ assert(x /* ERROR "boolean constant" */)
+ assert(true)
+ assert /* ERROR "failed" */ (false)
+}
+
+// self-testing only
+func _trace() {
+ // Uncomment the code below to test trace - will produce console output
+ // _ = trace /* ERROR "no value" */ ()
+ // _ = trace(1)
+ // _ = trace(true, 1.2, '\'', "foo", 42i, "foo" <= "bar")
+}
diff --git a/src/pkg/go/types/testdata/const0.src b/src/pkg/go/types/testdata/const0.src
new file mode 100644
index 000000000..a2ca344c7
--- /dev/null
+++ b/src/pkg/go/types/testdata/const0.src
@@ -0,0 +1,215 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// constant declarations
+
+package const0
+
+// constants declarations must be initialized by constants
+var x = 0
+const c0 = x /* ERROR "not constant" */
+
+// untyped constants
+const (
+ // boolean values
+ ub0 = false
+ ub1 = true
+ ub2 = 2 < 1
+ ub3 = ui1 == uf1
+ ub4 = true /* ERROR "cannot convert" */ == 0
+
+ // integer values
+ ui0 = 0
+ ui1 = 1
+ ui2 = 42
+ ui3 = 3141592653589793238462643383279502884197169399375105820974944592307816406286
+ ui4 = -10
+
+ ui5 = ui0 + ui1
+ ui6 = ui1 - ui1
+ ui7 = ui2 * ui1
+ ui8 = ui3 / ui3
+ ui9 = ui3 % ui3
+
+ ui10 = 1 / 0 /* ERROR "division by zero" */
+ ui11 = ui1 / 0 /* ERROR "division by zero" */
+ ui12 = ui3 / ui0 /* ERROR "division by zero" */
+ ui13 = 1 % 0 /* ERROR "division by zero" */
+ ui14 = ui1 % 0 /* ERROR "division by zero" */
+ ui15 = ui3 % ui0 /* ERROR "division by zero" */
+
+ ui16 = ui2 & ui3
+ ui17 = ui2 | ui3
+ ui18 = ui2 ^ ui3
+
+ // floating point values
+ uf0 = 0.
+ uf1 = 1.
+ uf2 = 4.2e1
+ uf3 = 3.141592653589793238462643383279502884197169399375105820974944592307816406286
+ uf4 = 1e-1
+
+ uf5 = uf0 + uf1
+ uf6 = uf1 - uf1
+ uf7 = uf2 * uf1
+ uf8 = uf3 / uf3
+ uf9 = uf3 /* ERROR "not defined" */ % uf3
+
+ uf10 = 1 / 0 /* ERROR "division by zero" */
+ uf11 = uf1 / 0 /* ERROR "division by zero" */
+ uf12 = uf3 / uf0 /* ERROR "division by zero" */
+
+ uf16 = uf2 /* ERROR "not defined" */ & uf3
+ uf17 = uf2 /* ERROR "not defined" */ | uf3
+ uf18 = uf2 /* ERROR "not defined" */ ^ uf3
+
+ // complex values
+ uc0 = 0.i
+ uc1 = 1.i
+ uc2 = 4.2e1i
+ uc3 = 3.141592653589793238462643383279502884197169399375105820974944592307816406286i
+ uc4 = 1e-1i
+
+ uc5 = uc0 + uc1
+ uc6 = uc1 - uc1
+ uc7 = uc2 * uc1
+ uc8 = uc3 / uc3
+ uc9 = uc3 /* ERROR "not defined" */ % uc3
+
+ uc10 = 1 / 0 /* ERROR "division by zero" */
+ uc11 = uc1 / 0 /* ERROR "division by zero" */
+ uc12 = uc3 / uc0 /* ERROR "division by zero" */
+
+ uc16 = uc2 /* ERROR "not defined" */ & uc3
+ uc17 = uc2 /* ERROR "not defined" */ | uc3
+ uc18 = uc2 /* ERROR "not defined" */ ^ uc3
+)
+
+type (
+ mybool bool
+ myint int
+ myfloat float64
+ mycomplex complex128
+)
+
+// typed constants
+const (
+ // boolean values
+ tb0 bool = false
+ tb1 bool = true
+ tb2 mybool = 2 < 1
+ tb3 mybool = ti1 /* ERROR "cannot compare" */ == tf1
+
+ // integer values
+ ti0 int8 = ui0
+ ti1 int32 = ui1
+ ti2 int64 = ui2
+ ti3 myint = ui3 /* ERROR "overflows" */
+ ti4 myint = ui4
+
+ ti5 = ti0 /* ERROR "mismatched types" */ + ti1
+ ti6 = ti1 - ti1
+ ti7 = ti2 /* ERROR "mismatched types" */ * ti1
+ //ti8 = ti3 / ti3 // TODO(gri) enable this
+ //ti9 = ti3 % ti3 // TODO(gri) enable this
+
+ ti10 = 1 / 0 /* ERROR "division by zero" */
+ ti11 = ti1 / 0 /* ERROR "division by zero" */
+ ti12 = ti3 /* ERROR "mismatched types" */ / ti0
+ ti13 = 1 % 0 /* ERROR "division by zero" */
+ ti14 = ti1 % 0 /* ERROR "division by zero" */
+ ti15 = ti3 /* ERROR "mismatched types" */ % ti0
+
+ ti16 = ti2 /* ERROR "mismatched types" */ & ti3
+ ti17 = ti2 /* ERROR "mismatched types" */ | ti4
+ ti18 = ti2 ^ ti5 // no mismatched types error because the type of ti5 is unknown
+
+ // floating point values
+ tf0 float32 = 0.
+ tf1 float32 = 1.
+ tf2 float64 = 4.2e1
+ tf3 myfloat = 3.141592653589793238462643383279502884197169399375105820974944592307816406286
+ tf4 myfloat = 1e-1
+
+ tf5 = tf0 + tf1
+ tf6 = tf1 - tf1
+ tf7 = tf2 /* ERROR "mismatched types" */ * tf1
+ // tf8 = tf3 / tf3 // TODO(gri) enable this
+ tf9 = tf3 /* ERROR "not defined" */ % tf3
+
+ tf10 = 1 / 0 /* ERROR "division by zero" */
+ tf11 = tf1 / 0 /* ERROR "division by zero" */
+ tf12 = tf3 /* ERROR "mismatched types" */ / tf0
+
+ tf16 = tf2 /* ERROR "mismatched types" */ & tf3
+ tf17 = tf2 /* ERROR "mismatched types" */ | tf3
+ tf18 = tf2 /* ERROR "mismatched types" */ ^ tf3
+
+ // complex values
+ tc0 = 0.i
+ tc1 = 1.i
+ tc2 = 4.2e1i
+ tc3 = 3.141592653589793238462643383279502884197169399375105820974944592307816406286i
+ tc4 = 1e-1i
+
+ tc5 = tc0 + tc1
+ tc6 = tc1 - tc1
+ tc7 = tc2 * tc1
+ tc8 = tc3 / tc3
+ tc9 = tc3 /* ERROR "not defined" */ % tc3
+
+ tc10 = 1 / 0 /* ERROR "division by zero" */
+ tc11 = tc1 / 0 /* ERROR "division by zero" */
+ tc12 = tc3 / tc0 /* ERROR "division by zero" */
+
+ tc16 = tc2 /* ERROR "not defined" */ & tc3
+ tc17 = tc2 /* ERROR "not defined" */ | tc3
+ tc18 = tc2 /* ERROR "not defined" */ ^ tc3
+)
+
+// initialization cycles
+const (
+ a /* ERROR "cycle" */ = a
+ b /* ERROR "cycle" */ , c /* ERROR "cycle" */, d, e = e, d, c, b // TODO(gri) should only have one cycle error
+ f float64 = d
+)
+
+// multiple initialization
+const (
+ a1, a2, a3 = 7, 3.1415926, "foo"
+ b1, b2, b3 = b3, b1, 42
+ _p0 = assert(a1 == 7)
+ _p1 = assert(a2 == 3.1415926)
+ _p2 = assert(a3 == "foo")
+ _p3 = assert(b1 == 42)
+ _p4 = assert(b2 == 42)
+ _p5 = assert(b3 == 42)
+)
+
+// iota
+const (
+ iota0 = iota
+ iota1 = iota
+ iota2 = iota*2
+ _a0 = assert(iota0 == 0)
+ _a1 = assert(iota1 == 1)
+ _a2 = assert(iota2 == 4)
+ iota6 = iota*3
+
+ iota7
+ iota8
+ _a3 = assert(iota7 == 21)
+ _a4 = assert(iota8 == 24)
+)
+
+const (
+ _b0 = iota
+ _b1 = assert(iota + iota2 == 5)
+)
+
+// special cases
+const (
+ _n0 = nil /* ERROR "invalid constant type" */
+ _n1 = [ /* ERROR "not constant" */ ]int{}
+) \ No newline at end of file
diff --git a/src/pkg/go/types/testdata/conversions.src b/src/pkg/go/types/testdata/conversions.src
new file mode 100644
index 000000000..1b1518366
--- /dev/null
+++ b/src/pkg/go/types/testdata/conversions.src
@@ -0,0 +1,18 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// conversions
+
+package conversions
+
+// argument count
+var (
+ _v0 = int /* ERROR "one argument" */ ()
+ _v1 = int /* ERROR "one argument" */ (1, 2)
+)
+
+//
+var (
+ _v2 = int8(0)
+) \ No newline at end of file
diff --git a/src/pkg/go/types/testdata/decls0.src b/src/pkg/go/types/testdata/decls0.src
new file mode 100644
index 000000000..f0115bd9d
--- /dev/null
+++ b/src/pkg/go/types/testdata/decls0.src
@@ -0,0 +1,187 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// type declarations
+
+package decls0
+
+import (
+ "unsafe"
+ // we can have multiple blank imports (was bug)
+ _ "math"
+ _ "net/rpc"
+ // reflect defines a type "flag" which shows up in the gc export data
+ "reflect"
+ . "reflect"
+)
+
+// reflect.flag must not be visible in this package
+type flag int
+type _ reflect /* ERROR "cannot refer to unexported" */ .flag
+
+// dot-imported exported objects may conflict with local objects
+type Value /* ERROR "redeclared in this block by dot-import" */ struct{}
+
+const pi = 3.1415
+
+type (
+ N undeclared /* ERROR "undeclared" */
+ B bool
+ I int32
+ A [10]P
+ T struct {
+ x, y P
+ }
+ P *T
+ R (*R)
+ F func(A) I
+ Y interface {
+ f(A) I
+ }
+ S [](((P)))
+ M map[I]F
+ C chan<- I
+
+ // blank types must be typechecked
+ _ pi /* ERROR "not a type" */
+ _ struct{}
+ _ struct{ pi /* ERROR "not a type" */ }
+)
+
+
+// invalid array types
+type (
+ iA0 [... /* ERROR "invalid use of '...'" */ ]byte
+ iA1 [1 /* ERROR "invalid array length" */ <<100]int
+ iA2 [- /* ERROR "invalid array length" */ 1]complex128
+ iA3 ["foo" /* ERROR "invalid array length" */ ]string
+)
+
+
+type (
+ p1 pi /* ERROR "no single field or method foo" */ .foo
+ p2 unsafe.Pointer
+)
+
+
+type (
+ Pi pi /* ERROR "not a type" */
+
+ a /* ERROR "illegal cycle" */ a
+ a /* ERROR "redeclared" */ int
+
+ // where the cycle error appears depends on the
+ // order in which declarations are processed
+ // (which depends on the order in which a map
+ // is iterated through)
+ b /* ERROR "illegal cycle" */ c
+ c d
+ d e
+ e b
+
+ t *t
+
+ U V
+ V *W
+ W U
+
+ P1 *S2
+ P2 P1
+
+ S0 struct {
+ }
+ S1 struct {
+ a, b, c int
+ u, v, a /* ERROR "redeclared" */ float32
+ }
+ S2 struct {
+ U // anonymous field
+ // TODO(gri) recognize double-declaration below
+ // U /* ERROR "redeclared" */ int
+ }
+ S3 struct {
+ x S2
+ }
+ S4/* ERROR "illegal cycle" */ struct {
+ S4
+ }
+ S5 /* ERROR "illegal cycle" */ struct {
+ S6
+ }
+ S6 struct {
+ field S7
+ }
+ S7 struct {
+ S5
+ }
+
+ L1 []L1
+ L2 []int
+
+ A1 [10.0]int
+ A2 /* ERROR "illegal cycle" */ [10]A2
+ A3 /* ERROR "illegal cycle" */ [10]struct {
+ x A4
+ }
+ A4 [10]A3
+
+ F1 func()
+ F2 func(x, y, z float32)
+ F3 func(x, y, x /* ERROR "redeclared" */ float32)
+ F4 func() (x, y, x /* ERROR "redeclared" */ float32)
+ F5 func(x int) (x /* ERROR "redeclared" */ float32)
+ F6 func(x ...int)
+
+ I1 interface{}
+ I2 interface {
+ m1()
+ }
+ I3 interface { /* ERROR "multiple methods named m1" */
+ m1()
+ m1 /* ERROR "redeclared" */ ()
+ }
+ I4 interface {
+ m1(x, y, x /* ERROR "redeclared" */ float32)
+ m2() (x, y, x /* ERROR "redeclared" */ float32)
+ m3(x int) (x /* ERROR "redeclared" */ float32)
+ }
+ I5 interface {
+ m1(I5)
+ }
+ I6 interface {
+ S0 /* ERROR "not an interface" */
+ }
+ I7 interface {
+ I1
+ I1
+ }
+ I8 /* ERROR "illegal cycle" */ interface {
+ I8
+ }
+ // Use I09 (rather than I9) because it appears lexically before
+ // I10 so that we get the illegal cycle here rather then in the
+ // declaration of I10. If the implementation sorts by position
+ // rather than name, the error message will still be here.
+ I09 /* ERROR "illegal cycle" */ interface {
+ I10
+ }
+ I10 interface {
+ I11
+ }
+ I11 interface {
+ I09
+ }
+
+ C1 chan int
+ C2 <-chan int
+ C3 chan<- C3
+ C4 chan C5
+ C5 chan C6
+ C6 chan C4
+
+ M1 map[Last]string
+ M2 map[string]M2
+
+ Last int
+)
diff --git a/src/pkg/go/types/testdata/decls1.src b/src/pkg/go/types/testdata/decls1.src
new file mode 100644
index 000000000..2251f457f
--- /dev/null
+++ b/src/pkg/go/types/testdata/decls1.src
@@ -0,0 +1,132 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// variable declarations
+
+package decls1
+
+import (
+ "math"
+)
+
+// Global variables without initialization
+var (
+ a, b bool
+ c byte
+ d uint8
+ r rune
+ i int
+ j, k, l int
+ x, y float32
+ xx, yy float64
+ u, v complex64
+ uu, vv complex128
+ s, t string
+ array []byte
+ iface interface{}
+
+ blank _ /* ERROR "cannot use _" */
+)
+
+// Global variables with initialization
+var (
+ s1 = i + j
+ s2 = i /* ERROR "mismatched types" */ + x
+ s3 = c + d
+ s4 = s + t
+ s5 = s /* ERROR "invalid operation" */ / t
+ s6 = array[t1]
+ s7 = array[x /* ERROR "index" */]
+ s8 = &a
+ s10 = &42 /* ERROR "cannot take address" */
+ s11 = &v
+ s12 = -(u + *t11) / *&v
+ s13 = a /* ERROR "shifted operand" */ << d
+ s14 = i << j /* ERROR "must be unsigned" */
+ s18 = math.Pi * 10.0
+ s19 = s1 /* ERROR "cannot call" */ ()
+ s20 = f0 /* ERROR "no value" */ ()
+ s21 = f6(1, s1, i)
+ s22 = f6(1, s1, uu /* ERROR "cannot assign" */ )
+
+ t1 int = i + j
+ t2 int = i /* ERROR "mismatched types" */ + x
+ t3 int = c /* ERROR "cannot assign" */ + d
+ t4 string = s + t
+ t5 string = s /* ERROR "invalid operation" */ / t
+ t6 byte = array[t1]
+ t7 byte = array[x /* ERROR "index" */]
+ t8 *int = & /* ERROR "cannot assign" */ a
+ t10 *int = &42 /* ERROR "cannot take address" */
+ t11 *complex64 = &v
+ t12 complex64 = -(u + *t11) / *&v
+ t13 int = a /* ERROR "shifted operand" */ << d
+ t14 int = i << j /* ERROR "must be unsigned" */
+ t15 math /* ERROR "not in selector" */
+ t16 math /* ERROR "unexported" */ .xxx
+ t17 math /* ERROR "not a type" */ .Pi
+ t18 float64 = math.Pi * 10.0
+ t19 int = t1 /* ERROR "cannot call" */ ()
+ t20 int = f0 /* ERROR "no value" */ ()
+)
+
+// Various more complex expressions
+var (
+ u1 = x /* ERROR "not an interface" */ .(int)
+ u2 = iface.([]int)
+ u3 = iface.(a /* ERROR "not a type" */ )
+ u4, ok = iface.(int)
+ u5 /* ERROR "assignment count mismatch" */ , ok2, ok3 = iface.(int)
+)
+
+// Constant expression initializations
+var (
+ v1 = 1 /* ERROR "cannot convert" */ + "foo"
+ v2 = c + 255
+ v3 = c + 256 /* ERROR "overflows" */
+ v4 = r + 2147483647
+ v5 = r + 2147483648 /* ERROR "overflows" */
+ v6 = 42
+ v7 = v6 + 9223372036854775807
+ v8 = v6 + 9223372036854775808 /* ERROR "overflows" */
+ v9 = i + 1 << 10
+ v10 byte = 1024 /* ERROR "overflows" */
+ v11 = xx/yy*yy - xx
+ v12 = true && false
+ v13 = nil /* ERROR "use of untyped nil" */
+)
+
+// Multiple assignment expressions
+var (
+ m1a, m1b = 1, 2
+ m2a /* ERROR "assignment count mismatch" */ , m2b, m2c = 1, 2
+ m3a /* ERROR "assignment count mismatch" */ , m3b = 1, 2, 3
+)
+
+// Declaration of parameters and results
+func f0() {}
+func f1(a /* ERROR "not a type" */) {}
+func f2(a, b, c d /* ERROR "not a type" */) {}
+
+func f3() int {}
+func f4() a /* ERROR "not a type" */ {}
+func f5() (a, b, c d /* ERROR "not a type" */) {}
+
+func f6(a, b, c int) complex128 { return 0 }
+
+// Declaration of receivers
+type T struct{}
+
+func (T) m0() {}
+func (*T) m1() {}
+func (x T) m2() {}
+func (x *T) m3() {}
+
+
+// Initialization functions
+func init() {}
+func /* ERROR "no arguments and no return values" */ init(int) {}
+func /* ERROR "no arguments and no return values" */ init() int { return 0 }
+func /* ERROR "no arguments and no return values" */ init(int) int {}
+func (T) init(int) int { return 0 }
diff --git a/src/pkg/go/types/testdata/decls2a.src b/src/pkg/go/types/testdata/decls2a.src
new file mode 100644
index 000000000..3867be737
--- /dev/null
+++ b/src/pkg/go/types/testdata/decls2a.src
@@ -0,0 +1,67 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// method declarations
+
+package decls2
+
+import "time"
+
+// T1 declared before its methods.
+type T1 struct{
+ f int
+}
+
+func (T1) m() {}
+func (T1) m /* ERROR "redeclared" */ () {}
+func (x *T1) f /* ERROR "field and method" */ () {}
+
+// T2's method declared before the type.
+func (*T2) f /* ERROR "field and method" */ () {}
+
+type T2 struct {
+ f int
+}
+
+// Methods declared without a declared type.
+func (undeclared /* ERROR "undeclared" */) m() {}
+func (x *undeclared /* ERROR "undeclared" */) m() {}
+
+// TODO(gri) try to get rid of double error reporting here
+func (pi /* ERROR "not a type" */) m1() {}
+func (x pi /* ERROR "not a type" */) m2() {}
+func (x *pi /* ERROR "not a type" */ ) m3() {} // TODO(gri) not closing the last /* comment crashes the system
+
+// Blank types.
+type _ struct { m int }
+type _ struct { m int }
+
+// TODO(gri) blank idents not fully checked - disabled for now
+// func (_ /* ERROR "cannot use _" */) m() {}
+// func (_ /* ERROR "cannot use _" */) m() {}
+
+// Methods with receiver base type declared in another file.
+func (T3) m1() {}
+func (*T3) m2() {}
+func (x T3) m3() {}
+func (x *T3) f /* ERROR "field and method" */ () {}
+
+// Methods of non-struct type.
+type T4 func()
+
+func (self T4) m() func() { return self }
+
+// Methods associated with an interface.
+type T5 interface {
+ m() int
+}
+
+func (T5 /* ERROR "invalid receiver" */) m1() {}
+func (T5 /* ERROR "invalid receiver" */) m2() {}
+
+// Methods associated with non-local or unnamed types.
+func (int /* ERROR "non-local type" */ ) m() {}
+func ([ /* ERROR "expected" */ ]int) m() {}
+func (time /* ERROR "expected" */ .Time) m() {}
+func (x interface /* ERROR "expected" */ {}) m() {}
diff --git a/src/pkg/go/types/testdata/decls2b.src b/src/pkg/go/types/testdata/decls2b.src
new file mode 100644
index 000000000..c7f9ddf01
--- /dev/null
+++ b/src/pkg/go/types/testdata/decls2b.src
@@ -0,0 +1,28 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// method declarations
+
+package decls2
+
+const pi = 3.1415
+
+func (T1) m /* ERROR "redeclared" */ () {}
+
+type T3 struct {
+ f *T3
+}
+
+type T6 struct {
+ x int
+}
+
+func (t *T6) m1() int {
+ return t.x
+}
+
+func f() {
+ var t *T6
+ t.m1()
+} \ No newline at end of file
diff --git a/src/pkg/go/types/testdata/decls3.src b/src/pkg/go/types/testdata/decls3.src
new file mode 100644
index 000000000..6aa9f90e9
--- /dev/null
+++ b/src/pkg/go/types/testdata/decls3.src
@@ -0,0 +1,253 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// embedded types
+
+package decls3
+
+// fields with the same name at the same level cancel each other out
+
+func _() {
+ type (
+ T1 struct { X int }
+ T2 struct { X int }
+ T3 struct { T1; T2 } // X is embedded twice at the same level via T1->X, T2->X
+ )
+
+ var t T3
+ _ = t /* ERROR "no single field or method" */ .X
+}
+
+func _() {
+ type (
+ T1 struct { X int }
+ T2 struct { T1 }
+ T3 struct { T1 }
+ T4 struct { T2; T3 } // X is embedded twice at the same level via T2->T1->X, T3->T1->X
+ )
+
+ var t T4
+ _ = t /* ERROR "no single field or method" */ .X
+}
+
+func issue4355() {
+ type (
+ T1 struct {X int}
+ T2 struct {T1}
+ T3 struct {T2}
+ T4 struct {T2}
+ T5 struct {T3; T4} // X is embedded twice at the same level via T3->T2->T1->X, T4->T2->T1->X
+ )
+
+ var t T5
+ _ = t /* ERROR "no single field or method" */ .X
+}
+
+// Embedded fields can be predeclared types.
+
+func _() {
+ type T0 struct{
+ int
+ float32
+ f int
+ }
+ var x T0
+ _ = x.int
+ _ = x.float32
+ _ = x.f
+
+ type T1 struct{
+ T0
+ }
+ var y T1
+ _ = y.int
+ _ = y.float32
+ _ = y.f
+}
+
+// Borrowed from the FieldByName test cases in reflect/all_test.go.
+
+type D1 struct {
+ d int
+}
+type D2 struct {
+ d int
+}
+
+type S0 struct {
+ A, B, C int
+ D1
+ D2
+}
+
+type S1 struct {
+ B int
+ S0
+}
+
+type S2 struct {
+ A int
+ *S1
+}
+
+type S1x struct {
+ S1
+}
+
+type S1y struct {
+ S1
+}
+
+type S3 struct {
+ S1x
+ S2
+ D, E int
+ *S1y
+}
+
+type S4 struct {
+ *S4
+ A int
+}
+
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+ S6
+ S7
+ S8
+}
+
+type S6 struct {
+ X int
+}
+
+type S7 S6
+
+type S8 struct {
+ S9
+}
+
+type S9 struct {
+ X int
+ Y int
+}
+
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+ S11
+ S12
+ S13
+}
+
+type S11 struct {
+ S6
+}
+
+type S12 struct {
+ S6
+}
+
+type S13 struct {
+ S8
+}
+
+func _() {
+ _ = struct /* ERROR "no single field or method" */ {}{}.Foo
+ _ = S0{}.A
+ _ = S0 /* ERROR "no single field or method" */ {}.D
+ _ = S1{}.A
+ _ = S1{}.B
+ _ = S1{}.S0
+ _ = S1{}.C
+ _ = S2{}.A
+ _ = S2{}.S1
+ _ = S2{}.B
+ _ = S2{}.C
+ _ = S2 /* ERROR "no single field or method" */ {}.D
+ _ = S3 /* ERROR "no single field or method" */ {}.S1
+ _ = S3{}.A
+ _ = S3 /* ERROR "no single field or method" */ {}.B
+ _ = S3{}.D
+ _ = S3{}.E
+ _ = S4{}.A
+ _ = S4 /* ERROR "no single field or method" */ {}.B
+ _ = S5 /* ERROR "no single field or method" */ {}.X
+ _ = S5{}.Y
+ _ = S10 /* ERROR "no single field or method" */ {}.X
+ _ = S10{}.Y
+}
+
+// Borrowed from the FieldByName benchmark in reflect/all_test.go.
+
+type R0 struct {
+ *R1
+ *R2
+ *R3
+ *R4
+}
+
+type R1 struct {
+ *R5
+ *R6
+ *R7
+ *R8
+}
+
+type R2 R1
+type R3 R1
+type R4 R1
+
+type R5 struct {
+ *R9
+ *R10
+ *R11
+ *R12
+}
+
+type R6 R5
+type R7 R5
+type R8 R5
+
+type R9 struct {
+ *R13
+ *R14
+ *R15
+ *R16
+}
+
+type R10 R9
+type R11 R9
+type R12 R9
+
+type R13 struct {
+ *R17
+ *R18
+ *R19
+ *R20
+}
+
+type R14 R13
+type R15 R13
+type R16 R13
+
+type R17 struct {
+ *R21
+ *R22
+ *R23
+ *R24
+}
+
+type R18 R17
+type R19 R17
+type R20 R17
+
+type R21 struct {
+ X int
+}
+
+type R22 R21
+type R23 R21
+type R24 R21
+
+var _ = R0 /* ERROR "no single field or method" */ {}.X \ No newline at end of file
diff --git a/src/pkg/go/types/testdata/exports.go b/src/pkg/go/types/testdata/exports.go
new file mode 100644
index 000000000..8ee28b094
--- /dev/null
+++ b/src/pkg/go/types/testdata/exports.go
@@ -0,0 +1,89 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is used to generate an object file which
+// serves as test file for gcimporter_test.go.
+
+package exports
+
+import (
+ "go/ast"
+)
+
+// Issue 3682: Correctly read dotted identifiers from export data.
+const init1 = 0
+
+func init() {}
+
+const (
+ C0 int = 0
+ C1 = 3.14159265
+ C2 = 2.718281828i
+ C3 = -123.456e-789
+ C4 = +123.456E+789
+ C5 = 1234i
+ C6 = "foo\n"
+ C7 = `bar\n`
+)
+
+type (
+ T1 int
+ T2 [10]int
+ T3 []int
+ T4 *int
+ T5 chan int
+ T6a chan<- int
+ T6b chan (<-chan int)
+ T6c chan<- (chan int)
+ T7 <-chan *ast.File
+ T8 struct{}
+ T9 struct {
+ a int
+ b, c float32
+ d []string `go:"tag"`
+ }
+ T10 struct {
+ T8
+ T9
+ _ *T10
+ }
+ T11 map[int]string
+ T12 interface{}
+ T13 interface {
+ m1()
+ m2(int) float32
+ }
+ T14 interface {
+ T12
+ T13
+ m3(x ...struct{}) []T9
+ }
+ T15 func()
+ T16 func(int)
+ T17 func(x int)
+ T18 func() float32
+ T19 func() (x float32)
+ T20 func(...interface{})
+ T21 struct{ next *T21 }
+ T22 struct{ link *T23 }
+ T23 struct{ link *T22 }
+ T24 *T24
+ T25 *T26
+ T26 *T27
+ T27 *T25
+ T28 func(T28) T28
+)
+
+var (
+ V0 int
+ V1 = -991.0
+)
+
+func F1() {}
+func F2(x int) {}
+func F3() int { return 0 }
+func F4() float32 { return 0 }
+func F5(a, b, c int, u, v, w struct{ x, y T1 }, more ...interface{}) (p, q, r chan<- T10)
+
+func (p *T1) M1()
diff --git a/src/pkg/go/types/testdata/expr0.src b/src/pkg/go/types/testdata/expr0.src
new file mode 100644
index 000000000..8d057f63c
--- /dev/null
+++ b/src/pkg/go/types/testdata/expr0.src
@@ -0,0 +1,161 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// unary expressions
+
+package expr0
+
+var (
+ // bool
+ b0 = true
+ b1 bool = b0
+ b2 = !true
+ b3 = !b1
+ b4 bool = !true
+ b5 bool = !b4
+ b6 = +b0 /* ERROR "not defined" */
+ b7 = -b0 /* ERROR "not defined" */
+ b8 = ^b0 /* ERROR "not defined" */
+ b9 = *b0 /* ERROR "cannot indirect" */
+ b10 = &true /* ERROR "cannot take address" */
+ b11 = &b0
+ b12 = <-b0 /* ERROR "cannot receive" */
+
+ // int
+ i0 = 1
+ i1 int = i0
+ i2 = +1
+ i3 = +i0
+ i4 int = +1
+ i5 int = +i4
+ i6 = -1
+ i7 = -i0
+ i8 int = -1
+ i9 int = -i4
+ i10 = !i0 /* ERROR "not defined" */
+ i11 = ^1
+ i12 = ^i0
+ i13 int = ^1
+ i14 int = ^i4
+ i15 = *i0 /* ERROR "cannot indirect" */
+ i16 = &i0
+ i17 = *i16
+ i18 = <-i16 /* ERROR "cannot receive" */
+
+ // uint
+ u0 = uint(1)
+ u1 uint = u0
+ u2 = +1
+ u3 = +u0
+ u4 uint = +1
+ u5 uint = +u4
+ u6 = -1
+ u7 = -u0
+ u8 uint = - /* ERROR "overflows" */ 1
+ u9 uint = -u4
+ u10 = !u0 /* ERROR "not defined" */
+ u11 = ^1
+ u12 = ^i0
+ u13 uint = ^ /* ERROR "overflows" */ 1
+ u14 uint = ^u4
+ u15 = *u0 /* ERROR "cannot indirect" */
+ u16 = &u0
+ u17 = *u16
+ u18 = <-u16 /* ERROR "cannot receive" */
+ u19 = ^uint(0)
+
+ // float64
+ f0 = float64(1)
+ f1 float64 = f0
+ f2 = +1
+ f3 = +f0
+ f4 float64 = +1
+ f5 float64 = +f4 /* ERROR not defined */
+ f6 = -1
+ f7 = -f0
+ f8 float64 = -1
+ f9 float64 = -f4
+ f10 = !f0 /* ERROR "not defined" */
+ f11 = ^1
+ f12 = ^i0
+ f13 float64 = ^1
+ f14 float64 = ^f4 /* ERROR "not defined" */
+ f15 = *f0 /* ERROR "cannot indirect" */
+ f16 = &f0
+ f17 = *u16
+ f18 = <-u16 /* ERROR "cannot receive" */
+
+ // complex128
+ c0 = complex128(1)
+ c1 complex128 = c0
+ c2 = +1
+ c3 = +c0
+ c4 complex128 = +1
+ c5 complex128 = +c4 /* ERROR not defined */
+ c6 = -1
+ c7 = -c0
+ c8 complex128 = -1
+ c9 complex128 = -c4
+ c10 = !c0 /* ERROR "not defined" */
+ c11 = ^1
+ c12 = ^i0
+ c13 complex128 = ^1
+ c14 complex128 = ^c4 /* ERROR "not defined" */
+ c15 = *c0 /* ERROR "cannot indirect" */
+ c16 = &c0
+ c17 = *u16
+ c18 = <-u16 /* ERROR "cannot receive" */
+
+ // string
+ s0 = "foo"
+ s1 = +"foo" /* ERROR "not defined" */
+ s2 = -s0 /* ERROR "not defined" */
+ s3 = !s0 /* ERROR "not defined" */
+ s4 = ^s0 /* ERROR "not defined" */
+ s5 = *s4 /* ERROR "cannot indirect" */
+ s6 = &s4
+ s7 = *s6
+ s8 = <-s7 /* ERROR "cannot receive" */
+
+ // channel
+ ch chan int
+ rc <-chan float64
+ sc chan <- string
+ ch0 = +ch /* ERROR "not defined" */
+ ch1 = -ch /* ERROR "not defined" */
+ ch2 = !ch /* ERROR "not defined" */
+ ch3 = ^ch /* ERROR "not defined" */
+ ch4 = *ch /* ERROR "cannot indirect" */
+ ch5 = &ch
+ ch6 = *ch5
+ ch7 = <-ch
+ ch8 = <-rc
+ ch9 = <-sc /* ERROR "cannot receive" */
+)
+
+// address of composite literals
+type T struct{x, y int}
+
+func f() T { return T{} }
+
+var (
+ _ = &T{1, 2}
+ _ = &[...]int{}
+ _ = &[]int{}
+ _ = &[]int{}
+ _ = &map[string]T{}
+ _ = &(T{1, 2})
+ _ = &((((T{1, 2}))))
+ _ = &f /* ERROR "cannot take address" */ ()
+)
+
+// recursive pointer types
+type P *P
+
+var (
+ p1 P = new(P)
+ p2 P = *p1
+ p3 P = &p2
+)
+
diff --git a/src/pkg/go/types/testdata/expr1.src b/src/pkg/go/types/testdata/expr1.src
new file mode 100644
index 000000000..8ef0aed6d
--- /dev/null
+++ b/src/pkg/go/types/testdata/expr1.src
@@ -0,0 +1,7 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// binary expressions
+
+package expr1
diff --git a/src/pkg/go/types/testdata/expr2.src b/src/pkg/go/types/testdata/expr2.src
new file mode 100644
index 000000000..674be4005
--- /dev/null
+++ b/src/pkg/go/types/testdata/expr2.src
@@ -0,0 +1,23 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// comparisons
+
+package expr2
+
+func _bool() {
+ const t = true == true
+ const f = true == false
+ _ = t /* ERROR "cannot compare" */ < f
+ _ = 0 /* ERROR "cannot convert" */ == t
+ var b bool
+ var x, y float32
+ b = x < y
+ _ = struct{b bool}{x < y}
+}
+
+// corner cases
+var (
+ v0 = nil /* ERROR "cannot compare" */ == nil
+) \ No newline at end of file
diff --git a/src/pkg/go/types/testdata/expr3.src b/src/pkg/go/types/testdata/expr3.src
new file mode 100644
index 000000000..ff17f2eee
--- /dev/null
+++ b/src/pkg/go/types/testdata/expr3.src
@@ -0,0 +1,463 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// various expressions
+
+package expr3
+
+func shifts1() {
+ var (
+ i0 int
+ u0 uint
+ )
+
+ var (
+ v0 = 1<<0
+ v1 = 1<<i0 /* ERROR "must be unsigned" */
+ v2 = 1<<u0
+ v3 = 1<<"foo" /* ERROR "must be unsigned" */
+ v4 = 1<<- /* ERROR "stupid shift" */ 1
+ v5 = 1<<1025 /* ERROR "stupid shift" */
+ v6 = 1 /* ERROR "overflows" */ <<100
+
+ v10 uint = 1 << 0
+ v11 uint = 1 << u0
+ v12 float32 = 1 /* ERROR "must be integer" */ << u0
+ )
+}
+
+func shifts2() {
+ // from the spec
+ var (
+ s uint = 33
+ i = 1<<s // 1 has type int
+ j int32 = 1<<s // 1 has type int32; j == 0
+ k = uint64(1<<s) // 1 has type uint64; k == 1<<33
+ m int = 1.0<<s // 1.0 has type int
+ n = 1.0<<s != 0 // 1.0 has type int; n == false if ints are 32bits in size
+ o = 1<<s == 2<<s // 1 and 2 have type int; o == true if ints are 32bits in size
+ p = 1<<s == 1<<33 // illegal if ints are 32bits in size: 1 has type int, but 1<<33 overflows int
+ u = 1.0 /* ERROR "must be integer" */ <<s // illegal: 1.0 has type float64, cannot shift
+ v float32 = 1 /* ERROR "must be integer" */ <<s // illegal: 1 has type float32, cannot shift
+ w int64 = 1.0<<33 // 1.0<<33 is a constant shift expression
+ )
+}
+
+func shifts3(a int16, b float32) {
+ var (
+ s uint = 11
+ u = 1 /* ERROR "must be integer" */ <<s + 1.0
+ v complex128 = 1 /* ERROR "must be integer" */ << s + 1.0 /* ERROR "must be integer" */ << s + 1
+ )
+ x := 1.0 /* ERROR "must be integer" */ <<s + 1
+ shifts3(1.0 << s, 1 /* ERROR "must be integer" */ >> s)
+ // TODO(gri) add more tests (systematically)
+}
+
+func shifts4() {
+ // from src/pkg/compress/lzw/reader.go:90
+ {
+ var d struct {
+ bits uint32
+ width uint
+ }
+ _ = uint16(d.bits & (1<<d.width - 1))
+ }
+
+ // from src/pkg/debug/dwarf/buf.go:116
+ {
+ var ux uint64
+ var bits uint
+ x := int64(ux)
+ if x&(1<<(bits-1)) != 0 {}
+ }
+
+ // from src/pkg/encoding/asn1/asn1.go:160
+ {
+ var bytes []byte
+ if bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {}
+ }
+
+ // from src/pkg/math/big/rat.go:140
+ {
+ var exp int
+ var mantissa uint64
+ shift := uint64(-1022 - (exp - 1)) // [1..53)
+ _ = mantissa & (1<<shift - 1)
+ }
+
+ // from src/pkg/net/interface.go:51
+ {
+ type Flags uint
+ var f Flags
+ var i int
+ if f&(1<<uint(i)) != 0 {}
+ }
+
+ // from src/pkg/runtime/softfloat64.go:234
+ {
+ var gm uint64
+ var shift uint
+ _ = gm & (1<<shift - 1)
+ }
+
+ // from src/pkg/strconv/atof.go:326
+ {
+ var mant uint64
+ var mantbits uint
+ if mant == 2<<mantbits {}
+ }
+
+ // from src/pkg/syscall/route_bsd.go:82
+ {
+ var Addrs int32
+ const rtaRtMask = 1
+ var i uint
+ if Addrs&rtaRtMask&(1<<i) == 0 {}
+ }
+
+ // from src/pkg/text/scanner/scanner.go:540
+ {
+ var s struct { Whitespace uint64 }
+ var ch rune
+ for s.Whitespace&(1<<uint(ch)) != 0 {}
+ }
+}
+
+// TODO(gri) The error messages below depond on adjusting the spec
+// to reflect what gc is doing at the moment (the spec
+// asks for run-time errors at the moment - see issue 4231).
+//
+func indexes() {
+ _ = 1 /* ERROR "cannot index" */ [0]
+ _ = indexes /* ERROR "cannot index" */ [0]
+ _ = ( /* ERROR "cannot slice" */ 12 + 3)[1:2]
+
+ var a [10]int
+ _ = a[true /* ERROR "must be integer" */ ]
+ _ = a["foo" /* ERROR "must be integer" */ ]
+ _ = a[1.1 /* ERROR "must be integer" */ ]
+ _ = a[1.0]
+ _ = a[- /* ERROR "index .* negative" */ 1]
+ _ = a[- /* ERROR "index .* negative" */ 1 :]
+ _ = a[: - /* ERROR "index .* negative" */ 1]
+ var a0 int
+ a0 = a[0]
+ var a1 int32
+ a1 = a /* ERROR "cannot assign" */ [1]
+ _ = a[9]
+ _ = a[10 /* ERROR "index .* out of bounds" */ ]
+ _ = a[1 /* ERROR "stupid index" */ <<100]
+ _ = a[10:]
+ _ = a[:10]
+ _ = a[10:10]
+ _ = a[11 /* ERROR "index .* out of bounds" */ :]
+ _ = a[: 11 /* ERROR "index .* out of bounds" */ ]
+ _ = a[: 1 /* ERROR "stupid index" */ <<100]
+
+ pa := &a
+ _ = pa[9]
+ _ = pa[10 /* ERROR "index .* out of bounds" */ ]
+ _ = pa[1 /* ERROR "stupid index" */ <<100]
+ _ = pa[10:]
+ _ = pa[:10]
+ _ = pa[10:10]
+ _ = pa[11 /* ERROR "index .* out of bounds" */ :]
+ _ = pa[: 11 /* ERROR "index .* out of bounds" */ ]
+ _ = pa[: 1 /* ERROR "stupid index" */ <<100]
+
+ var b [0]int
+ _ = b[0 /* ERROR "index .* out of bounds" */ ]
+ _ = b[:]
+ _ = b[0:]
+ _ = b[:0]
+ _ = b[0:0]
+
+ var s []int
+ _ = s[- /* ERROR "index .* negative" */ 1]
+ _ = s[- /* ERROR "index .* negative" */ 1 :]
+ _ = s[: - /* ERROR "index .* negative" */ 1]
+ _ = s[0]
+ _ = s[1 : 2]
+ _ = s[2 /* ERROR "inverted slice range" */ : 1]
+ _ = s[2 :]
+ _ = s[: 1 /* ERROR "stupid index" */ <<100]
+ _ = s[1 /* ERROR "stupid index" */ <<100 :]
+ _ = s[1 /* ERROR "stupid index" */ <<100 : 1 /* ERROR "stupid index" */ <<100]
+
+ var t string
+ _ = t[- /* ERROR "index .* negative" */ 1]
+ _ = t[- /* ERROR "index .* negative" */ 1 :]
+ _ = t[: - /* ERROR "index .* negative" */ 1]
+ var t0 byte
+ t0 = t[0]
+ var t1 rune
+ t1 = t /* ERROR "cannot assign" */ [2]
+ _ = ("foo" + "bar")[5]
+ _ = ("foo" + "bar")[6 /* ERROR "index .* out of bounds" */ ]
+
+ const c = "foo"
+ _ = c[- /* ERROR "index .* negative" */ 1]
+ _ = c[- /* ERROR "index .* negative" */ 1 :]
+ _ = c[: - /* ERROR "index .* negative" */ 1]
+ var c0 byte
+ c0 = c[0]
+ var c2 float32
+ c2 = c /* ERROR "cannot assign" */ [2]
+ _ = c[3 /* ERROR "index .* out of bounds" */ ]
+ _ = ""[0 /* ERROR "index .* out of bounds" */ ]
+
+ _ = s[1<<30] // no compile-time error here
+
+ // issue 4913
+ type mystring string
+ var ss string
+ var ms mystring
+ var i, j int
+ ss = "foo"[1:2]
+ ss = "foo"[i:j]
+ ms = "foo" /* ERROR "cannot assign" */ [1:2]
+ ms = "foo" /* ERROR "cannot assign" */ [i:j]
+}
+
+type T struct {
+ x int
+}
+
+func (*T) m() {}
+
+func method_expressions() {
+ _ = T /* ERROR "no single field or method" */ .a
+ _ = T /* ERROR "has no method" */ .x
+ _ = T.m
+ var f func(*T) = (*T).m
+ var g func(*T) = ( /* ERROR "cannot assign" */ T).m
+}
+
+func struct_literals() {
+ type T0 struct {
+ a, b, c int
+ }
+
+ type T1 struct {
+ T0
+ a, b int
+ u float64
+ s string
+ }
+
+ // keyed elements
+ _ = T1{}
+ _ = T1{a: 0, 1 /* ERROR "mixture of .* elements" */ }
+ _ = T1{aa /* ERROR "unknown field" */ : 0}
+ _ = T1{1 /* ERROR "invalid field name" */ : 0}
+ _ = T1{a: 0, s: "foo", u: 0, a /* ERROR "duplicate field" */: 10}
+ _ = T1{a: "foo" /* ERROR "cannot use" */ }
+ _ = T1{c /* ERROR "unknown field" */ : 0}
+ _ = T1{T0: { /* ERROR "missing type" */ }}
+ _ = T1{T0: T0{}}
+ _ = T1{T0 /* ERROR "invalid field name" */ .a: 0}
+
+ // unkeyed elements
+ _ = T0{1, 2, 3}
+ _ = T0{1, b /* ERROR "mixture" */ : 2, 3}
+ _ = T0{1, 2} /* ERROR "too few values" */
+ _ = T0{1, 2, 3, 4 /* ERROR "too many values" */ }
+ _ = T0{1, "foo" /* ERROR "cannot use" */, 3.4 /* ERROR "cannot use" */}
+}
+
+func array_literals() {
+ type A0 [0]int
+ _ = A0{}
+ _ = A0{0 /* ERROR "index .* out of bounds" */}
+ _ = A0{0 /* ERROR "index .* out of bounds" */ : 0}
+
+ type A1 [10]int
+ _ = A1{}
+ _ = A1{0, 1, 2}
+ _ = A1{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ _ = A1{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 /* ERROR "index .* out of bounds" */ }
+ _ = A1{- /* ERROR "index .* negative" */ 1: 0}
+ _ = A1{8: 8, 9}
+ _ = A1{8: 8, 9, 10 /* ERROR "index .* out of bounds" */ }
+ _ = A1{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ _ = A1{5: 5, 6, 7, 3: 3, 4}
+ _ = A1{5: 5, 6, 7, 3: 3, 4, 5 /* ERROR "duplicate index" */ }
+ _ = A1{10 /* ERROR "index .* out of bounds" */ : 10, 10 /* ERROR "index .* out of bounds" */ : 10}
+ _ = A1{5: 5, 6, 7, 3: 3, 1 /* ERROR "stupid index" */ <<100: 4, 5 /* ERROR "duplicate index" */ }
+ _ = A1{5: 5, 6, 7, 4: 4, 1 /* ERROR "stupid index" */ <<100: 4}
+ _ = A1{2.0}
+ _ = A1{2.1 /* ERROR "cannot use" */ }
+ _ = A1{"foo" /* ERROR "cannot use" */ }
+
+ a0 := [...]int{}
+ assert(len(a0) == 0)
+
+ a1 := [...]int{0, 1, 2}
+ assert(len(a1) == 3)
+ var a13 [3]int
+ var a14 [4]int
+ a13 = a1
+ a14 = a1 /* ERROR "cannot assign" */
+
+ a2 := [...]int{- /* ERROR "index .* negative" */ 1: 0}
+
+ a3 := [...]int{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ assert(len(a3) == 5) // somewhat arbitrary
+
+ a4 := [...]complex128{0, 1, 2, 1<<10-2: -1i, 1i, 400: 10, 12, 14}
+ assert(len(a4) == 1024)
+
+ // from the spec
+ type Point struct { x, y float32 }
+ _ = [...]Point{Point{1.5, -3.5}, Point{0, 0}}
+ _ = [...]Point{{1.5, -3.5}, {0, 0}}
+ _ = [][]int{[]int{1, 2, 3}, []int{4, 5}}
+ _ = [][]int{{1, 2, 3}, {4, 5}}
+ _ = [...]*Point{&Point{1.5, -3.5}, &Point{0, 0}}
+ _ = [...]*Point{{1.5, -3.5}, {0, 0}}
+}
+
+func slice_literals() {
+ type S0 []int
+ _ = S0{}
+ _ = S0{0, 1, 2}
+ _ = S0{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ _ = S0{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+ _ = S0{- /* ERROR "index .* negative" */ 1: 0}
+ _ = S0{8: 8, 9}
+ _ = S0{8: 8, 9, 10}
+ _ = S0{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ _ = S0{5: 5, 6, 7, 3: 3, 4}
+ _ = S0{5: 5, 6, 7, 3: 3, 4, 5 /* ERROR "duplicate index" */ }
+ _ = S0{10: 10, 10 /* ERROR "duplicate index" */ : 10}
+ _ = S0{5: 5, 6, 7, 3: 3, 1 /* ERROR "stupid index" */ <<100: 4, 5 /* ERROR "duplicate index" */ }
+ _ = S0{5: 5, 6, 7, 4: 4, 1 /* ERROR "stupid index" */ <<100: 4}
+ _ = S0{2.0}
+ _ = S0{2.1 /* ERROR "cannot use" */ }
+ _ = S0{"foo" /* ERROR "cannot use" */ }
+
+ // indices must be resolved correctly
+ // (for details, see comment in go/parser/parser.go, method parseElement)
+ index1 := 1
+ _ = S0{index1: 1}
+ _ = S0{index2: 2}
+ _ = S0{index3 /* ERROR "undeclared name" */ : 3}
+}
+
+var index2 int = 2
+
+func map_literals() {
+ type M0 map[string]int
+ type M1 map[bool]int
+ type M2 map[*int]int
+
+ _ = M0{}
+ _ = M0{1 /* ERROR "missing key" */ }
+ _ = M0{1 /* ERROR "cannot use .* as string key" */ : 2}
+ _ = M0{"foo": "bar" /* ERROR "cannot use .* as int value" */ }
+ _ = M0{"foo": 1, "bar": 2, "foo" /* ERROR "duplicate key" */ : 3 }
+
+ // map keys must be resolved correctly
+ // (for details, see comment in go/parser/parser.go, method parseElement)
+ key1 := "foo"
+ _ = M0{key1: 1}
+ _ = M0{key2: 2}
+ _ = M0{key3 /* ERROR "undeclared name" */ : 2}
+
+ _ = M1{true: 1, false: 0}
+ _ = M2{nil: 0, &index2: 1}
+}
+
+var key2 string = "bar"
+
+type I interface {
+ m()
+}
+
+type I2 interface {
+ m(int)
+}
+
+type T1 struct{}
+type T2 struct{}
+
+func (T2) m(int) {}
+
+func type_asserts() {
+ var x int
+ _ = x /* ERROR "not an interface" */ .(int)
+
+ var e interface{}
+ var ok bool
+ x, ok = e.(int)
+
+ var t I
+ _ = t /* ERROR "use of .* outside type switch" */ .(type)
+ _ = t.(T)
+ _ = t.(T1 /* ERROR "missing method m" */ )
+ _ = t.(T2 /* ERROR "wrong type for method m" */ )
+ _ = t.(I2 /* ERROR "wrong type for method m" */ )
+}
+
+func f0() {}
+func f1(x int) {}
+func f2(u float32, s string) {}
+func fs(s []byte) {}
+func fv(x ...int) {}
+func fi(x ... interface{}) {}
+
+func g0() {}
+func g1() int { return 0}
+func g2() (u float32, s string) { return }
+func gs() []byte { return nil }
+
+func _calls() {
+ var x int
+ var y float32
+ var s []int
+
+ f0()
+ _ = f0 /* ERROR "used as value" */ ()
+ f0(g0 /* ERROR "too many arguments" */ )
+
+ f1(0)
+ f1(x)
+ f1(10.0)
+ f1 /* ERROR "too few arguments" */ ()
+ f1(x, y /* ERROR "too many arguments" */ )
+ f1(s /* ERROR "cannot assign" */ )
+ f1(x ... /* ERROR "cannot use ..." */ )
+ f1(g0 /* ERROR "used as value" */ ())
+ f1(g1())
+ // f1(g2()) // TODO(gri) missing position in error message
+
+ f2 /* ERROR "too few arguments" */ ()
+ f2 /* ERROR "too few arguments" */ (3.14)
+ f2(3.14, "foo")
+ f2(x /* ERROR "cannot assign" */ , "foo")
+ f2(g0 /* ERROR "used as value" */ ())
+ f2 /* ERROR "too few arguments" */ (g1 /* ERROR "cannot assign" */ ())
+ f2(g2())
+
+ fs /* ERROR "too few arguments" */ ()
+ fs(g0 /* ERROR "used as value" */ ())
+ fs(g1 /* ERROR "cannot assign" */ ())
+ // fs(g2()) // TODO(gri) missing position in error message
+ fs(gs())
+
+ fv()
+ fv(1, 2.0, x)
+ fv(s /* ERROR "cannot assign" */ )
+ fv(s...)
+ fv(1, s /* ERROR "can only use ... with matching parameter" */ ...)
+ fv(gs /* ERROR "cannot assign" */ ())
+ fv(gs /* ERROR "cannot assign" */ ()...)
+
+ fi()
+ fi(1, 2.0, x, 3.14, "foo")
+ fi(g2())
+ fi(0, g2)
+ fi(0, g2 /* ERROR "2-valued expression" */ ())
+}
diff --git a/src/pkg/go/types/testdata/stmt0.src b/src/pkg/go/types/testdata/stmt0.src
new file mode 100644
index 000000000..9d85de3bb
--- /dev/null
+++ b/src/pkg/go/types/testdata/stmt0.src
@@ -0,0 +1,288 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// statements
+
+package stmt0
+
+func _() {
+ b, i, f, c, s := false, 1, 1.0, 1i, "foo"
+ b = i /* ERROR "cannot assign" */
+ i = f /* ERROR "cannot assign" */
+ f = c /* ERROR "cannot assign" */
+ c = s /* ERROR "cannot assign" */
+ s = b /* ERROR "cannot assign" */
+
+ v0 /* ERROR "mismatch" */, v1, v2 := 1, 2, 3, 4
+
+ b = true
+
+ i += 1
+ i += "foo" /* ERROR "cannot convert.*int" */
+
+ f -= 1
+ f -= "foo" /* ERROR "cannot convert.*float64" */
+
+ c *= 1
+ c /= 0 /* ERROR "division by zero" */
+
+ s += "bar"
+ s += 1 /* ERROR "cannot convert.*string" */
+
+ var u64 uint64
+ u64 += 1<<u64
+
+ undeclared /* ERROR "undeclared" */ = 991
+}
+
+func incdecs() {
+ const c = 3.14
+ c /* ERROR "cannot assign" */ ++
+ s := "foo"
+ s /* ERROR "cannot convert" */ --
+ 3.14 /* ERROR "cannot assign" */ ++
+ var (
+ x int
+ y float32
+ z complex128
+ )
+ x++
+ y--
+ z++
+}
+
+func sends() {
+ var ch chan int
+ var rch <-chan int
+ var x int
+ x /* ERROR "cannot send" */ <- x
+ rch /* ERROR "cannot send" */ <- x
+ ch <- "foo" /* ERROR "cannot convert" */
+ ch <- x
+}
+
+func selects() {
+ select {}
+ var (
+ ch chan int
+ sc chan <- bool
+ x int
+ )
+ select {
+ case <-ch:
+ ch <- x
+ case t, ok := <-ch:
+ x = t
+ case <-sc /* ERROR "cannot receive from send-only channel" */ :
+ }
+ select {
+ default:
+ default /* ERROR "multiple defaults" */ :
+ }
+}
+
+func gos() {
+ go 1 /* ERROR "expected function/method call" */
+ go gos()
+ var c chan int
+ go close(c)
+ go len(c) // TODO(gri) this should not be legal
+}
+
+func defers() {
+ defer 1 /* ERROR "expected function/method call" */
+ defer defers()
+ var c chan int
+ defer close(c)
+ defer len(c) // TODO(gri) this should not be legal
+}
+
+func switches() {
+ var x int
+
+ switch x {
+ default:
+ default /* ERROR "multiple defaults" */ :
+ }
+
+ switch {
+ case 1 /* ERROR "cannot convert" */ :
+ }
+
+ switch int32(x) {
+ case 1, 2:
+ case x /* ERROR "cannot compare" */ :
+ }
+
+ switch x {
+ case 1 /* ERROR "overflows int" */ << 100:
+ }
+
+ switch x {
+ case 1:
+ case 1 /* ERROR "duplicate case" */ :
+ case 2, 3, 4:
+ case 1 /* ERROR "duplicate case" */ :
+ }
+
+ // TODO(gri) duplicate 64bit values that don't fit into an int64 are not yet detected
+ switch uint64(x) {
+ case 1<<64-1:
+ case 1<<64-1:
+ }
+}
+
+type I interface {
+ m()
+}
+
+type I2 interface {
+ m(int)
+}
+
+type T struct{}
+type T1 struct{}
+type T2 struct{}
+
+func (T) m() {}
+func (T2) m(int) {}
+
+func typeswitches() {
+ var i int
+ var x interface{}
+
+ switch x.(type) {}
+ switch (x /* ERROR "outside type switch" */ .(type)) {}
+
+ switch x.(type) {
+ default:
+ default /* ERROR "multiple defaults" */ :
+ }
+
+ switch x := x.(type) {}
+
+ switch x := x.(type) {
+ case int:
+ var y int = x
+ }
+
+ switch x := i /* ERROR "not an interface" */ .(type) {}
+
+ switch t := x.(type) {
+ case nil:
+ var v bool = t /* ERROR "cannot assign" */
+ case int:
+ var v int = t
+ case float32, complex64:
+ var v float32 = t /* ERROR "cannot assign" */
+ default:
+ var v float32 = t /* ERROR "cannot assign" */
+ }
+
+ var t I
+ switch t.(type) {
+ case T:
+ case T1 /* ERROR "missing method m" */ :
+ case T2 /* ERROR "wrong type for method m" */ :
+ case I2 /* ERROR "wrong type for method m" */ :
+ }
+}
+
+func typeswitch0() {
+ switch y := interface{}(nil).(type) {
+ case int:
+ // TODO(gri) y has the wrong type here (type-checking
+ // of captured variable is delayed)
+ // func() int { return y + 0 }()
+ }
+}
+
+func rangeloops() {
+ var (
+ x int
+ a [10]float32
+ b []string
+ p *[10]complex128
+ pp **[10]complex128
+ s string
+ m map[int]bool
+ c chan int
+ sc chan<- int
+ rc <-chan int
+ )
+
+ for _ = range x /* ERROR "cannot range over" */ {}
+ for i := range x /* ERROR "cannot range over" */ {}
+
+ for i := range a {
+ var ii int
+ ii = i
+ }
+ for i, x := range a {
+ var ii int
+ ii = i
+ var xx float64
+ xx = x /* ERROR "cannot assign" */
+ }
+ var ii int
+ var xx float32
+ for ii, xx := range a {}
+
+ for i := range b {
+ var ii int
+ ii = i
+ }
+ for i, x := range b {
+ var ii int
+ ii = i
+ var xx string
+ xx = x
+ }
+
+ for i := range s {
+ var ii int
+ ii = i
+ }
+ for i, x := range s {
+ var ii int
+ ii = i
+ var xx rune
+ xx = x
+ }
+
+ for _, x := range p {
+ var xx complex128
+ xx = x
+ }
+
+ for _, x := range pp /* ERROR "cannot range over" */ {}
+
+ for k := range m {
+ var kk int32
+ kk = k /* ERROR "cannot assign" */
+ }
+ for k, v := range m {
+ var kk int
+ kk = k
+ if v {}
+ }
+
+ for _, _ /* ERROR "only one iteration variable" */ = range c {}
+ for e := range c {
+ var ee int
+ ee = e
+ }
+ for _ = range sc /* ERROR "cannot range over send-only channel" */ {}
+ for _ = range rc {}
+
+ // constant strings
+ const cs = "foo"
+ for i, x := range cs {}
+ for i, x := range "" {
+ var ii int
+ ii = i
+ var xx rune
+ xx = x
+ }
+}
diff --git a/src/pkg/go/types/types.go b/src/pkg/go/types/types.go
new file mode 100644
index 000000000..2f2e579bd
--- /dev/null
+++ b/src/pkg/go/types/types.go
@@ -0,0 +1,236 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import "go/ast"
+
// All types implement the Type interface.
type Type interface {
	// String returns a human-readable representation of the type.
	String() string
	// aType is an unexported marker method that restricts
	// implementations of Type to this package.
	aType()
}
+
// BasicKind describes the kind of basic type.
type BasicKind int

const (
	Invalid BasicKind = iota // type is invalid

	// predeclared types
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	String
	UnsafePointer

	// types for untyped (constant) values
	UntypedBool
	UntypedInt
	UntypedRune
	UntypedFloat
	UntypedComplex
	UntypedString
	UntypedNil

	// aliases: byte and rune share the kind of their underlying type
	Byte = Uint8
	Rune = Int32
)
+
// BasicInfo is a set of flags describing properties of a basic type.
type BasicInfo int

// Properties of basic types.
const (
	IsBoolean BasicInfo = 1 << iota
	IsInteger
	IsUnsigned
	IsFloat
	IsComplex
	IsString
	IsUntyped

	// derived flag sets used to classify groups of basic types
	IsOrdered   = IsInteger | IsFloat | IsString
	IsNumeric   = IsInteger | IsFloat | IsComplex
	IsConstType = IsBoolean | IsNumeric | IsString
)
+
// A Basic represents a basic type.
//
// NOTE: the field order matters — this package initializes Basic values
// with positional composite literals (see the Typ table in universe.go).
type Basic struct {
	Kind BasicKind
	Info BasicInfo
	size int64 // use DefaultSizeof to get size
	Name string
}
+
// An Array represents an array type [Len]Elt.
type Array struct {
	Len int64 // number of elements
	Elt Type  // element type
}

// A Slice represents a slice type []Elt.
type Slice struct {
	Elt Type // element type
}
+
// A QualifiedName is a name qualified with the package that declared the name.
// Note: Pkg may be a fake package (no name, no scope) because the GC compiler's
// export information doesn't provide full information in some cases.
// TODO(gri): Should change Pkg to PkgPath since it's the only thing we care about.
type QualifiedName struct {
	Pkg  *Package // nil only for predeclared error.Error (exported)
	Name string   // unqualified type name for anonymous fields
}
+
+// IsSame reports whether p and q are the same.
+func (p QualifiedName) IsSame(q QualifiedName) bool {
+ // spec:
+ // "Two identifiers are different if they are spelled differently,
+ // or if they appear in different packages and are not exported.
+ // Otherwise, they are the same."
+ if p.Name != q.Name {
+ return false
+ }
+ // p.Name == q.Name
+ return ast.IsExported(p.Name) || p.Pkg.Path == q.Pkg.Path
+}
+
// A Field represents a field of a struct.
type Field struct {
	QualifiedName        // field name, qualified by the declaring package
	Type          Type   // field type
	Tag           string // field tag
	IsAnonymous   bool   // true for anonymous (embedded) fields
}

// A Struct represents a struct type struct{...}.
type Struct struct {
	Fields  []*Field
	offsets []int64 // field offsets in bytes, lazily computed
}
+
+func (typ *Struct) fieldIndex(name QualifiedName) int {
+ for i, f := range typ.Fields {
+ if f.QualifiedName.IsSame(name) {
+ return i
+ }
+ }
+ return -1
+}
+
// A Pointer represents a pointer type *Base.
type Pointer struct {
	Base Type // pointed-to type
}

// A Result represents a (multi-value) function call result.
type Result struct {
	Values []*Var // Signature.Results of the function called
}

// A Signature represents a user-defined function type func(...) (...).
// The receiver, if any, is kept separately in Recv and is not part of Params.
type Signature struct {
	Recv       *Var   // nil if not a method
	Params     []*Var // (incoming) parameters from left to right; or nil
	Results    []*Var // (outgoing) results from left to right; or nil
	IsVariadic bool   // true if the last parameter's type is of the form ...T
}
+
// builtinId is an id of a builtin function.
type builtinId int

// Predeclared builtin functions.
const (
	// Universe scope
	_Append builtinId = iota
	_Cap
	_Close
	_Complex
	_Copy
	_Delete
	_Imag
	_Len
	_Make
	_New
	_Panic
	_Print
	_Println
	_Real
	_Recover

	// Unsafe package
	_Alignof
	_Offsetof
	_Sizeof

	// Testing support
	_Assert
	_Trace
)

// A builtin represents the type of a built-in function.
type builtin struct {
	id          builtinId
	name        string
	nargs       int  // number of arguments (minimum if variadic)
	isVariadic  bool // true if the built-in accepts a variable number of arguments
	isStatement bool // true if the built-in is valid as an expression statement
}
+
// A Method represents a method.
type Method struct {
	QualifiedName            // method name, qualified by the declaring package
	Type          *Signature // method signature
}

// An Interface represents an interface type interface{...}.
type Interface struct {
	Methods []*Method // TODO(gri) consider keeping them in sorted order
}

// A Map represents a map type map[Key]Elt.
type Map struct {
	Key, Elt Type
}

// A Chan represents a channel type chan Elt, <-chan Elt, or chan<- Elt.
type Chan struct {
	Dir ast.ChanDir // channel direction (send, receive, or both)
	Elt Type        // element type
}

// A NamedType represents a named type as declared in a type declaration.
type NamedType struct {
	Obj        *TypeName // corresponding declared object
	Underlying Type      // nil if not fully declared yet; never a *NamedType
	Methods    []*Method // TODO(gri) consider keeping them in sorted order
}
+
// The aType implementations below are empty marker methods: they tie each
// concrete type to the Type interface while preventing types outside this
// package from implementing Type.
func (*Basic) aType()     {}
func (*Array) aType()     {}
func (*Slice) aType()     {}
func (*Struct) aType()    {}
func (*Pointer) aType()   {}
func (*Result) aType()    {}
func (*Signature) aType() {}
func (*builtin) aType()   {}
func (*Interface) aType() {}
func (*Map) aType()       {}
func (*Chan) aType()      {}
func (*NamedType) aType() {}
diff --git a/src/pkg/go/types/types_test.go b/src/pkg/go/types/types_test.go
new file mode 100644
index 000000000..8e228fa67
--- /dev/null
+++ b/src/pkg/go/types/types_test.go
@@ -0,0 +1,171 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests verifying the types associated with an AST after
+// type checking.
+
+package types
+
+import (
+ "go/ast"
+ "go/parser"
+ "testing"
+)
+
+const filename = "<src>"
+
+func makePkg(t *testing.T, src string) (*Package, error) {
+ file, err := parser.ParseFile(fset, filename, src, parser.DeclarationErrors)
+ if err != nil {
+ return nil, err
+ }
+ pkg, err := Check(fset, []*ast.File{file})
+ return pkg, err
+}
+
// A testEntry pairs a source fragment (src) with the string (str) the
// checker is expected to produce for it.
type testEntry struct {
	src, str string
}

// dup returns a testEntry where both src and str are the same.
func dup(s string) testEntry {
	var e testEntry
	e.src = s
	e.str = s
	return e
}
+
// testTypes is the table driving TestTypes: each entry's src is a type
// literal, and str is the string typeString is expected to render for its
// underlying type. dup entries expect the input back unchanged.
var testTypes = []testEntry{
	// basic types
	dup("int"),
	dup("float32"),
	dup("string"),

	// arrays
	dup("[10]int"),

	// slices
	dup("[]int"),
	dup("[][]int"),

	// structs
	dup("struct{}"),
	dup("struct{x int}"),
	{`struct {
		x, y int
		z float32 "foo"
	}`, `struct{x int; y int; z float32 "foo"}`},
	{`struct {
		string
		elems []T
	}`, `struct{string; elems []T}`},

	// pointers
	dup("*int"),
	dup("***struct{}"),
	dup("*struct{a int; b float32}"),

	// functions
	dup("func()"),
	dup("func(x int)"),
	{"func(x, y int)", "func(x int, y int)"},
	{"func(x, y int, z string)", "func(x int, y int, z string)"},
	dup("func(int)"),
	// NOTE(review): src == str here; dup would express the same entry.
	{"func(int, string, byte)", "func(int, string, byte)"},

	dup("func() int"),
	{"func() (string)", "func() string"},
	dup("func() (u int)"),
	{"func() (u, v int, w string)", "func() (u int, v int, w string)"},

	dup("func(int) string"),
	dup("func(x int) string"),
	dup("func(x int) (u string)"),
	{"func(x, y int) (u string)", "func(x int, y int) (u string)"},

	dup("func(...int) string"),
	dup("func(x ...int) string"),
	dup("func(x ...int) (u string)"),
	{"func(x, y ...int) (u string)", "func(x int, y ...int) (u string)"},

	// interfaces
	dup("interface{}"),
	dup("interface{m()}"),
	dup(`interface{m(int) float32; String() string}`),
	// TODO(gri) add test for interface w/ anonymous field

	// maps
	dup("map[string]int"),
	{"map[struct{x, y int}][]byte", "map[struct{x int; y int}][]byte"},

	// channels
	dup("chan int"),
	dup("chan<- func()"),
	dup("<-chan []func() int"),
}
+
+func TestTypes(t *testing.T) {
+ for _, test := range testTypes {
+ src := "package p; type T " + test.src
+ pkg, err := makePkg(t, src)
+ if err != nil {
+ t.Errorf("%s: %s", src, err)
+ continue
+ }
+ typ := underlying(pkg.Scope.Lookup("T").GetType())
+ str := typeString(typ)
+ if str != test.str {
+ t.Errorf("%s: got %s, want %s", test.src, str, test.str)
+ }
+ }
+}
+
// testExprs is the table driving TestExprs: each entry's src is an
// expression, and str is the string exprString is expected to render for
// it (function/composite literals and type assertions are abbreviated).
var testExprs = []testEntry{
	// basic type literals
	dup("x"),
	dup("true"),
	dup("42"),
	dup("3.1415"),
	dup("2.71828i"),
	dup(`'a'`),
	dup(`"foo"`),
	dup("`bar`"),

	// arbitrary expressions
	dup("&x"),
	dup("*&x"),
	dup("(x)"),
	dup("x + y"),
	dup("x + y * 10"),
	dup("t.foo"),
	dup("s[0]"),
	dup("s[x:y]"),
	dup("s[:y]"),
	dup("s[x:]"),
	dup("s[:]"),
	dup("f(1, 2.3)"),
	dup("-f(10, 20)"),
	dup("f(x + y, +3.1415)"),
	{"func(a, b int) {}", "(func literal)"},
	{"func(a, b int) []int {}(1, 2)[x]", "(func literal)(1, 2)[x]"},
	{"[]int{1, 2, 3}", "(composite literal)"},
	{"[]int{1, 2, 3}[x:]", "(composite literal)[x:]"},
	{"i.([]string)", "i.(...)"},
}
+
// TestExprs verifies that exprString renders each testExprs expression as
// expected. The source is only parsed, not type-checked; the wrapping
// declarations supply names (x, y, s, f, i, t) referenced by the test
// expressions.
func TestExprs(t *testing.T) {
	for _, test := range testExprs {
		src := "package p; var _ = " + test.src + "; var (x, y int; s []string; f func(int, float32) int; i interface{}; t interface { foo() })"
		file, err := parser.ParseFile(fset, filename, src, parser.DeclarationErrors)
		if err != nil {
			t.Errorf("%s: %s", src, err)
			continue
		}
		// TODO(gri) writing the code below w/o the decl variable will
		// cause a 386 compiler error (out of fixed registers)
		decl := file.Decls[0].(*ast.GenDecl)
		expr := decl.Specs[0].(*ast.ValueSpec).Values[0]
		str := exprString(expr)
		if str != test.str {
			t.Errorf("%s: got %s, want %s", test.src, str, test.str)
		}
	}
}
diff --git a/src/pkg/go/types/universe.go b/src/pkg/go/types/universe.go
new file mode 100644
index 000000000..b218525c1
--- /dev/null
+++ b/src/pkg/go/types/universe.go
@@ -0,0 +1,146 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the universe and unsafe package scopes.
+
+package types
+
+import (
+ "go/ast"
+ "strings"
+)
+
// Universe-level state, populated once by init below.
var (
	Universe     *Scope   // scope holding all predeclared identifiers
	Unsafe       *Package // package unsafe and its scope
	universeIota *Const   // the predeclared iota constant
)
+
// Predeclared types, indexed by BasicKind.
// Each entry is a positional Basic literal: {Kind, Info, size, Name}.
// A size of 0 appears to mean platform/implementation-dependent — use
// DefaultSizeof to obtain the actual size (see Basic.size); TODO confirm.
var Typ = [...]*Basic{
	Invalid: {Invalid, 0, 0, "invalid type"},

	Bool:          {Bool, IsBoolean, 1, "bool"},
	Int:           {Int, IsInteger, 0, "int"},
	Int8:          {Int8, IsInteger, 1, "int8"},
	Int16:         {Int16, IsInteger, 2, "int16"},
	Int32:         {Int32, IsInteger, 4, "int32"},
	Int64:         {Int64, IsInteger, 8, "int64"},
	Uint:          {Uint, IsInteger | IsUnsigned, 0, "uint"},
	Uint8:         {Uint8, IsInteger | IsUnsigned, 1, "uint8"},
	Uint16:        {Uint16, IsInteger | IsUnsigned, 2, "uint16"},
	Uint32:        {Uint32, IsInteger | IsUnsigned, 4, "uint32"},
	Uint64:        {Uint64, IsInteger | IsUnsigned, 8, "uint64"},
	Uintptr:       {Uintptr, IsInteger | IsUnsigned, 0, "uintptr"},
	Float32:       {Float32, IsFloat, 4, "float32"},
	Float64:       {Float64, IsFloat, 8, "float64"},
	Complex64:     {Complex64, IsComplex, 8, "complex64"},
	Complex128:    {Complex128, IsComplex, 16, "complex128"},
	String:        {String, IsString, 0, "string"},
	UnsafePointer: {UnsafePointer, 0, 0, "Pointer"},

	UntypedBool:    {UntypedBool, IsBoolean | IsUntyped, 0, "untyped boolean"},
	UntypedInt:     {UntypedInt, IsInteger | IsUntyped, 0, "untyped integer"},
	UntypedRune:    {UntypedRune, IsInteger | IsUntyped, 0, "untyped rune"},
	UntypedFloat:   {UntypedFloat, IsFloat | IsUntyped, 0, "untyped float"},
	UntypedComplex: {UntypedComplex, IsComplex | IsUntyped, 0, "untyped complex"},
	UntypedString:  {UntypedString, IsString | IsUntyped, 0, "untyped string"},
	UntypedNil:     {UntypedNil, IsUntyped, 0, "untyped nil"},
}
+
// aliases holds the Basic types for the byte and rune aliases; they are
// entered into the universe scope in addition to the Typ entries
// (positional Basic literals: {Kind, Info, size, Name}).
var aliases = [...]*Basic{
	{Byte, IsInteger | IsUnsigned, 1, "byte"},
	{Rune, IsInteger, 4, "rune"},
}

// predeclaredConstants lists the predeclared constant objects as
// positional Const literals — presumably {Pkg, Name, Type, Value, ...};
// TODO confirm field order against the Const declaration (not in view).
var predeclaredConstants = [...]*Const{
	{nil, "true", Typ[UntypedBool], true, nil},
	{nil, "false", Typ[UntypedBool], false, nil},
	{nil, "iota", Typ[UntypedInt], zeroConst, nil},
	{nil, "nil", Typ[UntypedNil], nilConst, nil},
}
+
// predeclaredFunctions lists the built-in functions as positional builtin
// literals: {id, name, nargs, isVariadic, isStatement}. The first group
// ends up in the universe scope; the exported Alignof/Offsetof/Sizeof
// group is routed into package unsafe by def (see def's IsExported check).
var predeclaredFunctions = [...]*builtin{
	{_Append, "append", 1, true, false},
	{_Cap, "cap", 1, false, false},
	{_Close, "close", 1, false, true},
	{_Complex, "complex", 2, false, false},
	{_Copy, "copy", 2, false, true},
	{_Delete, "delete", 2, false, true},
	{_Imag, "imag", 1, false, false},
	{_Len, "len", 1, false, false},
	{_Make, "make", 1, true, false},
	{_New, "new", 1, false, false},
	{_Panic, "panic", 1, false, true},
	{_Print, "print", 0, true, true},
	{_Println, "println", 0, true, true},
	{_Real, "real", 1, false, false},
	{_Recover, "recover", 0, false, true},

	{_Alignof, "Alignof", 1, false, false},
	{_Offsetof, "Offsetof", 1, false, false},
	{_Sizeof, "Sizeof", 1, false, false},
}
+
// init populates the universe and unsafe package scopes with all
// predeclared types, constants, and functions, then caches the iota
// constant. Order matters: universeIota is looked up only after the
// predeclared constants have been defined.
func init() {
	Universe = new(Scope)
	Unsafe = &Package{Name: "unsafe", Scope: new(Scope)}

	// predeclared types
	for _, t := range Typ {
		def(&TypeName{Name: t.Name, Type: t})
	}
	for _, t := range aliases {
		def(&TypeName{Name: t.Name, Type: t})
	}

	// error type
	{
		// Error has a nil package in its qualified name since it is in no package
		err := &Method{QualifiedName{nil, "Error"}, &Signature{Results: []*Var{{Name: "", Type: Typ[String]}}}}
		def(&TypeName{Name: "error", Type: &NamedType{Underlying: &Interface{Methods: []*Method{err}}}})
	}

	for _, c := range predeclaredConstants {
		def(c)
	}

	for _, f := range predeclaredFunctions {
		def(&Func{Name: f.name, Type: f})
	}

	universeIota = Universe.Lookup("iota").(*Const)
}
+
+// Objects with names containing blanks are internal and not entered into
+// a scope. Objects with exported names are inserted in the unsafe package
+// scope; other objects are inserted in the universe scope.
+//
+func def(obj Object) {
+ name := obj.GetName()
+ if strings.Index(name, " ") >= 0 {
+ return // nothing to do
+ }
+ // fix Obj link for named types
+ if typ, ok := obj.GetType().(*NamedType); ok {
+ typ.Obj = obj.(*TypeName)
+ }
+ // exported identifiers go into package unsafe
+ scope := Universe
+ if ast.IsExported(name) {
+ scope = Unsafe.Scope
+ // set Pkg field
+ switch obj := obj.(type) {
+ case *TypeName:
+ obj.Pkg = Unsafe
+ case *Func:
+ obj.Pkg = Unsafe
+ default:
+ unreachable()
+ }
+ }
+ if scope.Insert(obj) != nil {
+ panic("internal error: double declaration")
+ }
+}