Diffstat (limited to 'src/pkg/go')
-rw-r--r--  src/pkg/go/ast/Makefile                          |    1
-rw-r--r--  src/pkg/go/ast/ast.go                            |   58
-rw-r--r--  src/pkg/go/ast/filter.go                         |    6
-rw-r--r--  src/pkg/go/ast/print.go                          |   94
-rw-r--r--  src/pkg/go/ast/print_test.go                     |   80
-rw-r--r--  src/pkg/go/ast/resolve.go                        |  188
-rw-r--r--  src/pkg/go/ast/scope.go                          |  268
-rw-r--r--  src/pkg/go/ast/walk.go                           |    8
-rw-r--r--  src/pkg/go/doc/comment.go                        |    2
-rw-r--r--  src/pkg/go/doc/doc.go                            |    8
-rw-r--r--  src/pkg/go/parser/interface.go                   |    7
-rw-r--r--  src/pkg/go/parser/parser.go                      |  696
-rw-r--r--  src/pkg/go/parser/parser_test.go                 |    3
-rw-r--r--  src/pkg/go/printer/nodes.go                      |  114
-rw-r--r--  src/pkg/go/printer/printer.go                    |  196
-rw-r--r--  src/pkg/go/printer/printer_test.go               |   37
-rw-r--r--  src/pkg/go/printer/testdata/declarations.golden  |   77
-rw-r--r--  src/pkg/go/printer/testdata/declarations.input   |   29
-rw-r--r--  src/pkg/go/printer/testdata/expressions.golden   |   34
-rw-r--r--  src/pkg/go/printer/testdata/expressions.input    |   20
-rw-r--r--  src/pkg/go/printer/testdata/expressions.raw      |   37
-rw-r--r--  src/pkg/go/printer/testdata/slow.golden          |   85
-rw-r--r--  src/pkg/go/printer/testdata/slow.input           |   85
-rw-r--r--  src/pkg/go/scanner/scanner.go                    |   26
-rw-r--r--  src/pkg/go/scanner/scanner_test.go               |   60
-rw-r--r--  src/pkg/go/token/token.go                        |   15
-rw-r--r--  src/pkg/go/typechecker/Makefile                  |    1
-rw-r--r--  src/pkg/go/typechecker/scope.go                  |   72
-rw-r--r--  src/pkg/go/typechecker/testdata/test0.src (renamed from src/pkg/go/typechecker/testdata/test0.go) |    0
-rw-r--r--  src/pkg/go/typechecker/testdata/test1.src (renamed from src/pkg/go/typechecker/testdata/test1.go) |    2
-rw-r--r--  src/pkg/go/typechecker/testdata/test3.src (renamed from src/pkg/go/typechecker/testdata/test3.go) |    7
-rw-r--r--  src/pkg/go/typechecker/testdata/test4.src (renamed from src/pkg/go/typechecker/testdata/test4.go) |    2
-rw-r--r--  src/pkg/go/typechecker/type.go                   |  125
-rw-r--r--  src/pkg/go/typechecker/typechecker.go            |   82
-rw-r--r--  src/pkg/go/typechecker/typechecker_test.go       |    4
-rw-r--r--  src/pkg/go/typechecker/universe.go               |    6
-rw-r--r--  src/pkg/go/types/Makefile                        |   15
-rw-r--r--  src/pkg/go/types/const.go                        |  347
-rw-r--r--  src/pkg/go/types/exportdata.go                   |  135
-rw-r--r--  src/pkg/go/types/gcimporter.go                   |  786
-rw-r--r--  src/pkg/go/types/gcimporter_test.go              |  111
-rw-r--r--  src/pkg/go/types/testdata/exports.go             |   89
-rw-r--r--  src/pkg/go/types/types.go                        |  122
-rw-r--r--  src/pkg/go/types/universe.go                     |  113
44 files changed, 3396 insertions, 857 deletions
diff --git a/src/pkg/go/ast/Makefile b/src/pkg/go/ast/Makefile
index e9b885c70..40be10208 100644
--- a/src/pkg/go/ast/Makefile
+++ b/src/pkg/go/ast/Makefile
@@ -9,6 +9,7 @@ GOFILES=\
ast.go\
filter.go\
print.go\
+ resolve.go\
scope.go\
walk.go\
diff --git a/src/pkg/go/ast/ast.go b/src/pkg/go/ast/ast.go
index abafb5663..ed3e2cdd9 100644
--- a/src/pkg/go/ast/ast.go
+++ b/src/pkg/go/ast/ast.go
@@ -66,7 +66,7 @@ type Decl interface {
// A Comment node represents a single //-style or /*-style comment.
type Comment struct {
Slash token.Pos // position of "/" starting the comment
- Text []byte // comment text (excluding '\n' for //-style comments)
+ Text string // comment text (excluding '\n' for //-style comments)
}
@@ -199,7 +199,7 @@ type (
BasicLit struct {
ValuePos token.Pos // literal position
Kind token.Token // token.INT, token.FLOAT, token.IMAG, token.CHAR, or token.STRING
- Value []byte // literal string; e.g. 42, 0x7f, 3.14, 1e-9, 2.4i, 'a', '\x7f', "foo" or `\m\n\o`
+ Value string // literal string; e.g. 42, 0x7f, 3.14, 1e-9, 2.4i, 'a', '\x7f', "foo" or `\m\n\o`
}
// A FuncLit node represents a function literal.
@@ -602,12 +602,12 @@ type (
Else Stmt // else branch; or nil
}
- // A CaseClause represents a case of an expression switch statement.
+ // A CaseClause represents a case of an expression or type switch statement.
CaseClause struct {
- Case token.Pos // position of "case" or "default" keyword
- Values []Expr // nil means default case
- Colon token.Pos // position of ":"
- Body []Stmt // statement list; or nil
+ Case token.Pos // position of "case" or "default" keyword
+ List []Expr // list of expressions or types; nil means default case
+ Colon token.Pos // position of ":"
+ Body []Stmt // statement list; or nil
}
// A SwitchStmt node represents an expression switch statement.
@@ -618,20 +618,12 @@ type (
Body *BlockStmt // CaseClauses only
}
- // A TypeCaseClause represents a case of a type switch statement.
- TypeCaseClause struct {
- Case token.Pos // position of "case" or "default" keyword
- Types []Expr // nil means default case
- Colon token.Pos // position of ":"
- Body []Stmt // statement list; or nil
- }
-
// A TypeSwitchStmt node represents a type switch statement.
TypeSwitchStmt struct {
Switch token.Pos // position of "switch" keyword
Init Stmt // initialization statement; or nil
- Assign Stmt // x := y.(type)
- Body *BlockStmt // TypeCaseClauses only
+ Assign Stmt // x := y.(type) or y.(type)
+ Body *BlockStmt // CaseClauses only
}
// A CommClause node represents a case of a select statement.
@@ -687,7 +679,6 @@ func (s *BlockStmt) Pos() token.Pos { return s.Lbrace }
func (s *IfStmt) Pos() token.Pos { return s.If }
func (s *CaseClause) Pos() token.Pos { return s.Case }
func (s *SwitchStmt) Pos() token.Pos { return s.Switch }
-func (s *TypeCaseClause) Pos() token.Pos { return s.Case }
func (s *TypeSwitchStmt) Pos() token.Pos { return s.Switch }
func (s *CommClause) Pos() token.Pos { return s.Case }
func (s *SelectStmt) Pos() token.Pos { return s.Select }
@@ -734,13 +725,7 @@ func (s *CaseClause) End() token.Pos {
}
return s.Colon + 1
}
-func (s *SwitchStmt) End() token.Pos { return s.Body.End() }
-func (s *TypeCaseClause) End() token.Pos {
- if n := len(s.Body); n > 0 {
- return s.Body[n-1].End()
- }
- return s.Colon + 1
-}
+func (s *SwitchStmt) End() token.Pos { return s.Body.End() }
func (s *TypeSwitchStmt) End() token.Pos { return s.Body.End() }
func (s *CommClause) End() token.Pos {
if n := len(s.Body); n > 0 {
@@ -772,7 +757,6 @@ func (s *BlockStmt) stmtNode() {}
func (s *IfStmt) stmtNode() {}
func (s *CaseClause) stmtNode() {}
func (s *SwitchStmt) stmtNode() {}
-func (s *TypeCaseClause) stmtNode() {}
func (s *TypeSwitchStmt) stmtNode() {}
func (s *CommClause) stmtNode() {}
func (s *SelectStmt) stmtNode() {}
@@ -797,7 +781,7 @@ type (
ImportSpec struct {
Doc *CommentGroup // associated documentation; or nil
Name *Ident // local package name (including "."); or nil
- Path *BasicLit // package path
+ Path *BasicLit // import path
Comment *CommentGroup // line comments; or nil
}
@@ -937,11 +921,14 @@ func (d *FuncDecl) declNode() {}
// via Doc and Comment fields.
//
type File struct {
- Doc *CommentGroup // associated documentation; or nil
- Package token.Pos // position of "package" keyword
- Name *Ident // package name
- Decls []Decl // top-level declarations; or nil
- Comments []*CommentGroup // list of all comments in the source file
+ Doc *CommentGroup // associated documentation; or nil
+ Package token.Pos // position of "package" keyword
+ Name *Ident // package name
+ Decls []Decl // top-level declarations; or nil
+ Scope *Scope // package scope (this file only)
+ Imports []*ImportSpec // imports in this file
+ Unresolved []*Ident // unresolved identifiers in this file
+ Comments []*CommentGroup // list of all comments in the source file
}
@@ -958,9 +945,10 @@ func (f *File) End() token.Pos {
// collectively building a Go package.
//
type Package struct {
- Name string // package name
- Scope *Scope // package scope; or nil
- Files map[string]*File // Go source files by filename
+ Name string // package name
+ Scope *Scope // package scope
+ Imports map[string]*Scope // map of import path -> package scope across all files
+ Files map[string]*File // Go source files by filename
}
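
A minimal sketch, written against this revision's API (src and the names in it are made up), of what the new File fields carry after parsing: the parser now records the file scope, the imports, and the identifiers it could not resolve within the file itself.

	fset := token.NewFileSet()
	src := "package p\nimport \"fmt\"\nvar x = fmt.Sprintf(\"%d\", y)\n"
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(f.Imports))    // 1: the "fmt" import spec
	fmt.Println(len(f.Unresolved)) // 2: "fmt" and "y"; ast.NewPackage resolves these later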
diff --git a/src/pkg/go/ast/filter.go b/src/pkg/go/ast/filter.go
index 0c3cef4b2..090d08d34 100644
--- a/src/pkg/go/ast/filter.go
+++ b/src/pkg/go/ast/filter.go
@@ -304,7 +304,7 @@ const (
// separator is an empty //-style comment that is interspersed between
// different comment groups when they are concatenated into a single group
//
-var separator = &Comment{noPos, []byte("//")}
+var separator = &Comment{noPos, "//"}
// MergePackageFiles creates a file AST by merging the ASTs of the
@@ -425,5 +425,7 @@ func MergePackageFiles(pkg *Package, mode MergeMode) *File {
}
}
- return &File{doc, pos, NewIdent(pkg.Name), decls, comments}
+ // TODO(gri) need to compute pkgScope and unresolved identifiers!
+ // TODO(gri) need to compute imports!
+ return &File{doc, pos, NewIdent(pkg.Name), decls, nil, nil, nil, comments}
}
diff --git a/src/pkg/go/ast/print.go b/src/pkg/go/ast/print.go
index d71490d4a..e6d4e838d 100644
--- a/src/pkg/go/ast/print.go
+++ b/src/pkg/go/ast/print.go
@@ -21,24 +21,29 @@ type FieldFilter func(name string, value reflect.Value) bool
// NotNilFilter returns true for field values that are not nil;
// it returns false otherwise.
-func NotNilFilter(_ string, value reflect.Value) bool {
- v, ok := value.(interface {
- IsNil() bool
- })
- return !ok || !v.IsNil()
+func NotNilFilter(_ string, v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return !v.IsNil()
+ }
+ return true
}
// Fprint prints the (sub-)tree starting at AST node x to w.
+// If fset != nil, position information is interpreted relative
+// to that file set. Otherwise positions are printed as integer
+// values (file set specific offsets).
//
// A non-nil FieldFilter f may be provided to control the output:
// struct fields for which f(fieldname, fieldvalue) is true are
// printed; all others are filtered from the output.
//
-func Fprint(w io.Writer, x interface{}, f FieldFilter) (n int, err os.Error) {
+func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (n int, err os.Error) {
// setup printer
p := printer{
output: w,
+ fset: fset,
filter: f,
ptrmap: make(map[interface{}]int),
last: '\n', // force printing of line number on first line
@@ -65,16 +70,17 @@ func Fprint(w io.Writer, x interface{}, f FieldFilter) (n int, err os.Error) {
// Print prints x to standard output, skipping nil fields.
-// Print(x) is the same as Fprint(os.Stdout, x, NotNilFilter).
-func Print(x interface{}) (int, os.Error) {
- return Fprint(os.Stdout, x, NotNilFilter)
+// Print(fset, x) is the same as Fprint(os.Stdout, fset, x, NotNilFilter).
+func Print(fset *token.FileSet, x interface{}) (int, os.Error) {
+ return Fprint(os.Stdout, fset, x, NotNilFilter)
}
type printer struct {
output io.Writer
+ fset *token.FileSet
filter FieldFilter
- ptrmap map[interface{}]int // *reflect.PtrValue -> line number
+ ptrmap map[interface{}]int // *T -> line number
written int // number of bytes written to output
indent int // current indentation level
last byte // the last byte processed by Write
@@ -135,73 +141,69 @@ func (p *printer) printf(format string, args ...interface{}) {
// Implementation note: Print is written for AST nodes but could be
// used to print arbitrary data structures; such a version should
// probably be in a different package.
+//
+// Note: This code detects (some) cycles created via pointers but
+// not cycles that are created via slices or maps containing the
+// same slice or map. Code for general data structures probably
+// should catch those as well.
func (p *printer) print(x reflect.Value) {
- // Note: This test is only needed because AST nodes
- // embed a token.Position, and thus all of them
- // understand the String() method (but it only
- // applies to the Position field).
- // TODO: Should reconsider this AST design decision.
- if pos, ok := x.Interface().(token.Position); ok {
- p.printf("%s", pos)
- return
- }
-
if !NotNilFilter("", x) {
p.printf("nil")
return
}
- switch v := x.(type) {
- case *reflect.InterfaceValue:
- p.print(v.Elem())
+ switch x.Kind() {
+ case reflect.Interface:
+ p.print(x.Elem())
- case *reflect.MapValue:
- p.printf("%s (len = %d) {\n", x.Type().String(), v.Len())
+ case reflect.Map:
+ p.printf("%s (len = %d) {\n", x.Type().String(), x.Len())
p.indent++
- for _, key := range v.Keys() {
+ for _, key := range x.MapKeys() {
p.print(key)
p.printf(": ")
- p.print(v.Elem(key))
+ p.print(x.MapIndex(key))
+ p.printf("\n")
}
p.indent--
p.printf("}")
- case *reflect.PtrValue:
+ case reflect.Ptr:
p.printf("*")
// type-checked ASTs may contain cycles - use ptrmap
// to keep track of objects that have been printed
// already and print the respective line number instead
- ptr := v.Interface()
+ ptr := x.Interface()
if line, exists := p.ptrmap[ptr]; exists {
p.printf("(obj @ %d)", line)
} else {
p.ptrmap[ptr] = p.line
- p.print(v.Elem())
+ p.print(x.Elem())
}
- case *reflect.SliceValue:
- if s, ok := v.Interface().([]byte); ok {
+ case reflect.Slice:
+ if s, ok := x.Interface().([]byte); ok {
p.printf("%#q", s)
return
}
- p.printf("%s (len = %d) {\n", x.Type().String(), v.Len())
+ p.printf("%s (len = %d) {\n", x.Type().String(), x.Len())
p.indent++
- for i, n := 0, v.Len(); i < n; i++ {
+ for i, n := 0, x.Len(); i < n; i++ {
p.printf("%d: ", i)
- p.print(v.Elem(i))
+ p.print(x.Index(i))
p.printf("\n")
}
p.indent--
p.printf("}")
- case *reflect.StructValue:
+ case reflect.Struct:
p.printf("%s {\n", x.Type().String())
p.indent++
- t := v.Type().(*reflect.StructType)
+ t := x.Type()
for i, n := 0, t.NumField(); i < n; i++ {
name := t.Field(i).Name
- value := v.Field(i)
+ value := x.Field(i)
if p.filter == nil || p.filter(name, value) {
p.printf("%s: ", name)
p.print(value)
@@ -212,6 +214,20 @@ func (p *printer) print(x reflect.Value) {
p.printf("}")
default:
- p.printf("%v", x.Interface())
+ v := x.Interface()
+ switch v := v.(type) {
+ case string:
+ // print strings in quotes
+ p.printf("%q", v)
+ return
+ case token.Pos:
+ // position values can be printed nicely if we have a file set
+ if p.fset != nil {
+ p.printf("%s", p.fset.Position(v))
+ return
+ }
+ }
+ // default
+ p.printf("%v", v)
}
}
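
With the new signature, Fprint interprets token.Pos values through the given file set and prints them as file:line:column; a nil file set falls back to raw offsets. A short usage sketch, assuming fset and f from an earlier parse:

	ast.Fprint(os.Stdout, fset, f, ast.NotNilFilter) // positions via fset, nil fields suppressed
	ast.Fprint(os.Stdout, nil, f, nil)               // positions as plain offsets, no filtering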
diff --git a/src/pkg/go/ast/print_test.go b/src/pkg/go/ast/print_test.go
new file mode 100644
index 000000000..0820dcfce
--- /dev/null
+++ b/src/pkg/go/ast/print_test.go
@@ -0,0 +1,80 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+
+var tests = []struct {
+ x interface{} // x is printed as s
+ s string
+}{
+ // basic types
+ {nil, "0 nil"},
+ {true, "0 true"},
+ {42, "0 42"},
+ {3.14, "0 3.14"},
+ {1 + 2.718i, "0 (1+2.718i)"},
+ {"foobar", "0 \"foobar\""},
+
+ // maps
+ {map[string]int{"a": 1, "b": 2},
+ `0 map[string] int (len = 2) {
+ 1 . "a": 1
+ 2 . "b": 2
+ 3 }`},
+
+ // pointers
+ {new(int), "0 *0"},
+
+ // slices
+ {[]int{1, 2, 3},
+ `0 []int (len = 3) {
+ 1 . 0: 1
+ 2 . 1: 2
+ 3 . 2: 3
+ 4 }`},
+
+ // structs
+ {struct{ x, y int }{42, 991},
+ `0 struct { x int; y int } {
+ 1 . x: 42
+ 2 . y: 991
+ 3 }`},
+}
+
+
+// Split s into lines, trim whitespace from all lines, and return
+// the concatenated non-empty lines.
+func trim(s string) string {
+ lines := strings.Split(s, "\n", -1)
+ i := 0
+ for _, line := range lines {
+ line = strings.TrimSpace(line)
+ if line != "" {
+ lines[i] = line
+ i++
+ }
+ }
+ return strings.Join(lines[0:i], "\n")
+}
+
+
+func TestPrint(t *testing.T) {
+ var buf bytes.Buffer
+ for _, test := range tests {
+ buf.Reset()
+ if _, err := Fprint(&buf, nil, test.x, nil); err != nil {
+ t.Errorf("Fprint failed: %s", err)
+ }
+ if s, ts := trim(buf.String()), trim(test.s); s != ts {
+ t.Errorf("got:\n%s\nexpected:\n%s\n", s, ts)
+ }
+ }
+}
diff --git a/src/pkg/go/ast/resolve.go b/src/pkg/go/ast/resolve.go
new file mode 100644
index 000000000..fddc3baab
--- /dev/null
+++ b/src/pkg/go/ast/resolve.go
@@ -0,0 +1,188 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements NewPackage.
+
+package ast
+
+import (
+ "fmt"
+ "go/scanner"
+ "go/token"
+ "os"
+)
+
+
+type pkgBuilder struct {
+ scanner.ErrorVector
+ fset *token.FileSet
+}
+
+
+func (p *pkgBuilder) error(pos token.Pos, msg string) {
+ p.Error(p.fset.Position(pos), msg)
+}
+
+
+func (p *pkgBuilder) errorf(pos token.Pos, format string, args ...interface{}) {
+ p.error(pos, fmt.Sprintf(format, args...))
+}
+
+
+func (p *pkgBuilder) declare(scope, altScope *Scope, obj *Object) {
+ alt := scope.Insert(obj)
+ if alt == nil && altScope != nil {
+ // see if there is a conflicting declaration in altScope
+ alt = altScope.Lookup(obj.Name)
+ }
+ if alt != nil {
+ prevDecl := ""
+ if pos := alt.Pos(); pos.IsValid() {
+ prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.fset.Position(pos))
+ }
+ p.error(obj.Pos(), fmt.Sprintf("%s redeclared in this block%s", obj.Name, prevDecl))
+ }
+}
+
+
+func resolve(scope *Scope, ident *Ident) bool {
+ for ; scope != nil; scope = scope.Outer {
+ if obj := scope.Lookup(ident.Name); obj != nil {
+ ident.Obj = obj
+ return true
+ }
+ }
+ return false
+}
+
+
+// NewPackage uses an Importer to resolve imports. Given an importPath,
+// an importer returns the imported package's name, its scope of exported
+// objects, and an error, if any.
+//
+type Importer func(path string) (name string, scope *Scope, err os.Error)
+
+
+// NewPackage creates a new Package node from a set of File nodes. It resolves
+// unresolved identifiers across files and updates each file's Unresolved list
+// accordingly. If a non-nil importer and universe scope are provided, they are
+// used to resolve identifiers not declared in any of the package files. Any
+// remaining unresolved identifiers are reported as undeclared. If the files
+// belong to different packages, one package name is selected and files with
+// a different package name are reported and then ignored.
+// The result is a package node and a scanner.ErrorList if there were errors.
+//
+func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, os.Error) {
+ var p pkgBuilder
+ p.fset = fset
+
+ // complete package scope
+ pkgName := ""
+ pkgScope := NewScope(universe)
+ for _, file := range files {
+ // package names must match
+ switch name := file.Name.Name; {
+ case pkgName == "":
+ pkgName = name
+ case name != pkgName:
+ p.errorf(file.Package, "package %s; expected %s", name, pkgName)
+ continue // ignore this file
+ }
+
+ // collect top-level file objects in package scope
+ for _, obj := range file.Scope.Objects {
+ p.declare(pkgScope, nil, obj)
+ }
+ }
+
+ // imports maps import paths to package names and scopes
+ // TODO(gri): Eventually we'd like to get to the import scope from
+ // a package object. Then we can have a map path -> Obj.
+ type importedPkg struct {
+ name string
+ scope *Scope
+ }
+ imports := make(map[string]*importedPkg)
+
+ // complete file scopes with imports and resolve identifiers
+ for _, file := range files {
+ // ignore file if it belongs to a different package
+ // (error has already been reported)
+ if file.Name.Name != pkgName {
+ continue
+ }
+
+ // build file scope by processing all imports
+ importErrors := false
+ fileScope := NewScope(pkgScope)
+ for _, spec := range file.Imports {
+ // add import to global map of imports
+ path := string(spec.Path.Value)
+ path = path[1 : len(path)-1] // strip ""'s
+ pkg := imports[path]
+ if pkg == nil {
+ if importer == nil {
+ importErrors = true
+ continue
+ }
+ name, scope, err := importer(path)
+ if err != nil {
+ p.errorf(spec.Path.Pos(), "could not import %s (%s)", path, err)
+ importErrors = true
+ continue
+ }
+ pkg = &importedPkg{name, scope}
+ imports[path] = pkg
+ // TODO(gri) If a local package name != "." is provided,
+ // global identifier resolution could proceed even if the
+ // import failed. Consider adjusting the logic here a bit.
+ }
+ // local name overrides imported package name
+ name := pkg.name
+ if spec.Name != nil {
+ name = spec.Name.Name
+ }
+ // add import to file scope
+ if name == "." {
+ // merge imported scope with file scope
+ for _, obj := range pkg.scope.Objects {
+ p.declare(fileScope, pkgScope, obj)
+ }
+ } else {
+ // declare imported package object in file scope
+ obj := NewObj(Pkg, name)
+ obj.Decl = spec
+ p.declare(fileScope, pkgScope, obj)
+ }
+ }
+
+ // resolve identifiers
+ if importErrors {
+ // don't use the universe scope without correct imports
+ // (objects in the universe may be shadowed by imports;
+ // with missing imports identifiers might get resolved
+ // wrongly)
+ pkgScope.Outer = nil
+ }
+ i := 0
+ for _, ident := range file.Unresolved {
+ if !resolve(fileScope, ident) {
+ p.errorf(ident.Pos(), "undeclared name: %s", ident.Name)
+ file.Unresolved[i] = ident
+ i++
+ }
+
+ }
+ file.Unresolved = file.Unresolved[0:i]
+ pkgScope.Outer = universe // reset universe scope
+ }
+
+ // collect all import paths and respective package scopes
+ importedScopes := make(map[string]*Scope)
+ for path, pkg := range imports {
+ importedScopes[path] = pkg.scope
+ }
+
+ return &Package{pkgName, pkgScope, importedScopes, files}, p.GetError(scanner.Sorted)
+}
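
A sketch of driving NewPackage with a toy importer. The canned "fmt" scope below is hypothetical (a real importer would read export data), and fset/f are assumed from an earlier parse:

	importer := func(path string) (string, *ast.Scope, os.Error) {
		if path == "fmt" {
			scope := ast.NewScope(nil)
			scope.Insert(ast.NewObj(ast.Fun, "Sprintf"))
			return "fmt", scope, nil
		}
		return "", nil, os.NewError("cannot import " + path)
	}
	files := map[string]*ast.File{"p.go": f}
	pkg, err := ast.NewPackage(fset, files, importer, nil) // nil universe scope
	fmt.Println(pkg.Name, err) // err collects redeclarations and undeclared names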
diff --git a/src/pkg/go/ast/scope.go b/src/pkg/go/ast/scope.go
index 956a208ae..830d88aef 100644
--- a/src/pkg/go/ast/scope.go
+++ b/src/pkg/go/ast/scope.go
@@ -2,31 +2,31 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file implements scopes, the objects they contain,
-// and object types.
+// This file implements scopes and the objects they contain.
package ast
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+)
+
+
// A Scope maintains the set of named language entities declared
// in the scope and a link to the immediately surrounding (outer)
// scope.
//
type Scope struct {
Outer *Scope
- Objects []*Object // in declaration order
- // Implementation note: In some cases (struct fields,
- // function parameters) we need the source order of
- // variables. Thus for now, we store scope entries
- // in a linear list. If scopes become very large
- // (say, for packages), we may need to change this
- // to avoid slow lookups.
+ Objects map[string]*Object
}
// NewScope creates a new scope nested in the outer scope.
func NewScope(outer *Scope) *Scope {
- const n = 4 // initial scope capacity, must be > 0
- return &Scope{outer, make([]*Object, 0, n)}
+ const n = 4 // initial scope capacity
+ return &Scope{outer, make(map[string]*Object, n)}
}
@@ -34,73 +34,111 @@ func NewScope(outer *Scope) *Scope {
// found in scope s, otherwise it returns nil. Outer scopes
// are ignored.
//
-// Lookup always returns nil if name is "_", even if the scope
-// contains objects with that name.
-//
func (s *Scope) Lookup(name string) *Object {
- if name != "_" {
- for _, obj := range s.Objects {
- if obj.Name == name {
- return obj
- }
- }
- }
- return nil
+ return s.Objects[name]
}
-// Insert attempts to insert a named object into the scope s.
-// If the scope does not contain an object with that name yet
-// or if the object is named "_", Insert inserts the object
-// and returns it. Otherwise, Insert leaves the scope unchanged
-// and returns the object found in the scope instead.
+// Insert attempts to insert a named object obj into the scope s.
+// If the scope already contains an object alt with the same name,
+// Insert leaves the scope unchanged and returns alt. Otherwise
+// it inserts obj and returns nil.
//
-func (s *Scope) Insert(obj *Object) *Object {
- alt := s.Lookup(obj.Name)
- if alt == nil {
- s.append(obj)
- alt = obj
+func (s *Scope) Insert(obj *Object) (alt *Object) {
+ if alt = s.Objects[obj.Name]; alt == nil {
+ s.Objects[obj.Name] = obj
}
- return alt
+ return
}
-func (s *Scope) append(obj *Object) {
- s.Objects = append(s.Objects, obj)
+// Debugging support
+func (s *Scope) String() string {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "scope %p {", s)
+ if s != nil && len(s.Objects) > 0 {
+ fmt.Fprintln(&buf)
+ for _, obj := range s.Objects {
+ fmt.Fprintf(&buf, "\t%s %s\n", obj.Kind, obj.Name)
+ }
+ }
+ fmt.Fprintf(&buf, "}\n")
+ return buf.String()
}
+
// ----------------------------------------------------------------------------
// Objects
-// An Object describes a language entity such as a package,
-// constant, type, variable, or function (incl. methods).
+// An Object describes a named language entity such as a package,
+// constant, type, variable, function (incl. methods), or label.
//
type Object struct {
- Kind Kind
- Name string // declared name
- Type *Type
- Decl interface{} // corresponding Field, XxxSpec or FuncDecl
- N int // value of iota for this declaration
+ Kind ObjKind
+ Name string // declared name
+ Decl interface{} // corresponding Field, XxxSpec, FuncDecl, or LabeledStmt; or nil
+ Type interface{} // placeholder for type information; may be nil
}
// NewObj creates a new object of a given kind and name.
-func NewObj(kind Kind, name string) *Object {
+func NewObj(kind ObjKind, name string) *Object {
return &Object{Kind: kind, Name: name}
}
-// Kind describes what an object represents.
-type Kind int
+// Pos computes the source position of the declaration of an object name.
+// The result may be an invalid position if it cannot be computed
+// (obj.Decl may be nil or not correct).
+func (obj *Object) Pos() token.Pos {
+ name := obj.Name
+ switch d := obj.Decl.(type) {
+ case *Field:
+ for _, n := range d.Names {
+ if n.Name == name {
+ return n.Pos()
+ }
+ }
+ case *ImportSpec:
+ if d.Name != nil && d.Name.Name == name {
+ return d.Name.Pos()
+ }
+ return d.Path.Pos()
+ case *ValueSpec:
+ for _, n := range d.Names {
+ if n.Name == name {
+ return n.Pos()
+ }
+ }
+ case *TypeSpec:
+ if d.Name.Name == name {
+ return d.Name.Pos()
+ }
+ case *FuncDecl:
+ if d.Name.Name == name {
+ return d.Name.Pos()
+ }
+ case *LabeledStmt:
+ if d.Label.Name == name {
+ return d.Label.Pos()
+ }
+ }
+ return token.NoPos
+}
+
+
+// ObjKind describes what an object represents.
+type ObjKind int
// The list of possible Object kinds.
const (
- Bad Kind = iota // for error handling
- Pkg // package
- Con // constant
- Typ // type
- Var // variable
- Fun // function or method
+ Bad ObjKind = iota // for error handling
+ Pkg // package
+ Con // constant
+ Typ // type
+ Var // variable
+ Fun // function or method
+ Lbl // label
)
@@ -111,132 +149,8 @@ var objKindStrings = [...]string{
Typ: "type",
Var: "var",
Fun: "func",
+ Lbl: "label",
}
-func (kind Kind) String() string { return objKindStrings[kind] }
-
-
-// IsExported returns whether obj is exported.
-func (obj *Object) IsExported() bool { return IsExported(obj.Name) }
-
-
-// ----------------------------------------------------------------------------
-// Types
-
-// A Type represents a Go type.
-type Type struct {
- Form Form
- Obj *Object // corresponding type name, or nil
- Scope *Scope // fields and methods, always present
- N uint // basic type id, array length, number of function results, or channel direction
- Key, Elt *Type // map key and array, pointer, slice, map or channel element
- Params *Scope // function (receiver, input and result) parameters, tuple expressions (results of function calls), or nil
- Expr Expr // corresponding AST expression
-}
-
-
-// NewType creates a new type of a given form.
-func NewType(form Form) *Type {
- return &Type{Form: form, Scope: NewScope(nil)}
-}
-
-
-// Form describes the form of a type.
-type Form int
-
-// The list of possible type forms.
-const (
- BadType Form = iota // for error handling
- Unresolved // type not fully setup
- Basic
- Array
- Struct
- Pointer
- Function
- Method
- Interface
- Slice
- Map
- Channel
- Tuple
-)
-
-
-var formStrings = [...]string{
- BadType: "badType",
- Unresolved: "unresolved",
- Basic: "basic",
- Array: "array",
- Struct: "struct",
- Pointer: "pointer",
- Function: "function",
- Method: "method",
- Interface: "interface",
- Slice: "slice",
- Map: "map",
- Channel: "channel",
- Tuple: "tuple",
-}
-
-
-func (form Form) String() string { return formStrings[form] }
-
-
-// The list of basic type id's.
-const (
- Bool = iota
- Byte
- Uint
- Int
- Float
- Complex
- Uintptr
- String
-
- Uint8
- Uint16
- Uint32
- Uint64
-
- Int8
- Int16
- Int32
- Int64
-
- Float32
- Float64
-
- Complex64
- Complex128
-
- // TODO(gri) ideal types are missing
-)
-
-
-var BasicTypes = map[uint]string{
- Bool: "bool",
- Byte: "byte",
- Uint: "uint",
- Int: "int",
- Float: "float",
- Complex: "complex",
- Uintptr: "uintptr",
- String: "string",
-
- Uint8: "uint8",
- Uint16: "uint16",
- Uint32: "uint32",
- Uint64: "uint64",
-
- Int8: "int8",
- Int16: "int16",
- Int32: "int32",
- Int64: "int64",
-
- Float32: "float32",
- Float64: "float64",
-
- Complex64: "complex64",
- Complex128: "complex128",
-}
+func (kind ObjKind) String() string { return objKindStrings[kind] }
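
Note that Insert's contract is now inverted relative to the old version: it returns nil on success and the previously declared object on conflict. A quick sketch:

	s := ast.NewScope(nil)
	if alt := s.Insert(ast.NewObj(ast.Var, "x")); alt == nil {
		// inserted; s.Lookup("x") now finds the var
	}
	if alt := s.Insert(ast.NewObj(ast.Con, "x")); alt != nil {
		fmt.Println(alt.Kind) // var: the original declaration wins
	}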
diff --git a/src/pkg/go/ast/walk.go b/src/pkg/go/ast/walk.go
index 20c337c3b..95c4b3a35 100644
--- a/src/pkg/go/ast/walk.go
+++ b/src/pkg/go/ast/walk.go
@@ -234,7 +234,7 @@ func Walk(v Visitor, node Node) {
}
case *CaseClause:
- walkExprList(v, n.Values)
+ walkExprList(v, n.List)
walkStmtList(v, n.Body)
case *SwitchStmt:
@@ -246,12 +246,6 @@ func Walk(v Visitor, node Node) {
}
Walk(v, n.Body)
- case *TypeCaseClause:
- for _, x := range n.Types {
- Walk(v, x)
- }
- walkStmtList(v, n.Body)
-
case *TypeSwitchStmt:
if n.Init != nil {
Walk(v, n.Init)
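
Since TypeCaseClause is gone, a single Visitor case now covers both switch flavors; CaseClause.List holds expressions for an expression switch and types for a type switch. A minimal sketch of a visitor exploiting that (caseCounter is a made-up helper):

	type caseCounter int

	func (c *caseCounter) Visit(n ast.Node) ast.Visitor {
		if cc, ok := n.(*ast.CaseClause); ok {
			*c += 1     // one more clause, of either switch kind
			_ = cc.List // expressions or types; nil for a default case
		}
		return c
	}

	// usage: var c caseCounter; ast.Walk(&c, f)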
diff --git a/src/pkg/go/doc/comment.go b/src/pkg/go/doc/comment.go
index 9ff0bd536..f1ebfa97b 100644
--- a/src/pkg/go/doc/comment.go
+++ b/src/pkg/go/doc/comment.go
@@ -286,7 +286,7 @@ func unindent(block [][]byte) {
// nor to have trailing spaces at the end of lines.
// The comment markers have already been removed.
//
-// Turn each run of multiple \n into </p><p>
+// Turn each run of multiple \n into </p><p>.
// Turn each run of indented lines into a <pre> block without indent.
//
// URLs in the comment text are converted into links; if the URL also appears
diff --git a/src/pkg/go/doc/doc.go b/src/pkg/go/doc/doc.go
index e46857cb8..e7a8d3f63 100644
--- a/src/pkg/go/doc/doc.go
+++ b/src/pkg/go/doc/doc.go
@@ -66,7 +66,7 @@ func (doc *docReader) addDoc(comments *ast.CommentGroup) {
n2 := len(comments.List)
list := make([]*ast.Comment, n1+1+n2) // + 1 for separator line
copy(list, doc.doc.List)
- list[n1] = &ast.Comment{token.NoPos, []byte("//")} // separator line
+ list[n1] = &ast.Comment{token.NoPos, "//"} // separator line
copy(list[n1+1:], comments.List)
doc.doc = &ast.CommentGroup{list}
}
@@ -105,7 +105,7 @@ func baseTypeName(typ ast.Expr) string {
// if the type is not exported, the effect to
// a client is as if there were no type name
if t.IsExported() {
- return string(t.Name)
+ return t.Name
}
case *ast.StarExpr:
return baseTypeName(t.X)
@@ -300,9 +300,9 @@ func (doc *docReader) addFile(src *ast.File) {
// collect BUG(...) comments
for _, c := range src.Comments {
text := c.List[0].Text
- if m := bug_markers.FindIndex(text); m != nil {
+ if m := bug_markers.FindStringIndex(text); m != nil {
// found a BUG comment; maybe empty
- if btxt := text[m[1]:]; bug_content.Match(btxt) {
+ if btxt := text[m[1]:]; bug_content.MatchString(btxt) {
// non-empty BUG comment; collect comment without BUG prefix
list := copyCommentList(c.List)
list[0].Text = text[m[1]:]
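
With Comment.Text now a string, the marker scan uses regexp's string variants directly instead of converting. Both calls report identical byte offsets; the pattern below is illustrative, not the actual bug_markers expression:

	re := regexp.MustCompile(`BUG\(.*\):`)
	m1 := re.FindIndex([]byte(text)) // old: comment text was a []byte
	m2 := re.FindStringIndex(text)   // new: comment text is a string
	fmt.Println(m1, m2)              // the same [start, end) pair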
diff --git a/src/pkg/go/parser/interface.go b/src/pkg/go/parser/interface.go
index 6f35b495e..b4780e057 100644
--- a/src/pkg/go/parser/interface.go
+++ b/src/pkg/go/parser/interface.go
@@ -69,7 +69,7 @@ func ParseExpr(fset *token.FileSet, filename string, src interface{}) (ast.Expr,
var p parser
p.init(fset, filename, data, 0)
- x := p.parseExpr()
+ x := p.parseRhs()
if p.tok == token.SEMICOLON {
p.next() // consume automatically inserted semicolon, if any
}
@@ -159,7 +159,8 @@ func ParseFiles(fset *token.FileSet, filenames []string, mode uint) (pkgs map[st
name := src.Name.Name
pkg, found := pkgs[name]
if !found {
- pkg = &ast.Package{name, nil, make(map[string]*ast.File)}
+ // TODO(gri) Use NewPackage here; reconsider ParseFiles API.
+ pkg = &ast.Package{name, nil, nil, make(map[string]*ast.File)}
pkgs[name] = pkg
}
pkg.Files[filename] = src
@@ -182,7 +183,7 @@ func ParseFiles(fset *token.FileSet, filenames []string, mode uint) (pkgs map[st
// error are returned.
//
func ParseDir(fset *token.FileSet, path string, filter func(*os.FileInfo) bool, mode uint) (map[string]*ast.Package, os.Error) {
- fd, err := os.Open(path, os.O_RDONLY, 0)
+ fd, err := os.Open(path)
if err != nil {
return nil, err
}
diff --git a/src/pkg/go/parser/parser.go b/src/pkg/go/parser/parser.go
index 7c5843f36..84a0da6ae 100644
--- a/src/pkg/go/parser/parser.go
+++ b/src/pkg/go/parser/parser.go
@@ -17,10 +17,6 @@ import (
)
-// noPos is used when there is no corresponding source position for a token.
-var noPos token.Position
-
-
// The mode parameter to the Parse* functions is a set of flags (or 0).
// They control the amount of source code parsed and other optional
// parser functionality.
@@ -30,6 +26,7 @@ const (
ImportsOnly // parsing stops after import declarations
ParseComments // parse comments and add them to AST
Trace // print a trace of parsed productions
+ DeclarationErrors // report declaration errors
)
@@ -46,16 +43,27 @@ type parser struct {
// Comments
comments []*ast.CommentGroup
- leadComment *ast.CommentGroup // the last lead comment
- lineComment *ast.CommentGroup // the last line comment
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
// Next token
pos token.Pos // token position
tok token.Token // one token look-ahead
- lit []byte // token literal
+ lit string // token literal
// Non-syntactic parser control
exprLev int // < 0: in control clause, >= 0: in expression
+
+ // Ordinary identifier scopes
+ pkgScope *ast.Scope // pkgScope.Outer == nil
+ topScope *ast.Scope // top-most scope; may be pkgScope
+ unresolved []*ast.Ident // unresolved identifiers
+ imports []*ast.ImportSpec // list of imports
+
+ // Label scope
+ // (maintained by open/close LabelScope)
+ labelScope *ast.Scope // label scope for current function
+ targetStack [][]*ast.Ident // stack of unresolved labels
}
@@ -72,9 +80,133 @@ func scannerMode(mode uint) uint {
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
p.file = fset.AddFile(filename, fset.Base(), len(src))
p.scanner.Init(p.file, src, p, scannerMode(mode))
+
p.mode = mode
p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
+
p.next()
+
+ // set up the pkgScope here (as opposed to in parseFile) because
+ // there are other parser entry points (ParseExpr, etc.)
+ p.openScope()
+ p.pkgScope = p.topScope
+
+ // for the same reason, set up a label scope
+ p.openLabelScope()
+}
+
+
+// ----------------------------------------------------------------------------
+// Scoping support
+
+func (p *parser) openScope() {
+ p.topScope = ast.NewScope(p.topScope)
+}
+
+
+func (p *parser) closeScope() {
+ p.topScope = p.topScope.Outer
+}
+
+
+func (p *parser) openLabelScope() {
+ p.labelScope = ast.NewScope(p.labelScope)
+ p.targetStack = append(p.targetStack, nil)
+}
+
+
+func (p *parser) closeLabelScope() {
+ // resolve labels
+ n := len(p.targetStack) - 1
+ scope := p.labelScope
+ for _, ident := range p.targetStack[n] {
+ ident.Obj = scope.Lookup(ident.Name)
+ if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
+ p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
+ }
+ }
+ // pop label scope
+ p.targetStack = p.targetStack[0:n]
+ p.labelScope = p.labelScope.Outer
+}
+
+
+func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
+ for _, ident := range idents {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name != "_" {
+ obj := ast.NewObj(kind, ident.Name)
+ // remember the corresponding declaration for redeclaration
+ // errors and global variable resolution/typechecking phase
+ obj.Decl = decl
+ if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
+ prevDecl := ""
+ if pos := alt.Pos(); pos.IsValid() {
+ prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
+ }
+ p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
+ }
+ ident.Obj = obj
+ }
+ }
+}
+
+
+func (p *parser) shortVarDecl(idents []*ast.Ident) {
+ // Go spec: A short variable declaration may redeclare variables
+ // provided they were originally declared in the same block with
+ // the same type, and at least one of the non-blank variables is new.
+ n := 0 // number of new variables
+ for _, ident := range idents {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name != "_" {
+ obj := ast.NewObj(ast.Var, ident.Name)
+ // short var declarations cannot have redeclaration errors
+ // and are not global => no need to remember the respective
+ // declaration
+ alt := p.topScope.Insert(obj)
+ if alt == nil {
+ n++ // new declaration
+ alt = obj
+ }
+ ident.Obj = alt
+ }
+ }
+ if n == 0 && p.mode&DeclarationErrors != 0 {
+ p.error(idents[0].Pos(), "no new variables on left side of :=")
+ }
+}
+
+
+// The unresolved object is a sentinel to mark identifiers that have been added
+// to the list of unresolved identifiers. The sentinel is only used for verifying
+// internal consistency.
+var unresolved = new(ast.Object)
+
+
+func (p *parser) resolve(x ast.Expr) {
+ // nothing to do if x is not an identifier or the blank identifier
+ ident, _ := x.(*ast.Ident)
+ if ident == nil {
+ return
+ }
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name == "_" {
+ return
+ }
+ // try to resolve the identifier
+ for s := p.topScope; s != nil; s = s.Outer {
+ if obj := s.Lookup(ident.Name); obj != nil {
+ ident.Obj = obj
+ return
+ }
+ }
+ // all local scopes are known, so any unresolved identifier
+ // must be found either in the file scope, package scope
+ // (perhaps in another file), or universe scope --- collect
+ // them so that they can be resolved later
+ ident.Obj = unresolved
+ p.unresolved = append(p.unresolved, ident)
}
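
The effect of shortVarDecl becomes visible through the new DeclarationErrors mode flag: a := that declares no new variables is now reported at parse time. A small sketch (fset as usual):

	src := "package p\nfunc f() {\n\tx := 1\n\tx := 2\n}\n"
	_, err := parser.ParseFile(fset, "p.go", src, parser.DeclarationErrors)
	fmt.Println(err) // no new variables on left side of := (at the second x)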
@@ -120,7 +252,7 @@ func (p *parser) next0() {
s := p.tok.String()
switch {
case p.tok.IsLiteral():
- p.printTrace(s, string(p.lit))
+ p.printTrace(s, p.lit)
case p.tok.IsOperator(), p.tok.IsKeyword():
p.printTrace("\"" + s + "\"")
default:
@@ -137,8 +269,9 @@ func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
// Scan the comment for '\n' chars and adjust endline accordingly.
endline = p.file.Line(p.pos)
if p.lit[1] == '*' {
- for _, b := range p.lit {
- if b == '\n' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.lit); i++ {
+ if p.lit[i] == '\n' {
endline++
}
}
@@ -199,7 +332,7 @@ func (p *parser) next() {
var endline int
if p.file.Line(p.pos) == line {
- // The comment is on same line as previous token; it
+ // The comment is on same line as the previous token; it
// cannot be a lead comment but may be a line comment.
comment, endline = p.consumeCommentGroup()
if p.file.Line(p.pos) != endline {
@@ -239,7 +372,7 @@ func (p *parser) errorExpected(pos token.Pos, msg string) {
} else {
msg += ", found '" + p.tok.String() + "'"
if p.tok.IsLiteral() {
- msg += " " + string(p.lit)
+ msg += " " + p.lit
}
}
}
@@ -264,6 +397,13 @@ func (p *parser) expectSemi() {
}
+func assert(cond bool, msg string) {
+ if !cond {
+ panic("go/parser internal error: " + msg)
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Identifiers
@@ -271,7 +411,7 @@ func (p *parser) parseIdent() *ast.Ident {
pos := p.pos
name := "_"
if p.tok == token.IDENT {
- name = string(p.lit)
+ name = p.lit
p.next()
} else {
p.expect(token.IDENT) // use expect() error handling
@@ -298,21 +438,51 @@ func (p *parser) parseIdentList() (list []*ast.Ident) {
// ----------------------------------------------------------------------------
// Common productions
-func (p *parser) parseExprList() (list []ast.Expr) {
+// If lhs is set, result list elements which are identifiers are not resolved.
+func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
if p.trace {
defer un(trace(p, "ExpressionList"))
}
- list = append(list, p.parseExpr())
+ list = append(list, p.parseExpr(lhs))
for p.tok == token.COMMA {
p.next()
- list = append(list, p.parseExpr())
+ list = append(list, p.parseExpr(lhs))
}
return
}
+func (p *parser) parseLhsList() []ast.Expr {
+ list := p.parseExprList(true)
+ switch p.tok {
+ case token.DEFINE:
+ // lhs of a short variable declaration
+ p.shortVarDecl(p.makeIdentList(list))
+ case token.COLON:
+ // lhs of a label declaration or a communication clause of a select
+ // statement (parseLhsList is not called when parsing the case clause
+ // of a switch statement):
+ // - labels are declared by the caller of parseLhsList
+ // - for communication clauses, if there is a stand-alone identifier
+ // followed by a colon, we have a syntax error; there is no need
+ // to resolve the identifier in that case
+ default:
+ // identifiers must be declared elsewhere
+ for _, x := range list {
+ p.resolve(x)
+ }
+ }
+ return list
+}
+
+
+func (p *parser) parseRhsList() []ast.Expr {
+ return p.parseExprList(false)
+}
+
+
// ----------------------------------------------------------------------------
// Types
@@ -334,28 +504,24 @@ func (p *parser) parseType() ast.Expr {
}
-func (p *parser) parseQualifiedIdent() ast.Expr {
+// If the result is an identifier, it is not resolved.
+func (p *parser) parseTypeName() ast.Expr {
if p.trace {
- defer un(trace(p, "QualifiedIdent"))
+ defer un(trace(p, "TypeName"))
}
- var x ast.Expr = p.parseIdent()
+ ident := p.parseIdent()
+ // don't resolve ident yet - it may be a parameter or field name
+
if p.tok == token.PERIOD {
- // first identifier is a package identifier
+ // ident is a package name
p.next()
+ p.resolve(ident)
sel := p.parseIdent()
- x = &ast.SelectorExpr{x, sel}
- }
- return x
-}
-
-
-func (p *parser) parseTypeName() ast.Expr {
- if p.trace {
- defer un(trace(p, "TypeName"))
+ return &ast.SelectorExpr{ident, sel}
}
- return p.parseQualifiedIdent()
+ return ident
}
@@ -370,7 +536,7 @@ func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
len = &ast.Ellipsis{p.pos, nil}
p.next()
} else if p.tok != token.RBRACK {
- len = p.parseExpr()
+ len = p.parseRhs()
}
p.expect(token.RBRACK)
elt := p.parseType()
@@ -394,7 +560,7 @@ func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
}
-func (p *parser) parseFieldDecl() *ast.Field {
+func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
if p.trace {
defer un(trace(p, "FieldDecl"))
}
@@ -419,6 +585,7 @@ func (p *parser) parseFieldDecl() *ast.Field {
} else {
// ["*"] TypeName (AnonymousField)
typ = list[0] // we always have at least one element
+ p.resolve(typ)
if n := len(list); n > 1 || !isTypeName(deref(typ)) {
pos := typ.Pos()
p.errorExpected(pos, "anonymous field")
@@ -426,9 +593,12 @@ func (p *parser) parseFieldDecl() *ast.Field {
}
}
- p.expectSemi()
+ p.expectSemi() // call before accessing p.lineComment
+
+ field := &ast.Field{doc, idents, typ, tag, p.lineComment}
+ p.declare(field, scope, ast.Var, idents...)
- return &ast.Field{doc, idents, typ, tag, p.lineComment}
+ return field
}
@@ -439,15 +609,17 @@ func (p *parser) parseStructType() *ast.StructType {
pos := p.expect(token.STRUCT)
lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // struct scope
var list []*ast.Field
for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
// a field declaration cannot start with a '(' but we accept
// it here for more robust parsing and better error messages
// (parseFieldDecl will check and complain if necessary)
- list = append(list, p.parseFieldDecl())
+ list = append(list, p.parseFieldDecl(scope))
}
rbrace := p.expect(token.RBRACE)
+ // TODO(gri): store struct scope in AST
return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
}
@@ -468,7 +640,7 @@ func (p *parser) tryVarType(isParam bool) ast.Expr {
if isParam && p.tok == token.ELLIPSIS {
pos := p.pos
p.next()
- typ := p.tryType() // don't use parseType so we can provide better error message
+ typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
if typ == nil {
p.error(pos, "'...' parameter is missing type")
typ = &ast.BadExpr{pos, p.pos}
@@ -478,7 +650,7 @@ func (p *parser) tryVarType(isParam bool) ast.Expr {
}
return &ast.Ellipsis{pos, typ}
}
- return p.tryType()
+ return p.tryIdentOrType(false)
}
@@ -514,12 +686,15 @@ func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
// if we had a list of identifiers, it must be followed by a type
typ = p.tryVarType(isParam)
+ if typ != nil {
+ p.resolve(typ)
+ }
return
}
-func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) {
+func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
if p.trace {
defer un(trace(p, "ParameterList"))
}
@@ -528,7 +703,11 @@ func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) {
if typ != nil {
// IdentifierList Type
idents := p.makeIdentList(list)
- params = append(params, &ast.Field{nil, idents, typ, nil, nil})
+ field := &ast.Field{nil, idents, typ, nil, nil}
+ params = append(params, field)
+ // Go spec: The scope of an identifier denoting a function
+ // parameter or result variable is the function body.
+ p.declare(field, scope, ast.Var, idents...)
if p.tok == token.COMMA {
p.next()
}
@@ -536,7 +715,11 @@ func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) {
for p.tok != token.RPAREN && p.tok != token.EOF {
idents := p.parseIdentList()
typ := p.parseVarType(ellipsisOk)
- params = append(params, &ast.Field{nil, idents, typ, nil, nil})
+ field := &ast.Field{nil, idents, typ, nil, nil}
+ params = append(params, field)
+ // Go spec: The scope of an identifier denoting a function
+ // parameter or result variable is the function body.
+ p.declare(field, scope, ast.Var, idents...)
if p.tok != token.COMMA {
break
}
@@ -547,6 +730,7 @@ func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) {
// Type { "," Type } (anonymous parameters)
params = make([]*ast.Field, len(list))
for i, x := range list {
+ p.resolve(x)
params[i] = &ast.Field{Type: x}
}
}
@@ -555,7 +739,7 @@ func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) {
}
-func (p *parser) parseParameters(ellipsisOk bool) *ast.FieldList {
+func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
if p.trace {
defer un(trace(p, "Parameters"))
}
@@ -563,7 +747,7 @@ func (p *parser) parseParameters(ellipsisOk bool) *ast.FieldList {
var params []*ast.Field
lparen := p.expect(token.LPAREN)
if p.tok != token.RPAREN {
- params = p.parseParameterList(ellipsisOk)
+ params = p.parseParameterList(scope, ellipsisOk)
}
rparen := p.expect(token.RPAREN)
@@ -571,13 +755,13 @@ func (p *parser) parseParameters(ellipsisOk bool) *ast.FieldList {
}
-func (p *parser) parseResult() *ast.FieldList {
+func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
if p.trace {
defer un(trace(p, "Result"))
}
if p.tok == token.LPAREN {
- return p.parseParameters(false)
+ return p.parseParameters(scope, false)
}
typ := p.tryType()
@@ -591,31 +775,32 @@ func (p *parser) parseResult() *ast.FieldList {
}
-func (p *parser) parseSignature() (params, results *ast.FieldList) {
+func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
if p.trace {
defer un(trace(p, "Signature"))
}
- params = p.parseParameters(true)
- results = p.parseResult()
+ params = p.parseParameters(scope, true)
+ results = p.parseResult(scope)
return
}
-func (p *parser) parseFuncType() *ast.FuncType {
+func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
if p.trace {
defer un(trace(p, "FuncType"))
}
pos := p.expect(token.FUNC)
- params, results := p.parseSignature()
+ scope := ast.NewScope(p.topScope) // function scope
+ params, results := p.parseSignature(scope)
- return &ast.FuncType{pos, params, results}
+ return &ast.FuncType{pos, params, results}, scope
}
-func (p *parser) parseMethodSpec() *ast.Field {
+func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
if p.trace {
defer un(trace(p, "MethodSpec"))
}
@@ -623,19 +808,23 @@ func (p *parser) parseMethodSpec() *ast.Field {
doc := p.leadComment
var idents []*ast.Ident
var typ ast.Expr
- x := p.parseQualifiedIdent()
+ x := p.parseTypeName()
if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
// method
idents = []*ast.Ident{ident}
- params, results := p.parseSignature()
+ scope := ast.NewScope(nil) // method scope
+ params, results := p.parseSignature(scope)
typ = &ast.FuncType{token.NoPos, params, results}
} else {
// embedded interface
typ = x
}
- p.expectSemi()
+ p.expectSemi() // call before accessing p.lineComment
- return &ast.Field{doc, idents, typ, nil, p.lineComment}
+ spec := &ast.Field{doc, idents, typ, nil, p.lineComment}
+ p.declare(spec, scope, ast.Fun, idents...)
+
+ return spec
}
@@ -646,12 +835,14 @@ func (p *parser) parseInterfaceType() *ast.InterfaceType {
pos := p.expect(token.INTERFACE)
lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // interface scope
var list []*ast.Field
for p.tok == token.IDENT {
- list = append(list, p.parseMethodSpec())
+ list = append(list, p.parseMethodSpec(scope))
}
rbrace := p.expect(token.RBRACE)
+ // TODO(gri): store interface scope in AST
return &ast.InterfaceType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
}
@@ -695,7 +886,8 @@ func (p *parser) parseChanType() *ast.ChanType {
}
-func (p *parser) tryRawType(ellipsisOk bool) ast.Expr {
+// If the result is an identifier, it is not resolved.
+func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
switch p.tok {
case token.IDENT:
return p.parseTypeName()
@@ -706,7 +898,8 @@ func (p *parser) tryRawType(ellipsisOk bool) ast.Expr {
case token.MUL:
return p.parsePointerType()
case token.FUNC:
- return p.parseFuncType()
+ typ, _ := p.parseFuncType()
+ return typ
case token.INTERFACE:
return p.parseInterfaceType()
case token.MAP:
@@ -726,7 +919,13 @@ func (p *parser) tryRawType(ellipsisOk bool) ast.Expr {
}
-func (p *parser) tryType() ast.Expr { return p.tryRawType(false) }
+func (p *parser) tryType() ast.Expr {
+ typ := p.tryIdentOrType(false)
+ if typ != nil {
+ p.resolve(typ)
+ }
+ return typ
+}
// ----------------------------------------------------------------------------
@@ -745,13 +944,17 @@ func (p *parser) parseStmtList() (list []ast.Stmt) {
}
-func (p *parser) parseBody() *ast.BlockStmt {
+func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
if p.trace {
defer un(trace(p, "Body"))
}
lbrace := p.expect(token.LBRACE)
+ p.topScope = scope // open function scope
+ p.openLabelScope()
list := p.parseStmtList()
+ p.closeLabelScope()
+ p.closeScope()
rbrace := p.expect(token.RBRACE)
return &ast.BlockStmt{lbrace, list, rbrace}
@@ -764,7 +967,9 @@ func (p *parser) parseBlockStmt() *ast.BlockStmt {
}
lbrace := p.expect(token.LBRACE)
+ p.openScope()
list := p.parseStmtList()
+ p.closeScope()
rbrace := p.expect(token.RBRACE)
return &ast.BlockStmt{lbrace, list, rbrace}
@@ -779,14 +984,14 @@ func (p *parser) parseFuncTypeOrLit() ast.Expr {
defer un(trace(p, "FuncTypeOrLit"))
}
- typ := p.parseFuncType()
+ typ, scope := p.parseFuncType()
if p.tok != token.LBRACE {
// function type only
return typ
}
p.exprLev++
- body := p.parseBody()
+ body := p.parseBody(scope)
p.exprLev--
return &ast.FuncLit{typ, body}
@@ -795,15 +1000,20 @@ func (p *parser) parseFuncTypeOrLit() ast.Expr {
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
+// If lhs is set and the result is an identifier, it is not resolved.
//
-func (p *parser) parseOperand() ast.Expr {
+func (p *parser) parseOperand(lhs bool) ast.Expr {
if p.trace {
defer un(trace(p, "Operand"))
}
switch p.tok {
case token.IDENT:
- return p.parseIdent()
+ x := p.parseIdent()
+ if !lhs {
+ p.resolve(x)
+ }
+ return x
case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
x := &ast.BasicLit{p.pos, p.tok, p.lit}
@@ -814,7 +1024,7 @@ func (p *parser) parseOperand() ast.Expr {
lparen := p.pos
p.next()
p.exprLev++
- x := p.parseExpr()
+ x := p.parseRhs()
p.exprLev--
rparen := p.expect(token.RPAREN)
return &ast.ParenExpr{lparen, x, rparen}
@@ -823,9 +1033,11 @@ func (p *parser) parseOperand() ast.Expr {
return p.parseFuncTypeOrLit()
default:
- t := p.tryRawType(true) // could be type for composite literal or conversion
- if t != nil {
- return t
+ if typ := p.tryIdentOrType(true); typ != nil {
+ // could be type for composite literal or conversion
+ _, isIdent := typ.(*ast.Ident)
+ assert(!isIdent, "type cannot be identifier")
+ return typ
}
}
@@ -836,19 +1048,22 @@ func (p *parser) parseOperand() ast.Expr {
}
-func (p *parser) parseSelectorOrTypeAssertion(x ast.Expr) ast.Expr {
+func (p *parser) parseSelector(x ast.Expr) ast.Expr {
if p.trace {
- defer un(trace(p, "SelectorOrTypeAssertion"))
+ defer un(trace(p, "Selector"))
}
- p.expect(token.PERIOD)
- if p.tok == token.IDENT {
- // selector
- sel := p.parseIdent()
- return &ast.SelectorExpr{x, sel}
+ sel := p.parseIdent()
+
+ return &ast.SelectorExpr{x, sel}
+}
+
+
+func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "TypeAssertion"))
}
- // type assertion
p.expect(token.LPAREN)
var typ ast.Expr
if p.tok == token.TYPE {
@@ -873,13 +1088,13 @@ func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
var low, high ast.Expr
isSlice := false
if p.tok != token.COLON {
- low = p.parseExpr()
+ low = p.parseRhs()
}
if p.tok == token.COLON {
isSlice = true
p.next()
if p.tok != token.RBRACK {
- high = p.parseExpr()
+ high = p.parseRhs()
}
}
p.exprLev--
@@ -902,7 +1117,7 @@ func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
var list []ast.Expr
var ellipsis token.Pos
for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
- list = append(list, p.parseExpr())
+ list = append(list, p.parseRhs())
if p.tok == token.ELLIPSIS {
ellipsis = p.pos
p.next()
@@ -928,12 +1143,16 @@ func (p *parser) parseElement(keyOk bool) ast.Expr {
return p.parseLiteralValue(nil)
}
- x := p.parseExpr()
- if keyOk && p.tok == token.COLON {
- colon := p.pos
- p.next()
- x = &ast.KeyValueExpr{x, colon, p.parseElement(false)}
+ x := p.parseExpr(keyOk) // don't resolve if map key
+ if keyOk {
+ if p.tok == token.COLON {
+ colon := p.pos
+ p.next()
+ return &ast.KeyValueExpr{x, colon, p.parseElement(false)}
+ }
+ p.resolve(x) // not a map key
}
+
return x
}
@@ -1085,23 +1304,47 @@ func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
}
-func (p *parser) parsePrimaryExpr() ast.Expr {
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
if p.trace {
defer un(trace(p, "PrimaryExpr"))
}
- x := p.parseOperand()
+ x := p.parseOperand(lhs)
L:
for {
switch p.tok {
case token.PERIOD:
- x = p.parseSelectorOrTypeAssertion(p.checkExpr(x))
+ p.next()
+ if lhs {
+ p.resolve(x)
+ }
+ switch p.tok {
+ case token.IDENT:
+ x = p.parseSelector(p.checkExpr(x))
+ case token.LPAREN:
+ x = p.parseTypeAssertion(p.checkExpr(x))
+ default:
+ pos := p.pos
+ p.next() // make progress
+ p.errorExpected(pos, "selector or type assertion")
+ x = &ast.BadExpr{pos, p.pos}
+ }
case token.LBRACK:
+ if lhs {
+ p.resolve(x)
+ }
x = p.parseIndexOrSlice(p.checkExpr(x))
case token.LPAREN:
+ if lhs {
+ p.resolve(x)
+ }
x = p.parseCallOrConversion(p.checkExprOrType(x))
case token.LBRACE:
if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
+ if lhs {
+ p.resolve(x)
+ }
x = p.parseLiteralValue(x)
} else {
break L
@@ -1109,13 +1352,15 @@ L:
default:
break L
}
+ lhs = false // no need to try to resolve again
}
return x
}
-func (p *parser) parseUnaryExpr() ast.Expr {
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
if p.trace {
defer un(trace(p, "UnaryExpr"))
}
@@ -1124,7 +1369,7 @@ func (p *parser) parseUnaryExpr() ast.Expr {
case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.RANGE:
pos, op := p.pos, p.tok
p.next()
- x := p.parseUnaryExpr()
+ x := p.parseUnaryExpr(false)
return &ast.UnaryExpr{pos, op, p.checkExpr(x)}
case token.ARROW:
@@ -1137,32 +1382,37 @@ func (p *parser) parseUnaryExpr() ast.Expr {
return &ast.ChanType{pos, ast.RECV, value}
}
- x := p.parseUnaryExpr()
+ x := p.parseUnaryExpr(false)
return &ast.UnaryExpr{pos, token.ARROW, p.checkExpr(x)}
case token.MUL:
// pointer type or unary "*" expression
pos := p.pos
p.next()
- x := p.parseUnaryExpr()
+ x := p.parseUnaryExpr(false)
return &ast.StarExpr{pos, p.checkExprOrType(x)}
}
- return p.parsePrimaryExpr()
+ return p.parsePrimaryExpr(lhs)
}
-func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
if p.trace {
defer un(trace(p, "BinaryExpr"))
}
- x := p.parseUnaryExpr()
+ x := p.parseUnaryExpr(lhs)
for prec := p.tok.Precedence(); prec >= prec1; prec-- {
for p.tok.Precedence() == prec {
pos, op := p.pos, p.tok
p.next()
- y := p.parseBinaryExpr(prec + 1)
+ if lhs {
+ p.resolve(x)
+ lhs = false
+ }
+ y := p.parseBinaryExpr(false, prec+1)
x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)}
}
}
@@ -1171,14 +1421,20 @@ func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
}
+// If lhs is set and the result is an identifier, it is not resolved.
// TODO(gri): parseExpr may return a type or even a raw type ([..]int) -
// should reject when a type/raw type is obviously not allowed
-func (p *parser) parseExpr() ast.Expr {
+func (p *parser) parseExpr(lhs bool) ast.Expr {
if p.trace {
defer un(trace(p, "Expression"))
}
- return p.parseBinaryExpr(token.LowestPrec + 1)
+ return p.parseBinaryExpr(lhs, token.LowestPrec+1)
+}
+
+
+func (p *parser) parseRhs() ast.Expr {
+ return p.parseExpr(false)
}
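
Threading the lhs flag through the expression parsers defers identifier resolution on the left-hand side of an assignment: with :=, a left-hand identifier may be a brand-new declaration rather than a use, so it must not be looked up eagerly. A minimal sketch of the cases involved:

	package main

	func f() (int, int) { return 1, 2 }

	func main() {
		a := 0
		a, b := f() // "a" resolves to the existing variable; "b" declares a new one,
		_ = a       // which is why LHS identifiers are left unresolved while parsing
		_ = b
	}
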
@@ -1190,7 +1446,7 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
defer un(trace(p, "SimpleStmt"))
}
- x := p.parseExprList()
+ x := p.parseLhsList()
switch p.tok {
case
@@ -1201,7 +1457,7 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
// assignment statement
pos, tok := p.pos, p.tok
p.next()
- y := p.parseExprList()
+ y := p.parseRhsList()
return &ast.AssignStmt{x, pos, tok, y}
}
@@ -1216,7 +1472,12 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
colon := p.pos
p.next()
if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent {
- return &ast.LabeledStmt{label, colon, p.parseStmt()}
+ // Go spec: The scope of a label is the body of the function
+ // in which it is declared and excludes the body of any nested
+ // function.
+ stmt := &ast.LabeledStmt{label, colon, p.parseStmt()}
+ p.declare(stmt, p.labelScope, ast.Lbl, label)
+ return stmt
}
p.error(x[0].Pos(), "illegal label declaration")
return &ast.BadStmt{x[0].Pos(), colon + 1}
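
The declaration into p.labelScope mirrors the spec rule quoted above: labels live in a separate scope that covers the whole function body but excludes nested function literals. For example:

	package main

	func main() {
	Loop:
		for i := 0; i < 3; i++ {
			if i == 1 {
				continue Loop // the label is visible throughout the function body
			}
		}
		// a nested func() { goto Loop }() would not compile: the label's
		// scope excludes nested function literals
	}
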
@@ -1225,7 +1486,7 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
// send statement
arrow := p.pos
p.next() // consume "<-"
- y := p.parseExpr()
+ y := p.parseRhs()
return &ast.SendStmt{x[0], arrow, y}
case token.INC, token.DEC:
@@ -1241,7 +1502,7 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
func (p *parser) parseCallExpr() *ast.CallExpr {
- x := p.parseExpr()
+ x := p.parseRhs()
if call, isCall := x.(*ast.CallExpr); isCall {
return call
}
@@ -1291,7 +1552,7 @@ func (p *parser) parseReturnStmt() *ast.ReturnStmt {
p.expect(token.RETURN)
var x []ast.Expr
if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
- x = p.parseExprList()
+ x = p.parseRhsList()
}
p.expectSemi()
@@ -1304,14 +1565,17 @@ func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
defer un(trace(p, "BranchStmt"))
}
- s := &ast.BranchStmt{p.pos, tok, nil}
- p.expect(tok)
+ pos := p.expect(tok)
+ var label *ast.Ident
if tok != token.FALLTHROUGH && p.tok == token.IDENT {
- s.Label = p.parseIdent()
+ label = p.parseIdent()
+ // add to list of unresolved targets
+ n := len(p.targetStack) - 1
+ p.targetStack[n] = append(p.targetStack[n], label)
}
p.expectSemi()
- return s
+ return &ast.BranchStmt{pos, tok, label}
}
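
Branch targets may be forward references, so parseBranchStmt cannot resolve a label at the point of use; it pushes the identifier onto the innermost targetStack frame, and resolution happens once the enclosing function body has been parsed. For instance:

	package main

	import "fmt"

	func main() {
		if true {
			goto done // "done" is not declared yet when this is parsed
		}
		fmt.Println("skipped")
	done:
		fmt.Println("done")
	}
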
@@ -1333,6 +1597,8 @@ func (p *parser) parseIfStmt() *ast.IfStmt {
}
pos := p.expect(token.IF)
+ p.openScope()
+ defer p.closeScope()
var s ast.Stmt
var x ast.Expr
@@ -1341,12 +1607,12 @@ func (p *parser) parseIfStmt() *ast.IfStmt {
p.exprLev = -1
if p.tok == token.SEMICOLON {
p.next()
- x = p.parseExpr()
+ x = p.parseRhs()
} else {
s = p.parseSimpleStmt(false)
if p.tok == token.SEMICOLON {
p.next()
- x = p.parseExpr()
+ x = p.parseRhs()
} else {
x = p.makeExpr(s)
s = nil
@@ -1368,28 +1634,6 @@ func (p *parser) parseIfStmt() *ast.IfStmt {
}
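
With openScope/closeScope now bracketing the statement, anything declared in an if init statement is scoped to the statement itself:

	package main

	import "fmt"

	func main() {
		if n := 2; n > 1 {
			fmt.Println("n =", n) // n is visible in the condition and both branches
		}
		// fmt.Println(n) // would not compile: undefined: n
	}
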
-func (p *parser) parseCaseClause() *ast.CaseClause {
- if p.trace {
- defer un(trace(p, "CaseClause"))
- }
-
- // SwitchCase
- pos := p.pos
- var x []ast.Expr
- if p.tok == token.CASE {
- p.next()
- x = p.parseExprList()
- } else {
- p.expect(token.DEFAULT)
- }
-
- colon := p.expect(token.COLON)
- body := p.parseStmtList()
-
- return &ast.CaseClause{pos, x, colon, body}
-}
-
-
func (p *parser) parseTypeList() (list []ast.Expr) {
if p.trace {
defer un(trace(p, "TypeList"))
@@ -1405,25 +1649,30 @@ func (p *parser) parseTypeList() (list []ast.Expr) {
}
-func (p *parser) parseTypeCaseClause() *ast.TypeCaseClause {
+func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
if p.trace {
- defer un(trace(p, "TypeCaseClause"))
+ defer un(trace(p, "CaseClause"))
}
- // TypeSwitchCase
pos := p.pos
- var types []ast.Expr
+ var list []ast.Expr
if p.tok == token.CASE {
p.next()
- types = p.parseTypeList()
+ if exprSwitch {
+ list = p.parseRhsList()
+ } else {
+ list = p.parseTypeList()
+ }
} else {
p.expect(token.DEFAULT)
}
colon := p.expect(token.COLON)
+ p.openScope()
body := p.parseStmtList()
+ p.closeScope()
- return &ast.TypeCaseClause{pos, types, colon, body}
+ return &ast.CaseClause{pos, list, colon, body}
}
@@ -1447,6 +1696,8 @@ func (p *parser) parseSwitchStmt() ast.Stmt {
}
pos := p.expect(token.SWITCH)
+ p.openScope()
+ defer p.closeScope()
var s1, s2 ast.Stmt
if p.tok != token.LBRACE {
@@ -1466,28 +1717,21 @@ func (p *parser) parseSwitchStmt() ast.Stmt {
p.exprLev = prevLev
}
- if isExprSwitch(s2) {
- lbrace := p.expect(token.LBRACE)
- var list []ast.Stmt
- for p.tok == token.CASE || p.tok == token.DEFAULT {
- list = append(list, p.parseCaseClause())
- }
- rbrace := p.expect(token.RBRACE)
- body := &ast.BlockStmt{lbrace, list, rbrace}
- p.expectSemi()
- return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
- }
-
- // type switch
- // TODO(gri): do all the checks!
+ exprSwitch := isExprSwitch(s2)
lbrace := p.expect(token.LBRACE)
var list []ast.Stmt
for p.tok == token.CASE || p.tok == token.DEFAULT {
- list = append(list, p.parseTypeCaseClause())
+ list = append(list, p.parseCaseClause(exprSwitch))
}
rbrace := p.expect(token.RBRACE)
p.expectSemi()
body := &ast.BlockStmt{lbrace, list, rbrace}
+
+ if exprSwitch {
+ return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
+ }
+ // type switch
+ // TODO(gri): do all the checks!
return &ast.TypeSwitchStmt{pos, s1, s2, body}
}
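
Unifying the two clause parsers reflects that expression and type switches are syntactically parallel; only the switch header decides how the case list is read (expressions via parseRhsList, types via parseTypeList). A small example of the two forms, sketched for illustration:

	package main

	import "fmt"

	func describe(v interface{}) string {
		switch x := v.(type) { // type switch: cases list types
		case int:
			switch { // expression switch: cases list boolean expressions
			case x < 0:
				return "negative int"
			default:
				return "non-negative int"
			}
		case string:
			return fmt.Sprintf("string %q", x)
		default:
			return "something else"
		}
	}

	func main() {
		fmt.Println(describe(42), describe("hi"))
	}
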
@@ -1497,12 +1741,12 @@ func (p *parser) parseCommClause() *ast.CommClause {
defer un(trace(p, "CommClause"))
}
- // CommCase
+ p.openScope()
pos := p.pos
var comm ast.Stmt
if p.tok == token.CASE {
p.next()
- lhs := p.parseExprList()
+ lhs := p.parseLhsList()
if p.tok == token.ARROW {
// SendStmt
if len(lhs) > 1 {
@@ -1511,14 +1755,14 @@ func (p *parser) parseCommClause() *ast.CommClause {
}
arrow := p.pos
p.next()
- rhs := p.parseExpr()
+ rhs := p.parseRhs()
comm = &ast.SendStmt{lhs[0], arrow, rhs}
} else {
// RecvStmt
pos := p.pos
tok := p.tok
var rhs ast.Expr
- if p.tok == token.ASSIGN || p.tok == token.DEFINE {
+ if tok == token.ASSIGN || tok == token.DEFINE {
// RecvStmt with assignment
if len(lhs) > 2 {
p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
@@ -1526,7 +1770,7 @@ func (p *parser) parseCommClause() *ast.CommClause {
lhs = lhs[0:2]
}
p.next()
- rhs = p.parseExpr()
+ rhs = p.parseRhs()
} else {
// rhs must be single receive operation
if len(lhs) > 1 {
@@ -1552,6 +1796,7 @@ func (p *parser) parseCommClause() *ast.CommClause {
colon := p.expect(token.COLON)
body := p.parseStmtList()
+ p.closeScope()
return &ast.CommClause{pos, comm, colon, body}
}
@@ -1582,6 +1827,8 @@ func (p *parser) parseForStmt() ast.Stmt {
}
pos := p.expect(token.FOR)
+ p.openScope()
+ defer p.closeScope()
var s1, s2, s3 ast.Stmt
if p.tok != token.LBRACE {
@@ -1631,18 +1878,16 @@ func (p *parser) parseForStmt() ast.Stmt {
return &ast.BadStmt{pos, body.End()}
}
if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
- // rhs is range expression; check lhs
+ // rhs is range expression
+ // (any short variable declaration was handled by parseSimpleStmt above)
PLACEHOLDER
return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}
- } else {
- p.errorExpected(s2.Pos(), "range clause")
- return &ast.BadStmt{pos, body.End()}
}
- } else {
- // regular for statement
- return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
+ p.errorExpected(s2.Pos(), "range clause")
+ return &ast.BadStmt{pos, body.End()}
}
- panic("unreachable")
+ // regular for statement
+ return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
}
@@ -1706,19 +1951,20 @@ func (p *parser) parseStmt() (s ast.Stmt) {
// ----------------------------------------------------------------------------
// Declarations
-type parseSpecFunction func(p *parser, doc *ast.CommentGroup) ast.Spec
+type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
-func parseImportSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
+func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "ImportSpec"))
}
var ident *ast.Ident
- if p.tok == token.PERIOD {
+ switch p.tok {
+ case token.PERIOD:
ident = &ast.Ident{p.pos, ".", nil}
p.next()
- } else if p.tok == token.IDENT {
+ case token.IDENT:
ident = p.parseIdent()
}
@@ -1729,13 +1975,17 @@ func parseImportSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
} else {
p.expect(token.STRING) // use expect() error handling
}
- p.expectSemi()
+ p.expectSemi() // call before accessing p.lineComment
- return &ast.ImportSpec{doc, ident, path, p.lineComment}
+ // collect imports
+ spec := &ast.ImportSpec{doc, ident, path, p.lineComment}
+ p.imports = append(p.imports, spec)
+
+ return spec
}
-func parseConstSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
+func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
if p.trace {
defer un(trace(p, "ConstSpec"))
}
@@ -1743,30 +1993,46 @@ func parseConstSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
idents := p.parseIdentList()
typ := p.tryType()
var values []ast.Expr
- if typ != nil || p.tok == token.ASSIGN {
+ if typ != nil || p.tok == token.ASSIGN || iota == 0 {
p.expect(token.ASSIGN)
- values = p.parseExprList()
+ values = p.parseRhsList()
}
- p.expectSemi()
+ p.expectSemi() // call before accessing p.lineComment
+
+ // Go spec: The scope of a constant or variable identifier declared inside
+ // a function begins at the end of the ConstSpec or VarSpec and ends at
+ // the end of the innermost containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ p.declare(spec, p.topScope, ast.Con, idents...)
- return &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ return spec
}
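
The new iota == 0 condition requires the first ConstSpec in a group to provide an expression list; later specs may omit theirs and implicitly repeat the previous list with iota incremented (the parser test below adds `const c;` as an illegal input for exactly this reason). For example:

	package main

	import "fmt"

	const (
		a = iota // the first spec must provide values (iota == 0 here)
		b        // later specs may omit them: the previous list repeats
		c        // with iota incremented, so a=0, b=1, c=2
	)

	func main() { fmt.Println(a, b, c) }
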
-func parseTypeSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
+func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "TypeSpec"))
}
ident := p.parseIdent()
- typ := p.parseType()
- p.expectSemi()
- return &ast.TypeSpec{doc, ident, typ, p.lineComment}
+ // Go spec: The scope of a type identifier declared inside a function begins
+ // at the identifier in the TypeSpec and ends at the end of the innermost
+ // containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.TypeSpec{doc, ident, nil, nil}
+ p.declare(spec, p.topScope, ast.Typ, ident)
+
+ spec.Type = p.parseType()
+ p.expectSemi() // call before accessing p.lineComment
+ spec.Comment = p.lineComment
+
+ return spec
}
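
Declaring the TypeSpec before parsing its right-hand side matches the spec rule quoted above: a type name is in scope starting at its own identifier, which is what makes self-referential types legal. For example:

	package main

	type List struct {
		val  int
		next *List // legal: "List" is already in scope at this point
	}

	func main() {
		_ = &List{1, nil}
	}
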
-func parseVarSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
+func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "VarSpec"))
}
@@ -1776,11 +2042,18 @@ func parseVarSpec(p *parser, doc *ast.CommentGroup) ast.Spec {
var values []ast.Expr
if typ == nil || p.tok == token.ASSIGN {
p.expect(token.ASSIGN)
- values = p.parseExprList()
+ values = p.parseRhsList()
}
- p.expectSemi()
+ p.expectSemi() // call before accessing p.lineComment
+
+ // Go spec: The scope of a constant or variable identifier declared inside
+ // a function begins at the end of the ConstSpec or VarSpec and ends at
+ // the end of the innermost containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ p.declare(spec, p.topScope, ast.Var, idents...)
- return &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ return spec
}
@@ -1796,26 +2069,26 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.Gen
if p.tok == token.LPAREN {
lparen = p.pos
p.next()
- for p.tok != token.RPAREN && p.tok != token.EOF {
- list = append(list, f(p, p.leadComment))
+ for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
+ list = append(list, f(p, p.leadComment, iota))
}
rparen = p.expect(token.RPAREN)
p.expectSemi()
} else {
- list = append(list, f(p, nil))
+ list = append(list, f(p, nil, 0))
}
return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen}
}
-func (p *parser) parseReceiver() *ast.FieldList {
+func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
if p.trace {
defer un(trace(p, "Receiver"))
}
pos := p.pos
- par := p.parseParameters(false)
+ par := p.parseParameters(scope, false)
// must have exactly one receiver
if par.NumFields() != 1 {
@@ -1844,22 +2117,37 @@ func (p *parser) parseFuncDecl() *ast.FuncDecl {
doc := p.leadComment
pos := p.expect(token.FUNC)
+ scope := ast.NewScope(p.topScope) // function scope
var recv *ast.FieldList
if p.tok == token.LPAREN {
- recv = p.parseReceiver()
+ recv = p.parseReceiver(scope)
}
ident := p.parseIdent()
- params, results := p.parseSignature()
+
+ params, results := p.parseSignature(scope)
var body *ast.BlockStmt
if p.tok == token.LBRACE {
- body = p.parseBody()
+ body = p.parseBody(scope)
}
p.expectSemi()
- return &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}
+ decl := &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}
+ if recv == nil {
+ // Go spec: The scope of an identifier denoting a constant, type,
+ // variable, or function (but not method) declared at top level
+ // (outside any function) is the package block.
+ //
+ // init() functions cannot be referred to and there may
+ // be more than one - don't put them in the pkgScope
+ if ident.Name != "init" {
+ p.declare(decl, p.pkgScope, ast.Fun, ident)
+ }
+ }
+
+ return decl
}
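
The ident.Name != "init" check exists because init functions are special: a file may contain any number of them, and none can be referred to by name, so entering them into the package scope would only cause spurious redeclaration errors. For example:

	package main

	import "fmt"

	func init() { fmt.Println("first init") }
	func init() { fmt.Println("second init") } // multiple init funcs are legal

	func main() {
		// init() // would not compile: the identifier "init" is not declared
	}
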
@@ -1918,7 +2206,12 @@ func (p *parser) parseFile() *ast.File {
// package clause
doc := p.leadComment
pos := p.expect(token.PACKAGE)
+ // Go spec: The package clause is not a declaration;
+ // the package name does not appear in any scope.
ident := p.parseIdent()
+ if ident.Name == "_" {
+ p.error(p.pos, "invalid package name _")
+ }
p.expectSemi()
var decls []ast.Decl
@@ -1940,5 +2233,20 @@ func (p *parser) parseFile() *ast.File {
}
}
- return &ast.File{doc, pos, ident, decls, p.comments}
+ assert(p.topScope == p.pkgScope, "imbalanced scopes")
+
+ // resolve global identifiers within the same file
+ i := 0
+ for _, ident := range p.unresolved {
+ // i <= index for current ident
+ assert(ident.Obj == unresolved, "object already resolved")
+ ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
+ if ident.Obj == nil {
+ p.unresolved[i] = ident
+ i++
+ }
+ }
+
+ // TODO(gri): store p.imports in AST
+ return &ast.File{doc, pos, ident, decls, p.pkgScope, p.imports, p.unresolved[0:i], p.comments}
}
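
The loop above filters p.unresolved in place: identifiers found in the package scope are resolved, and the rest are compacted to the front of the slice and kept for a later, package-wide resolution phase (see ast/resolve.go in the diffstat). A hypothetical stand-alone helper showing the same idiom:

	package resolve

	import "go/ast"

	// filterResolved mirrors the loop in parseFile: identifiers found in
	// scope are resolved in place; the rest are compacted to the front of
	// the slice and returned for a later resolution phase.
	func filterResolved(unresolved []*ast.Ident, scope *ast.Scope) []*ast.Ident {
		i := 0
		for _, ident := range unresolved {
			if obj := scope.Lookup(ident.Name); obj != nil {
				ident.Obj = obj
			} else {
				unresolved[i] = ident
				i++
			}
		}
		return unresolved[0:i]
	}
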
diff --git a/src/pkg/go/parser/parser_test.go b/src/pkg/go/parser/parser_test.go
index 38535627a..2f1ee6bfc 100644
--- a/src/pkg/go/parser/parser_test.go
+++ b/src/pkg/go/parser/parser_test.go
@@ -21,6 +21,7 @@ var illegalInputs = []interface{}{
`package p; func f() { if /* should have condition */ {} };`,
`package p; func f() { if ; /* should have condition */ {} };`,
`package p; func f() { if f(); /* should have condition */ {} };`,
+ `package p; const c; /* should have constant value */`,
}
@@ -73,7 +74,7 @@ var validFiles = []string{
func TestParse3(t *testing.T) {
for _, filename := range validFiles {
- _, err := ParseFile(fset, filename, nil, 0)
+ _, err := ParseFile(fset, filename, nil, DeclarationErrors)
if err != nil {
t.Errorf("ParseFile(%s): %v", filename, err)
}
diff --git a/src/pkg/go/printer/nodes.go b/src/pkg/go/printer/nodes.go
index 7933c2f18..86c327930 100644
--- a/src/pkg/go/printer/nodes.go
+++ b/src/pkg/go/printer/nodes.go
@@ -108,17 +108,6 @@ func (p *printer) identList(list []*ast.Ident, indent bool, multiLine *bool) {
}
-// Compute the key size of a key:value expression.
-// Returns 0 if the expression doesn't fit onto a single line.
-func (p *printer) keySize(pair *ast.KeyValueExpr) int {
- if p.nodeSize(pair, infinity) <= infinity {
- // entire expression fits on one line - return key size
- return p.nodeSize(pair.Key, infinity)
- }
- return 0
-}
-
-
// Print a list of expressions. If the list spans multiple
// source lines, the original line breaks are respected between
// expressions. Sets multiLine to true if the list spans multiple
@@ -171,19 +160,7 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
// the first linebreak is always a formfeed since this section must not
// depend on any previous formatting
prevBreak := -1 // index of last expression that was followed by a linebreak
- linebreakMin := 1
- if mode&periodSep != 0 {
- // Make fragments like
- //
- // a.Bar(1,
- // 2).Foo
- //
- // format correctly (a linebreak shouldn't be added before Foo) when
- // doing period-separated expr lists by setting minimum linebreak to 0
- // lines for them.
- linebreakMin = 0
- }
- if prev.IsValid() && prev.Line < line && p.linebreak(line, linebreakMin, ws, true) {
+ if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) {
ws = ignore
*multiLine = true
prevBreak = 0
@@ -204,17 +181,21 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
// the key and the node size into the decision process
useFF := true
- // determine size
+ // determine element size: all bets are off if we don't have
+ // position information for the previous and next token (likely
+ // generated code - simply ignore the size in this case by setting
+ // it to 0)
prevSize := size
const infinity = 1e6 // larger than any source line
size = p.nodeSize(x, infinity)
pair, isPair := x.(*ast.KeyValueExpr)
- if size <= infinity {
+ if size <= infinity && prev.IsValid() && next.IsValid() {
// x fits on a single line
if isPair {
size = p.nodeSize(pair.Key, infinity) // size <= infinity
}
} else {
+ // size too large or we don't have good layout information
size = 0
}
@@ -244,8 +225,7 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp
// lines are broken using newlines so comments remain aligned
// unless forceFF is set or there are multiple expressions on
// the same line in which case formfeed is used
- // broken with a formfeed
- if p.linebreak(line, linebreakMin, ws, useFF || prevBreak+1 < i) {
+ if p.linebreak(line, 0, ws, useFF || prevBreak+1 < i) {
ws = ignore
*multiLine = true
prevBreak = i
@@ -371,11 +351,11 @@ func (p *printer) isOneLineFieldList(list []*ast.Field) bool {
func (p *printer) setLineComment(text string) {
- p.setComment(&ast.CommentGroup{[]*ast.Comment{&ast.Comment{token.NoPos, []byte(text)}}})
+ p.setComment(&ast.CommentGroup{[]*ast.Comment{&ast.Comment{token.NoPos, text}}})
}
-func (p *printer) fieldList(fields *ast.FieldList, isIncomplete bool, ctxt exprContext) {
+func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) {
p.nesting++
defer func() {
p.nesting--
@@ -384,15 +364,15 @@ func (p *printer) fieldList(fields *ast.FieldList, isIncomplete bool, ctxt exprC
lbrace := fields.Opening
list := fields.List
rbrace := fields.Closing
+ srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.fset.Position(lbrace).Line == p.fset.Position(rbrace).Line
- if !isIncomplete && !p.commentBefore(p.fset.Position(rbrace)) {
+ if !isIncomplete && !p.commentBefore(p.fset.Position(rbrace)) && srcIsOneLine {
// possibly a one-line struct/interface
if len(list) == 0 {
// no blank between keyword and {} in this case
p.print(lbrace, token.LBRACE, rbrace, token.RBRACE)
return
- } else if ctxt&(compositeLit|structType) == compositeLit|structType &&
- p.isOneLineFieldList(list) { // for now ignore interfaces
+ } else if isStruct && p.isOneLineFieldList(list) { // for now ignore interfaces
// small enough - print on one line
// (don't use identList and ignore source line breaks)
p.print(lbrace, token.LBRACE, blank)
@@ -414,7 +394,7 @@ func (p *printer) fieldList(fields *ast.FieldList, isIncomplete bool, ctxt exprC
// at least one entry or incomplete
p.print(blank, lbrace, token.LBRACE, indent, formfeed)
- if ctxt&structType != 0 {
+ if isStruct {
sep := vtab
if len(list) == 1 {
@@ -497,15 +477,6 @@ func (p *printer) fieldList(fields *ast.FieldList, isIncomplete bool, ctxt exprC
// ----------------------------------------------------------------------------
// Expressions
-// exprContext describes the syntactic environment in which an expression node is printed.
-type exprContext uint
-
-const (
- compositeLit exprContext = 1 << iota
- structType
-)
-
-
func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) {
switch e.Op.Precedence() {
case 4:
@@ -544,7 +515,7 @@ func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) {
}
case *ast.StarExpr:
- if e.Op.String() == "/" {
+ if e.Op == token.QUO { // `*/`
maxProblem = 5
}
@@ -650,7 +621,7 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiL
printBlank := prec < cutoff
ws := indent
- p.expr1(x.X, prec, depth+diffPrec(x.X, prec), 0, multiLine)
+ p.expr1(x.X, prec, depth+diffPrec(x.X, prec), multiLine)
if printBlank {
p.print(blank)
}
@@ -669,7 +640,7 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiL
if printBlank {
p.print(blank)
}
- p.expr1(x.Y, prec+1, depth+1, 0, multiLine)
+ p.expr1(x.Y, prec+1, depth+1, multiLine)
if ws == ignore {
p.print(unindent)
}
@@ -742,7 +713,7 @@ func selectorExprList(expr ast.Expr) (list []ast.Expr) {
// Sets multiLine to true if the expression spans multiple lines.
-func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multiLine *bool) {
+func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
p.print(expr.Pos())
switch x := expr.(type) {
@@ -792,7 +763,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
// TODO(gri) Remove this code if it cannot be reached.
p.print(blank)
}
- p.expr1(x.X, prec, depth, 0, multiLine)
+ p.expr1(x.X, prec, depth, multiLine)
}
case *ast.BasicLit:
@@ -818,7 +789,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
p.exprList(token.NoPos, parts, depth, periodSep, multiLine, token.NoPos)
case *ast.TypeAssertExpr:
- p.expr1(x.X, token.HighestPrec, depth, 0, multiLine)
+ p.expr1(x.X, token.HighestPrec, depth, multiLine)
p.print(token.PERIOD, token.LPAREN)
if x.Type != nil {
p.expr(x.Type, multiLine)
@@ -829,14 +800,14 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
case *ast.IndexExpr:
// TODO(gri): should treat [] like parentheses and undo one level of depth
- p.expr1(x.X, token.HighestPrec, 1, 0, multiLine)
+ p.expr1(x.X, token.HighestPrec, 1, multiLine)
p.print(x.Lbrack, token.LBRACK)
p.expr0(x.Index, depth+1, multiLine)
p.print(x.Rbrack, token.RBRACK)
case *ast.SliceExpr:
// TODO(gri): should treat [] like parentheses and undo one level of depth
- p.expr1(x.X, token.HighestPrec, 1, 0, multiLine)
+ p.expr1(x.X, token.HighestPrec, 1, multiLine)
p.print(x.Lbrack, token.LBRACK)
if x.Low != nil {
p.expr0(x.Low, depth+1, multiLine)
@@ -856,7 +827,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
if len(x.Args) > 1 {
depth++
}
- p.expr1(x.Fun, token.HighestPrec, depth, 0, multiLine)
+ p.expr1(x.Fun, token.HighestPrec, depth, multiLine)
p.print(x.Lparen, token.LPAREN)
p.exprList(x.Lparen, x.Args, depth, commaSep|commaTerm, multiLine, x.Rparen)
if x.Ellipsis.IsValid() {
@@ -867,7 +838,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
case *ast.CompositeLit:
// composite literal elements that are composite literals themselves may have the type omitted
if x.Type != nil {
- p.expr1(x.Type, token.HighestPrec, depth, compositeLit, multiLine)
+ p.expr1(x.Type, token.HighestPrec, depth, multiLine)
}
p.print(x.Lbrace, token.LBRACE)
p.exprList(x.Lbrace, x.Elts, 1, commaSep|commaTerm, multiLine, x.Rbrace)
@@ -892,7 +863,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
case *ast.StructType:
p.print(token.STRUCT)
- p.fieldList(x.Fields, x.Incomplete, ctxt|structType)
+ p.fieldList(x.Fields, true, x.Incomplete)
case *ast.FuncType:
p.print(token.FUNC)
@@ -900,7 +871,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
case *ast.InterfaceType:
p.print(token.INTERFACE)
- p.fieldList(x.Methods, x.Incomplete, ctxt)
+ p.fieldList(x.Methods, false, x.Incomplete)
case *ast.MapType:
p.print(token.MAP, token.LBRACK)
@@ -929,14 +900,14 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi
func (p *printer) expr0(x ast.Expr, depth int, multiLine *bool) {
- p.expr1(x, token.LowestPrec, depth, 0, multiLine)
+ p.expr1(x, token.LowestPrec, depth, multiLine)
}
// Sets multiLine to true if the expression spans multiple lines.
func (p *printer) expr(x ast.Expr, multiLine *bool) {
const depth = 1
- p.expr1(x, token.LowestPrec, depth, 0, multiLine)
+ p.expr1(x, token.LowestPrec, depth, multiLine)
}
@@ -1145,9 +1116,9 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
}
case *ast.CaseClause:
- if s.Values != nil {
+ if s.List != nil {
p.print(token.CASE)
- p.exprList(s.Pos(), s.Values, 1, blankStart|commaSep, multiLine, s.Colon)
+ p.exprList(s.Pos(), s.List, 1, blankStart|commaSep, multiLine, s.Colon)
} else {
p.print(token.DEFAULT)
}
@@ -1160,16 +1131,6 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
p.block(s.Body, 0)
*multiLine = true
- case *ast.TypeCaseClause:
- if s.Types != nil {
- p.print(token.CASE)
- p.exprList(s.Pos(), s.Types, 1, blankStart|commaSep, multiLine, s.Colon)
- } else {
- p.print(token.DEFAULT)
- }
- p.print(s.Colon, token.COLON)
- p.stmtList(s.Body, 1, nextIsRBrace)
-
case *ast.TypeSwitchStmt:
p.print(token.SWITCH)
if s.Init != nil {
@@ -1239,7 +1200,7 @@ func (p *printer) spec(spec ast.Spec, n int, doIndent bool, multiLine *bool) {
p.setComment(s.Doc)
if s.Name != nil {
p.expr(s.Name, multiLine)
- p.print(vtab)
+ p.print(blank)
}
p.expr(s.Path, multiLine)
p.setComment(s.Comment)
@@ -1331,13 +1292,23 @@ func (p *printer) genDecl(d *ast.GenDecl, multiLine *bool) {
// any control chars. Otherwise, the result is > maxSize.
//
func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
+ // nodeSize invokes the printer, which may invoke nodeSize
+ // recursively. For deep composite literal nests, this can
+ // lead to an exponential algorithm. Remember previous
+ // results to prune the recursion (was issue 1628).
+ if size, found := p.nodeSizes[n]; found {
+ return size
+ }
+
size = maxSize + 1 // assume n doesn't fit
+ p.nodeSizes[n] = size
+
// nodeSize computation must be independent of the particular
// style so that we always get the same decision; print
// in RawFormat
cfg := Config{Mode: RawFormat}
var buf bytes.Buffer
- if _, err := cfg.Fprint(&buf, p.fset, n); err != nil {
+ if _, err := cfg.fprint(&buf, p.fset, n, p.nodeSizes); err != nil {
return
}
if buf.Len() <= maxSize {
@@ -1347,6 +1318,7 @@ func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
}
}
size = buf.Len() // n fits
+ p.nodeSizes[n] = size
}
return
}
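
The map added here memoizes node sizes: nodeSize invokes the printer, which calls nodeSize again on subexpressions, and without a cache deeply nested composite literals re-measure the same nodes exponentially often (issue 1628, exercised by testdata/slow.input below). A minimal sketch of the caching discipline, with hypothetical names:

	package main

	import (
		"fmt"
		"go/ast"
	)

	// cachedSize records the pessimistic answer ("doesn't fit") before
	// measuring, so recursive invocations triggered while printing n see
	// a cached value and the recursion is pruned to linear work.
	func cachedSize(n ast.Node, maxSize int, sizes map[ast.Node]int, measure func(ast.Node) int) int {
		if size, found := sizes[n]; found {
			return size
		}
		sizes[n] = maxSize + 1 // assume n doesn't fit while measuring
		if size := measure(n); size <= maxSize {
			sizes[n] = size // n fits; remember the real size
		}
		return sizes[n]
	}

	func main() {
		sizes := make(map[ast.Node]int)
		n := &ast.Ident{Name: "x"}
		fmt.Println(cachedSize(n, 10, sizes, func(ast.Node) int { return 1 }))
		fmt.Println(cachedSize(n, 10, sizes, nil)) // second call hits the cache
	}
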
diff --git a/src/pkg/go/printer/printer.go b/src/pkg/go/printer/printer.go
index 90d9784ac..697a83fa8 100644
--- a/src/pkg/go/printer/printer.go
+++ b/src/pkg/go/printer/printer.go
@@ -34,12 +34,6 @@ const (
)
-const (
- esc2 = '\xfe' // an escape byte that cannot occur in regular UTF-8
- _ = 1 / (esc2 - tabwriter.Escape) // cause compiler error if esc2 == tabwriter.Escape
-)
-
-
var (
esc = []byte{tabwriter.Escape}
htab = []byte{'\t'}
@@ -81,8 +75,9 @@ type printer struct {
mode pmode // current printer mode
lastTok token.Token // the last token printed (token.ILLEGAL if it's whitespace)
- // Buffered whitespace
- buffer []whiteSpace
+ // Reused buffers
+ wsbuf []whiteSpace // delayed white space
+ litbuf bytes.Buffer // for creation of escaped literals and comments
// The (possibly estimated) position in the generated output;
// in AST space (i.e., pos is set whenever a token position is
@@ -94,22 +89,23 @@ type printer struct {
// written using writeItem.
last token.Position
- // HTML support
- lastTaggedLine int // last line for which a line tag was written
-
// The list of all source comments, in order of appearance.
comments []*ast.CommentGroup // may be nil
cindex int // current comment index
useNodeComments bool // if not set, ignore lead and line comments of nodes
+
+ // Cache of already computed node sizes.
+ nodeSizes map[ast.Node]int
}
-func (p *printer) init(output io.Writer, cfg *Config, fset *token.FileSet) {
+func (p *printer) init(output io.Writer, cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) {
p.output = output
p.Config = *cfg
p.fset = fset
p.errors = make(chan os.Error)
- p.buffer = make([]whiteSpace, 0, 16) // whitespace sequences are short
+ p.wsbuf = make([]whiteSpace, 0, 16) // whitespace sequences are short
+ p.nodeSizes = nodeSizes
}
@@ -122,6 +118,20 @@ func (p *printer) internalError(msg ...interface{}) {
}
+// escape escapes string s by bracketing it with tabwriter.Escape.
+// Escaped strings pass through tabwriter unchanged. (Note that
+// valid Go programs cannot contain tabwriter.Escape bytes since
+// they do not appear in legal UTF-8 sequences).
+//
+func (p *printer) escape(s string) string {
+ p.litbuf.Reset()
+ p.litbuf.WriteByte(tabwriter.Escape)
+ p.litbuf.WriteString(s)
+ p.litbuf.WriteByte(tabwriter.Escape)
+ return p.litbuf.String()
+}
+
+
// nlines returns the adjusted number of linebreaks given the desired number
// of breaks n such that min <= result <= max where max depends on the current
// nesting level.
@@ -229,7 +239,7 @@ func (p *printer) writeNewlines(n int, useFF bool) {
// source text. writeItem updates p.last to the position immediately following
// the data.
//
-func (p *printer) writeItem(pos token.Position, data []byte) {
+func (p *printer) writeItem(pos token.Position, data string) {
if pos.IsValid() {
// continue with previous position if we don't have a valid pos
if p.last.IsValid() && p.last.Filename != pos.Filename {
@@ -238,7 +248,7 @@ func (p *printer) writeItem(pos token.Position, data []byte) {
// e.g., the result of ast.MergePackageFiles)
p.indent = 0
p.mode = 0
- p.buffer = p.buffer[0:0]
+ p.wsbuf = p.wsbuf[0:0]
}
p.pos = pos
}
@@ -247,7 +257,7 @@ func (p *printer) writeItem(pos token.Position, data []byte) {
_, filename := filepath.Split(pos.Filename)
p.write0([]byte(fmt.Sprintf("[%s:%d:%d]", filename, pos.Line, pos.Column)))
}
- p.write(data)
+ p.write([]byte(data))
p.last = p.pos
}
@@ -279,11 +289,11 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment
if prev == nil {
// first comment of a comment group
j := 0
- for i, ch := range p.buffer {
+ for i, ch := range p.wsbuf {
switch ch {
case blank:
// ignore any blanks before a comment
- p.buffer[i] = ignore
+ p.wsbuf[i] = ignore
continue
case vtab:
// respect existing tabs - important
@@ -317,11 +327,11 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment
if prev == nil {
// first comment of a comment group
j := 0
- for i, ch := range p.buffer {
+ for i, ch := range p.wsbuf {
switch ch {
case blank, vtab:
// ignore any horizontal whitespace before line breaks
- p.buffer[i] = ignore
+ p.wsbuf[i] = ignore
continue
case indent:
// apply pending indentation
@@ -338,7 +348,7 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment
}
case newline, formfeed:
// TODO(gri): may want to keep formfeed info in some cases
- p.buffer[i] = ignore
+ p.wsbuf[i] = ignore
}
j = i
break
@@ -359,12 +369,8 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment
}
-func (p *printer) writeCommentLine(comment *ast.Comment, pos token.Position, line []byte) {
- // line must pass through unchanged, bracket it with tabwriter.Escape
- line = bytes.Join([][]byte{esc, line, esc}, nil)
- p.writeItem(pos, line)
-}
-
+// TODO(gri): It should be possible to convert the code below from using
+// []byte to string and in the process eliminate some conversions.
// Split comment text into lines
func split(text []byte) [][]byte {
@@ -545,13 +551,13 @@ func (p *printer) writeComment(comment *ast.Comment) {
// shortcut common case of //-style comments
if text[1] == '/' {
- p.writeCommentLine(comment, p.fset.Position(comment.Pos()), text)
+ p.writeItem(p.fset.Position(comment.Pos()), p.escape(text))
return
}
// for /*-style comments, print line by line and let the
// write function take care of the proper indentation
- lines := split(text)
+ lines := split([]byte(text))
stripCommonPrefix(lines)
// write comment lines, separated by formfeed,
@@ -564,7 +570,7 @@ func (p *printer) writeComment(comment *ast.Comment) {
pos = p.pos
}
if len(line) > 0 {
- p.writeCommentLine(comment, pos, line)
+ p.writeItem(pos, p.escape(string(line)))
}
}
}
@@ -577,11 +583,11 @@ func (p *printer) writeComment(comment *ast.Comment) {
// formfeed was dropped from the whitespace buffer.
//
func (p *printer) writeCommentSuffix(needsLinebreak bool) (droppedFF bool) {
- for i, ch := range p.buffer {
+ for i, ch := range p.wsbuf {
switch ch {
case blank, vtab:
// ignore trailing whitespace
- p.buffer[i] = ignore
+ p.wsbuf[i] = ignore
case indent, unindent:
// don't lose indentation information
case newline, formfeed:
@@ -593,11 +599,11 @@ func (p *printer) writeCommentSuffix(needsLinebreak bool) (droppedFF bool) {
if ch == formfeed {
droppedFF = true
}
- p.buffer[i] = ignore
+ p.wsbuf[i] = ignore
}
}
}
- p.writeWhitespace(len(p.buffer))
+ p.writeWhitespace(len(p.wsbuf))
// make sure we have a line break
if needsLinebreak {
@@ -651,7 +657,7 @@ func (p *printer) writeWhitespace(n int) {
// write entries
var data [1]byte
for i := 0; i < n; i++ {
- switch ch := p.buffer[i]; ch {
+ switch ch := p.wsbuf[i]; ch {
case ignore:
// ignore!
case indent:
@@ -669,13 +675,13 @@ func (p *printer) writeWhitespace(n int) {
// the line break and the label, the unindent is not
// part of the comment whitespace prefix and the comment
// will be positioned correctly indented.
- if i+1 < n && p.buffer[i+1] == unindent {
+ if i+1 < n && p.wsbuf[i+1] == unindent {
// Use a formfeed to terminate the current section.
// Otherwise, a long label name on the next line leading
// to a wide column may increase the indentation column
// of lines before the label; effectively leading to wrong
// indentation.
- p.buffer[i], p.buffer[i+1] = unindent, formfeed
+ p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed
i-- // do it again
continue
}
@@ -688,11 +694,11 @@ func (p *printer) writeWhitespace(n int) {
// shift remaining entries down
i := 0
- for ; n < len(p.buffer); n++ {
- p.buffer[i] = p.buffer[n]
+ for ; n < len(p.wsbuf); n++ {
+ p.wsbuf[i] = p.wsbuf[n]
i++
}
- p.buffer = p.buffer[0:i]
+ p.wsbuf = p.wsbuf[0:i]
}
@@ -733,7 +739,7 @@ func mayCombine(prev token.Token, next byte) (b bool) {
func (p *printer) print(args ...interface{}) {
for _, f := range args {
next := p.pos // estimated position of next item
- var data []byte
+ var data string
var tok token.Token
switch x := f.(type) {
@@ -747,42 +753,22 @@ func (p *printer) print(args ...interface{}) {
// LabeledStmt)
break
}
- i := len(p.buffer)
- if i == cap(p.buffer) {
+ i := len(p.wsbuf)
+ if i == cap(p.wsbuf) {
// Whitespace sequences are very short so this should
// never happen. Handle gracefully (but possibly with
// bad comment placement) if it does happen.
p.writeWhitespace(i)
i = 0
}
- p.buffer = p.buffer[0 : i+1]
- p.buffer[i] = x
+ p.wsbuf = p.wsbuf[0 : i+1]
+ p.wsbuf[i] = x
case *ast.Ident:
- data = []byte(x.Name)
+ data = x.Name
tok = token.IDENT
case *ast.BasicLit:
- // escape all literals so they pass through unchanged
- // (note that valid Go programs cannot contain
- // tabwriter.Escape bytes since they do not appear in
- // legal UTF-8 sequences)
- data = make([]byte, 0, len(x.Value)+2)
- data = append(data, tabwriter.Escape)
- data = append(data, x.Value...)
- data = append(data, tabwriter.Escape)
+ data = p.escape(x.Value)
tok = x.Kind
- // If we have a raw string that spans multiple lines and
- // the opening quote (`) is on a line preceded only by
- // indentation, we don't want to write that indentation
- // because the following lines of the raw string are not
- // indented. It's easiest to correct the output at the end
- // via the trimmer (because of the complex handling of
- // white space).
- // Mark multi-line raw strings by replacing the opening
- // quote with esc2 and have the trimmer take care of fixing
- // it up. (Do this _after_ making a copy of data!)
- if data[1] == '`' && bytes.IndexByte(data, '\n') > 0 {
- data[1] = esc2
- }
case token.Token:
s := x.String()
if mayCombine(p.lastTok, s[0]) {
@@ -792,13 +778,13 @@ func (p *printer) print(args ...interface{}) {
// (except for token.INT followed by a '.' this
// should never happen because it is taken care
// of via binary expression formatting)
- if len(p.buffer) != 0 {
+ if len(p.wsbuf) != 0 {
p.internalError("whitespace buffer not empty")
}
- p.buffer = p.buffer[0:1]
- p.buffer[0] = ' '
+ p.wsbuf = p.wsbuf[0:1]
+ p.wsbuf[0] = ' '
}
- data = []byte(s)
+ data = s
tok = x
case token.Pos:
if x.IsValid() {
@@ -812,7 +798,7 @@ func (p *printer) print(args ...interface{}) {
p.lastTok = tok
p.pos = next
- if data != nil {
+ if data != "" {
droppedFF := p.flush(next, tok)
// intersperse extra newlines if present in the source
@@ -847,7 +833,7 @@ func (p *printer) flush(next token.Position, tok token.Token) (droppedFF bool) {
droppedFF = p.intersperseComments(next, tok)
} else {
// otherwise, write any leftover whitespace
- p.writeWhitespace(len(p.buffer))
+ p.writeWhitespace(len(p.wsbuf))
}
return
}
@@ -863,10 +849,9 @@ func (p *printer) flush(next token.Position, tok token.Token) (droppedFF bool) {
// through unchanged.
//
type trimmer struct {
- output io.Writer
- state int
- space bytes.Buffer
- hasText bool
+ output io.Writer
+ state int
+ space bytes.Buffer
}
@@ -874,15 +859,11 @@ type trimmer struct {
// It can be in one of the following states:
const (
inSpace = iota // inside space
- atEscape // inside space and the last char was an opening tabwriter.Escape
inEscape // inside text bracketed by tabwriter.Escapes
inText // inside text
)
-var backquote = []byte{'`'}
-
-
// Design note: It is tempting to eliminate extra blanks occurring in
// whitespace in this function as it could simplify some
// of the blanks logic in the node printing functions.
@@ -891,9 +872,8 @@ var backquote = []byte{'`'}
func (p *trimmer) Write(data []byte) (n int, err os.Error) {
// invariants:
- // p.state == inSpace, atEscape:
+ // p.state == inSpace:
// p.space is unwritten
- // p.hasText indicates if there is any text on this line
// p.state == inEscape, inText:
// data[m:n] is unwritten
m := 0
@@ -910,32 +890,20 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) {
case '\n', '\f':
p.space.Reset() // discard trailing space
_, err = p.output.Write(newlines[0:1]) // write newline
- p.hasText = false
case tabwriter.Escape:
- p.state = atEscape
+ _, err = p.output.Write(p.space.Bytes())
+ p.state = inEscape
+ m = n + 1 // +1: skip tabwriter.Escape
default:
_, err = p.output.Write(p.space.Bytes())
p.state = inText
m = n
}
- case atEscape:
- // discard indentation if we have a multi-line raw string
- // (see printer.print for details)
- if b != esc2 || p.hasText {
- _, err = p.output.Write(p.space.Bytes())
- }
- p.state = inEscape
- m = n
- if b == esc2 {
- _, err = p.output.Write(backquote) // convert back
- m++
- }
case inEscape:
if b == tabwriter.Escape {
_, err = p.output.Write(data[m:n])
p.state = inSpace
p.space.Reset()
- p.hasText = true
}
case inText:
switch b {
@@ -944,19 +912,18 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) {
p.state = inSpace
p.space.Reset()
p.space.WriteByte(b) // WriteByte returns no errors
- p.hasText = true
case '\n', '\f':
_, err = p.output.Write(data[m:n])
p.state = inSpace
p.space.Reset()
_, err = p.output.Write(newlines[0:1]) // write newline
- p.hasText = false
case tabwriter.Escape:
_, err = p.output.Write(data[m:n])
- p.state = atEscape
- p.space.Reset()
- p.hasText = true
+ p.state = inEscape
+ m = n + 1 // +1: skip tabwriter.Escape
}
+ default:
+ panic("unreachable")
}
if err != nil {
return
@@ -969,7 +936,6 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) {
_, err = p.output.Write(data[m:n])
p.state = inSpace
p.space.Reset()
- p.hasText = true
}
return
@@ -994,13 +960,8 @@ type Config struct {
}
-// Fprint "pretty-prints" an AST node to output and returns the number
-// of bytes written and an error (if any) for a given configuration cfg.
-// Position information is interpreted relative to the file set fset.
-// The node type must be *ast.File, or assignment-compatible to ast.Expr,
-// ast.Decl, ast.Spec, or ast.Stmt.
-//
-func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) (int, os.Error) {
+// fprint implements Fprint and takes a nodeSizes map for setting up the printer state.
+func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (int, os.Error) {
// redirect output through a trimmer to eliminate trailing whitespace
// (Input to a tabwriter must be untrimmed since trailing tabs provide
// formatting information. The tabwriter could provide trimming
@@ -1029,7 +990,7 @@ func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{
// setup printer and print node
var p printer
- p.init(output, cfg, fset)
+ p.init(output, cfg, fset, nodeSizes)
go func() {
switch n := node.(type) {
case ast.Expr:
@@ -1076,6 +1037,17 @@ func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{
}
+// Fprint "pretty-prints" an AST node to output and returns the number
+// of bytes written and an error (if any) for a given configuration cfg.
+// Position information is interpreted relative to the file set fset.
+// The node type must be *ast.File, or assignment-compatible to ast.Expr,
+// ast.Decl, ast.Spec, or ast.Stmt.
+//
+func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) (int, os.Error) {
+ return cfg.fprint(output, fset, node, make(map[ast.Node]int))
+}
+
+
// Fprint "pretty-prints" an AST node to output.
// It calls Config.Fprint with default settings.
//
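
The printer relies throughout on tabwriter's escape convention: bytes bracketed by tabwriter.Escape pass through the tabwriter untouched, so literals and comments keep their exact spelling and internal tabs. A small demonstration, using today's text/tabwriter import path (this package predates that layout):

	package main

	import (
		"fmt"
		"os"
		"text/tabwriter"
	)

	func main() {
		w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, ' ', tabwriter.StripEscape)
		esc := string([]byte{tabwriter.Escape})
		fmt.Fprintln(w, "cell:\tvalue")
		// the tab inside the escaped segment is NOT a cell separator
		fmt.Fprintln(w, esc+"raw\ttext"+esc+":\tvalue")
		w.Flush()
	}
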
diff --git a/src/pkg/go/printer/printer_test.go b/src/pkg/go/printer/printer_test.go
index 62b726913..090f92af1 100644
--- a/src/pkg/go/printer/printer_test.go
+++ b/src/pkg/go/printer/printer_test.go
@@ -13,6 +13,7 @@ import (
"go/token"
"path/filepath"
"testing"
+ "time"
)
@@ -45,7 +46,7 @@ const (
)
-func check(t *testing.T, source, golden string, mode checkMode) {
+func runcheck(t *testing.T, source, golden string, mode checkMode) {
// parse source
prog, err := parser.ParseFile(fset, source, nil, parser.ParseComments)
if err != nil {
@@ -109,6 +110,32 @@ func check(t *testing.T, source, golden string, mode checkMode) {
}
+func check(t *testing.T, source, golden string, mode checkMode) {
+ // start a timer to produce a time-out signal
+ tc := make(chan int)
+ go func() {
+ time.Sleep(10e9) // plenty of safety margin, even for very slow machines
+ tc <- 0
+ }()
+
+ // run the test
+ cc := make(chan int)
+ go func() {
+ runcheck(t, source, golden, mode)
+ cc <- 0
+ }()
+
+ // wait for the first finisher
+ select {
+ case <-tc:
+ // test ran past the timeout
+ t.Errorf("%s: running too slowly", source)
+ case <-cc:
+ // test finished within allotted time margin
+ }
+}
+
+
type entry struct {
source, golden string
mode checkMode
@@ -124,16 +151,20 @@ var data = []entry{
{"expressions.input", "expressions.raw", rawFormat},
{"declarations.input", "declarations.golden", 0},
{"statements.input", "statements.golden", 0},
+ {"slow.input", "slow.golden", 0},
}
func TestFiles(t *testing.T) {
- for _, e := range data {
+ for i, e := range data {
source := filepath.Join(dataDir, e.source)
golden := filepath.Join(dataDir, e.golden)
check(t, source, golden, e.mode)
// TODO(gri) check that golden is idempotent
- //check(t, golden, golden, e.mode);
+ //check(t, golden, golden, e.mode)
+ if testing.Short() && i >= 3 {
+ break
+ }
}
}
diff --git a/src/pkg/go/printer/testdata/declarations.golden b/src/pkg/go/printer/testdata/declarations.golden
index 1c091b929..c1b255842 100644
--- a/src/pkg/go/printer/testdata/declarations.golden
+++ b/src/pkg/go/printer/testdata/declarations.golden
@@ -7,10 +7,10 @@ package imports
import "io"
import (
- _ "io"
+ _ "io"
)
-import _ "io"
+import _ "io"
import (
"io"
@@ -20,40 +20,40 @@ import (
import (
"io"
- aLongRename "io"
+ aLongRename "io"
- b "io"
+ b "io"
)
import (
"unrenamed"
- renamed "renameMe"
- . "io"
- _ "io"
+ renamed "renameMe"
+ . "io"
+ _ "io"
"io"
- . "os"
+ . "os"
)
// no newlines between consecutive single imports, but
// respect extra line breaks in the source (at most one empty line)
-import _ "io"
-import _ "io"
-import _ "io"
+import _ "io"
+import _ "io"
+import _ "io"
-import _ "os"
-import _ "os"
-import _ "os"
+import _ "os"
+import _ "os"
+import _ "os"
-import _ "fmt"
-import _ "fmt"
-import _ "fmt"
+import _ "fmt"
+import _ "fmt"
+import _ "fmt"
import "foo" // a comment
import "bar" // a comment
import (
- _ "foo"
+ _ "foo"
// a comment
"bar"
"foo" // a comment
@@ -63,17 +63,17 @@ import (
// comments + renames
import (
"unrenamed" // a comment
- renamed "renameMe"
- . "io" /* a comment */
- _ "io/ioutil" // a comment
+ renamed "renameMe"
+ . "io" /* a comment */
+ _ "io/ioutil" // a comment
"io" // testing alignment
- . "os"
+ . "os"
// a comment
)
// a case that caused problems in the past (comment placement)
import (
- . "fmt"
+ . "fmt"
"io"
"malloc" // for the malloc count test only
"math"
@@ -81,9 +81,38 @@ import (
"testing"
)
+// more import examples
+import (
+ "xxx"
+ "much longer name" // comment
+ "short name" // comment
+)
+
+import (
+ _ "xxx"
+ "much longer name" // comment
+)
+
+import (
+ mymath "math"
+ "/foo/bar/long_package_path" // a comment
+)
+
+import (
+ "package_a" // comment
+ "package_b"
+ my_better_c "package_c" // comment
+ "package_d" // comment
+ my_e "package_e" // comment
+
+ "package_a" // comment
+ "package_bb"
+ "package_ccc" // comment
+ "package_dddd" // comment
+)
// at least one empty line between declarations of different kind
-import _ "io"
+import _ "io"
var _ int
diff --git a/src/pkg/go/printer/testdata/declarations.input b/src/pkg/go/printer/testdata/declarations.input
index c826462f9..c8b37e12b 100644
--- a/src/pkg/go/printer/testdata/declarations.input
+++ b/src/pkg/go/printer/testdata/declarations.input
@@ -81,6 +81,35 @@ import (
"testing"
)
+// more import examples
+import (
+ "xxx"
+ "much longer name" // comment
+ "short name" // comment
+)
+
+import (
+ _ "xxx"
+ "much longer name" // comment
+)
+
+import (
+ mymath "math"
+ "/foo/bar/long_package_path" // a comment
+)
+
+import (
+ "package_a" // comment
+ "package_b"
+ my_better_c "package_c" // comment
+ "package_d" // comment
+ my_e "package_e" // comment
+
+ "package_a" // comment
+ "package_bb"
+ "package_ccc" // comment
+ "package_dddd" // comment
+)
// at least one empty line between declarations of different kind
import _ "io"
diff --git a/src/pkg/go/printer/testdata/expressions.golden b/src/pkg/go/printer/testdata/expressions.golden
index 7f18f338a..c1a7e970b 100644
--- a/src/pkg/go/printer/testdata/expressions.golden
+++ b/src/pkg/go/printer/testdata/expressions.golden
@@ -224,11 +224,7 @@ func _() {
_ = struct{ x int }{0}
_ = struct{ x, y, z int }{0, 1, 2}
_ = struct{ int }{0}
- _ = struct {
- s struct {
- int
- }
- }{struct{ int }{0}} // compositeLit context not propagated => multiLine result
+ _ = struct{ s struct{ int } }{struct{ int }{0}}
}
@@ -257,8 +253,8 @@ bar`
var _ = ``
var _ = `foo`
var _ =
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var _ = // comment
@@ -266,8 +262,8 @@ bar`
var _ = // comment
`foo`
var _ = // comment
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var _ = /* comment */ ``
@@ -280,12 +276,12 @@ bar`
var _ = /* comment */
`foo`
var _ = /* comment */
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var board = []int(
-`...........
+ `...........
...........
....●●●....
....●●●....
@@ -300,8 +296,8 @@ bar`
var state = S{
"foo",
- // the next line should not be indented
-`...........
+ // the next line should remain indented
+ `...........
...........
....●●●....
....●●●....
@@ -623,3 +619,13 @@ func _() {
b.(T).
c
}
+
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC,0666); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/src/pkg/go/printer/testdata/expressions.input b/src/pkg/go/printer/testdata/expressions.input
index 6bcd9b5f8..b87381198 100644
--- a/src/pkg/go/printer/testdata/expressions.input
+++ b/src/pkg/go/printer/testdata/expressions.input
@@ -224,7 +224,7 @@ func _() {
_ = struct{ x int }{0}
_ = struct{ x, y, z int }{0, 1, 2}
_ = struct{ int }{0}
- _ = struct{ s struct { int } }{struct{ int}{0}} // compositeLit context not propagated => multiLine result
+ _ = struct{ s struct { int } }{struct{ int}{0} }
}
@@ -256,7 +256,7 @@ var _ =
var _ =
`foo`
var _ =
- // the next line should not be indented
+ // the next line should remain indented
`foo
bar`
@@ -266,7 +266,7 @@ bar`
var _ = // comment
`foo`
var _ = // comment
- // the next line should not be indented
+ // the next line should remain indented
`foo
bar`
@@ -282,7 +282,7 @@ bar`
var _ = /* comment */
`foo`
var _ = /* comment */
- // the next line should not be indented
+ // the next line should remain indented
`foo
bar`
@@ -304,7 +304,7 @@ var board = []int(
var state = S{
"foo",
- // the next line should not be indented
+ // the next line should remain indented
`...........
...........
....●●●....
@@ -625,3 +625,13 @@ baz()
(T).
c
}
+
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC, 0666); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/src/pkg/go/printer/testdata/expressions.raw b/src/pkg/go/printer/testdata/expressions.raw
index f1944c94b..735cd943e 100644
--- a/src/pkg/go/printer/testdata/expressions.raw
+++ b/src/pkg/go/printer/testdata/expressions.raw
@@ -224,11 +224,7 @@ func _() {
_ = struct{ x int }{0}
_ = struct{ x, y, z int }{0, 1, 2}
_ = struct{ int }{0}
- _ = struct {
- s struct {
- int
- }
- }{struct{ int }{0}} // compositeLit context not propagated => multiLine result
+ _ = struct{ s struct{ int } }{struct{ int }{0}}
}
@@ -243,7 +239,8 @@ func _() {
_ = `foo
bar`
_ = `three spaces before the end of the line starting here:
-they must not be removed` }
+they must not be removed`
+}
func _() {
@@ -256,8 +253,8 @@ bar`
var _ = ``
var _ = `foo`
var _ =
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var _ = // comment
@@ -265,8 +262,8 @@ bar`
var _ = // comment
`foo`
var _ = // comment
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var _ = /* comment */ ``
@@ -279,12 +276,12 @@ bar`
var _ = /* comment */
`foo`
var _ = /* comment */
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var board = []int(
-`...........
+ `...........
...........
....●●●....
....●●●....
@@ -299,8 +296,8 @@ bar`
var state = S{
"foo",
- // the next line should not be indented
-`...........
+ // the next line should remain indented
+ `...........
...........
....●●●....
....●●●....
@@ -622,3 +619,13 @@ func _() {
b.(T).
c
}
+
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC,0666); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/src/pkg/go/printer/testdata/slow.golden b/src/pkg/go/printer/testdata/slow.golden
new file mode 100644
index 000000000..43a15cb1d
--- /dev/null
+++ b/src/pkg/go/printer/testdata/slow.golden
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package deepequal_test
+
+import (
+ "testing"
+ "google3/spam/archer/frontend/deepequal"
+)
+
+func TestTwoNilValues(t *testing.T) {
+ if err := deepequal.Check(nil, nil); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
+
+type Foo struct {
+ bar *Bar
+ bang *Bar
+}
+
+type Bar struct {
+ baz *Baz
+ foo []*Foo
+}
+
+type Baz struct {
+ entries map[int]interface{}
+ whatever string
+}
+
+func newFoo() *Foo {
+ return &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 42: &Foo{},
+ 21: &Bar{},
+ 11: &Baz{whatever: "it's just a test"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 43: &Foo{},
+ 22: &Bar{},
+ 13: &Baz{whatever: "this is nuts"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 61: &Foo{},
+ 71: &Bar{},
+ 11: &Baz{whatever: "no, it's Go"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 0: &Foo{},
+ -2: &Bar{},
+ -11: &Baz{whatever: "we need to go deeper"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ -2: &Foo{},
+ -5: &Bar{},
+ -7: &Baz{whatever: "are you serious?"}}}},
+ bang: &Bar{foo: []*Foo{}}},
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ -100: &Foo{},
+ 50: &Bar{},
+ 20: &Baz{whatever: "na, not really ..."}}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}},
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 2: &Foo{},
+ 1: &Bar{},
+ -1: &Baz{whatever: "... it's just a test."}}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}}
+}
+
+func TestElaborate(t *testing.T) {
+ a := newFoo()
+ b := newFoo()
+
+ if err := deepequal.Check(a, b); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
diff --git a/src/pkg/go/printer/testdata/slow.input b/src/pkg/go/printer/testdata/slow.input
new file mode 100644
index 000000000..0e5a23d88
--- /dev/null
+++ b/src/pkg/go/printer/testdata/slow.input
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package deepequal_test
+
+import (
+ "testing"
+ "google3/spam/archer/frontend/deepequal"
+)
+
+func TestTwoNilValues(t *testing.T) {
+ if err := deepequal.Check(nil, nil); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
+
+type Foo struct {
+ bar *Bar
+ bang *Bar
+}
+
+type Bar struct {
+ baz *Baz
+ foo []*Foo
+}
+
+type Baz struct {
+ entries map[int]interface{}
+ whatever string
+}
+
+func newFoo() (*Foo) {
+return &Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+42: &Foo{},
+21: &Bar{},
+11: &Baz{ whatever: "it's just a test" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+43: &Foo{},
+22: &Bar{},
+13: &Baz{ whatever: "this is nuts" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+61: &Foo{},
+71: &Bar{},
+11: &Baz{ whatever: "no, it's Go" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+0: &Foo{},
+-2: &Bar{},
+-11: &Baz{ whatever: "we need to go deeper" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+-2: &Foo{},
+-5: &Bar{},
+-7: &Baz{ whatever: "are you serious?" }}}},
+ bang: &Bar{foo: []*Foo{}}},
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+-100: &Foo{},
+50: &Bar{},
+20: &Baz{ whatever: "na, not really ..." }}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}},
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+2: &Foo{},
+1: &Bar{},
+-1: &Baz{ whatever: "... it's just a test." }}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}}
+}
+
+func TestElaborate(t *testing.T) {
+ a := newFoo()
+ b := newFoo()
+
+ if err := deepequal.Check(a, b); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
diff --git a/src/pkg/go/scanner/scanner.go b/src/pkg/go/scanner/scanner.go
index 153707f59..2f949ad25 100644
--- a/src/pkg/go/scanner/scanner.go
+++ b/src/pkg/go/scanner/scanner.go
@@ -177,11 +177,11 @@ var prefix = []byte("//line ")
func (S *Scanner) interpretLineComment(text []byte) {
if bytes.HasPrefix(text, prefix) {
// get filename and line number, if any
- if i := bytes.Index(text, []byte{':'}); i > 0 {
+ if i := bytes.LastIndex(text, []byte{':'}); i > 0 {
if line, err := strconv.Atoi(string(text[i+1:])); err == nil && line > 0 {
// valid //line filename:line comment;
filename := filepath.Clean(string(text[len(prefix):i]))
- if filename[0] != '/' {
+ if !filepath.IsAbs(filename) {
// make filename relative to current directory
filename = filepath.Join(S.dir, filename)
}
@@ -538,14 +538,12 @@ func (S *Scanner) switch4(tok0, tok1 token.Token, ch2 int, tok2, tok3 token.Toke
}
-var newline = []byte{'\n'}
-
-// Scan scans the next token and returns the token position pos,
-// the token tok, and the literal text lit corresponding to the
+// Scan scans the next token and returns the token position,
+// the token, and the literal string corresponding to the
// token. The source end is indicated by token.EOF.
//
// If the returned token is token.SEMICOLON, the corresponding
-// literal value is ";" if the semicolon was present in the source,
+// literal string is ";" if the semicolon was present in the source,
// and "\n" if the semicolon was inserted because of a newline or
// at EOF.
//
@@ -560,7 +558,7 @@ var newline = []byte{'\n'}
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
-func (S *Scanner) Scan() (token.Pos, token.Token, []byte) {
+func (S *Scanner) Scan() (token.Pos, token.Token, string) {
scanAgain:
S.skipWhitespace()
@@ -586,7 +584,7 @@ scanAgain:
case -1:
if S.insertSemi {
S.insertSemi = false // EOF consumed
- return S.file.Pos(offs), token.SEMICOLON, newline
+ return S.file.Pos(offs), token.SEMICOLON, "\n"
}
tok = token.EOF
case '\n':
@@ -594,7 +592,7 @@ scanAgain:
// set in the first place and exited early
// from S.skipWhitespace()
S.insertSemi = false // newline consumed
- return S.file.Pos(offs), token.SEMICOLON, newline
+ return S.file.Pos(offs), token.SEMICOLON, "\n"
case '"':
insertSemi = true
tok = token.STRING
@@ -662,7 +660,7 @@ scanAgain:
S.offset = offs
S.rdOffset = offs + 1
S.insertSemi = false // newline consumed
- return S.file.Pos(offs), token.SEMICOLON, newline
+ return S.file.Pos(offs), token.SEMICOLON, "\n"
}
S.scanComment()
if S.mode&ScanComments == 0 {
@@ -711,5 +709,9 @@ scanAgain:
if S.mode&InsertSemis != 0 {
S.insertSemi = insertSemi
}
- return S.file.Pos(offs), tok, S.src[offs:S.offset]
+
+ // TODO(gri): The scanner API should change such that the literal string
+ // is only valid if an actual literal was scanned. This will
+ // permit a more efficient implementation.
+ return S.file.Pos(offs), tok, string(S.src[offs:S.offset])
}
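
The change above makes Scan return the token literal as a string instead of a []byte slice into the source. A minimal sketch of the resulting API, runnable against today's go/scanner (standard library only; semicolon insertion is implied by default in the modern package):

	package main

	import (
		"fmt"
		"go/scanner"
		"go/token"
	)

	func main() {
		src := []byte("x := 42 // answer\n")

		fset := token.NewFileSet()
		file := fset.AddFile("example.go", fset.Base(), len(src))

		var s scanner.Scanner
		s.Init(file, src, nil, scanner.ScanComments)

		for {
			pos, tok, lit := s.Scan() // lit is a string now, not []byte
			if tok == token.EOF {
				break
			}
			// the SEMICOLON inserted at the newline carries the literal "\n"
			fmt.Printf("%-14s %-10s %q\n", fset.Position(pos), tok, lit)
		}
	}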
diff --git a/src/pkg/go/scanner/scanner_test.go b/src/pkg/go/scanner/scanner_test.go
index c622ff482..8afb00ee5 100644
--- a/src/pkg/go/scanner/scanner_test.go
+++ b/src/pkg/go/scanner/scanner_test.go
@@ -7,6 +7,8 @@ package scanner
import (
"go/token"
"os"
+ "path/filepath"
+ "runtime"
"testing"
)
@@ -232,12 +234,11 @@ func TestScan(t *testing.T) {
index := 0
epos := token.Position{"", 0, 1, 1} // expected position
for {
- pos, tok, litb := s.Scan()
+ pos, tok, lit := s.Scan()
e := elt{token.EOF, "", special}
if index < len(tokens) {
e = tokens[index]
}
- lit := string(litb)
if tok == token.EOF {
lit = "<EOF>"
epos.Line = src_linecount
@@ -255,7 +256,7 @@ func TestScan(t *testing.T) {
}
epos.Offset += len(lit) + len(whitespace)
epos.Line += newlineCount(lit) + whitespace_linecount
- if tok == token.COMMENT && litb[1] == '/' {
+ if tok == token.COMMENT && lit[1] == '/' {
 // correct for unaccounted '\n' in //-style comment
epos.Offset++
epos.Line++
@@ -290,7 +291,7 @@ func checkSemi(t *testing.T, line string, mode uint) {
semiPos.Column++
pos, tok, lit = S.Scan()
if tok == token.SEMICOLON {
- if string(lit) != semiLit {
+ if lit != semiLit {
t.Errorf(`bad literal for %q: got %q, expected %q`, line, lit, semiLit)
}
checkPos(t, line, pos, semiPos)
@@ -443,32 +444,41 @@ func TestSemis(t *testing.T) {
}
}
-
-var segments = []struct {
+type segment struct {
srcline string // a line of source text
filename string // filename for current token
line int // line number for current token
-}{
+}
+
+var segments = []segment{
// exactly one token per line since the test consumes one token per segment
- {" line1", "dir/TestLineComments", 1},
- {"\nline2", "dir/TestLineComments", 2},
- {"\nline3 //line File1.go:100", "dir/TestLineComments", 3}, // bad line comment, ignored
- {"\nline4", "dir/TestLineComments", 4},
- {"\n//line File1.go:100\n line100", "dir/File1.go", 100},
- {"\n//line File2.go:200\n line200", "dir/File2.go", 200},
+ {" line1", filepath.Join("dir", "TestLineComments"), 1},
+ {"\nline2", filepath.Join("dir", "TestLineComments"), 2},
+ {"\nline3 //line File1.go:100", filepath.Join("dir", "TestLineComments"), 3}, // bad line comment, ignored
+ {"\nline4", filepath.Join("dir", "TestLineComments"), 4},
+ {"\n//line File1.go:100\n line100", filepath.Join("dir", "File1.go"), 100},
+ {"\n//line File2.go:200\n line200", filepath.Join("dir", "File2.go"), 200},
{"\n//line :1\n line1", "dir", 1},
- {"\n//line foo:42\n line42", "dir/foo", 42},
- {"\n //line foo:42\n line44", "dir/foo", 44}, // bad line comment, ignored
- {"\n//line foo 42\n line46", "dir/foo", 46}, // bad line comment, ignored
- {"\n//line foo:42 extra text\n line48", "dir/foo", 48}, // bad line comment, ignored
- {"\n//line /bar:42\n line42", "/bar", 42},
- {"\n//line ./foo:42\n line42", "dir/foo", 42},
- {"\n//line a/b/c/File1.go:100\n line100", "dir/a/b/c/File1.go", 100},
+ {"\n//line foo:42\n line42", filepath.Join("dir", "foo"), 42},
+ {"\n //line foo:42\n line44", filepath.Join("dir", "foo"), 44}, // bad line comment, ignored
+ {"\n//line foo 42\n line46", filepath.Join("dir", "foo"), 46}, // bad line comment, ignored
+ {"\n//line foo:42 extra text\n line48", filepath.Join("dir", "foo"), 48}, // bad line comment, ignored
+ {"\n//line /bar:42\n line42", string(filepath.Separator) + "bar", 42},
+ {"\n//line ./foo:42\n line42", filepath.Join("dir", "foo"), 42},
+ {"\n//line a/b/c/File1.go:100\n line100", filepath.Join("dir", "a", "b", "c", "File1.go"), 100},
+}
+
+var winsegments = []segment{
+ {"\n//line c:\\dir\\File1.go:100\n line100", "c:\\dir\\File1.go", 100},
}
// Verify that comments of the form "//line filename:line" are interpreted correctly.
func TestLineComments(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ segments = append(segments, winsegments...)
+ }
+
// make source
var src string
for _, e := range segments {
@@ -477,12 +487,12 @@ func TestLineComments(t *testing.T) {
// verify scan
var S Scanner
- file := fset.AddFile("dir/TestLineComments", fset.Base(), len(src))
+ file := fset.AddFile(filepath.Join("dir", "TestLineComments"), fset.Base(), len(src))
S.Init(file, []byte(src), nil, 0)
for _, s := range segments {
p, _, lit := S.Scan()
pos := file.Position(p)
- checkPos(t, string(lit), p, token.Position{s.filename, pos.Offset, s.line, pos.Column})
+ checkPos(t, lit, p, token.Position{s.filename, pos.Offset, s.line, pos.Column})
}
if S.ErrorCount != 0 {
@@ -536,10 +546,10 @@ func TestIllegalChars(t *testing.T) {
for offs, ch := range src {
pos, tok, lit := s.Scan()
if poffs := file.Offset(pos); poffs != offs {
- t.Errorf("bad position for %s: got %d, expected %d", string(lit), poffs, offs)
+ t.Errorf("bad position for %s: got %d, expected %d", lit, poffs, offs)
}
- if tok == token.ILLEGAL && string(lit) != string(ch) {
- t.Errorf("bad token: got %s, expected %s", string(lit), string(ch))
+ if tok == token.ILLEGAL && lit != string(ch) {
+ t.Errorf("bad token: got %s, expected %s", lit, string(ch))
}
}
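
The bytes.Index to bytes.LastIndex switch in interpretLineComment (above) is what the new winsegments case exercises: a Windows-style comment such as //line c:\dir\File1.go:100 has a colon inside the filename, so the filename/line separator must be the last colon, not the first. A standalone sketch of that rule (hypothetical helper, standard library only):

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	// splitLineComment mimics the separator logic: the last colon divides
	// filename from line number, so a drive letter's colon is left alone.
	func splitLineComment(text string) (filename string, line int, ok bool) {
		i := strings.LastIndex(text, ":")
		if i < 0 {
			return
		}
		n, err := strconv.Atoi(text[i+1:])
		if err != nil || n <= 0 {
			return
		}
		return text[:i], n, true
	}

	func main() {
		fmt.Println(splitLineComment(`c:\dir\File1.go:100`)) // c:\dir\File1.go 100 true
	}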
diff --git a/src/pkg/go/token/token.go b/src/pkg/go/token/token.go
index 2a2d3ecc4..a5f21df16 100644
--- a/src/pkg/go/token/token.go
+++ b/src/pkg/go/token/token.go
@@ -126,10 +126,7 @@ const (
)
-// At the moment we have no array literal syntax that lets us describe
-// the index for each element - use a map for now to make sure they are
-// in sync.
-var tokens = map[Token]string{
+var tokens = [...]string{
ILLEGAL: "ILLEGAL",
EOF: "EOF",
@@ -237,10 +234,14 @@ var tokens = map[Token]string{
// constant name (e.g. for the token IDENT, the string is "IDENT").
//
func (tok Token) String() string {
- if str, exists := tokens[tok]; exists {
- return str
+ s := ""
+ if 0 <= tok && tok < Token(len(tokens)) {
+ s = tokens[tok]
}
- return "token(" + strconv.Itoa(int(tok)) + ")"
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(tok)) + ")"
+ }
+ return s
}
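
The rewrite above trades the map for an indexed array literal plus an explicit bounds check, so String still degrades gracefully for out-of-range values. A small usage sketch against the standard go/token package:

	package main

	import (
		"fmt"
		"go/token"
	)

	func main() {
		fmt.Println(token.ADD)                  // "+" (operators print their spelling)
		fmt.Println(token.IDENT)                // "IDENT" (other tokens print their name)
		fmt.Println(token.Token(9999).String()) // "token(9999)", the fallback for unknown values
	}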
diff --git a/src/pkg/go/typechecker/Makefile b/src/pkg/go/typechecker/Makefile
index 62b2aa7fe..83af3ef4e 100644
--- a/src/pkg/go/typechecker/Makefile
+++ b/src/pkg/go/typechecker/Makefile
@@ -7,6 +7,7 @@ include ../../../Make.inc
TARG=go/typechecker
GOFILES=\
scope.go\
+ type.go\
typechecker.go\
universe.go\
diff --git a/src/pkg/go/typechecker/scope.go b/src/pkg/go/typechecker/scope.go
index 114c93ea8..a4bee6e69 100644
--- a/src/pkg/go/typechecker/scope.go
+++ b/src/pkg/go/typechecker/scope.go
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file implements scope support functions.
+// DEPRECATED FILE - WILL GO AWAY EVENTUALLY.
+//
+// Scope handling is now done in go/parser.
+// The functionality here is only present to
+// keep the typechecker running for now.
package typechecker
-import (
- "fmt"
- "go/ast"
- "go/token"
-)
+import "go/ast"
func (tc *typechecker) openScope() *ast.Scope {
@@ -24,52 +24,25 @@ func (tc *typechecker) closeScope() {
}
-// objPos computes the source position of the declaration of an object name.
-// Only required for error reporting, so doesn't have to be fast.
-func objPos(obj *ast.Object) (pos token.Pos) {
- switch d := obj.Decl.(type) {
- case *ast.Field:
- for _, n := range d.Names {
- if n.Name == obj.Name {
- return n.Pos()
- }
- }
- case *ast.ValueSpec:
- for _, n := range d.Names {
- if n.Name == obj.Name {
- return n.Pos()
- }
- }
- case *ast.TypeSpec:
- return d.Name.Pos()
- case *ast.FuncDecl:
- return d.Name.Pos()
- }
- if debug {
- fmt.Printf("decl = %T\n", obj.Decl)
- }
- panic("unreachable")
-}
-
-
// declInScope declares an object of a given kind and name in scope and sets the object's Decl and N fields.
// It returns the newly allocated object. If an object with the same name already exists in scope, an error
// is reported and the object is not inserted.
-// (Objects with _ name are always inserted into a scope without errors, but they cannot be found.)
-func (tc *typechecker) declInScope(scope *ast.Scope, kind ast.Kind, name *ast.Ident, decl interface{}, n int) *ast.Object {
+func (tc *typechecker) declInScope(scope *ast.Scope, kind ast.ObjKind, name *ast.Ident, decl interface{}, n int) *ast.Object {
obj := ast.NewObj(kind, name.Name)
obj.Decl = decl
- obj.N = n
+ //obj.N = n
name.Obj = obj
- if alt := scope.Insert(obj); alt != obj {
- tc.Errorf(name.Pos(), "%s already declared at %s", name.Name, objPos(alt))
+ if name.Name != "_" {
+ if alt := scope.Insert(obj); alt != nil {
+ tc.Errorf(name.Pos(), "%s already declared at %s", name.Name, tc.fset.Position(alt.Pos()).String())
+ }
}
return obj
}
// decl is the same as declInScope(tc.topScope, ...)
-func (tc *typechecker) decl(kind ast.Kind, name *ast.Ident, decl interface{}, n int) *ast.Object {
+func (tc *typechecker) decl(kind ast.ObjKind, name *ast.Ident, decl interface{}, n int) *ast.Object {
return tc.declInScope(tc.topScope, kind, name, decl, n)
}
@@ -91,7 +64,7 @@ func (tc *typechecker) find(name *ast.Ident) (obj *ast.Object) {
// findField returns the object with the given name if visible in the type's scope.
// If no such object is found, an error is reported and a bad object is returned instead.
-func (tc *typechecker) findField(typ *ast.Type, name *ast.Ident) (obj *ast.Object) {
+func (tc *typechecker) findField(typ *Type, name *ast.Ident) (obj *ast.Object) {
// TODO(gri) This is simplistic at the moment and ignores anonymous fields.
obj = typ.Scope.Lookup(name.Name)
if obj == nil {
@@ -100,20 +73,3 @@ func (tc *typechecker) findField(typ *ast.Type, name *ast.Ident) (obj *ast.Objec
}
return
}
-
-
-// printScope prints the objects in a scope.
-func printScope(scope *ast.Scope) {
- fmt.Printf("scope %p {", scope)
- if scope != nil && len(scope.Objects) > 0 {
- fmt.Println()
- for _, obj := range scope.Objects {
- form := "void"
- if obj.Type != nil {
- form = obj.Type.Form.String()
- }
- fmt.Printf("\t%s\t%s\n", obj.Name, form)
- }
- }
- fmt.Printf("}\n")
-}
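
Both this file and universe.go (further below) now compare Insert's result against nil instead of against the inserted object: ast.Scope.Insert returns nil on success and the previously declared object on a name collision. A minimal demonstration using go/ast directly:

	package main

	import (
		"fmt"
		"go/ast"
	)

	func main() {
		scope := ast.NewScope(nil)

		x := ast.NewObj(ast.Var, "x")
		if alt := scope.Insert(x); alt == nil {
			fmt.Println("x declared") // first insert succeeds, returns nil
		}

		y := ast.NewObj(ast.Var, "x")
		if alt := scope.Insert(y); alt != nil {
			fmt.Println("collision with earlier", alt.Kind) // returns the existing object
		}
	}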
diff --git a/src/pkg/go/typechecker/testdata/test0.go b/src/pkg/go/typechecker/testdata/test0.src
index 4e317f214..4e317f214 100644
--- a/src/pkg/go/typechecker/testdata/test0.go
+++ b/src/pkg/go/typechecker/testdata/test0.src
diff --git a/src/pkg/go/typechecker/testdata/test1.go b/src/pkg/go/typechecker/testdata/test1.src
index b0808ee7a..b5531fb9f 100644
--- a/src/pkg/go/typechecker/testdata/test1.go
+++ b/src/pkg/go/typechecker/testdata/test1.src
@@ -7,7 +7,7 @@
package P1
const (
- c1 /* ERROR "missing initializer" */
+ c1 = 0
c2 int = 0
c3, c4 = 0
)
diff --git a/src/pkg/go/typechecker/testdata/test3.go b/src/pkg/go/typechecker/testdata/test3.src
index ea35808a0..2e1a9fa8f 100644
--- a/src/pkg/go/typechecker/testdata/test3.go
+++ b/src/pkg/go/typechecker/testdata/test3.src
@@ -27,8 +27,11 @@ func (T) m1 /* ERROR "already declared" */ () {}
func (x *T) m2(u, x /* ERROR "already declared" */ int) {}
func (x *T) m3(a, b, c int) (u, x /* ERROR "already declared" */ int) {}
-func (T) _(x, x /* ERROR "already declared" */ int) {}
-func (T) _() (x, x /* ERROR "already declared" */ int) {}
+// The following are disabled for now because the typechecker
+// is in the process of being rewritten and cannot handle them
+// at the moment.
+//func (T) _(x, x /* "already declared" */ int) {}
+//func (T) _() (x, x /* "already declared" */ int) {}
//func (PT) _() {}
diff --git a/src/pkg/go/typechecker/testdata/test4.go b/src/pkg/go/typechecker/testdata/test4.src
index bb9aee3ad..94d3558f9 100644
--- a/src/pkg/go/typechecker/testdata/test4.go
+++ b/src/pkg/go/typechecker/testdata/test4.src
@@ -7,5 +7,5 @@
package P4
const (
- c0 /* ERROR "missing initializer" */
+ c0 = 0
)
diff --git a/src/pkg/go/typechecker/type.go b/src/pkg/go/typechecker/type.go
new file mode 100644
index 000000000..62b4e9d3e
--- /dev/null
+++ b/src/pkg/go/typechecker/type.go
@@ -0,0 +1,125 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typechecker
+
+import "go/ast"
+
+
+// A Type represents a Go type.
+type Type struct {
+ Form Form
+ Obj *ast.Object // corresponding type name, or nil
+ Scope *ast.Scope // fields and methods, always present
+ N uint // basic type id, array length, number of function results, or channel direction
+ Key, Elt *Type // map key; element of array, pointer, slice, map, or channel
+ Params *ast.Scope // function (receiver, input and result) parameters, tuple expressions (results of function calls), or nil
+ Expr ast.Expr // corresponding AST expression
+}
+
+
+// NewType creates a new type of a given form.
+func NewType(form Form) *Type {
+ return &Type{Form: form, Scope: ast.NewScope(nil)}
+}
+
+
+// Form describes the form of a type.
+type Form int
+
+// The list of possible type forms.
+const (
+ BadType Form = iota // for error handling
+ Unresolved // type not fully set up
+ Basic
+ Array
+ Struct
+ Pointer
+ Function
+ Method
+ Interface
+ Slice
+ Map
+ Channel
+ Tuple
+)
+
+
+var formStrings = [...]string{
+ BadType: "badType",
+ Unresolved: "unresolved",
+ Basic: "basic",
+ Array: "array",
+ Struct: "struct",
+ Pointer: "pointer",
+ Function: "function",
+ Method: "method",
+ Interface: "interface",
+ Slice: "slice",
+ Map: "map",
+ Channel: "channel",
+ Tuple: "tuple",
+}
+
+
+func (form Form) String() string { return formStrings[form] }
+
+
+// The list of basic type ids.
+const (
+ Bool = iota
+ Byte
+ Uint
+ Int
+ Float
+ Complex
+ Uintptr
+ String
+
+ Uint8
+ Uint16
+ Uint32
+ Uint64
+
+ Int8
+ Int16
+ Int32
+ Int64
+
+ Float32
+ Float64
+
+ Complex64
+ Complex128
+
+ // TODO(gri) ideal types are missing
+)
+
+
+var BasicTypes = map[uint]string{
+ Bool: "bool",
+ Byte: "byte",
+ Uint: "uint",
+ Int: "int",
+ Float: "float",
+ Complex: "complex",
+ Uintptr: "uintptr",
+ String: "string",
+
+ Uint8: "uint8",
+ Uint16: "uint16",
+ Uint32: "uint32",
+ Uint64: "uint64",
+
+ Int8: "int8",
+ Int16: "int16",
+ Int32: "int32",
+ Int64: "int64",
+
+ Float32: "float32",
+ Float64: "float64",
+
+ Complex64: "complex64",
+ Complex128: "complex128",
+}
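
formStrings above uses the same indexed-array-literal technique the token.go change adopts: the index expressions keep the table aligned with the iota-generated constants even if entries are added or reordered. A self-contained sketch of the pattern (hypothetical Color type):

	package main

	import "fmt"

	type Color int

	const (
		Red Color = iota
		Green
		Blue
	)

	// The indexed literal keeps the names in sync with the constants above.
	var colorNames = [...]string{
		Red:   "red",
		Green: "green",
		Blue:  "blue",
	}

	func (c Color) String() string { return colorNames[c] }

	func main() {
		fmt.Println(Green) // "green"
	}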
diff --git a/src/pkg/go/typechecker/typechecker.go b/src/pkg/go/typechecker/typechecker.go
index e9aefa240..b151f5834 100644
--- a/src/pkg/go/typechecker/typechecker.go
+++ b/src/pkg/go/typechecker/typechecker.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// INCOMPLETE PACKAGE.
+// DEPRECATED PACKAGE - SEE go/types INSTEAD.
// This package implements typechecking of a Go AST.
// The result of the typecheck is an augmented AST
// with object and type information for each identifier.
@@ -53,7 +53,7 @@ func CheckPackage(fset *token.FileSet, pkg *ast.Package, importer Importer) os.E
//
func CheckFile(fset *token.FileSet, file *ast.File, importer Importer) os.Error {
// create a single-file dummy package
- pkg := &ast.Package{file.Name.Name, nil, map[string]*ast.File{fset.Position(file.Name.NamePos).Filename: file}}
+ pkg := &ast.Package{file.Name.Name, nil, nil, map[string]*ast.File{fset.Position(file.Name.NamePos).Filename: file}}
return CheckPackage(fset, pkg, importer)
}
@@ -65,6 +65,7 @@ type typechecker struct {
fset *token.FileSet
scanner.ErrorVector
importer Importer
+ globals []*ast.Object // list of global objects
topScope *ast.Scope // current top-most scope
cyclemap map[*ast.Object]bool // for cycle detection
iota int // current value of iota
@@ -94,7 +95,7 @@ phase 1: declare all global objects; also collect all function and method declar
- report global double declarations
phase 2: bind methods to their receiver base types
- - received base types must be declared in the package, thus for
+ - receiver base types must be declared in the package, thus for
each method a corresponding (unresolved) type must exist
- report method double declarations and errors with base types
@@ -142,16 +143,16 @@ func (tc *typechecker) checkPackage(pkg *ast.Package) {
}
// phase 3: resolve all global objects
- // (note that objects with _ name are also in the scope)
tc.cyclemap = make(map[*ast.Object]bool)
- for _, obj := range tc.topScope.Objects {
+ for _, obj := range tc.globals {
tc.resolve(obj)
}
assert(len(tc.cyclemap) == 0)
// 4: sequentially typecheck function and method bodies
for _, f := range funcs {
- tc.checkBlock(f.Body.List, f.Name.Obj.Type)
+ ftype, _ := f.Name.Obj.Type.(*Type)
+ tc.checkBlock(f.Body.List, ftype)
}
pkg.Scope = tc.topScope
@@ -183,11 +184,11 @@ func (tc *typechecker) declGlobal(global ast.Decl) {
}
}
for _, name := range s.Names {
- tc.decl(ast.Con, name, s, iota)
+ tc.globals = append(tc.globals, tc.decl(ast.Con, name, s, iota))
}
case token.VAR:
for _, name := range s.Names {
- tc.decl(ast.Var, name, s, 0)
+ tc.globals = append(tc.globals, tc.decl(ast.Var, name, s, 0))
}
default:
panic("unreachable")
@@ -196,9 +197,10 @@ func (tc *typechecker) declGlobal(global ast.Decl) {
iota++
case *ast.TypeSpec:
obj := tc.decl(ast.Typ, s.Name, s, 0)
+ tc.globals = append(tc.globals, obj)
// give all type objects an unresolved type so
// that we can collect methods in the type scope
- typ := ast.NewType(ast.Unresolved)
+ typ := NewType(Unresolved)
obj.Type = typ
typ.Obj = obj
default:
@@ -208,7 +210,7 @@ func (tc *typechecker) declGlobal(global ast.Decl) {
case *ast.FuncDecl:
if d.Recv == nil {
- tc.decl(ast.Fun, d.Name, d, 0)
+ tc.globals = append(tc.globals, tc.decl(ast.Fun, d.Name, d, 0))
}
default:
@@ -239,8 +241,8 @@ func (tc *typechecker) bindMethod(method *ast.FuncDecl) {
} else if obj.Kind != ast.Typ {
tc.Errorf(name.Pos(), "invalid receiver: %s is not a type", name.Name)
} else {
- typ := obj.Type
- assert(typ.Form == ast.Unresolved)
+ typ := obj.Type.(*Type)
+ assert(typ.Form == Unresolved)
scope = typ.Scope
}
}
@@ -261,7 +263,7 @@ func (tc *typechecker) bindMethod(method *ast.FuncDecl) {
func (tc *typechecker) resolve(obj *ast.Object) {
// check for declaration cycles
if tc.cyclemap[obj] {
- tc.Errorf(objPos(obj), "illegal cycle in declaration of %s", obj.Name)
+ tc.Errorf(obj.Pos(), "illegal cycle in declaration of %s", obj.Name)
obj.Kind = ast.Bad
return
}
@@ -271,7 +273,7 @@ func (tc *typechecker) resolve(obj *ast.Object) {
}()
// resolve non-type objects
- typ := obj.Type
+ typ, _ := obj.Type.(*Type)
if typ == nil {
switch obj.Kind {
case ast.Bad:
@@ -282,12 +284,12 @@ func (tc *typechecker) resolve(obj *ast.Object) {
case ast.Var:
tc.declVar(obj)
- //obj.Type = tc.typeFor(nil, obj.Decl.(*ast.ValueSpec).Type, false)
+ obj.Type = tc.typeFor(nil, obj.Decl.(*ast.ValueSpec).Type, false)
case ast.Fun:
- obj.Type = ast.NewType(ast.Function)
+ obj.Type = NewType(Function)
t := obj.Decl.(*ast.FuncDecl).Type
- tc.declSignature(obj.Type, nil, t.Params, t.Results)
+ tc.declSignature(obj.Type.(*Type), nil, t.Params, t.Results)
default:
// type objects have non-nil types when resolve is called
@@ -300,32 +302,34 @@ func (tc *typechecker) resolve(obj *ast.Object) {
}
// resolve type objects
- if typ.Form == ast.Unresolved {
+ if typ.Form == Unresolved {
tc.typeFor(typ, typ.Obj.Decl.(*ast.TypeSpec).Type, false)
// provide types for all methods
for _, obj := range typ.Scope.Objects {
if obj.Kind == ast.Fun {
assert(obj.Type == nil)
- obj.Type = ast.NewType(ast.Method)
+ obj.Type = NewType(Method)
f := obj.Decl.(*ast.FuncDecl)
t := f.Type
- tc.declSignature(obj.Type, f.Recv, t.Params, t.Results)
+ tc.declSignature(obj.Type.(*Type), f.Recv, t.Params, t.Results)
}
}
}
}
-func (tc *typechecker) checkBlock(body []ast.Stmt, ftype *ast.Type) {
+func (tc *typechecker) checkBlock(body []ast.Stmt, ftype *Type) {
tc.openScope()
defer tc.closeScope()
// inject function/method parameters into block scope, if any
if ftype != nil {
for _, par := range ftype.Params.Objects {
- obj := tc.topScope.Insert(par)
- assert(obj == par) // ftype has no double declarations
+ if par.Name != "_" {
+ alt := tc.topScope.Insert(par)
+ assert(alt == nil) // ftype has no double declarations
+ }
}
}
@@ -362,8 +366,8 @@ func (tc *typechecker) declFields(scope *ast.Scope, fields *ast.FieldList, ref b
}
-func (tc *typechecker) declSignature(typ *ast.Type, recv, params, results *ast.FieldList) {
- assert((typ.Form == ast.Method) == (recv != nil))
+func (tc *typechecker) declSignature(typ *Type, recv, params, results *ast.FieldList) {
+ assert((typ.Form == Method) == (recv != nil))
typ.Params = ast.NewScope(nil)
tc.declFields(typ.Params, recv, true)
tc.declFields(typ.Params, params, true)
@@ -371,7 +375,7 @@ func (tc *typechecker) declSignature(typ *ast.Type, recv, params, results *ast.F
}
-func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Type) {
+func (tc *typechecker) typeFor(def *Type, x ast.Expr, ref bool) (typ *Type) {
x = unparen(x)
// type name
@@ -381,10 +385,10 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Ty
if obj.Kind != ast.Typ {
tc.Errorf(t.Pos(), "%s is not a type", t.Name)
if def == nil {
- typ = ast.NewType(ast.BadType)
+ typ = NewType(BadType)
} else {
typ = def
- typ.Form = ast.BadType
+ typ.Form = BadType
}
typ.Expr = x
return
@@ -393,7 +397,7 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Ty
if !ref {
tc.resolve(obj) // check for cycles even if type resolved
}
- typ = obj.Type
+ typ = obj.Type.(*Type)
if def != nil {
// new type declaration: copy type structure
@@ -410,7 +414,7 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Ty
// type literal
typ = def
if typ == nil {
- typ = ast.NewType(ast.BadType)
+ typ = NewType(BadType)
}
typ.Expr = x
@@ -419,42 +423,42 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Ty
if debug {
fmt.Println("qualified identifier unimplemented")
}
- typ.Form = ast.BadType
+ typ.Form = BadType
case *ast.StarExpr:
- typ.Form = ast.Pointer
+ typ.Form = Pointer
typ.Elt = tc.typeFor(nil, t.X, true)
case *ast.ArrayType:
if t.Len != nil {
- typ.Form = ast.Array
+ typ.Form = Array
// TODO(gri) compute the real length
// (this may call resolve recursively)
(*typ).N = 42
} else {
- typ.Form = ast.Slice
+ typ.Form = Slice
}
typ.Elt = tc.typeFor(nil, t.Elt, t.Len == nil)
case *ast.StructType:
- typ.Form = ast.Struct
+ typ.Form = Struct
tc.declFields(typ.Scope, t.Fields, false)
case *ast.FuncType:
- typ.Form = ast.Function
+ typ.Form = Function
tc.declSignature(typ, nil, t.Params, t.Results)
case *ast.InterfaceType:
- typ.Form = ast.Interface
+ typ.Form = Interface
tc.declFields(typ.Scope, t.Methods, true)
case *ast.MapType:
- typ.Form = ast.Map
+ typ.Form = Map
typ.Key = tc.typeFor(nil, t.Key, true)
typ.Elt = tc.typeFor(nil, t.Value, true)
case *ast.ChanType:
- typ.Form = ast.Channel
+ typ.Form = Channel
typ.N = uint(t.Dir)
typ.Elt = tc.typeFor(nil, t.Value, true)
diff --git a/src/pkg/go/typechecker/typechecker_test.go b/src/pkg/go/typechecker/typechecker_test.go
index 33f4a6223..d16e06921 100644
--- a/src/pkg/go/typechecker/typechecker_test.go
+++ b/src/pkg/go/typechecker/typechecker_test.go
@@ -78,7 +78,7 @@ func expectedErrors(t *testing.T, pkg *ast.Package) (list scanner.ErrorList) {
case token.EOF:
break loop
case token.COMMENT:
- s := errRx.FindSubmatch(lit)
+ s := errRx.FindStringSubmatch(lit)
if len(s) == 2 {
list = append(list, &scanner.Error{fset.Position(prev), string(s[1])})
}
@@ -93,7 +93,7 @@ func expectedErrors(t *testing.T, pkg *ast.Package) (list scanner.ErrorList) {
func testFilter(f *os.FileInfo) bool {
- return strings.HasSuffix(f.Name, ".go") && f.Name[0] != '.'
+ return strings.HasSuffix(f.Name, ".src") && f.Name[0] != '.'
}
diff --git a/src/pkg/go/typechecker/universe.go b/src/pkg/go/typechecker/universe.go
index db950737f..abc8bbbd4 100644
--- a/src/pkg/go/typechecker/universe.go
+++ b/src/pkg/go/typechecker/universe.go
@@ -14,7 +14,7 @@ var Universe *ast.Scope
func def(obj *ast.Object) {
alt := Universe.Insert(obj)
- if alt != obj {
+ if alt != nil {
panic("object declared twice")
}
}
@@ -24,8 +24,8 @@ func init() {
Universe = ast.NewScope(nil)
// basic types
- for n, name := range ast.BasicTypes {
- typ := ast.NewType(ast.Basic)
+ for n, name := range BasicTypes {
+ typ := NewType(Basic)
typ.N = n
obj := ast.NewObj(ast.Typ, name)
obj.Type = typ
diff --git a/src/pkg/go/types/Makefile b/src/pkg/go/types/Makefile
new file mode 100644
index 000000000..54e762b36
--- /dev/null
+++ b/src/pkg/go/types/Makefile
@@ -0,0 +1,15 @@
+# Copyright 2010 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../../Make.inc
+
+TARG=go/types
+GOFILES=\
+ const.go\
+ exportdata.go\
+ gcimporter.go\
+ types.go\
+ universe.go\
+
+include ../../../Make.pkg
diff --git a/src/pkg/go/types/const.go b/src/pkg/go/types/const.go
new file mode 100644
index 000000000..6fdc22f6b
--- /dev/null
+++ b/src/pkg/go/types/const.go
@@ -0,0 +1,347 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements operations on ideal constants.
+
+package types
+
+import (
+ "big"
+ "go/token"
+ "strconv"
+)
+
+
+// TODO(gri) Consider changing the API so Const is an interface
+// and operations on consts don't have to type switch.
+
+// A Const implements an ideal constant Value.
+// The zero value for a Const is not a valid constant value.
+type Const struct {
+ // representation of constant values:
+ // ideal bool -> bool
+ // ideal int -> *big.Int
+ // ideal float -> *big.Rat
+ // ideal complex -> cmplx
+ // ideal string -> string
+ val interface{}
+}
+
+
+// Representation of complex values.
+type cmplx struct {
+ re, im *big.Rat
+}
+
+
+func assert(cond bool) {
+ if !cond {
+ panic("go/types internal error: assertion failed")
+ }
+}
+
+
+// MakeConst makes an ideal constant from a literal
+// token and the corresponding literal string.
+func MakeConst(tok token.Token, lit string) Const {
+ switch tok {
+ case token.INT:
+ var x big.Int
+ _, ok := x.SetString(lit, 0)
+ assert(ok)
+ return Const{&x}
+ case token.FLOAT:
+ var y big.Rat
+ _, ok := y.SetString(lit)
+ assert(ok)
+ return Const{&y}
+ case token.IMAG:
+ assert(lit[len(lit)-1] == 'i')
+ var im big.Rat
+ _, ok := im.SetString(lit[0 : len(lit)-1])
+ assert(ok)
+ return Const{cmplx{big.NewRat(0, 1), &im}}
+ case token.CHAR:
+ assert(lit[0] == '\'' && lit[len(lit)-1] == '\'')
+ code, _, _, err := strconv.UnquoteChar(lit[1:len(lit)-1], '\'')
+ assert(err == nil)
+ return Const{big.NewInt(int64(code))}
+ case token.STRING:
+ s, err := strconv.Unquote(lit)
+ assert(err == nil)
+ return Const{s}
+ }
+ panic("unreachable")
+}
+
+
+// MakeZero returns the zero constant for the given type.
+func MakeZero(typ *Type) Const {
+ // TODO(gri) fix this
+ return Const{0}
+}
+
+
+// Match attempts to match the internal constant representations of x and y.
+// If the attempt is successful, the results are the values of x and y,
+// converted if necessary to the same internal representation; otherwise
+// the results are invalid.
+func (x Const) Match(y Const) (u, v Const) {
+ switch a := x.val.(type) {
+ case bool:
+ if _, ok := y.val.(bool); ok {
+ u, v = x, y
+ }
+ case *big.Int:
+ switch y.val.(type) {
+ case *big.Int:
+ u, v = x, y
+ case *big.Rat:
+ var z big.Rat
+ z.SetInt(a)
+ u, v = Const{&z}, y
+ case cmplx:
+ var z big.Rat
+ z.SetInt(a)
+ u, v = Const{cmplx{&z, big.NewRat(0, 1)}}, y
+ }
+ case *big.Rat:
+ switch y.val.(type) {
+ case *big.Int:
+ v, u = y.Match(x)
+ case *big.Rat:
+ u, v = x, y
+ case cmplx:
+ u, v = Const{cmplx{a, big.NewRat(0, 1)}}, y // 0/1, not 0/0: NewRat panics on a zero denominator
+ }
+ case cmplx:
+ switch y.val.(type) {
+ case *big.Int, *big.Rat:
+ v, u = y.Match(x)
+ case cmplx:
+ u, v = x, y
+ }
+ case string:
+ if _, ok := y.val.(string); ok {
+ u, v = x, y
+ }
+ default:
+ panic("unreachable")
+ }
+ return
+}
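
Taken together with BinaryOp below, Match lets mixed ideal operands be combined in one representation. A hypothetical usage sketch against this package's API (assume fmt and go/token are imported):

	x := MakeConst(token.INT, "1")
	y := MakeConst(token.FLOAT, "2.5")
	u, v := x.Match(y)              // u's *big.Int is converted to a *big.Rat
	sum := u.BinaryOp(token.ADD, v) // arithmetic now happens on two *big.Rat values
	fmt.Println(sum)                // "3.5000000000", per String's FloatString(10) below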
+
+
+// Convert attempts to convert the constant x to a given type.
+// If the attempt is successful, the result is the new constant;
+// otherwise the result is invalid.
+func (x Const) Convert(typ *Type) Const {
+ // TODO(gri) implement this
+ switch x := x.val.(type) {
+ case bool:
+ case *big.Int:
+ case *big.Rat:
+ case cmplx:
+ case string:
+ }
+ return x
+}
+
+
+func (x Const) String() string {
+ switch x := x.val.(type) {
+ case bool:
+ if x {
+ return "true"
+ }
+ return "false"
+ case *big.Int:
+ return x.String()
+ case *big.Rat:
+ return x.FloatString(10) // 10 digits of precision after decimal point seems fine
+ case cmplx:
+ // TODO(gri) don't print 0 components
+ return x.re.FloatString(10) + " + " + x.im.FloatString(10) + "i"
+ case string:
+ return x
+ }
+ panic("unreachable")
+}
+
+
+func (x Const) UnaryOp(op token.Token) Const {
+ panic("unimplemented")
+}
+
+
+func (x Const) BinaryOp(op token.Token, y Const) Const {
+ var z interface{}
+ switch x := x.val.(type) {
+ case bool:
+ z = binaryBoolOp(x, op, y.val.(bool))
+ case *big.Int:
+ z = binaryIntOp(x, op, y.val.(*big.Int))
+ case *big.Rat:
+ z = binaryFloatOp(x, op, y.val.(*big.Rat))
+ case cmplx:
+ z = binaryCmplxOp(x, op, y.val.(cmplx))
+ case string:
+ z = binaryStringOp(x, op, y.val.(string))
+ default:
+ panic("unreachable")
+ }
+ return Const{z}
+}
+
+
+func binaryBoolOp(x bool, op token.Token, y bool) interface{} {
+ switch op {
+ case token.EQL:
+ return x == y
+ case token.NEQ:
+ return x != y
+ }
+ panic("unreachable")
+}
+
+
+func binaryIntOp(x *big.Int, op token.Token, y *big.Int) interface{} {
+ var z big.Int
+ switch op {
+ case token.ADD:
+ return z.Add(x, y)
+ case token.SUB:
+ return z.Sub(x, y)
+ case token.MUL:
+ return z.Mul(x, y)
+ case token.QUO:
+ return z.Quo(x, y)
+ case token.REM:
+ return z.Rem(x, y)
+ case token.AND:
+ return z.And(x, y)
+ case token.OR:
+ return z.Or(x, y)
+ case token.XOR:
+ return z.Xor(x, y)
+ case token.AND_NOT:
+ return z.AndNot(x, y)
+ case token.SHL:
+ panic("unimplemented")
+ case token.SHR:
+ panic("unimplemented")
+ case token.EQL:
+ return x.Cmp(y) == 0
+ case token.NEQ:
+ return x.Cmp(y) != 0
+ case token.LSS:
+ return x.Cmp(y) < 0
+ case token.LEQ:
+ return x.Cmp(y) <= 0
+ case token.GTR:
+ return x.Cmp(y) > 0
+ case token.GEQ:
+ return x.Cmp(y) >= 0
+ }
+ panic("unreachable")
+}
+
+
+func binaryFloatOp(x *big.Rat, op token.Token, y *big.Rat) interface{} {
+ var z big.Rat
+ switch op {
+ case token.ADD:
+ return z.Add(x, y)
+ case token.SUB:
+ return z.Sub(x, y)
+ case token.MUL:
+ return z.Mul(x, y)
+ case token.QUO:
+ return z.Quo(x, y)
+ case token.EQL:
+ return x.Cmp(y) == 0
+ case token.NEQ:
+ return x.Cmp(y) != 0
+ case token.LSS:
+ return x.Cmp(y) < 0
+ case token.LEQ:
+ return x.Cmp(y) <= 0
+ case token.GTR:
+ return x.Cmp(y) > 0
+ case token.GEQ:
+ return x.Cmp(y) >= 0
+ }
+ panic("unreachable")
+}
+
+
+func binaryCmplxOp(x cmplx, op token.Token, y cmplx) interface{} {
+ a, b := x.re, x.im
+ c, d := y.re, y.im
+ switch op {
+ case token.ADD:
+ // (a+c) + i(b+d)
+ var re, im big.Rat
+ re.Add(a, c)
+ im.Add(b, d)
+ return cmplx{&re, &im}
+ case token.SUB:
+ // (a-c) + i(b-d)
+ var re, im big.Rat
+ re.Sub(a, c)
+ im.Sub(b, d)
+ return cmplx{&re, &im}
+ case token.MUL:
+ // (ac-bd) + i(bc+ad)
+ var ac, bd, bc, ad big.Rat
+ ac.Mul(a, c)
+ bd.Mul(b, d)
+ bc.Mul(b, c)
+ ad.Mul(a, d)
+ var re, im big.Rat
+ re.Sub(&ac, &bd)
+ im.Add(&bc, &ad)
+ return cmplx{&re, &im}
+ case token.QUO:
+ // (ac+bd)/s + i(bc-ad)/s, with s = cc + dd
+ var ac, bd, bc, ad, cc, dd, s big.Rat
+ ac.Mul(a, c)
+ bd.Mul(b, d)
+ bc.Mul(b, c)
+ ad.Mul(a, d)
+ cc.Mul(c, c) // square into temporaries so y's components are not clobbered
+ dd.Mul(d, d)
+ s.Add(&cc, &dd)
+ var re, im big.Rat
+ re.Add(&ac, &bd)
+ re.Quo(&re, &s)
+ im.Sub(&bc, &ad)
+ im.Quo(&im, &s)
+ return cmplx{&re, &im}
+ case token.EQL:
+ return a.Cmp(c) == 0 && b.Cmp(d) == 0
+ case token.NEQ:
+ return a.Cmp(c) != 0 || b.Cmp(d) != 0
+ }
+ panic("unreachable")
+}
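
The QUO case follows from multiplying numerator and denominator by the conjugate of the divisor:

\[
\frac{a+bi}{c+di}
  = \frac{(a+bi)(c-di)}{(c+di)(c-di)}
  = \frac{(ac+bd) + (bc-ad)i}{c^2+d^2}
\]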
+
+
+func binaryStringOp(x string, op token.Token, y string) interface{} {
+ switch op {
+ case token.ADD:
+ return x + y
+ case token.EQL:
+ return x == y
+ case token.NEQ:
+ return x != y
+ case token.LSS:
+ return x < y
+ case token.LEQ:
+ return x <= y
+ case token.GTR:
+ return x > y
+ case token.GEQ:
+ return x >= y
+ }
+ panic("unreachable")
+}
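
The ideal-constant representations bottom out in the big package (math/big in today's tree, imported as plain "big" above). A standalone sketch of the two numeric cases MakeConst relies on:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		// ideal int: SetString with base 0 accepts every Go literal base
		var x big.Int
		x.SetString("0x7f", 0)
		fmt.Println(x.String()) // 127

		// ideal float: big.Rat keeps the value exact, unlike float64
		var y big.Rat
		y.SetString("3.14159265")
		fmt.Println(y.FloatString(10)) // 3.1415926500
	}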
diff --git a/src/pkg/go/types/exportdata.go b/src/pkg/go/types/exportdata.go
new file mode 100644
index 000000000..cb08ffe18
--- /dev/null
+++ b/src/pkg/go/types/exportdata.go
@@ -0,0 +1,135 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements ExportData.
+
+package types
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+
+func readGopackHeader(buf *bufio.Reader) (name string, size int, err os.Error) {
+ // See $GOROOT/include/ar.h.
+ hdr := make([]byte, 64+12+6+6+8+10+2)
+ _, err = io.ReadFull(buf, hdr)
+ if err != nil {
+ return
+ }
+ if trace {
+ fmt.Printf("header: %s", hdr)
+ }
+ s := strings.TrimSpace(string(hdr[64+12+6+6+8:][:10]))
+ size, err = strconv.Atoi(s)
+ if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
+ err = os.ErrorString("invalid archive header")
+ return
+ }
+ name = strings.TrimSpace(string(hdr[:64]))
+ return
+}
+
+
+type dataReader struct {
+ *bufio.Reader
+ io.Closer
+}
+
+
+// ExportData returns an io.ReadCloser positioned at the beginning of the
+// export data section of the given object/archive file, or an error.
+// It is the caller's responsibility to close the returned reader.
+//
+func ExportData(filename string) (rc io.ReadCloser, err os.Error) {
+ file, err := os.Open(filename)
+ if err != nil {
+ return
+ }
+
+ defer func() {
+ if err != nil {
+ file.Close()
+ // Add file name to error.
+ err = fmt.Errorf("reading export data: %s: %v", filename, err)
+ }
+ }()
+
+ buf := bufio.NewReader(file)
+
+ // Read first line to make sure this is an object file.
+ line, err := buf.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ if string(line) == "!<arch>\n" {
+ // Archive file. Scan to __.PKGDEF, which should
+ // be second archive entry.
+ var name string
+ var size int
+
+ // First entry should be __.SYMDEF.
+ // Read and discard.
+ if name, size, err = readGopackHeader(buf); err != nil {
+ return
+ }
+ if name != "__.SYMDEF" {
+ err = os.ErrorString("go archive does not begin with __.SYMDEF")
+ return
+ }
+ const block = 4096
+ tmp := make([]byte, block)
+ for size > 0 {
+ n := size
+ if n > block {
+ n = block
+ }
+ _, err = io.ReadFull(buf, tmp[:n])
+ if err != nil {
+ return
+ }
+ size -= n
+ }
+
+ // Second entry should be __.PKGDEF.
+ if name, size, err = readGopackHeader(buf); err != nil {
+ return
+ }
+ if name != "__.PKGDEF" {
+ err = os.ErrorString("go archive is missing __.PKGDEF")
+ return
+ }
+
+ // Read first line of __.PKGDEF data, so that line
+ // is once again the first line of the input.
+ line, err = buf.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ }
+
+ // Now at __.PKGDEF in archive or still at beginning of file.
+ // Either way, line should begin with "go object ".
+ if !strings.HasPrefix(string(line), "go object ") {
+ err = os.ErrorString("not a go object file")
+ return
+ }
+
+ // Skip over object header to export data.
+ // Begins after first line with $$.
+ for line[0] != '$' {
+ line, err = buf.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ }
+
+ rc = &dataReader{buf, file}
+ return
+}
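
A hypothetical caller of ExportData, showing the intended open/read/close discipline (the helper name and archive path are invented for illustration; assume io/ioutil is imported within package types):

	// dumpExportData reads everything from the start of the export data onward.
	func dumpExportData(filename string) ([]byte, os.Error) {
		rc, err := ExportData(filename)
		if err != nil {
			return nil, err
		}
		defer rc.Close() // ExportData leaves closing to the caller
		return ioutil.ReadAll(rc)
	}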
diff --git a/src/pkg/go/types/gcimporter.go b/src/pkg/go/types/gcimporter.go
new file mode 100644
index 000000000..9e0ae6285
--- /dev/null
+++ b/src/pkg/go/types/gcimporter.go
@@ -0,0 +1,786 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements an ast.Importer for gc generated object files.
+// TODO(gri) Eventually move this into a separate package outside types.
+
+package types
+
+import (
+ "big"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "io"
+ "os"
+ "path/filepath"
+ "runtime"
+ "scanner"
+ "strconv"
+)
+
+
+const trace = false // set to true for debugging
+
+var (
+ pkgRoot = filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH)
+ pkgExts = [...]string{".a", ".5", ".6", ".8"}
+)
+
+
+// findPkg returns the filename and package id for an import path.
+// If no file was found, an empty filename is returned.
+func findPkg(path string) (filename, id string) {
+ if len(path) == 0 {
+ return
+ }
+
+ id = path
+ var noext string
+ switch path[0] {
+ default:
+ // "x" -> "$GOROOT/pkg/$GOOS_$GOARCH/x.ext", "x"
+ noext = filepath.Join(pkgRoot, path)
+
+ case '.':
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ cwd, err := os.Getwd()
+ if err != nil {
+ return
+ }
+ noext = filepath.Join(cwd, path)
+ id = noext
+
+ case '/':
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ if f, err := os.Stat(filename); err == nil && f.IsRegular() {
+ return
+ }
+ }
+
+ filename = "" // not found
+ return
+}
+
+
+// gcParser parses the exports inside a gc compiler-produced
+// object/archive file and populates its scope with the results.
+type gcParser struct {
+ scanner scanner.Scanner
+ tok int // current token
+ lit string // literal string; only valid for Ident, Int, String tokens
+ id string // package id of imported package
+ scope *ast.Scope // scope of imported package; alias for deps[id]
+ deps map[string]*ast.Scope // package id -> package scope
+}
+
+
+func (p *gcParser) init(filename, id string, src io.Reader) {
+ p.scanner.Init(src)
+ p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
+ p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+ p.scanner.Whitespace = 1<<'\t' | 1<<' '
+ p.scanner.Filename = filename // for good error messages
+ p.next()
+ p.id = id
+ p.scope = ast.NewScope(nil)
+ p.deps = map[string]*ast.Scope{"unsafe": Unsafe, id: p.scope}
+}
+
+
+func (p *gcParser) next() {
+ p.tok = p.scanner.Scan()
+ switch p.tok {
+ case scanner.Ident, scanner.Int, scanner.String:
+ p.lit = p.scanner.TokenText()
+ default:
+ p.lit = ""
+ }
+ if trace {
+ fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
+ }
+}
+
+
+// GcImporter implements the ast.Importer signature.
+func GcImporter(path string) (name string, scope *ast.Scope, err os.Error) {
+ if path == "unsafe" {
+ return path, Unsafe, nil
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ err = r.(importError) // will re-panic if r is not an importError
+ if trace {
+ panic(err) // force a stack trace
+ }
+ }
+ }()
+
+ filename, id := findPkg(path)
+ if filename == "" {
+ err = os.ErrorString("can't find import: " + id)
+ return
+ }
+
+ buf, err := ExportData(filename)
+ if err != nil {
+ return
+ }
+ defer buf.Close()
+
+ if trace {
+ fmt.Printf("importing %s\n", filename)
+ }
+
+ var p gcParser
+ p.init(filename, id, buf)
+ name, scope = p.parseExport()
+
+ return
+}
+
+
+// ----------------------------------------------------------------------------
+// Error handling
+
+// Internal errors are boxed as importErrors.
+type importError struct {
+ pos scanner.Position
+ err os.Error
+}
+
+
+func (e importError) String() string {
+ return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
+}
+
+
+func (p *gcParser) error(err interface{}) {
+ if s, ok := err.(string); ok {
+ err = os.ErrorString(s)
+ }
+ // panic with a runtime.Error if err is not an os.Error
+ panic(importError{p.scanner.Pos(), err.(os.Error)})
+}
+
+
+func (p *gcParser) errorf(format string, args ...interface{}) {
+ p.error(fmt.Sprintf(format, args...))
+}
+
+
+func (p *gcParser) expect(tok int) string {
+ lit := p.lit
+ if p.tok != tok {
+ p.errorf("expected %q, got %q (%q)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
+ }
+ p.next()
+ return lit
+}
+
+
+func (p *gcParser) expectSpecial(tok string) {
+ sep := 'x' // not white space
+ i := 0
+ for i < len(tok) && p.tok == int(tok[i]) && sep > ' ' {
+ sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+ p.next()
+ i++
+ }
+ if i < len(tok) {
+ p.errorf("expected %q, got %q", tok, tok[0:i])
+ }
+}
+
+
+func (p *gcParser) expectKeyword(keyword string) {
+ lit := p.expect(scanner.Ident)
+ if lit != keyword {
+ p.errorf("expected keyword %s, got %q", keyword, lit)
+ }
+}
+
+
+// ----------------------------------------------------------------------------
+// Import declarations
+
+// ImportPath = string_lit .
+//
+func (p *gcParser) parsePkgId() *ast.Scope {
+ id, err := strconv.Unquote(p.expect(scanner.String))
+ if err != nil {
+ p.error(err)
+ }
+
+ scope := p.scope // id == "" stands for the imported package id
+ if id != "" {
+ if scope = p.deps[id]; scope == nil {
+ scope = ast.NewScope(nil)
+ p.deps[id] = scope
+ }
+ }
+
+ return scope
+}
+
+
+// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
+func (p *gcParser) parseDotIdent() string {
+ ident := ""
+ if p.tok != scanner.Int {
+ sep := 'x' // not white space
+ for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
+ ident += p.lit
+ sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
+ p.next()
+ }
+ }
+ if ident == "" {
+ p.expect(scanner.Ident) // use expect() for error handling
+ }
+ return ident
+}
+
+
+// ExportedName = ImportPath "." dotIdentifier .
+//
+func (p *gcParser) parseExportedName(kind ast.ObjKind) *ast.Object {
+ scope := p.parsePkgId()
+ p.expect('.')
+ name := p.parseDotIdent()
+
+ // a type may have been declared before - if it exists
+ // already in the respective package scope, return that
+ // type
+ if kind == ast.Typ {
+ if obj := scope.Lookup(name); obj != nil {
+ assert(obj.Kind == ast.Typ)
+ return obj
+ }
+ }
+
+ // any other object must be a newly declared object -
+ // create it and insert it into the package scope
+ obj := ast.NewObj(kind, name)
+ if scope.Insert(obj) != nil {
+ p.errorf("already declared: %s", obj.Name)
+ }
+
+ // a new type object is a named type and may be referred
+ // to before the underlying type is known - set it up
+ if kind == ast.Typ {
+ obj.Type = &Name{Obj: obj}
+ }
+
+ return obj
+}
+
+
+// ----------------------------------------------------------------------------
+// Types
+
+// BasicType = identifier .
+//
+func (p *gcParser) parseBasicType() Type {
+ id := p.expect(scanner.Ident)
+ obj := Universe.Lookup(id)
+ if obj == nil || obj.Kind != ast.Typ {
+ p.errorf("not a basic type: %s", id) // report the scanned name; obj may be nil here
+ }
+ return obj.Type.(Type)
+}
+
+
+// ArrayType = "[" int_lit "]" Type .
+//
+func (p *gcParser) parseArrayType() Type {
+ // "[" already consumed and lookahead known not to be "]"
+ lit := p.expect(scanner.Int)
+ p.expect(']')
+ elt := p.parseType()
+ n, err := strconv.Atoui64(lit)
+ if err != nil {
+ p.error(err)
+ }
+ return &Array{Len: n, Elt: elt}
+}
+
+
+// MapType = "map" "[" Type "]" Type .
+//
+func (p *gcParser) parseMapType() Type {
+ p.expectKeyword("map")
+ p.expect('[')
+ key := p.parseType()
+ p.expect(']')
+ elt := p.parseType()
+ return &Map{Key: key, Elt: elt}
+}
+
+
+// Name = identifier | "?" .
+//
+func (p *gcParser) parseName() (name string) {
+ switch p.tok {
+ case scanner.Ident:
+ name = p.lit
+ p.next()
+ case '?':
+ // anonymous
+ p.next()
+ default:
+ p.error("name expected")
+ }
+ return
+}
+
+
+// Field = Name Type [ ":" string_lit ] .
+//
+func (p *gcParser) parseField(scope *ast.Scope) {
+ // TODO(gri) The code below is not correct for anonymous fields:
+ // The name is the type name; it should not be empty.
+ name := p.parseName()
+ ftyp := p.parseType()
+ if name == "" {
+ // anonymous field - ftyp must be T or *T and T must be a type name
+ ftyp = Deref(ftyp)
+ if ftyp, ok := ftyp.(*Name); ok {
+ name = ftyp.Obj.Name
+ } else {
+ p.errorf("anonymous field expected")
+ }
+ }
+ if p.tok == ':' {
+ p.next()
+ tag := p.expect(scanner.String)
+ _ = tag // TODO(gri) store tag somewhere
+ }
+ fld := ast.NewObj(ast.Var, name)
+ fld.Type = ftyp
+ scope.Insert(fld)
+}
+
+
+// StructType = "struct" "{" [ FieldList ] "}" .
+// FieldList = Field { ";" Field } .
+//
+func (p *gcParser) parseStructType() Type {
+ p.expectKeyword("struct")
+ p.expect('{')
+ scope := ast.NewScope(nil)
+ if p.tok != '}' {
+ p.parseField(scope)
+ for p.tok == ';' {
+ p.next()
+ p.parseField(scope)
+ }
+ }
+ p.expect('}')
+ return &Struct{}
+}
+
+
+// Parameter = ( identifier | "?" ) [ "..." ] Type .
+//
+func (p *gcParser) parseParameter(scope *ast.Scope, isVariadic *bool) {
+ name := p.parseName()
+ if name == "" {
+ name = "_" // cannot access unnamed identifiers
+ }
+ if isVariadic != nil {
+ if *isVariadic {
+ p.error("... not on final argument")
+ }
+ if p.tok == '.' {
+ p.expectSpecial("...")
+ *isVariadic = true
+ }
+ }
+ ptyp := p.parseType()
+ par := ast.NewObj(ast.Var, name)
+ par.Type = ptyp
+ scope.Insert(par)
+}
+
+
+// Parameters = "(" [ ParameterList ] ")" .
+// ParameterList = { Parameter "," } Parameter .
+//
+func (p *gcParser) parseParameters(scope *ast.Scope, isVariadic *bool) {
+ p.expect('(')
+ if p.tok != ')' {
+ p.parseParameter(scope, isVariadic)
+ for p.tok == ',' {
+ p.next()
+ p.parseParameter(scope, isVariadic)
+ }
+ }
+ p.expect(')')
+}
+
+
+// Signature = Parameters [ Result ] .
+// Result = Type | Parameters .
+//
+func (p *gcParser) parseSignature(scope *ast.Scope, isVariadic *bool) {
+ p.parseParameters(scope, isVariadic)
+
+ // optional result type
+ switch p.tok {
+ case scanner.Ident, scanner.String, '[', '*', '<':
+ // single, unnamed result
+ result := ast.NewObj(ast.Var, "_")
+ result.Type = p.parseType()
+ scope.Insert(result)
+ case '(':
+ // named or multiple result(s)
+ p.parseParameters(scope, nil)
+ }
+}
+
+
+// FuncType = "func" Signature .
+//
+func (p *gcParser) parseFuncType() Type {
+ // "func" already consumed
+ scope := ast.NewScope(nil)
+ isVariadic := false
+ p.parseSignature(scope, &isVariadic)
+ return &Func{IsVariadic: isVariadic}
+}
+
+
+// MethodSpec = identifier Signature .
+//
+func (p *gcParser) parseMethodSpec(scope *ast.Scope) {
+ p.expect(scanner.Ident)
+ isVariadic := false
+ p.parseSignature(scope, &isVariadic)
+}
+
+
+// InterfaceType = "interface" "{" [ MethodList ] "}" .
+// MethodList = MethodSpec { ";" MethodSpec } .
+//
+func (p *gcParser) parseInterfaceType() Type {
+ p.expectKeyword("interface")
+ p.expect('{')
+ scope := ast.NewScope(nil)
+ if p.tok != '}' {
+ p.parseMethodSpec(scope)
+ for p.tok == ';' {
+ p.next()
+ p.parseMethodSpec(scope)
+ }
+ }
+ p.expect('}')
+ return &Interface{}
+}
+
+
+// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
+//
+func (p *gcParser) parseChanType() Type {
+ dir := ast.SEND | ast.RECV
+ if p.tok == scanner.Ident {
+ p.expectKeyword("chan")
+ if p.tok == '<' {
+ p.expectSpecial("<-")
+ dir = ast.SEND
+ }
+ } else {
+ p.expectSpecial("<-")
+ p.expectKeyword("chan")
+ dir = ast.RECV
+ }
+ elt := p.parseType()
+ return &Chan{Dir: dir, Elt: elt}
+}
+
+
+// Type =
+// BasicType | TypeName | ArrayType | SliceType | StructType |
+// PointerType | FuncType | InterfaceType | MapType | ChanType |
+// "(" Type ")" .
+// BasicType = ident .
+// TypeName = ExportedName .
+// SliceType = "[" "]" Type .
+// PointerType = "*" Type .
+//
+func (p *gcParser) parseType() Type {
+ switch p.tok {
+ case scanner.Ident:
+ switch p.lit {
+ default:
+ return p.parseBasicType()
+ case "struct":
+ return p.parseStructType()
+ case "func":
+ p.next() // parseFuncType assumes "func" is already consumed
+ return p.parseFuncType()
+ case "interface":
+ return p.parseInterfaceType()
+ case "map":
+ return p.parseMapType()
+ case "chan":
+ return p.parseChanType()
+ }
+ case scanner.String:
+ // TypeName
+ return p.parseExportedName(ast.Typ).Type.(Type)
+ case '[':
+ p.next() // look ahead
+ if p.tok == ']' {
+ // SliceType
+ p.next()
+ return &Slice{Elt: p.parseType()}
+ }
+ return p.parseArrayType()
+ case '*':
+ // PointerType
+ p.next()
+ return &Pointer{Base: p.parseType()}
+ case '<':
+ return p.parseChanType()
+ case '(':
+ // "(" Type ")"
+ p.next()
+ typ := p.parseType()
+ p.expect(')')
+ return typ
+ }
+ p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
+ return nil
+}
+
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// ImportDecl = "import" identifier string_lit .
+//
+func (p *gcParser) parseImportDecl() {
+ p.expectKeyword("import")
+ // The identifier has no semantic meaning in the import data.
+ // It exists so that error messages can print the real package
+ // name: binary.ByteOrder instead of "encoding/binary".ByteOrder.
+ // TODO(gri): Save package id -> package name mapping.
+ p.expect(scanner.Ident)
+ p.parsePkgId()
+}
+
+
+// int_lit = [ "+" | "-" ] { "0" ... "9" } .
+//
+func (p *gcParser) parseInt() (sign, val string) {
+ switch p.tok {
+ case '-':
+ p.next()
+ sign = "-"
+ case '+':
+ p.next()
+ }
+ val = p.expect(scanner.Int)
+ return
+}
+
+
+// number = int_lit [ "p" int_lit ] .
+//
+func (p *gcParser) parseNumber() Const {
+ // mantissa
+ sign, val := p.parseInt()
+ mant, ok := new(big.Int).SetString(sign+val, 10)
+ assert(ok)
+
+ if p.lit == "p" {
+ // exponent (base 2)
+ p.next()
+ sign, val = p.parseInt()
+ exp, err := strconv.Atoui(val)
+ if err != nil {
+ p.error(err)
+ }
+ if sign == "-" {
+ denom := big.NewInt(1)
+ denom.Lsh(denom, exp)
+ return Const{new(big.Rat).SetFrac(mant, denom)}
+ }
+ if exp > 0 {
+ mant.Lsh(mant, exp)
+ }
+ return Const{new(big.Rat).SetInt(mant)}
+ }
+
+ return Const{mant}
+}
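
The "p" notation encodes a base-2 exponent, so 5p-3 denotes 5·2⁻³. A standalone check of the two branches above, using math/big:

	package main

	import (
		"fmt"
		"math/big"
	)

	func main() {
		// negative exponent: mant / 2^exp, as in the denom.Lsh branch
		mant := big.NewInt(5)
		denom := big.NewInt(1)
		denom.Lsh(denom, 3) // 2^3 = 8
		fmt.Println(new(big.Rat).SetFrac(mant, denom).FloatString(4)) // 0.6250

		// positive exponent: mant << exp, as in the mant.Lsh branch
		m := big.NewInt(5)
		m.Lsh(m, 3)
		fmt.Println(m) // 40
	}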
+
+
+// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
+// Literal = bool_lit | int_lit | float_lit | complex_lit | string_lit .
+// bool_lit = "true" | "false" .
+// complex_lit = "(" float_lit "+" float_lit ")" .
+// string_lit = `"` { unicode_char } `"` .
+//
+func (p *gcParser) parseConstDecl() {
+ p.expectKeyword("const")
+ obj := p.parseExportedName(ast.Con)
+ var x Const
+ var typ Type
+ if p.tok != '=' {
+ obj.Type = p.parseType()
+ }
+ p.expect('=')
+ switch p.tok {
+ case scanner.Ident:
+ // bool_lit
+ if p.lit != "true" && p.lit != "false" {
+ p.error("expected true or false")
+ }
+ x = Const{p.lit == "true"}
+ typ = Bool.Underlying
+ p.next()
+ case '-', scanner.Int:
+ // int_lit
+ x = p.parseNumber()
+ typ = Int.Underlying
+ if _, ok := x.val.(*big.Rat); ok {
+ typ = Float64.Underlying
+ }
+ case '(':
+ // complex_lit
+ p.next()
+ re := p.parseNumber()
+ p.expect('+')
+ im := p.parseNumber()
+ p.expect(')')
+ x = Const{cmplx{re.val.(*big.Rat), im.val.(*big.Rat)}}
+ typ = Complex128.Underlying
+ case scanner.String:
+ // string_lit
+ x = MakeConst(token.STRING, p.lit)
+ p.next()
+ typ = String.Underlying
+ default:
+ p.error("expected literal")
+ }
+ if obj.Type == nil {
+ obj.Type = typ
+ }
+ _ = x // TODO(gri) store x somewhere
+}
+
+
+// TypeDecl = "type" ExportedName Type .
+//
+func (p *gcParser) parseTypeDecl() {
+ p.expectKeyword("type")
+ obj := p.parseExportedName(ast.Typ)
+ typ := p.parseType()
+
+ name := obj.Type.(*Name)
+ assert(name.Underlying == nil)
+ assert(Underlying(typ) == typ)
+ name.Underlying = typ
+}
+
+
+// VarDecl = "var" ExportedName Type .
+//
+func (p *gcParser) parseVarDecl() {
+ p.expectKeyword("var")
+ obj := p.parseExportedName(ast.Var)
+ obj.Type = p.parseType()
+}
+
+
+// FuncDecl = "func" ExportedName Signature .
+//
+func (p *gcParser) parseFuncDecl() {
+ // "func" already consumed
+ obj := p.parseExportedName(ast.Fun)
+ obj.Type = p.parseFuncType()
+}
+
+
+// MethodDecl = "func" Receiver identifier Signature .
+// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
+//
+func (p *gcParser) parseMethodDecl() {
+ // "func" already consumed
+ scope := ast.NewScope(nil) // method scope
+ p.expect('(')
+ p.parseParameter(scope, nil) // receiver
+ p.expect(')')
+ p.expect(scanner.Ident)
+ isVariadic := false
+ p.parseSignature(scope, &isVariadic)
+
+}
+
+
+// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
+//
+func (p *gcParser) parseDecl() {
+ switch p.lit {
+ case "import":
+ p.parseImportDecl()
+ case "const":
+ p.parseConstDecl()
+ case "type":
+ p.parseTypeDecl()
+ case "var":
+ p.parseVarDecl()
+ case "func":
+ p.next() // look ahead
+ if p.tok == '(' {
+ p.parseMethodDecl()
+ } else {
+ p.parseFuncDecl()
+ }
+ }
+ p.expect('\n')
+}
+
+
+// ----------------------------------------------------------------------------
+// Export
+
+// Export = "PackageClause { Decl } "$$" .
+// PackageClause = "package" identifier [ "safe" ] "\n" .
+//
+func (p *gcParser) parseExport() (string, *ast.Scope) {
+ p.expectKeyword("package")
+ name := p.expect(scanner.Ident)
+ if p.tok != '\n' {
+ // A package is safe if it was compiled with the -u flag,
+ // which disables the unsafe package.
+ // TODO(gri) remember "safe" package
+ p.expectKeyword("safe")
+ }
+ p.expect('\n')
+
+ for p.tok != '$' && p.tok != scanner.EOF {
+ p.parseDecl()
+ }
+
+ if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
+ // don't call next()/expect() since reading past the
+ // export data may cause scanner errors (e.g. NUL chars)
+ p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
+ }
+
+ if n := p.scanner.ErrorCount; n != 0 {
+ p.errorf("expected no scanner errors, got %d", n)
+ }
+
+ return name, p.scope
+}
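
A hypothetical use of GcImporter, looking up an exported name in the imported package's scope (import path and object name chosen for illustration):

	name, scope, err := GcImporter("go/token")
	if err != nil {
		// handle the import error
	}
	fmt.Println(name) // "token"
	if obj := scope.Lookup("Pos"); obj != nil {
		fmt.Println(obj.Kind, obj.Name) // a type object for token.Pos
	}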
diff --git a/src/pkg/go/types/gcimporter_test.go b/src/pkg/go/types/gcimporter_test.go
new file mode 100644
index 000000000..556e761df
--- /dev/null
+++ b/src/pkg/go/types/gcimporter_test.go
@@ -0,0 +1,111 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "exec"
+ "io/ioutil"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+)
+
+
+var gcName, gcPath string // compiler name and path
+
+func init() {
+ // determine compiler
+ switch runtime.GOARCH {
+ case "386":
+ gcName = "8g"
+ case "amd64":
+ gcName = "6g"
+ case "arm":
+ gcName = "5g"
+ default:
+ gcName = "unknown-GOARCH-compiler"
+ gcPath = gcName
+ return
+ }
+ gcPath, _ = exec.LookPath(gcName)
+}
+
+
+func compile(t *testing.T, dirname, filename string) {
+ cmd, err := exec.Run(gcPath, []string{gcPath, filename}, nil, dirname, exec.DevNull, exec.Pipe, exec.MergeWithStdout)
+ if err != nil {
+ t.Errorf("%s %s failed: %s", gcName, filename, err)
+ return
+ }
+ defer cmd.Close()
+
+ msg, err := cmd.Wait(0)
+ if err != nil {
+ t.Errorf("%s %s failed: %s", gcName, filename, err)
+ return
+ }
+
+ if !msg.Exited() || msg.ExitStatus() != 0 {
+ t.Errorf("%s %s failed: exit status = %d", gcName, filename, msg.ExitStatus())
+ output, _ := ioutil.ReadAll(cmd.Stdout)
+ t.Log(string(output))
+ }
+}
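+
+// compile invokes the architecture-specific gc compiler chosen in init
+// above (8g, 6g, or 5g); on amd64, for example, it turns
+// testdata/exports.go into testdata/exports.6 for TestGcImport to import.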
+
+
+func testPath(t *testing.T, path string) bool {
+ _, _, err := GcImporter(path)
+ if err != nil {
+ t.Errorf("testPath(%s): %s", path, err)
+ return false
+ }
+ return true
+}
+
+
+const maxTime = 3e9 // maximum allotted testing time in ns
+
+func testDir(t *testing.T, dir string, endTime int64) (nimports int) {
+ dirname := filepath.Join(pkgRoot, dir)
+ list, err := ioutil.ReadDir(dirname)
+ if err != nil {
+ t.Errorf("testDir(%s): %s", dirname, err)
+ }
+ for _, f := range list {
+ if time.Nanoseconds() >= endTime {
+ t.Log("testing time used up")
+ return
+ }
+ switch {
+ case f.IsRegular():
+ // try extensions
+ for _, ext := range pkgExts {
+ if strings.HasSuffix(f.Name, ext) {
+ name := f.Name[0 : len(f.Name)-len(ext)] // remove extension
+ if testPath(t, filepath.Join(dir, name)) {
+ nimports++
+ }
+ }
+ }
+ case f.IsDirectory():
+ nimports += testDir(t, filepath.Join(dir, f.Name), endTime)
+ }
+ }
+ return
+}
+
+
+func TestGcImport(t *testing.T) {
+ compile(t, "testdata", "exports.go")
+
+ nimports := 0
+ if testPath(t, "./testdata/exports") {
+ nimports++
+ }
+ nimports += testDir(t, "", time.Nanoseconds()+maxTime) // installed packages
+ t.Logf("tested %d imports", nimports)
+}
diff --git a/src/pkg/go/types/testdata/exports.go b/src/pkg/go/types/testdata/exports.go
new file mode 100644
index 000000000..13efe012a
--- /dev/null
+++ b/src/pkg/go/types/testdata/exports.go
@@ -0,0 +1,89 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is used to generate a .6 object file which
+// serves as a test file for gcimporter_test.go.
+
+package exports
+
+import (
+ "go/ast"
+)
+
+
+const (
+ C0 int = 0
+ C1 = 3.14159265
+ C2 = 2.718281828i
+ C3 = -123.456e-789
+ C4 = +123.456E+789
+ C5 = 1234i
+ C6 = "foo\n"
+ C7 = `bar\n`
+)
+
+
+type (
+ T1 int
+ T2 [10]int
+ T3 []int
+ T4 *int
+ T5 chan int
+ T6a chan<- int
+ T6b chan (<-chan int)
+ T6c chan<- (chan int)
+ T7 <-chan *ast.File
+ T8 struct{}
+ T9 struct {
+ a int
+ b, c float32
+ d []string "tag"
+ }
+ T10 struct {
+ T8
+ T9
+ _ *T10
+ }
+ T11 map[int]string
+ T12 interface{}
+ T13 interface {
+ m1()
+ m2(int) float32
+ }
+ T14 interface {
+ T12
+ T13
+ m3(x ...struct{}) []T9
+ }
+ T15 func()
+ T16 func(int)
+ T17 func(x int)
+ T18 func() float32
+ T19 func() (x float32)
+ T20 func(...interface{})
+ T21 struct{ next *T21 }
+ T22 struct{ link *T23 }
+ T23 struct{ link *T22 }
+ T24 *T24
+ T25 *T26
+ T26 *T27
+ T27 *T25
+ T28 func(T28) T28
+)
+
+
+var (
+ V0 int
+ V1 = -991.0
+)
+
+
+func F1() {}
+func F2(x int) {}
+func F3() int { return 0 }
+func F4() float32 { return 0 }
+func F5(a, b, c int, u, v, w struct{ x, y T1 }, more ...interface{}) (p, q, r chan<- T10)
+
+
+func (p *T1) M1()
diff --git a/src/pkg/go/types/types.go b/src/pkg/go/types/types.go
new file mode 100644
index 000000000..72384e121
--- /dev/null
+++ b/src/pkg/go/types/types.go
@@ -0,0 +1,122 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// PACKAGE UNDER CONSTRUCTION. ANY AND ALL PARTS MAY CHANGE.
+// The types package declares the types used to represent Go types.
+//
+package types
+
+import "go/ast"
+
+
+// All types implement the Type interface.
+type Type interface {
+ isType()
+}
+
+
+// All concrete types embed ImplementsType, which
+// ensures that all types implement the Type interface.
+type ImplementsType struct{}
+
+func (t *ImplementsType) isType() {}
+
+
+// A Basic represents an (unnamed) basic type.
+type Basic struct {
+ ImplementsType
+ // TODO(gri) need a field specifying the exact basic type
+}
+
+
+// An Array represents an array type [Len]Elt.
+type Array struct {
+ ImplementsType
+ Len uint64
+ Elt Type
+}
+
+
+// A Slice represents a slice type []Elt.
+type Slice struct {
+ ImplementsType
+ Elt Type
+}
+
+
+// A Struct represents a struct type struct{...}.
+type Struct struct {
+ ImplementsType
+ // TODO(gri) need to remember fields.
+}
+
+
+// A Pointer represents a pointer type *Base.
+type Pointer struct {
+ ImplementsType
+ Base Type
+}
+
+
+// A Func represents a function type func(...) (...).
+type Func struct {
+ ImplementsType
+ IsVariadic bool
+ // TODO(gri) need to remember parameters.
+}
+
+
+// An Interface represents an interface type interface{...}.
+type Interface struct {
+ ImplementsType
+ // TODO(gri) need to remember methods.
+}
+
+
+// A Map represents a map type map[Key]Elt.
+type Map struct {
+ ImplementsType
+ Key, Elt Type
+}
+
+
+// A Chan represents a channel type chan Elt, <-chan Elt, or chan<- Elt.
+type Chan struct {
+ ImplementsType
+ Dir ast.ChanDir
+ Elt Type
+}
+
+
+// A Name represents a named type as declared in a type declaration.
+type Name struct {
+ ImplementsType
+ Underlying Type // nil if not fully declared
+ Obj *ast.Object // corresponding declared object
+ // TODO(gri) need to remember fields and methods.
+}
+
+
+// If typ is a pointer type, Deref returns the pointer's base type;
+// otherwise it returns typ.
+func Deref(typ Type) Type {
+ if typ, ok := typ.(*Pointer); ok {
+ return typ.Base
+ }
+ return typ
+}
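+
+// For example, Deref(&Pointer{Base: t}) yields t, while Deref(t) for any
+// non-pointer type t yields t unchanged.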
+
+
+// Underlying returns the underlying type of a type.
+func Underlying(typ Type) Type {
+ if typ, ok := typ.(*Name); ok {
+ utyp := typ.Underlying
+ if _, ok := utyp.(*Basic); ok {
+ return typ
+ }
+ return utyp
+ }
+ return typ
+}
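+
+// Thus, for a *Name declared as `type T []int`, Underlying yields the
+// *Slice, while for a *Name whose underlying type is a *Basic (the
+// predeclared types set up in universe.go) it returns the *Name itself,
+// keeping basic types identifiable by name.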
diff --git a/src/pkg/go/types/universe.go b/src/pkg/go/types/universe.go
new file mode 100644
index 000000000..2a54a8ac1
--- /dev/null
+++ b/src/pkg/go/types/universe.go
@@ -0,0 +1,113 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// FILE UNDER CONSTRUCTION. ANY AND ALL PARTS MAY CHANGE.
+// This file implements the universe and unsafe package scopes.
+
+package types
+
+import "go/ast"
+
+
+var (
+ scope, // current scope to use for initialization
+ Universe,
+ Unsafe *ast.Scope
+)
+
+
+func define(kind ast.ObjKind, name string) *ast.Object {
+ obj := ast.NewObj(kind, name)
+ if scope.Insert(obj) != nil {
+ panic("types internal error: double declaration")
+ }
+ return obj
+}
+
+
+func defType(name string) *Name {
+ obj := define(ast.Typ, name)
+ typ := &Name{Underlying: &Basic{}, Obj: obj}
+ obj.Type = typ
+ return typ
+}
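+
+// Every predeclared type is therefore a *Name wrapping an anonymous
+// *Basic; this is exactly the case that Underlying in types.go treats
+// specially.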
+
+
+func defConst(name string) {
+ obj := define(ast.Con, name)
+ _ = obj // TODO(gri) fill in other properties
+}
+
+
+func defFun(name string) {
+ obj := define(ast.Fun, name)
+ _ = obj // TODO(gri) fill in other properties
+}
+
+
+var (
+ Bool,
+ Int,
+ Float64,
+ Complex128,
+ String *Name
+)
+
+
+func init() {
+ Universe = ast.NewScope(nil)
+ scope = Universe
+
+ Bool = defType("bool")
+ defType("byte") // TODO(gri) should be an alias for uint8
+ defType("complex64")
+ Complex128 = defType("complex128")
+ defType("float32")
+ Float64 = defType("float64")
+ defType("int8")
+ defType("int16")
+ defType("int32")
+ defType("int64")
+ String = defType("string")
+ defType("uint8")
+ defType("uint16")
+ defType("uint32")
+ defType("uint64")
+ Int = defType("int")
+ defType("uint")
+ defType("uintptr")
+
+ defConst("true")
+ defConst("false")
+ defConst("iota")
+ defConst("nil")
+
+ defFun("append")
+ defFun("cap")
+ defFun("close")
+ defFun("complex")
+ defFun("copy")
+ defFun("imag")
+ defFun("len")
+ defFun("make")
+ defFun("new")
+ defFun("panic")
+ defFun("print")
+ defFun("println")
+ defFun("real")
+ defFun("recover")
+
+ Unsafe = ast.NewScope(nil)
+ scope = Unsafe
+ defType("Pointer")
+
+ defFun("Alignof")
+ defFun("New")
+ defFun("NewArray")
+ defFun("Offsetof")
+ defFun("Reflect")
+ defFun("Sizeof")
+ defFun("Typeof")
+ defFun("Unreflect")
+}
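+
+// Usage sketch (relying on ast.Scope's Lookup method): a predeclared
+// identifier resolves against the universe scope, e.g.
+//
+//	if obj := Universe.Lookup("int"); obj != nil {
+//		typ := obj.Type.(*Name) // the *Name installed by defType above
+//		_ = typ
+//	}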