Diffstat (limited to 'src/pkg/go')
48 files changed, 204 insertions, 927 deletions
diff --git a/src/pkg/go/ast/ast.go b/src/pkg/go/ast/ast.go index b1c7d4ab1..22bd5ee22 100644 --- a/src/pkg/go/ast/ast.go +++ b/src/pkg/go/ast/ast.go @@ -13,7 +13,6 @@ import ( "utf8" ) - // ---------------------------------------------------------------------------- // Interfaces // @@ -31,35 +30,30 @@ import ( // That position information is needed to properly position comments // when printing the construct. - // All node types implement the Node interface. type Node interface { Pos() token.Pos // position of first character belonging to the node End() token.Pos // position of first character immediately after the node } - // All expression nodes implement the Expr interface. type Expr interface { Node exprNode() } - // All statement nodes implement the Stmt interface. type Stmt interface { Node stmtNode() } - // All declaration nodes implement the Decl interface. type Decl interface { Node declNode() } - // ---------------------------------------------------------------------------- // Comments @@ -69,11 +63,9 @@ type Comment struct { Text string // comment text (excluding '\n' for //-style comments) } - func (c *Comment) Pos() token.Pos { return c.Slash } func (c *Comment) End() token.Pos { return token.Pos(int(c.Slash) + len(c.Text)) } - // A CommentGroup represents a sequence of comments // with no other tokens and no empty lines between. // @@ -81,11 +73,9 @@ type CommentGroup struct { List []*Comment // len(List) > 0 } - func (g *CommentGroup) Pos() token.Pos { return g.List[0].Pos() } func (g *CommentGroup) End() token.Pos { return g.List[len(g.List)-1].End() } - // ---------------------------------------------------------------------------- // Expressions and types @@ -101,7 +91,6 @@ type Field struct { Comment *CommentGroup // line comments; or nil } - func (f *Field) Pos() token.Pos { if len(f.Names) > 0 { return f.Names[0].Pos() @@ -109,7 +98,6 @@ func (f *Field) Pos() token.Pos { return f.Type.Pos() } - func (f *Field) End() token.Pos { if f.Tag != nil { return f.Tag.End() @@ -117,7 +105,6 @@ func (f *Field) End() token.Pos { return f.Type.End() } - // A FieldList represents a list of Fields, enclosed by parentheses or braces. type FieldList struct { Opening token.Pos // position of opening parenthesis/brace, if any @@ -125,7 +112,6 @@ type FieldList struct { Closing token.Pos // position of closing parenthesis/brace, if any } - func (f *FieldList) Pos() token.Pos { if f.Opening.IsValid() { return f.Opening @@ -138,7 +124,6 @@ func (f *FieldList) Pos() token.Pos { return token.NoPos } - func (f *FieldList) End() token.Pos { if f.Closing.IsValid() { return f.Closing + 1 @@ -151,7 +136,6 @@ func (f *FieldList) End() token.Pos { return token.NoPos } - // NumFields returns the number of (named and anonymous fields) in a FieldList. func (f *FieldList) NumFields() int { n := 0 @@ -167,7 +151,6 @@ func (f *FieldList) NumFields() int { return n } - // An expression is represented by a tree consisting of one // or more of the following concrete expression nodes. // @@ -298,7 +281,6 @@ type ( } ) - // The direction of a channel type is indicated by one // of the following constants. // @@ -309,7 +291,6 @@ const ( RECV ) - // A type is represented by a tree consisting of one // or more of the following type-specific expression // nodes. @@ -360,7 +341,6 @@ type ( } ) - // Pos and End implementations for expression/type nodes. 
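As an aside on the Pos/End contract documented above: every node reports its extent as token.Pos values, which only become file/line/column information through a token.FileSet. A minimal sketch, using the parser API as it appears later in this diff (the file name and source text are placeholders):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"log"
)

const src = `package p
func Exported() int { return 1 }
func hidden()       {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range f.Decls {
		if fn, ok := d.(*ast.FuncDecl); ok {
			// Pos/End delimit the node; FileSet.Position resolves them to file:line:column.
			fmt.Printf("%s: %v - %v\n", fn.Name.Name, fset.Position(fn.Pos()), fset.Position(fn.End()))
		}
	}
}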
// func (x *BadExpr) Pos() token.Pos { return x.From } @@ -391,7 +371,6 @@ func (x *InterfaceType) Pos() token.Pos { return x.Interface } func (x *MapType) Pos() token.Pos { return x.Map } func (x *ChanType) Pos() token.Pos { return x.Begin } - func (x *BadExpr) End() token.Pos { return x.To } func (x *Ident) End() token.Pos { return token.Pos(int(x.NamePos) + len(x.Name)) } func (x *Ellipsis) End() token.Pos { @@ -430,7 +409,6 @@ func (x *InterfaceType) End() token.Pos { return x.Methods.End() } func (x *MapType) End() token.Pos { return x.Value.End() } func (x *ChanType) End() token.Pos { return x.Value.End() } - // exprNode() ensures that only expression/type nodes can be // assigned to an ExprNode. // @@ -458,7 +436,6 @@ func (x *InterfaceType) exprNode() {} func (x *MapType) exprNode() {} func (x *ChanType) exprNode() {} - // ---------------------------------------------------------------------------- // Convenience functions for Idents @@ -469,7 +446,6 @@ var noPos token.Pos // func NewIdent(name string) *Ident { return &Ident{noPos, name, nil} } - // IsExported returns whether name is an exported Go symbol // (i.e., whether it begins with an uppercase letter). // @@ -478,13 +454,11 @@ func IsExported(name string) bool { return unicode.IsUpper(ch) } - // IsExported returns whether id is an exported Go symbol // (i.e., whether it begins with an uppercase letter). // func (id *Ident) IsExported() bool { return IsExported(id.Name) } - func (id *Ident) String() string { if id != nil { return id.Name @@ -492,7 +466,6 @@ func (id *Ident) String() string { return "<nil>" } - // ---------------------------------------------------------------------------- // Statements @@ -660,7 +633,6 @@ type ( } ) - // Pos and End implementations for statement nodes. // func (s *BadStmt) Pos() token.Pos { return s.From } @@ -685,7 +657,6 @@ func (s *SelectStmt) Pos() token.Pos { return s.Select } func (s *ForStmt) Pos() token.Pos { return s.For } func (s *RangeStmt) Pos() token.Pos { return s.For } - func (s *BadStmt) End() token.Pos { return s.To } func (s *DeclStmt) End() token.Pos { return s.Decl.End() } func (s *EmptyStmt) End() token.Pos { @@ -737,7 +708,6 @@ func (s *SelectStmt) End() token.Pos { return s.Body.End() } func (s *ForStmt) End() token.Pos { return s.Body.End() } func (s *RangeStmt) End() token.Pos { return s.Body.End() } - // stmtNode() ensures that only statement nodes can be // assigned to a StmtNode. // @@ -763,7 +733,6 @@ func (s *SelectStmt) stmtNode() {} func (s *ForStmt) stmtNode() {} func (s *RangeStmt) stmtNode() {} - // ---------------------------------------------------------------------------- // Declarations @@ -805,7 +774,6 @@ type ( } ) - // Pos and End implementations for spec nodes. // func (s *ImportSpec) Pos() token.Pos { @@ -817,7 +785,6 @@ func (s *ImportSpec) Pos() token.Pos { func (s *ValueSpec) Pos() token.Pos { return s.Names[0].Pos() } func (s *TypeSpec) Pos() token.Pos { return s.Name.Pos() } - func (s *ImportSpec) End() token.Pos { return s.Path.End() } func (s *ValueSpec) End() token.Pos { if n := len(s.Values); n > 0 { @@ -830,7 +797,6 @@ func (s *ValueSpec) End() token.Pos { } func (s *TypeSpec) End() token.Pos { return s.Type.End() } - // specNode() ensures that only spec nodes can be // assigned to a Spec. // @@ -838,7 +804,6 @@ func (s *ImportSpec) specNode() {} func (s *ValueSpec) specNode() {} func (s *TypeSpec) specNode() {} - // A declaration is represented by one of the following declaration nodes. 
// type ( @@ -880,14 +845,12 @@ type ( } ) - // Pos and End implementations for declaration nodes. // func (d *BadDecl) Pos() token.Pos { return d.From } func (d *GenDecl) Pos() token.Pos { return d.TokPos } func (d *FuncDecl) Pos() token.Pos { return d.Type.Pos() } - func (d *BadDecl) End() token.Pos { return d.To } func (d *GenDecl) End() token.Pos { if d.Rparen.IsValid() { @@ -902,7 +865,6 @@ func (d *FuncDecl) End() token.Pos { return d.Type.End() } - // declNode() ensures that only declaration nodes can be // assigned to a DeclNode. // @@ -910,7 +872,6 @@ func (d *BadDecl) declNode() {} func (d *GenDecl) declNode() {} func (d *FuncDecl) declNode() {} - // ---------------------------------------------------------------------------- // Files and packages @@ -931,7 +892,6 @@ type File struct { Comments []*CommentGroup // list of all comments in the source file } - func (f *File) Pos() token.Pos { return f.Package } func (f *File) End() token.Pos { if n := len(f.Decls); n > 0 { @@ -940,7 +900,6 @@ func (f *File) End() token.Pos { return f.Name.End() } - // A Package node represents a set of source files // collectively building a Go package. // @@ -951,6 +910,5 @@ type Package struct { Files map[string]*File // Go source files by filename } - func (p *Package) Pos() token.Pos { return token.NoPos } func (p *Package) End() token.Pos { return token.NoPos } diff --git a/src/pkg/go/ast/filter.go b/src/pkg/go/ast/filter.go index 0907fd53d..26733430d 100644 --- a/src/pkg/go/ast/filter.go +++ b/src/pkg/go/ast/filter.go @@ -20,7 +20,6 @@ func identListExports(list []*Ident) []*Ident { return list[0:j] } - // fieldName assumes that x is the type of an anonymous field and // returns the corresponding field name. If x is not an acceptable // anonymous field, the result is nil. @@ -39,7 +38,6 @@ func fieldName(x Expr) *Ident { return nil } - func fieldListExports(fields *FieldList) (removedFields bool) { if fields == nil { return @@ -78,7 +76,6 @@ func fieldListExports(fields *FieldList) (removedFields bool) { return } - func paramListExports(fields *FieldList) { if fields == nil { return @@ -88,7 +85,6 @@ func paramListExports(fields *FieldList) { } } - func typeExports(typ Expr) { switch t := typ.(type) { case *ArrayType: @@ -112,7 +108,6 @@ func typeExports(typ Expr) { } } - func specExports(spec Spec) bool { switch s := spec.(type) { case *ValueSpec: @@ -130,7 +125,6 @@ func specExports(spec Spec) bool { return false } - func specListExports(list []Spec) []Spec { j := 0 for _, s := range list { @@ -142,7 +136,6 @@ func specListExports(list []Spec) []Spec { return list[0:j] } - func declExports(decl Decl) bool { switch d := decl.(type) { case *GenDecl: @@ -155,7 +148,6 @@ func declExports(decl Decl) bool { return false } - // FileExports trims the AST for a Go source file in place such that only // exported nodes remain: all top-level identifiers which are not exported // and their associated information (such as type, initial value, or function @@ -178,7 +170,6 @@ func FileExports(src *File) bool { return j > 0 } - // PackageExports trims the AST for a Go package in place such that only // exported nodes remain. The pkg.Files list is not changed, so that file // names and top-level package comments don't get lost. 
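FileExports and PackageExports, documented above, trim an AST in place so that only the exported API remains. A hedged usage sketch (the source text is a placeholder; ast.Print is the helper from print.go in this same diff):

package main

import (
	"go/ast"
	"go/parser"
	"go/token"
	"log"
)

const src = `package p
type T struct{ Exported, hidden int }
func Public()  {}
func private() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	if ast.FileExports(f) {
		// Only Public and T (with its unexported field removed) survive the trim.
		ast.Print(fset, f)
	}
}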
@@ -196,7 +187,6 @@ func PackageExports(pkg *Package) bool { return hasExports } - // ---------------------------------------------------------------------------- // General filtering @@ -213,7 +203,6 @@ func filterIdentList(list []*Ident, f Filter) []*Ident { return list[0:j] } - func filterFieldList(fields *FieldList, filter Filter) (removedFields bool) { if fields == nil { return false @@ -246,7 +235,6 @@ func filterFieldList(fields *FieldList, filter Filter) (removedFields bool) { return } - func filterSpec(spec Spec, f Filter) bool { switch s := spec.(type) { case *ValueSpec: @@ -272,7 +260,6 @@ func filterSpec(spec Spec, f Filter) bool { return false } - func filterSpecList(list []Spec, f Filter) []Spec { j := 0 for _, s := range list { @@ -284,7 +271,6 @@ func filterSpecList(list []Spec, f Filter) []Spec { return list[0:j] } - // FilterDecl trims the AST for a Go declaration in place by removing // all names (including struct field and interface method names, but // not from parameter lists) that don't pass through the filter f. @@ -303,7 +289,6 @@ func FilterDecl(decl Decl, f Filter) bool { return false } - // FilterFile trims the AST for a Go file in place by removing all // names from top-level declarations (including struct field and // interface method names, but not from parameter lists) that don't @@ -326,7 +311,6 @@ func FilterFile(src *File, f Filter) bool { return j > 0 } - // FilterPackage trims the AST for a Go package in place by removing all // names from top-level declarations (including struct field and // interface method names, but not from parameter lists) that don't @@ -348,7 +332,6 @@ func FilterPackage(pkg *Package, f Filter) bool { return hasDecls } - // ---------------------------------------------------------------------------- // Merging of package files @@ -368,7 +351,6 @@ const ( // var separator = &Comment{noPos, "//"} - // MergePackageFiles creates a file AST by merging the ASTs of the // files belonging to a package. The mode flags control merging behavior. // diff --git a/src/pkg/go/ast/print.go b/src/pkg/go/ast/print.go index 81e1da1d0..62a30481d 100644 --- a/src/pkg/go/ast/print.go +++ b/src/pkg/go/ast/print.go @@ -14,11 +14,9 @@ import ( "reflect" ) - // A FieldFilter may be provided to Fprint to control the output. type FieldFilter func(name string, value reflect.Value) bool - // NotNilFilter returns true for field values that are not nil; // it returns false otherwise. func NotNilFilter(_ string, v reflect.Value) bool { @@ -29,7 +27,6 @@ func NotNilFilter(_ string, v reflect.Value) bool { return true } - // Fprint prints the (sub-)tree starting at AST node x to w. // If fset != nil, position information is interpreted relative // to that file set. Otherwise positions are printed as integer @@ -68,14 +65,12 @@ func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (n i return } - // Print prints x to standard output, skipping nil fields. // Print(fset, x) is the same as Fprint(os.Stdout, fset, x, NotNilFilter). func Print(fset *token.FileSet, x interface{}) (int, os.Error) { return Fprint(os.Stdout, fset, x, NotNilFilter) } - type printer struct { output io.Writer fset *token.FileSet @@ -87,7 +82,6 @@ type printer struct { line int // current line number } - var indent = []byte(". 
") func (p *printer) Write(data []byte) (n int, err os.Error) { @@ -120,14 +114,12 @@ func (p *printer) Write(data []byte) (n int, err os.Error) { return } - // localError wraps locally caught os.Errors so we can distinguish // them from genuine panics which we don't want to return as errors. type localError struct { err os.Error } - // printf is a convenience wrapper that takes care of print errors. func (p *printer) printf(format string, args ...interface{}) { n, err := fmt.Fprintf(p, format, args...) @@ -137,7 +129,6 @@ func (p *printer) printf(format string, args ...interface{}) { } } - // Implementation note: Print is written for AST nodes but could be // used to print arbitrary data structures; such a version should // probably be in a different package. diff --git a/src/pkg/go/ast/print_test.go b/src/pkg/go/ast/print_test.go index 30b396fcf..f4e8f7a78 100644 --- a/src/pkg/go/ast/print_test.go +++ b/src/pkg/go/ast/print_test.go @@ -10,7 +10,6 @@ import ( "testing" ) - var tests = []struct { x interface{} // x is printed as s s string @@ -49,7 +48,6 @@ var tests = []struct { 3 }`}, } - // Split s into lines, trim whitespace from all lines, and return // the concatenated non-empty lines. func trim(s string) string { @@ -65,7 +63,6 @@ func trim(s string) string { return strings.Join(lines[0:i], "\n") } - func TestPrint(t *testing.T) { var buf bytes.Buffer for _, test := range tests { diff --git a/src/pkg/go/ast/resolve.go b/src/pkg/go/ast/resolve.go index ecd2e8a7c..3927a799e 100644 --- a/src/pkg/go/ast/resolve.go +++ b/src/pkg/go/ast/resolve.go @@ -14,23 +14,19 @@ import ( "strconv" ) - type pkgBuilder struct { scanner.ErrorVector fset *token.FileSet } - func (p *pkgBuilder) error(pos token.Pos, msg string) { p.Error(p.fset.Position(pos), msg) } - func (p *pkgBuilder) errorf(pos token.Pos, format string, args ...interface{}) { p.error(pos, fmt.Sprintf(format, args...)) } - func (p *pkgBuilder) declare(scope, altScope *Scope, obj *Object) { alt := scope.Insert(obj) if alt == nil && altScope != nil { @@ -46,7 +42,6 @@ func (p *pkgBuilder) declare(scope, altScope *Scope, obj *Object) { } } - func resolve(scope *Scope, ident *Ident) bool { for ; scope != nil; scope = scope.Outer { if obj := scope.Lookup(ident.Name); obj != nil { @@ -57,7 +52,6 @@ func resolve(scope *Scope, ident *Ident) bool { return false } - // An Importer resolves import paths to package Objects. // The imports map records the packages already imported, // indexed by package id (canonical import path). @@ -69,7 +63,6 @@ func resolve(scope *Scope, ident *Ident) bool { // return pkg. type Importer func(imports map[string]*Object, path string) (pkg *Object, err os.Error) - // NewPackage creates a new Package node from a set of File nodes. It resolves // unresolved identifiers across files and updates each file's Unresolved list // accordingly. If a non-nil importer and universe scope are provided, they are diff --git a/src/pkg/go/ast/scope.go b/src/pkg/go/ast/scope.go index b966f786f..92e366980 100644 --- a/src/pkg/go/ast/scope.go +++ b/src/pkg/go/ast/scope.go @@ -12,7 +12,6 @@ import ( "go/token" ) - // A Scope maintains the set of named language entities declared // in the scope and a link to the immediately surrounding (outer) // scope. @@ -22,14 +21,12 @@ type Scope struct { Objects map[string]*Object } - // NewScope creates a new scope nested in the outer scope. 
func NewScope(outer *Scope) *Scope { const n = 4 // initial scope capacity return &Scope{outer, make(map[string]*Object, n)} } - // Lookup returns the object with the given name if it is // found in scope s, otherwise it returns nil. Outer scopes // are ignored. @@ -38,7 +35,6 @@ func (s *Scope) Lookup(name string) *Object { return s.Objects[name] } - // Insert attempts to insert a named object obj into the scope s. // If the scope already contains an object alt with the same name, // Insert leaves the scope unchanged and returns alt. Otherwise @@ -51,7 +47,6 @@ func (s *Scope) Insert(obj *Object) (alt *Object) { return } - // Debugging support func (s *Scope) String() string { var buf bytes.Buffer @@ -66,7 +61,6 @@ func (s *Scope) String() string { return buf.String() } - // ---------------------------------------------------------------------------- // Objects @@ -91,13 +85,11 @@ type Object struct { Type interface{} // place holder for type information; may be nil } - // NewObj creates a new object of a given kind and name. func NewObj(kind ObjKind, name string) *Object { return &Object{Kind: kind, Name: name} } - // Pos computes the source position of the declaration of an object name. // The result may be an invalid position if it cannot be computed // (obj.Decl may be nil or not correct). @@ -137,7 +129,6 @@ func (obj *Object) Pos() token.Pos { return token.NoPos } - // ObKind describes what an object represents. type ObjKind int @@ -152,7 +143,6 @@ const ( Lbl // label ) - var objKindStrings = [...]string{ Bad: "bad", Pkg: "package", @@ -163,5 +153,4 @@ var objKindStrings = [...]string{ Lbl: "label", } - func (kind ObjKind) String() string { return objKindStrings[kind] } diff --git a/src/pkg/go/ast/walk.go b/src/pkg/go/ast/walk.go index 95c4b3a35..181cfd149 100644 --- a/src/pkg/go/ast/walk.go +++ b/src/pkg/go/ast/walk.go @@ -13,7 +13,6 @@ type Visitor interface { Visit(node Node) (w Visitor) } - // Helper functions for common node lists. They may be empty. func walkIdentList(v Visitor, list []*Ident) { @@ -22,28 +21,24 @@ func walkIdentList(v Visitor, list []*Ident) { } } - func walkExprList(v Visitor, list []Expr) { for _, x := range list { Walk(v, x) } } - func walkStmtList(v Visitor, list []Stmt) { for _, x := range list { Walk(v, x) } } - func walkDeclList(v Visitor, list []Decl) { for _, x := range list { Walk(v, x) } } - // TODO(gri): Investigate if providing a closure to Walk leads to // simpler use (and may help eliminate Inspect in turn). @@ -369,7 +364,6 @@ func Walk(v Visitor, node Node) { v.Visit(nil) } - type inspector func(Node) bool func (f inspector) Visit(node Node) Visitor { @@ -379,7 +373,6 @@ func (f inspector) Visit(node Node) Visitor { return nil } - // Inspect traverses an AST in depth-first order: It starts by calling // f(node); node must not be nil. If f returns true, Inspect invokes f // for all the non-nil children of node, recursively. diff --git a/src/pkg/go/build/build.go b/src/pkg/go/build/build.go index d83a6666e..97f92bfb6 100644 --- a/src/pkg/go/build/build.go +++ b/src/pkg/go/build/build.go @@ -61,7 +61,9 @@ func Build(tree *Tree, pkg string, info *DirInfo) (*Script, os.Error) { if len(info.CgoFiles) > 0 { cgoFiles := b.abss(info.CgoFiles...) s.addInput(cgoFiles...) - outGo, outObj := b.cgo(cgoFiles) + cgoCFiles := b.abss(info.CFiles...) + s.addInput(cgoCFiles...) + outGo, outObj := b.cgo(cgoFiles, cgoCFiles) gofiles = append(gofiles, outGo...) ofiles = append(ofiles, outObj...) s.addIntermediate(outGo...) 
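For the Scope and Object API in the go/ast scope.go hunks above, a small hedged sketch of declaring a name in a scope and looking it up again (the object name is made up for illustration):

package main

import (
	"fmt"
	"go/ast"
)

func main() {
	universe := ast.NewScope(nil)
	pkgScope := ast.NewScope(universe)

	obj := ast.NewObj(ast.Fun, "Println") // hypothetical function object
	if alt := pkgScope.Insert(obj); alt != nil {
		fmt.Println("already declared:", alt.Name)
	}

	// Lookup inspects only this scope; walking Outer scopes is the caller's
	// job, as resolve() in resolve.go does.
	if found := pkgScope.Lookup("Println"); found != nil {
		fmt.Println(found.Kind, found.Name) // prints "func Println"
	}
}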
@@ -182,7 +184,7 @@ func (s *Script) Clean() (err os.Error) { return } -// Clean removes the Script's Intermediate and Output files. +// Nuke removes the Script's Intermediate and Output files. // It tries to remove every file and returns the first error it encounters. func (s *Script) Nuke() (err os.Error) { // Reverse order so that directories get removed after the files they contain. @@ -213,6 +215,14 @@ func (c *Cmd) String() string { // Run executes the Cmd. func (c *Cmd) Run() os.Error { + if c.Args[0] == "mkdir" { + for _, p := range c.Output { + if err := os.MkdirAll(p, 0777); err != nil { + return fmt.Errorf("command %q: %v", c, err) + } + } + return nil + } out := new(bytes.Buffer) cmd := exec.Command(c.Args[0], c.Args[1:]...) cmd.Dir = c.Dir @@ -362,7 +372,7 @@ func (b *build) gccArgs(args ...string) []string { var cgoRe = regexp.MustCompile(`[/\\:]`) -func (b *build) cgo(cgofiles []string) (outGo, outObj []string) { +func (b *build) cgo(cgofiles, cgocfiles []string) (outGo, outObj []string) { // cgo // TODO(adg): CGOPKGPATH // TODO(adg): CGO_FLAGS @@ -405,6 +415,12 @@ func (b *build) cgo(cgofiles []string) (outGo, outObj []string) { b.script.addIntermediate(ofile) } } + for _, cfile := range cgocfiles { + ofile := b.obj + cgoRe.ReplaceAllString(cfile[:len(cfile)-1], "_") + "o" + b.gccCompile(ofile, cfile) + linkobj = append(linkobj, ofile) + outObj = append(outObj, ofile) + } dynObj := b.obj + "_cgo_.o" b.gccLink(dynObj, linkobj...) b.script.addIntermediate(dynObj) diff --git a/src/pkg/go/build/cgotest/cgotest.c b/src/pkg/go/build/cgotest/cgotest.c new file mode 100644 index 000000000..b13acb227 --- /dev/null +++ b/src/pkg/go/build/cgotest/cgotest.c @@ -0,0 +1,9 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +int +Add(int x, int y, int *sum) +{ + sum = x+y; +} diff --git a/src/pkg/go/build/cgotest/cgotest.go b/src/pkg/go/build/cgotest/cgotest.go index 32b931861..93bbf0688 100644 --- a/src/pkg/go/build/cgotest/cgotest.go +++ b/src/pkg/go/build/cgotest/cgotest.go @@ -7,6 +7,13 @@ package cgotest /* char* greeting = "hello, world"; */ +// #include "cgotest.h" import "C" +import "unsafe" var Greeting = C.GoString(C.greeting) + +func DoAdd(x, y int) (sum int) { + C.Add(C.int(x), C.int(y), (*C.int)(unsafe.Pointer(&sum))) + return +} diff --git a/src/pkg/go/build/cgotest/cgotest.h b/src/pkg/go/build/cgotest/cgotest.h new file mode 100644 index 000000000..9c73643b6 --- /dev/null +++ b/src/pkg/go/build/cgotest/cgotest.h @@ -0,0 +1,5 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +extern int Add(int, int, int *); diff --git a/src/pkg/go/build/path.go b/src/pkg/go/build/path.go index ea588abbd..e39b5f8fa 100644 --- a/src/pkg/go/build/path.go +++ b/src/pkg/go/build/path.go @@ -10,10 +10,9 @@ import ( "os" "path/filepath" "runtime" - "strings" ) -// Path is a validated list of Trees derived from $GOPATH at init. +// Path is a validated list of Trees derived from $GOROOT and $GOPATH at init. var Path []*Tree // Tree describes a Go source tree, either $GOROOT or one from $GOPATH. @@ -55,6 +54,11 @@ func (t *Tree) PkgDir() string { // BinDir returns the tree's binary executable directory. 
func (t *Tree) BinDir() string { + if t.Goroot { + if gobin := os.Getenv("GOBIN"); gobin != "" { + return gobin + } + } return filepath.Join(t.Path, "bin") } @@ -79,7 +83,10 @@ func (t *Tree) HasPkg(pkg string) bool { // TODO(adg): check object version is consistent } -var ErrNotFound = os.NewError("package could not be found locally") +var ( + ErrNotFound = os.NewError("go/build: package could not be found locally") + ErrTreeNotFound = os.NewError("go/build: no valid GOROOT or GOPATH could be found") +) // FindTree takes an import or filesystem path and returns the // tree where the package source should be and the package import path. @@ -93,7 +100,7 @@ func FindTree(path string) (tree *Tree, pkg string, err os.Error) { } for _, t := range Path { tpath := t.SrcDir() + string(filepath.Separator) - if !strings.HasPrefix(path, tpath) { + if !filepath.HasPrefix(path, tpath) { continue } tree = t @@ -111,14 +118,22 @@ func FindTree(path string) (tree *Tree, pkg string, err os.Error) { return } } - err = ErrNotFound + if tree == nil { + err = ErrTreeNotFound + } else { + err = ErrNotFound + } return } // isLocalPath returns whether the given path is local (/foo ./foo ../foo . ..) +// Windows paths that starts with drive letter (c:\foo c:foo) are considered local. func isLocalPath(s string) bool { const sep = string(filepath.Separator) - return strings.HasPrefix(s, sep) || strings.HasPrefix(s, "."+sep) || strings.HasPrefix(s, ".."+sep) || s == "." || s == ".." + return s == "." || s == ".." || + filepath.HasPrefix(s, sep) || + filepath.HasPrefix(s, "."+sep) || filepath.HasPrefix(s, ".."+sep) || + filepath.VolumeName(s) != "" } var ( @@ -133,12 +148,13 @@ var ( // set up Path: parse and validate GOROOT and GOPATH variables func init() { root := runtime.GOROOT() - p, err := newTree(root) + t, err := newTree(root) if err != nil { - log.Fatalf("Invalid GOROOT %q: %v", root, err) + log.Printf("go/build: invalid GOROOT %q: %v", root, err) + } else { + t.Goroot = true + Path = []*Tree{t} } - p.Goroot = true - Path = []*Tree{p} for _, p := range filepath.SplitList(os.Getenv("GOPATH")) { if p == "" { @@ -146,7 +162,7 @@ func init() { } t, err := newTree(p) if err != nil { - log.Printf("Invalid GOPATH %q: %v", p, err) + log.Printf("go/build: invalid GOPATH %q: %v", p, err) continue } Path = append(Path, t) @@ -160,7 +176,7 @@ func init() { } // use GOROOT if no valid GOPATH specified - if defaultTree == nil { + if defaultTree == nil && len(Path) > 0 { defaultTree = Path[0] } } diff --git a/src/pkg/go/doc/comment.go b/src/pkg/go/doc/comment.go index 85640af79..e1989226b 100644 --- a/src/pkg/go/doc/comment.go +++ b/src/pkg/go/doc/comment.go @@ -11,13 +11,11 @@ import ( "io" "regexp" "strings" - "template" // for htmlEscape + "template" // for HTMLEscape ) - func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' } - func stripTrailingWhitespace(s string) string { i := len(s) for i > 0 && isWhitespace(s[i-1]) { @@ -26,7 +24,6 @@ func stripTrailingWhitespace(s string) string { return s[0:i] } - // CommentText returns the text of comment, // with the comment markers - //, /*, and */ - removed. func CommentText(comment *ast.CommentGroup) string { @@ -85,7 +82,6 @@ func CommentText(comment *ast.CommentGroup) string { return strings.Join(lines, "\n") } - // Split bytes into lines. 
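The path.go changes above make Path cover both $GOROOT and $GOPATH trees, let BinDir honor $GOBIN for the GOROOT tree, and split the error cases. A hedged sketch of resolving a directory to a tree and import path (the directory is purely illustrative):

package main

import (
	"fmt"
	"go/build"
	"log"
)

func main() {
	// A filesystem path below some tree's src directory; placeholder only.
	tree, pkg, err := build.FindTree("/home/gopher/mygo/src/example/hello")
	if err != nil {
		// Errors now distinguish a missing package (ErrNotFound) from a
		// missing GOROOT/GOPATH tree (ErrTreeNotFound).
		log.Fatal(err)
	}
	fmt.Println("import path:", pkg)
	fmt.Println("src:", tree.SrcDir())
	fmt.Println("pkg:", tree.PkgDir())
	fmt.Println("bin:", tree.BinDir())
}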
func split(text []byte) [][]byte { // count lines @@ -119,7 +115,6 @@ func split(text []byte) [][]byte { return out } - var ( ldquo = []byte("“") rdquo = []byte("”") @@ -148,7 +143,6 @@ func commentEscape(w io.Writer, s []byte, nice bool) { template.HTMLEscape(w, s[last:]) } - const ( // Regexp for Go identifiers identRx = `[a-zA-Z_][a-zA-Z_0-9]*` // TODO(gri) ASCII only for now - fix this @@ -176,7 +170,6 @@ var ( html_endpre = []byte("</pre>\n") ) - // Emphasize and escape a line of text for HTML. URLs are converted into links; // if the URL also appears in the words map, the link is taken from the map (if // the corresponding map value is the empty string, the URL is not converted @@ -235,7 +228,6 @@ func emphasize(w io.Writer, line []byte, words map[string]string, nice bool) { commentEscape(w, line, nice) } - func indentLen(s []byte) int { i := 0 for i < len(s) && (s[i] == ' ' || s[i] == '\t') { @@ -244,10 +236,8 @@ func indentLen(s []byte) int { return i } - func isBlank(s []byte) bool { return len(s) == 0 || (len(s) == 1 && s[0] == '\n') } - func commonPrefix(a, b []byte) []byte { i := 0 for i < len(a) && i < len(b) && a[i] == b[i] { @@ -256,7 +246,6 @@ func commonPrefix(a, b []byte) []byte { return a[0:i] } - func unindent(block [][]byte) { if len(block) == 0 { return @@ -279,7 +268,6 @@ func unindent(block [][]byte) { } } - // Convert comment text to formatted HTML. // The comment was prepared by DocReader, // so it is known not to have leading, trailing blank lines diff --git a/src/pkg/go/doc/doc.go b/src/pkg/go/doc/doc.go index b26cd2bed..c7fed9784 100644 --- a/src/pkg/go/doc/doc.go +++ b/src/pkg/go/doc/doc.go @@ -12,7 +12,6 @@ import ( "sort" ) - // ---------------------------------------------------------------------------- type typeDoc struct { @@ -25,7 +24,6 @@ type typeDoc struct { methods map[string]*ast.FuncDecl } - // docReader accumulates documentation for a single package. // It modifies the AST: Comments (declaration documentation) // that have been collected by the DocReader are set to nil @@ -42,14 +40,12 @@ type docReader struct { bugs []*ast.CommentGroup } - func (doc *docReader) init(pkgName string) { doc.pkgName = pkgName doc.types = make(map[string]*typeDoc) doc.funcs = make(map[string]*ast.FuncDecl) } - func (doc *docReader) addDoc(comments *ast.CommentGroup) { if doc.doc == nil { // common case: just one package comment @@ -71,7 +67,6 @@ func (doc *docReader) addDoc(comments *ast.CommentGroup) { doc.doc = &ast.CommentGroup{list} } - func (doc *docReader) addType(decl *ast.GenDecl) { spec := decl.Specs[0].(*ast.TypeSpec) typ := doc.lookupTypeDoc(spec.Name.Name) @@ -84,7 +79,6 @@ func (doc *docReader) addType(decl *ast.GenDecl) { } } - func (doc *docReader) lookupTypeDoc(name string) *typeDoc { if name == "" { return nil // no type docs for anonymous types @@ -98,7 +92,6 @@ func (doc *docReader) lookupTypeDoc(name string) *typeDoc { return tdoc } - func baseTypeName(typ ast.Expr) string { switch t := typ.(type) { case *ast.Ident: @@ -113,7 +106,6 @@ func baseTypeName(typ ast.Expr) string { return "" } - func (doc *docReader) addValue(decl *ast.GenDecl) { // determine if decl should be associated with a type // Heuristic: For each typed entry, determine the type name, if any. @@ -165,7 +157,6 @@ func (doc *docReader) addValue(decl *ast.GenDecl) { *values = append(*values, decl) } - // Helper function to set the table entry for function f. 
Makes sure that // at least one f with associated documentation is stored in table, if there // are multiple f's with the same name. @@ -183,7 +174,6 @@ func setFunc(table map[string]*ast.FuncDecl, f *ast.FuncDecl) { table[name] = f } - func (doc *docReader) addFunc(fun *ast.FuncDecl) { name := fun.Name.Name @@ -238,7 +228,6 @@ func (doc *docReader) addFunc(fun *ast.FuncDecl) { setFunc(doc.funcs, fun) } - func (doc *docReader) addDecl(decl ast.Decl) { switch d := decl.(type) { case *ast.GenDecl: @@ -271,7 +260,6 @@ func (doc *docReader) addDecl(decl ast.Decl) { } } - func copyCommentList(list []*ast.Comment) []*ast.Comment { return append([]*ast.Comment(nil), list...) } @@ -281,7 +269,6 @@ var ( bug_content = regexp.MustCompile("[^ \n\r\t]+") // at least one non-whitespace char ) - // addFile adds the AST for a source file to the docReader. // Adding the same AST multiple times is a no-op. // @@ -313,7 +300,6 @@ func (doc *docReader) addFile(src *ast.File) { src.Comments = nil // consumed unassociated comments - remove from ast.File node } - func NewFileDoc(file *ast.File) *PackageDoc { var r docReader r.init(file.Name.Name) @@ -321,7 +307,6 @@ func NewFileDoc(file *ast.File) *PackageDoc { return r.newDoc("", nil) } - func NewPackageDoc(pkg *ast.Package, importpath string) *PackageDoc { var r docReader r.init(pkg.Name) @@ -335,7 +320,6 @@ func NewPackageDoc(pkg *ast.Package, importpath string) *PackageDoc { return r.newDoc(importpath, filenames) } - // ---------------------------------------------------------------------------- // Conversion to external representation @@ -353,7 +337,6 @@ type sortValueDoc []*ValueDoc func (p sortValueDoc) Len() int { return len(p) } func (p sortValueDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - func declName(d *ast.GenDecl) string { if len(d.Specs) != 1 { return "" @@ -369,7 +352,6 @@ func declName(d *ast.GenDecl) string { return "" } - func (p sortValueDoc) Less(i, j int) bool { // sort by name // pull blocks (name = "") up to top @@ -380,7 +362,6 @@ func (p sortValueDoc) Less(i, j int) bool { return p[i].order < p[j].order } - func makeValueDocs(list []*ast.GenDecl, tok token.Token) []*ValueDoc { d := make([]*ValueDoc, len(list)) // big enough in any case n := 0 @@ -396,7 +377,6 @@ func makeValueDocs(list []*ast.GenDecl, tok token.Token) []*ValueDoc { return d } - // FuncDoc is the documentation for a func declaration, // either a top-level function or a method function. // @@ -413,7 +393,6 @@ func (p sortFuncDoc) Len() int { return len(p) } func (p sortFuncDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p sortFuncDoc) Less(i, j int) bool { return p[i].Name < p[j].Name } - func makeFuncDocs(m map[string]*ast.FuncDecl) []*FuncDoc { d := make([]*FuncDoc, len(m)) i := 0 @@ -433,7 +412,6 @@ func makeFuncDocs(m map[string]*ast.FuncDecl) []*FuncDoc { return d } - // TypeDoc is the documentation for a declared type. // Consts and Vars are sorted lists of constants and variables of (mostly) that type. // Factories is a sorted list of factory functions that return that type. @@ -463,7 +441,6 @@ func (p sortTypeDoc) Less(i, j int) bool { return p[i].order < p[j].order } - // NOTE(rsc): This would appear not to be correct for type ( ) // blocks, but the doc extractor above has split them into // individual declarations. 
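go/doc's NewFileDoc and NewPackageDoc, shown above, turn parsed ASTs into a PackageDoc. A rough sketch combining them with the parser; the directory path is a placeholder and the Doc field name is an assumption based on the PackageDoc of this era:

package main

import (
	"fmt"
	"go/doc"
	"go/parser"
	"go/token"
	"log"
)

func main() {
	fset := token.NewFileSet()
	pkgs, err := parser.ParseDir(fset, "/usr/local/go/src/pkg/go/ast", nil, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	for name, pkg := range pkgs {
		d := doc.NewPackageDoc(pkg, "go/ast")
		fmt.Println(name, "-", d.Doc) // package comment assembled by addDoc/CommentText
	}
}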
@@ -520,7 +497,6 @@ func (doc *docReader) makeTypeDocs(m map[string]*typeDoc) []*TypeDoc { return d } - func makeBugDocs(list []*ast.CommentGroup) []string { d := make([]string, len(list)) for i, g := range list { @@ -529,7 +505,6 @@ func makeBugDocs(list []*ast.CommentGroup) []string { return d } - // PackageDoc is the documentation for an entire package. // type PackageDoc struct { @@ -544,7 +519,6 @@ type PackageDoc struct { Bugs []string } - // newDoc returns the accumulated documentation for the package. // func (doc *docReader) newDoc(importpath string, filenames []string) *PackageDoc { @@ -565,13 +539,11 @@ func (doc *docReader) newDoc(importpath string, filenames []string) *PackageDoc return p } - // ---------------------------------------------------------------------------- // Filtering by name type Filter func(string) bool - func matchFields(fields *ast.FieldList, f Filter) bool { if fields != nil { for _, field := range fields.List { @@ -585,7 +557,6 @@ func matchFields(fields *ast.FieldList, f Filter) bool { return false } - func matchDecl(d *ast.GenDecl, f Filter) bool { for _, d := range d.Specs { switch v := d.(type) { @@ -614,7 +585,6 @@ func matchDecl(d *ast.GenDecl, f Filter) bool { return false } - func filterValueDocs(a []*ValueDoc, f Filter) []*ValueDoc { w := 0 for _, vd := range a { @@ -626,7 +596,6 @@ func filterValueDocs(a []*ValueDoc, f Filter) []*ValueDoc { return a[0:w] } - func filterFuncDocs(a []*FuncDoc, f Filter) []*FuncDoc { w := 0 for _, fd := range a { @@ -638,7 +607,6 @@ func filterFuncDocs(a []*FuncDoc, f Filter) []*FuncDoc { return a[0:w] } - func filterTypeDocs(a []*TypeDoc, f Filter) []*TypeDoc { w := 0 for _, td := range a { @@ -661,7 +629,6 @@ func filterTypeDocs(a []*TypeDoc, f Filter) []*TypeDoc { return a[0:w] } - // Filter eliminates documentation for names that don't pass through the filter f. // TODO: Recognize "Type.Method" as a name. // diff --git a/src/pkg/go/parser/interface.go b/src/pkg/go/parser/interface.go index 1764c38e4..4f980fc65 100644 --- a/src/pkg/go/parser/interface.go +++ b/src/pkg/go/parser/interface.go @@ -17,7 +17,6 @@ import ( "path/filepath" ) - // If src != nil, readSource converts src to a []byte if possible; // otherwise it returns an error. If src == nil, readSource returns // the result of reading the file specified by filename. @@ -49,13 +48,14 @@ func readSource(filename string, src interface{}) ([]byte, os.Error) { return ioutil.ReadFile(filename) } - -func (p *parser) parseEOF() os.Error { - p.expect(token.EOF) - return p.GetError(scanner.Sorted) +func (p *parser) errors() os.Error { + mode := scanner.Sorted + if p.mode&SpuriousErrors == 0 { + mode = scanner.NoMultiples + } + return p.GetError(mode) } - // ParseExpr parses a Go expression and returns the corresponding // AST node. The fset, filename, and src arguments have the same interpretation // as for ParseFile. If there is an error, the result expression @@ -73,9 +73,10 @@ func ParseExpr(fset *token.FileSet, filename string, src interface{}) (ast.Expr, if p.tok == token.SEMICOLON { p.next() // consume automatically inserted semicolon, if any } - return x, p.parseEOF() -} + p.expect(token.EOF) + return x, p.errors() +} // ParseStmtList parses a list of Go statements and returns the list // of corresponding AST nodes. 
The fset, filename, and src arguments have the same @@ -90,9 +91,11 @@ func ParseStmtList(fset *token.FileSet, filename string, src interface{}) ([]ast var p parser p.init(fset, filename, data, 0) - return p.parseStmtList(), p.parseEOF() -} + list := p.parseStmtList() + p.expect(token.EOF) + return list, p.errors() +} // ParseDeclList parses a list of Go declarations and returns the list // of corresponding AST nodes. The fset, filename, and src arguments have the same @@ -107,9 +110,11 @@ func ParseDeclList(fset *token.FileSet, filename string, src interface{}) ([]ast var p parser p.init(fset, filename, data, 0) - return p.parseDeclList(), p.parseEOF() -} + list := p.parseDeclList() + p.expect(token.EOF) + return list, p.errors() +} // ParseFile parses the source code of a single Go source file and returns // the corresponding ast.File node. The source code may be provided via @@ -139,9 +144,10 @@ func ParseFile(fset *token.FileSet, filename string, src interface{}, mode uint) var p parser p.init(fset, filename, data, mode) - return p.parseFile(), p.GetError(scanner.NoMultiples) // parseFile() reads to EOF -} + file := p.parseFile() // parseFile reads to EOF + return file, p.errors() +} // ParseFiles calls ParseFile for each file in the filenames list and returns // a map of package name -> package AST with all the packages found. The mode @@ -171,7 +177,6 @@ func ParseFiles(fset *token.FileSet, filenames []string, mode uint) (pkgs map[st return } - // ParseDir calls ParseFile for the files in the directory specified by path and // returns a map of package name -> package AST with all the packages found. If // filter != nil, only the files with os.FileInfo entries passing through the filter diff --git a/src/pkg/go/parser/parser.go b/src/pkg/go/parser/parser.go index 586ee3a9a..9c14d1667 100644 --- a/src/pkg/go/parser/parser.go +++ b/src/pkg/go/parser/parser.go @@ -16,7 +16,6 @@ import ( "go/token" ) - // The mode parameter to the Parse* functions is a set of flags (or 0). // They control the amount of source code parsed and other optional // parser functionality. @@ -27,9 +26,9 @@ const ( ParseComments // parse comments and add them to AST Trace // print a trace of parsed productions DeclarationErrors // report declaration errors + SpuriousErrors // report all (not just the first) errors per line ) - // The parser structure holds the parser's internal state. type parser struct { file *token.File @@ -66,7 +65,6 @@ type parser struct { targetStack [][]*ast.Ident // stack of unresolved labels } - // scannerMode returns the scanner mode bits given the parser's mode bits. 
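The new SpuriousErrors mode above controls whether the parser reports every error or only the first one per line (scanner.Sorted versus scanner.NoMultiples in errors()). A hedged sketch of asking for the full error list:

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

const broken = `package p
func f() { var x int = = 1; x++ + }
`

func main() {
	fset := token.NewFileSet()
	// Without SpuriousErrors, only the first error per source line is kept.
	if _, err := parser.ParseFile(fset, "broken.go", broken, parser.DeclarationErrors|parser.SpuriousErrors); err != nil {
		fmt.Println(err)
	}
}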
func scannerMode(mode uint) uint { var m uint = scanner.InsertSemis @@ -76,7 +74,6 @@ func scannerMode(mode uint) uint { return m } - func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) { p.file = fset.AddFile(filename, fset.Base(), len(src)) p.scanner.Init(p.file, src, p, scannerMode(mode)) @@ -95,7 +92,6 @@ func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uin p.openLabelScope() } - // ---------------------------------------------------------------------------- // Scoping support @@ -103,18 +99,15 @@ func (p *parser) openScope() { p.topScope = ast.NewScope(p.topScope) } - func (p *parser) closeScope() { p.topScope = p.topScope.Outer } - func (p *parser) openLabelScope() { p.labelScope = ast.NewScope(p.labelScope) p.targetStack = append(p.targetStack, nil) } - func (p *parser) closeLabelScope() { // resolve labels n := len(p.targetStack) - 1 @@ -130,7 +123,6 @@ func (p *parser) closeLabelScope() { p.labelScope = p.labelScope.Outer } - func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) { for _, ident := range idents { assert(ident.Obj == nil, "identifier already declared or resolved") @@ -152,7 +144,6 @@ func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjK } } - func (p *parser) shortVarDecl(idents []*ast.Ident) { // Go spec: A short variable declaration may redeclare variables // provided they were originally declared in the same block with @@ -178,13 +169,11 @@ func (p *parser) shortVarDecl(idents []*ast.Ident) { } } - // The unresolved object is a sentinel to mark identifiers that have been added // to the list of unresolved identifiers. The sentinel is only used for verifying // internal consistency. var unresolved = new(ast.Object) - func (p *parser) resolve(x ast.Expr) { // nothing to do if x is not an identifier or the blank identifier ident, _ := x.(*ast.Ident) @@ -210,7 +199,6 @@ func (p *parser) resolve(x ast.Expr) { p.unresolved = append(p.unresolved, ident) } - // ---------------------------------------------------------------------------- // Parsing support @@ -228,21 +216,18 @@ func (p *parser) printTrace(a ...interface{}) { fmt.Println(a...) } - func trace(p *parser, msg string) *parser { p.printTrace(msg, "(") p.indent++ return p } - // Usage pattern: defer un(trace(p, "...")); func un(p *parser) { p.indent-- p.printTrace(")") } - // Advance to the next token. func (p *parser) next0() { // Because of one-token look-ahead, print the previous token @@ -284,7 +269,6 @@ func (p *parser) consumeComment() (comment *ast.Comment, endline int) { return } - // Consume a group of adjacent comments, add it to the parser's // comments list, and return it together with the line at which // the last comment in the group ends. An empty line or non-comment @@ -306,7 +290,6 @@ func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int) return } - // Advance to the next non-comment token. In the process, collect // any comment groups encountered, and remember the last lead and // and line comments. 
@@ -357,12 +340,10 @@ func (p *parser) next() { } } - func (p *parser) error(pos token.Pos, msg string) { p.Error(p.file.Position(pos), msg) } - func (p *parser) errorExpected(pos token.Pos, msg string) { msg = "expected " + msg if pos == p.pos { @@ -380,7 +361,6 @@ func (p *parser) errorExpected(pos token.Pos, msg string) { p.error(pos, msg) } - func (p *parser) expect(tok token.Token) token.Pos { pos := p.pos if p.tok != tok { @@ -390,21 +370,18 @@ func (p *parser) expect(tok token.Token) token.Pos { return pos } - func (p *parser) expectSemi() { if p.tok != token.RPAREN && p.tok != token.RBRACE { p.expect(token.SEMICOLON) } } - func assert(cond bool, msg string) { if !cond { panic("go/parser internal error: " + msg) } } - // ---------------------------------------------------------------------------- // Identifiers @@ -420,7 +397,6 @@ func (p *parser) parseIdent() *ast.Ident { return &ast.Ident{pos, name, nil} } - func (p *parser) parseIdentList() (list []*ast.Ident) { if p.trace { defer un(trace(p, "IdentList")) @@ -435,7 +411,6 @@ func (p *parser) parseIdentList() (list []*ast.Ident) { return } - // ---------------------------------------------------------------------------- // Common productions @@ -445,16 +420,15 @@ func (p *parser) parseExprList(lhs bool) (list []ast.Expr) { defer un(trace(p, "ExpressionList")) } - list = append(list, p.parseExpr(lhs)) + list = append(list, p.checkExpr(p.parseExpr(lhs))) for p.tok == token.COMMA { p.next() - list = append(list, p.parseExpr(lhs)) + list = append(list, p.checkExpr(p.parseExpr(lhs))) } return } - func (p *parser) parseLhsList() []ast.Expr { list := p.parseExprList(true) switch p.tok { @@ -478,12 +452,10 @@ func (p *parser) parseLhsList() []ast.Expr { return list } - func (p *parser) parseRhsList() []ast.Expr { return p.parseExprList(false) } - // ---------------------------------------------------------------------------- // Types @@ -504,7 +476,6 @@ func (p *parser) parseType() ast.Expr { return typ } - // If the result is an identifier, it is not resolved. 
func (p *parser) parseTypeName() ast.Expr { if p.trace { @@ -525,7 +496,6 @@ func (p *parser) parseTypeName() ast.Expr { return ident } - func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr { if p.trace { defer un(trace(p, "ArrayType")) @@ -545,7 +515,6 @@ func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr { return &ast.ArrayType{lbrack, len, elt} } - func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident { idents := make([]*ast.Ident, len(list)) for i, x := range list { @@ -560,7 +529,6 @@ func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident { return idents } - func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field { if p.trace { defer un(trace(p, "FieldDecl")) @@ -602,7 +570,6 @@ func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field { return field } - func (p *parser) parseStructType() *ast.StructType { if p.trace { defer un(trace(p, "StructType")) @@ -624,7 +591,6 @@ func (p *parser) parseStructType() *ast.StructType { return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false} } - func (p *parser) parsePointerType() *ast.StarExpr { if p.trace { defer un(trace(p, "PointerType")) @@ -636,7 +602,6 @@ func (p *parser) parsePointerType() *ast.StarExpr { return &ast.StarExpr{star, base} } - func (p *parser) tryVarType(isParam bool) ast.Expr { if isParam && p.tok == token.ELLIPSIS { pos := p.pos @@ -654,7 +619,6 @@ func (p *parser) tryVarType(isParam bool) ast.Expr { return p.tryIdentOrType(false) } - func (p *parser) parseVarType(isParam bool) ast.Expr { typ := p.tryVarType(isParam) if typ == nil { @@ -666,7 +630,6 @@ func (p *parser) parseVarType(isParam bool) ast.Expr { return typ } - func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) { if p.trace { defer un(trace(p, "VarList")) @@ -694,7 +657,6 @@ func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) { return } - func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) { if p.trace { defer un(trace(p, "ParameterList")) @@ -739,7 +701,6 @@ func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params [ return } - func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList { if p.trace { defer un(trace(p, "Parameters")) @@ -755,7 +716,6 @@ func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldLi return &ast.FieldList{lparen, params, rparen} } - func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList { if p.trace { defer un(trace(p, "Result")) @@ -775,7 +735,6 @@ func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList { return nil } - func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) { if p.trace { defer un(trace(p, "Signature")) @@ -787,7 +746,6 @@ func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldLis return } - func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) { if p.trace { defer un(trace(p, "FuncType")) @@ -800,7 +758,6 @@ func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) { return &ast.FuncType{pos, params, results}, scope } - func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field { if p.trace { defer un(trace(p, "MethodSpec")) @@ -829,7 +786,6 @@ func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field { return spec } - func (p *parser) parseInterfaceType() *ast.InterfaceType { if p.trace { defer un(trace(p, "InterfaceType")) @@ -848,7 +804,6 @@ func (p *parser) parseInterfaceType() *ast.InterfaceType { return &ast.InterfaceType{pos, 
&ast.FieldList{lbrace, list, rbrace}, false} } - func (p *parser) parseMapType() *ast.MapType { if p.trace { defer un(trace(p, "MapType")) @@ -863,7 +818,6 @@ func (p *parser) parseMapType() *ast.MapType { return &ast.MapType{pos, key, value} } - func (p *parser) parseChanType() *ast.ChanType { if p.trace { defer un(trace(p, "ChanType")) @@ -887,7 +841,6 @@ func (p *parser) parseChanType() *ast.ChanType { return &ast.ChanType{pos, dir, value} } - // If the result is an identifier, it is not resolved. func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr { switch p.tok { @@ -920,7 +873,6 @@ func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr { return nil } - func (p *parser) tryType() ast.Expr { typ := p.tryIdentOrType(false) if typ != nil { @@ -929,7 +881,6 @@ func (p *parser) tryType() ast.Expr { return typ } - // ---------------------------------------------------------------------------- // Blocks @@ -945,7 +896,6 @@ func (p *parser) parseStmtList() (list []ast.Stmt) { return } - func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt { if p.trace { defer un(trace(p, "Body")) @@ -962,7 +912,6 @@ func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt { return &ast.BlockStmt{lbrace, list, rbrace} } - func (p *parser) parseBlockStmt() *ast.BlockStmt { if p.trace { defer un(trace(p, "BlockStmt")) @@ -977,7 +926,6 @@ func (p *parser) parseBlockStmt() *ast.BlockStmt { return &ast.BlockStmt{lbrace, list, rbrace} } - // ---------------------------------------------------------------------------- // Expressions @@ -999,7 +947,6 @@ func (p *parser) parseFuncTypeOrLit() ast.Expr { return &ast.FuncLit{typ, body} } - // parseOperand may return an expression or a raw type (incl. array // types of the form [...]T. Callers must verify the result. // If lhs is set and the result is an identifier, it is not resolved. @@ -1026,7 +973,7 @@ func (p *parser) parseOperand(lhs bool) ast.Expr { lparen := p.pos p.next() p.exprLev++ - x := p.parseRhs() + x := p.parseRhsOrType() // types may be parenthesized: (some type) p.exprLev-- rparen := p.expect(token.RPAREN) return &ast.ParenExpr{lparen, x, rparen} @@ -1049,7 +996,6 @@ func (p *parser) parseOperand(lhs bool) ast.Expr { return &ast.BadExpr{pos, p.pos} } - func (p *parser) parseSelector(x ast.Expr) ast.Expr { if p.trace { defer un(trace(p, "Selector")) @@ -1060,7 +1006,6 @@ func (p *parser) parseSelector(x ast.Expr) ast.Expr { return &ast.SelectorExpr{x, sel} } - func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr { if p.trace { defer un(trace(p, "TypeAssertion")) @@ -1079,7 +1024,6 @@ func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr { return &ast.TypeAssertExpr{x, typ} } - func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr { if p.trace { defer un(trace(p, "IndexOrSlice")) @@ -1108,7 +1052,6 @@ func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr { return &ast.IndexExpr{x, lbrack, low, rbrack} } - func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr { if p.trace { defer un(trace(p, "CallOrConversion")) @@ -1119,7 +1062,7 @@ func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr { var list []ast.Expr var ellipsis token.Pos for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() { - list = append(list, p.parseRhs()) + list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...) 
if p.tok == token.ELLIPSIS { ellipsis = p.pos p.next() @@ -1135,7 +1078,6 @@ func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr { return &ast.CallExpr{fun, lparen, list, ellipsis, rparen} } - func (p *parser) parseElement(keyOk bool) ast.Expr { if p.trace { defer un(trace(p, "Element")) @@ -1145,7 +1087,7 @@ func (p *parser) parseElement(keyOk bool) ast.Expr { return p.parseLiteralValue(nil) } - x := p.parseExpr(keyOk) // don't resolve if map key + x := p.checkExpr(p.parseExpr(keyOk)) // don't resolve if map key if keyOk { if p.tok == token.COLON { colon := p.pos @@ -1158,7 +1100,6 @@ func (p *parser) parseElement(keyOk bool) ast.Expr { return x } - func (p *parser) parseElementList() (list []ast.Expr) { if p.trace { defer un(trace(p, "ElementList")) @@ -1175,7 +1116,6 @@ func (p *parser) parseElementList() (list []ast.Expr) { return } - func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr { if p.trace { defer un(trace(p, "LiteralValue")) @@ -1192,7 +1132,6 @@ func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr { return &ast.CompositeLit{typ, lbrace, elts, rbrace} } - // checkExpr checks that x is an expression (and not a type). func (p *parser) checkExpr(x ast.Expr) ast.Expr { switch t := unparen(x).(type) { @@ -1207,19 +1146,14 @@ func (p *parser) checkExpr(x ast.Expr) ast.Expr { case *ast.IndexExpr: case *ast.SliceExpr: case *ast.TypeAssertExpr: - if t.Type == nil { - // the form X.(type) is only allowed in type switch expressions - p.errorExpected(x.Pos(), "expression") - x = &ast.BadExpr{x.Pos(), x.End()} - } + // If t.Type == nil we have a type assertion of the form + // y.(type), which is only allowed in type switch expressions. + // It's hard to exclude those but for the case where we are in + // a type switch. Instead be lenient and test this in the type + // checker. case *ast.CallExpr: case *ast.StarExpr: case *ast.UnaryExpr: - if t.Op == token.RANGE { - // the range operator is only allowed at the top of a for statement - p.errorExpected(x.Pos(), "expression") - x = &ast.BadExpr{x.Pos(), x.End()} - } case *ast.BinaryExpr: default: // all other nodes are not proper expressions @@ -1229,7 +1163,6 @@ func (p *parser) checkExpr(x ast.Expr) ast.Expr { return x } - // isTypeName returns true iff x is a (qualified) TypeName. func isTypeName(x ast.Expr) bool { switch t := x.(type) { @@ -1244,7 +1177,6 @@ func isTypeName(x ast.Expr) bool { return true } - // isLiteralType returns true iff x is a legal composite literal type. func isLiteralType(x ast.Expr) bool { switch t := x.(type) { @@ -1262,7 +1194,6 @@ func isLiteralType(x ast.Expr) bool { return true } - // If x is of the form *T, deref returns T, otherwise it returns x. func deref(x ast.Expr) ast.Expr { if p, isPtr := x.(*ast.StarExpr); isPtr { @@ -1271,7 +1202,6 @@ func deref(x ast.Expr) ast.Expr { return x } - // If x is of the form (T), unparen returns unparen(T), otherwise it returns x. func unparen(x ast.Expr) ast.Expr { if p, isParen := x.(*ast.ParenExpr); isParen { @@ -1280,7 +1210,6 @@ func unparen(x ast.Expr) ast.Expr { return x } - // checkExprOrType checks that x is an expression or a type // (and not a raw type such as [...]T). 
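The switch from parseRhs to parseRhsOrType above exists so that parenthesized types and type arguments to builtins parse cleanly instead of being rejected by checkExpr. Purely as illustration of the source forms involved (not part of this change), all of the following now go through that path:

package p

var (
	b = ([]byte)("hi")          // conversion through a parenthesized type: (some type)(x)
	c = make(chan<- string, 4)  // builtin whose argument is a type: make(some type, ...)
	m = new(struct{ x, y int }) // likewise for new
)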
// @@ -1289,11 +1218,6 @@ func (p *parser) checkExprOrType(x ast.Expr) ast.Expr { case *ast.ParenExpr: panic("unreachable") case *ast.UnaryExpr: - if t.Op == token.RANGE { - // the range operator is only allowed at the top of a for statement - p.errorExpected(x.Pos(), "expression") - x = &ast.BadExpr{x.Pos(), x.End()} - } case *ast.ArrayType: if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis { p.error(len.Pos(), "expected array length, found '...'") @@ -1305,7 +1229,6 @@ func (p *parser) checkExprOrType(x ast.Expr) ast.Expr { return x } - // If lhs is set and the result is an identifier, it is not resolved. func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr { if p.trace { @@ -1360,7 +1283,6 @@ L: return x } - // If lhs is set and the result is an identifier, it is not resolved. func (p *parser) parseUnaryExpr(lhs bool) ast.Expr { if p.trace { @@ -1368,7 +1290,7 @@ func (p *parser) parseUnaryExpr(lhs bool) ast.Expr { } switch p.tok { - case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.RANGE: + case token.ADD, token.SUB, token.NOT, token.XOR, token.AND: pos, op := p.pos, p.tok p.next() x := p.parseUnaryExpr(false) @@ -1398,7 +1320,6 @@ func (p *parser) parseUnaryExpr(lhs bool) ast.Expr { return p.parsePrimaryExpr(lhs) } - // If lhs is set and the result is an identifier, it is not resolved. func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr { if p.trace { @@ -1422,10 +1343,10 @@ func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr { return x } - // If lhs is set and the result is an identifier, it is not resolved. -// TODO(gri): parseExpr may return a type or even a raw type ([..]int) - -// should reject when a type/raw type is obviously not allowed +// The result may be a type or even a raw type ([...]int). Callers must +// check the result (using checkExpr or checkExprOrType), depending on +// context. func (p *parser) parseExpr(lhs bool) ast.Expr { if p.trace { defer un(trace(p, "Expression")) @@ -1434,16 +1355,29 @@ func (p *parser) parseExpr(lhs bool) ast.Expr { return p.parseBinaryExpr(lhs, token.LowestPrec+1) } - func (p *parser) parseRhs() ast.Expr { - return p.parseExpr(false) + return p.checkExpr(p.parseExpr(false)) } +func (p *parser) parseRhsOrType() ast.Expr { + return p.checkExprOrType(p.parseExpr(false)) +} // ---------------------------------------------------------------------------- // Statements -func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { +// Parsing modes for parseSimpleStmt. +const ( + basic = iota + labelOk + rangeOk +) + +// parseSimpleStmt returns true as 2nd result if it parsed the assignment +// of a range clause (with mode == rangeOk). The returned statement is an +// assignment with a right-hand side that is a single unary expression of +// the form "range x". No guarantees are given for the left-hand side. 
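As described above, a range clause initially comes back from parseSimpleStmt as an assignment whose right-hand side is a single unary "range x" expression; the for-statement parser then rewrites it into an *ast.RangeStmt. A small check of the end result, using ParseStmtList from this diff (the statement text is arbitrary):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"
)

func main() {
	fset := token.NewFileSet()
	stmts, err := parser.ParseStmtList(fset, "range.go", "for k, v := range m { _, _ = k, v }\n")
	if err != nil {
		log.Fatal(err)
	}
	if len(stmts) == 1 {
		fmt.Printf("%T\n", stmts[0]) // *ast.RangeStmt
	}
}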
+func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) { if p.trace { defer un(trace(p, "SimpleStmt")) } @@ -1456,11 +1390,20 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN, token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN, token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN: - // assignment statement + // assignment statement, possibly part of a range clause pos, tok := p.pos, p.tok p.next() - y := p.parseRhsList() - return &ast.AssignStmt{x, pos, tok, y} + var y []ast.Expr + isRange := false + if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) { + pos := p.pos + p.next() + y = []ast.Expr{&ast.UnaryExpr{pos, token.RANGE, p.parseRhs()}} + isRange = true + } else { + y = p.parseRhsList() + } + return &ast.AssignStmt{x, pos, tok, y}, isRange } if len(x) > 1 { @@ -1473,38 +1416,43 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { // labeled statement colon := p.pos p.next() - if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent { + if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent { // Go spec: The scope of a label is the body of the function // in which it is declared and excludes the body of any nested // function. stmt := &ast.LabeledStmt{label, colon, p.parseStmt()} p.declare(stmt, nil, p.labelScope, ast.Lbl, label) - return stmt + return stmt, false } - p.error(x[0].Pos(), "illegal label declaration") - return &ast.BadStmt{x[0].Pos(), colon + 1} + // The label declaration typically starts at x[0].Pos(), but the label + // declaration may be erroneous due to a token after that position (and + // before the ':'). If SpuriousErrors is not set, the (only) error re- + // ported for the line is the illegal label error instead of the token + // before the ':' that caused the problem. Thus, use the (latest) colon + // position for error reporting. 
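[Editor's note, not part of the diff: what the range-clause handling above ultimately produces. parseSimpleStmt(rangeOk) returns an assignment whose single right-hand side is the unary expression "range x", and parseForStmt (later in this file) rewrites that assignment into an *ast.RangeStmt. The sketch below inspects the resulting node via the current public go/parser and go/ast packages.]

// Illustrative sketch only: the RangeStmt built from a range clause.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

func main() {
	src := `package p
func f(m map[string]int) {
	for k, v := range m {
		_, _ = k, v
	}
}`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if rs, ok := n.(*ast.RangeStmt); ok {
			// Key/Value come from the left-hand side of the assignment that
			// parseSimpleStmt built; X is the operand of the "range x" unary
			// expression that formed its right-hand side.
			fmt.Printf("key=%T value=%T x=%T tok=%s\n", rs.Key, rs.Value, rs.X, rs.Tok)
		}
		return true
	})
}

Note that range clauses stay legal only in for statements: the same construct in an if or switch header (as in the new illegalInputs test cases) is a parse error, since token.RANGE is no longer accepted as a general unary operator.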
+ p.error(colon, "illegal label declaration") + return &ast.BadStmt{x[0].Pos(), colon + 1}, false case token.ARROW: // send statement arrow := p.pos p.next() // consume "<-" y := p.parseRhs() - return &ast.SendStmt{x[0], arrow, y} + return &ast.SendStmt{x[0], arrow, y}, false case token.INC, token.DEC: // increment or decrement s := &ast.IncDecStmt{x[0], p.pos, p.tok} p.next() // consume "++" or "--" - return s + return s, false } // expression - return &ast.ExprStmt{x[0]} + return &ast.ExprStmt{x[0]}, false } - func (p *parser) parseCallExpr() *ast.CallExpr { - x := p.parseRhs() + x := p.parseRhsOrType() // could be a conversion: (some type)(x) if call, isCall := x.(*ast.CallExpr); isCall { return call } @@ -1512,7 +1460,6 @@ func (p *parser) parseCallExpr() *ast.CallExpr { return nil } - func (p *parser) parseGoStmt() ast.Stmt { if p.trace { defer un(trace(p, "GoStmt")) @@ -1528,7 +1475,6 @@ func (p *parser) parseGoStmt() ast.Stmt { return &ast.GoStmt{pos, call} } - func (p *parser) parseDeferStmt() ast.Stmt { if p.trace { defer un(trace(p, "DeferStmt")) @@ -1544,7 +1490,6 @@ func (p *parser) parseDeferStmt() ast.Stmt { return &ast.DeferStmt{pos, call} } - func (p *parser) parseReturnStmt() *ast.ReturnStmt { if p.trace { defer un(trace(p, "ReturnStmt")) @@ -1561,7 +1506,6 @@ func (p *parser) parseReturnStmt() *ast.ReturnStmt { return &ast.ReturnStmt{pos, x} } - func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt { if p.trace { defer un(trace(p, "BranchStmt")) @@ -1580,7 +1524,6 @@ func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt { return &ast.BranchStmt{pos, tok, label} } - func (p *parser) makeExpr(s ast.Stmt) ast.Expr { if s == nil { return nil @@ -1592,7 +1535,6 @@ func (p *parser) makeExpr(s ast.Stmt) ast.Expr { return &ast.BadExpr{s.Pos(), s.End()} } - func (p *parser) parseIfStmt() *ast.IfStmt { if p.trace { defer un(trace(p, "IfStmt")) @@ -1611,7 +1553,7 @@ func (p *parser) parseIfStmt() *ast.IfStmt { p.next() x = p.parseRhs() } else { - s = p.parseSimpleStmt(false) + s, _ = p.parseSimpleStmt(basic) if p.tok == token.SEMICOLON { p.next() x = p.parseRhs() @@ -1635,7 +1577,6 @@ func (p *parser) parseIfStmt() *ast.IfStmt { return &ast.IfStmt{pos, s, x, body, else_} } - func (p *parser) parseTypeList() (list []ast.Expr) { if p.trace { defer un(trace(p, "TypeList")) @@ -1650,7 +1591,6 @@ func (p *parser) parseTypeList() (list []ast.Expr) { return } - func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause { if p.trace { defer un(trace(p, "CaseClause")) @@ -1677,7 +1617,6 @@ func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause { return &ast.CaseClause{pos, list, colon, body} } - func isExprSwitch(s ast.Stmt) bool { if s == nil { return true @@ -1691,7 +1630,6 @@ func isExprSwitch(s ast.Stmt) bool { return false } - func (p *parser) parseSwitchStmt() ast.Stmt { if p.trace { defer un(trace(p, "SwitchStmt")) @@ -1706,14 +1644,14 @@ func (p *parser) parseSwitchStmt() ast.Stmt { prevLev := p.exprLev p.exprLev = -1 if p.tok != token.SEMICOLON { - s2 = p.parseSimpleStmt(false) + s2, _ = p.parseSimpleStmt(basic) } if p.tok == token.SEMICOLON { p.next() s1 = s2 s2 = nil if p.tok != token.LBRACE { - s2 = p.parseSimpleStmt(false) + s2, _ = p.parseSimpleStmt(basic) } } p.exprLev = prevLev @@ -1737,7 +1675,6 @@ func (p *parser) parseSwitchStmt() ast.Stmt { return &ast.TypeSwitchStmt{pos, s1, s2, body} } - func (p *parser) parseCommClause() *ast.CommClause { if p.trace { defer un(trace(p, "CommClause")) @@ -1799,7 +1736,6 @@ func (p *parser) 
parseCommClause() *ast.CommClause { return &ast.CommClause{pos, comm, colon, body} } - func (p *parser) parseSelectStmt() *ast.SelectStmt { if p.trace { defer un(trace(p, "SelectStmt")) @@ -1818,7 +1754,6 @@ func (p *parser) parseSelectStmt() *ast.SelectStmt { return &ast.SelectStmt{pos, body} } - func (p *parser) parseForStmt() ast.Stmt { if p.trace { defer un(trace(p, "ForStmt")) @@ -1829,22 +1764,23 @@ func (p *parser) parseForStmt() ast.Stmt { defer p.closeScope() var s1, s2, s3 ast.Stmt + var isRange bool if p.tok != token.LBRACE { prevLev := p.exprLev p.exprLev = -1 if p.tok != token.SEMICOLON { - s2 = p.parseSimpleStmt(false) + s2, isRange = p.parseSimpleStmt(rangeOk) } - if p.tok == token.SEMICOLON { + if !isRange && p.tok == token.SEMICOLON { p.next() s1 = s2 s2 = nil if p.tok != token.SEMICOLON { - s2 = p.parseSimpleStmt(false) + s2, _ = p.parseSimpleStmt(basic) } p.expectSemi() if p.tok != token.LBRACE { - s3 = p.parseSimpleStmt(false) + s3, _ = p.parseSimpleStmt(basic) } } p.exprLev = prevLev @@ -1853,12 +1789,8 @@ func (p *parser) parseForStmt() ast.Stmt { body := p.parseBlockStmt() p.expectSemi() - if as, isAssign := s2.(*ast.AssignStmt); isAssign { - // possibly a for statement with a range clause; check assignment operator - if as.Tok != token.ASSIGN && as.Tok != token.DEFINE { - p.errorExpected(as.TokPos, "'=' or ':='") - return &ast.BadStmt{pos, body.End()} - } + if isRange { + as := s2.(*ast.AssignStmt) // check lhs var key, value ast.Expr switch len(as.Lhs) { @@ -1870,25 +1802,16 @@ func (p *parser) parseForStmt() ast.Stmt { p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions") return &ast.BadStmt{pos, body.End()} } - // check rhs - if len(as.Rhs) != 1 { - p.errorExpected(as.Rhs[0].Pos(), "1 expression") - return &ast.BadStmt{pos, body.End()} - } - if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE { - // rhs is range expression - // (any short variable declaration was handled by parseSimpleStat above) - return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body} - } - p.errorExpected(s2.Pos(), "range clause") - return &ast.BadStmt{pos, body.End()} + // parseSimpleStmt returned a right-hand side that + // is a single unary expression of the form "range x" + x := as.Rhs[0].(*ast.UnaryExpr).X + return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, x, body} } // regular for statement return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body} } - func (p *parser) parseStmt() (s ast.Stmt) { if p.trace { defer un(trace(p, "Statement")) @@ -1902,7 +1825,7 @@ func (p *parser) parseStmt() (s ast.Stmt) { token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand token.LBRACK, token.STRUCT, // composite type token.MUL, token.AND, token.ARROW, token.ADD, token.SUB, token.XOR: // unary operators - s = p.parseSimpleStmt(true) + s, _ = p.parseSimpleStmt(labelOk) // because of the required look-ahead, labeled statements are // parsed by parseSimpleStmt - don't expect a semicolon after // them @@ -1945,13 +1868,11 @@ func (p *parser) parseStmt() (s ast.Stmt) { return } - // ---------------------------------------------------------------------------- // Declarations type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec - func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { if p.trace { defer un(trace(p, "ImportSpec")) @@ -1982,7 +1903,6 @@ func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { return spec } - func parseConstSpec(p *parser, doc 
*ast.CommentGroup, iota int) ast.Spec { if p.trace { defer un(trace(p, "ConstSpec")) @@ -2007,7 +1927,6 @@ func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec { return spec } - func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { if p.trace { defer un(trace(p, "TypeSpec")) @@ -2029,7 +1948,6 @@ func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { return spec } - func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { if p.trace { defer un(trace(p, "VarSpec")) @@ -2054,7 +1972,6 @@ func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { return spec } - func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl { if p.trace { defer un(trace(p, "GenDecl("+keyword.String()+")")) @@ -2079,7 +1996,6 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.Gen return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen} } - func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList { if p.trace { defer un(trace(p, "Receiver")) @@ -2107,7 +2023,6 @@ func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList { return par } - func (p *parser) parseFuncDecl() *ast.FuncDecl { if p.trace { defer un(trace(p, "FunctionDecl")) @@ -2148,7 +2063,6 @@ func (p *parser) parseFuncDecl() *ast.FuncDecl { return decl } - func (p *parser) parseDecl() ast.Decl { if p.trace { defer un(trace(p, "Declaration")) @@ -2179,7 +2093,6 @@ func (p *parser) parseDecl() ast.Decl { return p.parseGenDecl(p.tok, f) } - func (p *parser) parseDeclList() (list []ast.Decl) { if p.trace { defer un(trace(p, "DeclList")) @@ -2192,7 +2105,6 @@ func (p *parser) parseDeclList() (list []ast.Decl) { return } - // ---------------------------------------------------------------------------- // Source files @@ -2245,6 +2157,5 @@ func (p *parser) parseFile() *ast.File { } } - // TODO(gri): store p.imports in AST return &ast.File{doc, pos, ident, decls, p.pkgScope, p.imports, p.unresolved[0:i], p.comments} } diff --git a/src/pkg/go/parser/parser_test.go b/src/pkg/go/parser/parser_test.go index 5b52f51d4..39a78e515 100644 --- a/src/pkg/go/parser/parser_test.go +++ b/src/pkg/go/parser/parser_test.go @@ -10,7 +10,6 @@ import ( "testing" ) - var fset = token.NewFileSet() var illegalInputs = []interface{}{ @@ -22,9 +21,26 @@ var illegalInputs = []interface{}{ `package p; func f() { if ; /* should have condition */ {} };`, `package p; func f() { if f(); /* should have condition */ {} };`, `package p; const c; /* should have constant value */`, + `package p; func f() { if _ = range x; true {} };`, + `package p; func f() { switch _ = range x; true {} };`, + `package p; func f() { for _ = range x ; ; {} };`, + `package p; func f() { for ; ; _ = range x {} };`, + `package p; func f() { for ; _ = range x ; {} };`, + `package p; var a = [1]int; /* illegal expression */`, + `package p; var a = [...]int; /* illegal expression */`, + `package p; var a = struct{} /* illegal expression */`, + `package p; var a = func(); /* illegal expression */`, + `package p; var a = interface{} /* illegal expression */`, + `package p; var a = []int /* illegal expression */`, + `package p; var a = map[int]int /* illegal expression */`, + `package p; var a = chan int; /* illegal expression */`, + `package p; var a = []int{[]int}; /* illegal expression */`, + `package p; var a = ([]int); /* illegal expression */`, + `package p; var a = a[[]int:[]int]; /* illegal expression */`, + `package p; var a = <- chan int; /* illegal expression */`, + 
`package p; func f() { select { case _ <- chan int: } };`, } - func TestParseIllegalInputs(t *testing.T) { for _, src := range illegalInputs { _, err := ParseFile(fset, "", src, 0) @@ -34,7 +50,6 @@ func TestParseIllegalInputs(t *testing.T) { } } - var validPrograms = []interface{}{ "package p\n", `package p;`, @@ -54,9 +69,9 @@ var validPrograms = []interface{}{ `package p; func f() { select { case x := (<-c): } };`, `package p; func f() { if ; true {} };`, `package p; func f() { switch ; {} };`, + `package p; func f() { for _ = range "foo" + "bar" {} };`, } - func TestParseValidPrograms(t *testing.T) { for _, src := range validPrograms { _, err := ParseFile(fset, "", src, 0) @@ -66,13 +81,11 @@ func TestParseValidPrograms(t *testing.T) { } } - var validFiles = []string{ "parser.go", "parser_test.go", } - func TestParse3(t *testing.T) { for _, filename := range validFiles { _, err := ParseFile(fset, filename, nil, DeclarationErrors) @@ -82,7 +95,6 @@ func TestParse3(t *testing.T) { } } - func nameFilter(filename string) bool { switch filename { case "parser.go": @@ -94,10 +106,8 @@ func nameFilter(filename string) bool { return true } - func dirFilter(f *os.FileInfo) bool { return nameFilter(f.Name) } - func TestParse4(t *testing.T) { path := "." pkgs, err := ParseDir(fset, path, dirFilter, 0) diff --git a/src/pkg/go/printer/nodes.go b/src/pkg/go/printer/nodes.go index f2b79d810..9cd975ec1 100644 --- a/src/pkg/go/printer/nodes.go +++ b/src/pkg/go/printer/nodes.go @@ -14,7 +14,6 @@ import ( "go/token" ) - // Other formatting issues: // - better comment formatting for /*-style comments at the end of a line (e.g. a declaration) // when the comment spans multiple lines; if such a comment is just two lines, formatting is @@ -23,7 +22,6 @@ import ( // - should use blank instead of tab to separate one-line function bodies from // the function header unless there is a group of consecutive one-liners - // ---------------------------------------------------------------------------- // Common AST nodes. @@ -56,7 +54,6 @@ func (p *printer) linebreak(line, min int, ws whiteSpace, newSection bool) (prin return } - // setComment sets g as the next comment if g != nil and if node comments // are enabled - this mode is used when printing source code fragments such // as exports only. It assumes that there are no other pending comments to @@ -78,7 +75,6 @@ func (p *printer) setComment(g *ast.CommentGroup) { p.cindex = 0 } - type exprListMode uint const ( @@ -90,7 +86,6 @@ const ( periodSep // elements are separated by periods ) - // Sets multiLine to true if the identifier list spans multiple lines. // If indent is set, a multi-line identifier list is indented after the // first linebreak encountered. @@ -107,7 +102,6 @@ func (p *printer) identList(list []*ast.Ident, indent bool, multiLine *bool) { p.exprList(token.NoPos, xlist, 1, mode, multiLine, token.NoPos) } - // Print a list of expressions. If the list spans multiple // source lines, the original line breaks are respected between // expressions. Sets multiLine to true if the list spans multiple @@ -271,7 +265,6 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp } } - // Sets multiLine to true if the the parameter list spans multiple lines. 
func (p *printer) parameters(fields *ast.FieldList, multiLine *bool) { p.print(fields.Opening, token.LPAREN) @@ -302,7 +295,6 @@ func (p *printer) parameters(fields *ast.FieldList, multiLine *bool) { p.print(fields.Closing, token.RPAREN) } - // Sets multiLine to true if the signature spans multiple lines. func (p *printer) signature(params, result *ast.FieldList, multiLine *bool) { p.parameters(params, multiLine) @@ -318,7 +310,6 @@ func (p *printer) signature(params, result *ast.FieldList, multiLine *bool) { } } - func identListSize(list []*ast.Ident, maxSize int) (size int) { for i, x := range list { if i > 0 { @@ -332,7 +323,6 @@ func identListSize(list []*ast.Ident, maxSize int) (size int) { return } - func (p *printer) isOneLineFieldList(list []*ast.Field) bool { if len(list) != 1 { return false // allow only one field @@ -351,18 +341,11 @@ func (p *printer) isOneLineFieldList(list []*ast.Field) bool { return namesSize+typeSize <= maxSize } - func (p *printer) setLineComment(text string) { p.setComment(&ast.CommentGroup{[]*ast.Comment{&ast.Comment{token.NoPos, text}}}) } - func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) { - p.nesting++ - defer func() { - p.nesting-- - }() - lbrace := fields.Opening list := fields.List rbrace := fields.Closing @@ -475,7 +458,6 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) p.print(unindent, formfeed, rbrace, token.RBRACE) } - // ---------------------------------------------------------------------------- // Expressions @@ -534,7 +516,6 @@ func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) { return } - func cutoff(e *ast.BinaryExpr, depth int) int { has4, has5, maxProblem := walkBinary(e) if maxProblem > 0 { @@ -552,7 +533,6 @@ func cutoff(e *ast.BinaryExpr, depth int) int { return 4 } - func diffPrec(expr ast.Expr, prec int) int { x, ok := expr.(*ast.BinaryExpr) if !ok || prec != x.Op.Precedence() { @@ -561,7 +541,6 @@ func diffPrec(expr ast.Expr, prec int) int { return 0 } - func reduceDepth(depth int) int { depth-- if depth < 1 { @@ -570,7 +549,6 @@ func reduceDepth(depth int) int { return depth } - // Format the binary expression: decide the cutoff and then format. // Let's call depth == 1 Normal mode, and depth > 1 Compact mode. // (Algorithm suggestion by Russ Cox.) @@ -648,13 +626,11 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiL } } - func isBinary(expr ast.Expr) bool { _, ok := expr.(*ast.BinaryExpr) return ok } - // If the expression contains one or more selector expressions, splits it into // two expressions at the rightmost period. Writes entire expr to suffix when // selector isn't found. Rewrites AST nodes for calls, index expressions and @@ -694,7 +670,6 @@ func splitSelector(expr ast.Expr) (body, suffix ast.Expr) { return } - // Convert an expression into an expression list split at the periods of // selector expressions. func selectorExprList(expr ast.Expr) (list []ast.Expr) { @@ -713,7 +688,6 @@ func selectorExprList(expr ast.Expr) (list []ast.Expr) { return } - // Sets multiLine to true if the expression spans multiple lines. func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) { p.print(expr.Pos()) @@ -900,19 +874,16 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) { return } - func (p *printer) expr0(x ast.Expr, depth int, multiLine *bool) { p.expr1(x, token.LowestPrec, depth, multiLine) } - // Sets multiLine to true if the expression spans multiple lines. 
func (p *printer) expr(x ast.Expr, multiLine *bool) { const depth = 1 p.expr1(x, token.LowestPrec, depth, multiLine) } - // ---------------------------------------------------------------------------- // Statements @@ -937,7 +908,6 @@ func (p *printer) stmtList(list []ast.Stmt, _indent int, nextIsRBrace bool) { } } - // block prints an *ast.BlockStmt; it always spans at least two lines. func (p *printer) block(s *ast.BlockStmt, indent int) { p.print(s.Pos(), token.LBRACE) @@ -946,7 +916,6 @@ func (p *printer) block(s *ast.BlockStmt, indent int) { p.print(s.Rbrace, token.RBRACE) } - func isTypeName(x ast.Expr) bool { switch t := x.(type) { case *ast.Ident: @@ -957,7 +926,6 @@ func isTypeName(x ast.Expr) bool { return false } - func stripParens(x ast.Expr) ast.Expr { if px, strip := x.(*ast.ParenExpr); strip { // parentheses must not be stripped if there are any @@ -984,7 +952,6 @@ func stripParens(x ast.Expr) ast.Expr { return x } - func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, post ast.Stmt) { p.print(blank) needsBlank := false @@ -1019,7 +986,6 @@ func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, po } } - // Sets multiLine to true if the statements spans multiple lines. func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) { p.print(stmt.Pos()) @@ -1193,7 +1159,6 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) { return } - // ---------------------------------------------------------------------------- // Declarations @@ -1262,7 +1227,6 @@ func keepTypeColumn(specs []ast.Spec) []bool { return m } - func (p *printer) valueSpec(s *ast.ValueSpec, keepType, doIndent bool, multiLine *bool) { p.setComment(s.Doc) p.identList(s.Names, doIndent, multiLine) // always present @@ -1287,7 +1251,6 @@ func (p *printer) valueSpec(s *ast.ValueSpec, keepType, doIndent bool, multiLine } } - // The parameter n is the number of specs in the group. If doIndent is set, // multi-line identifier lists in the spec are indented when the first // linebreak is encountered. @@ -1336,7 +1299,6 @@ func (p *printer) spec(spec ast.Spec, n int, doIndent bool, multiLine *bool) { } } - // Sets multiLine to true if the declaration spans multiple lines. func (p *printer) genDecl(d *ast.GenDecl, multiLine *bool) { p.setComment(d.Doc) @@ -1380,7 +1342,6 @@ func (p *printer) genDecl(d *ast.GenDecl, multiLine *bool) { } } - // nodeSize determines the size of n in chars after formatting. // The result is <= maxSize if the node fits on one line with at // most maxSize chars and the formatted output doesn't contain @@ -1418,7 +1379,6 @@ func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) { return } - func (p *printer) isOneLineFunc(b *ast.BlockStmt, headerSize int) bool { pos1 := b.Pos() pos2 := b.Rbrace @@ -1442,18 +1402,12 @@ func (p *printer) isOneLineFunc(b *ast.BlockStmt, headerSize int) bool { return headerSize+bodySize <= maxSize } - // Sets multiLine to true if the function body spans multiple lines. func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLine *bool) { if b == nil { return } - p.nesting++ - defer func() { - p.nesting-- - }() - if p.isOneLineFunc(b, headerSize) { sep := vtab if isLit { @@ -1479,7 +1433,6 @@ func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLi *multiLine = true } - // distance returns the column difference between from and to if both // are on the same line; if they are on different lines (or unknown) // the result is infinity. 
@@ -1491,7 +1444,6 @@ func (p *printer) distance(from0 token.Pos, to token.Position) int { return infinity } - // Sets multiLine to true if the declaration spans multiple lines. func (p *printer) funcDecl(d *ast.FuncDecl, multiLine *bool) { p.setComment(d.Doc) @@ -1505,7 +1457,6 @@ func (p *printer) funcDecl(d *ast.FuncDecl, multiLine *bool) { p.funcBody(d.Body, p.distance(d.Pos(), p.pos), false, multiLine) } - // Sets multiLine to true if the declaration spans multiple lines. func (p *printer) decl(decl ast.Decl, multiLine *bool) { switch d := decl.(type) { @@ -1520,7 +1471,6 @@ func (p *printer) decl(decl ast.Decl, multiLine *bool) { } } - // ---------------------------------------------------------------------------- // Files @@ -1535,7 +1485,6 @@ func declToken(decl ast.Decl) (tok token.Token) { return } - func (p *printer) file(src *ast.File) { p.setComment(src.Doc) p.print(src.Pos(), token.PACKAGE, blank) diff --git a/src/pkg/go/printer/performance_test.go b/src/pkg/go/printer/performance_test.go index 31de0b7ad..84fb2808e 100644 --- a/src/pkg/go/printer/performance_test.go +++ b/src/pkg/go/printer/performance_test.go @@ -17,17 +17,14 @@ import ( "testing" ) - var testfile *ast.File - func testprint(out io.Writer, file *ast.File) { if _, err := (&Config{TabIndent | UseSpaces, 8}).Fprint(out, fset, file); err != nil { log.Fatalf("print error: %s", err) } } - // cannot initialize in init because (printer) Fprint launches goroutines. func initialize() { const filename = "testdata/parser.go" @@ -51,7 +48,6 @@ func initialize() { testfile = file } - func BenchmarkPrint(b *testing.B) { if testfile == nil { initialize() diff --git a/src/pkg/go/printer/printer.go b/src/pkg/go/printer/printer.go index 40b15dd79..871fefa0c 100644 --- a/src/pkg/go/printer/printer.go +++ b/src/pkg/go/printer/printer.go @@ -17,7 +17,6 @@ import ( "tabwriter" ) - const debug = false // enable for debugging @@ -33,7 +32,6 @@ const ( unindent = whiteSpace('<') ) - var ( esc = []byte{tabwriter.Escape} htab = []byte{'\t'} @@ -42,16 +40,13 @@ var ( formfeeds = []byte("\f\f\f\f\f\f\f\f") // more than the max determined by nlines ) - // Special positions var noPos token.Position // use noPos when a position is needed but not known var infinity = 1 << 30 - // Use ignoreMultiLine if the multiLine information is not important. var ignoreMultiLine = new(bool) - // A pmode value represents the current printer mode. type pmode int @@ -60,7 +55,6 @@ const ( noExtraLinebreak ) - type printer struct { // Configuration (does not change after initialization) output io.Writer @@ -69,7 +63,6 @@ type printer struct { errors chan os.Error // Current state - nesting int // nesting level (0: top-level (package scope), >0: functions/decls.) written int // number of bytes written indent int // current indentation mode pmode // current printer mode @@ -98,7 +91,6 @@ type printer struct { nodeSizes map[ast.Node]int } - func (p *printer) init(output io.Writer, cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) { p.output = output p.Config = *cfg @@ -108,7 +100,6 @@ func (p *printer) init(output io.Writer, cfg *Config, fset *token.FileSet, nodeS p.nodeSizes = nodeSizes } - func (p *printer) internalError(msg ...interface{}) { if debug { fmt.Print(p.pos.String() + ": ") @@ -117,7 +108,6 @@ func (p *printer) internalError(msg ...interface{}) { } } - // escape escapes string s by bracketing it with tabwriter.Escape. // Escaped strings pass through tabwriter unchanged. 
(Note that // valid Go programs cannot contain tabwriter.Escape bytes since @@ -131,26 +121,20 @@ func (p *printer) escape(s string) string { return p.litbuf.String() } - // nlines returns the adjusted number of linebreaks given the desired number -// of breaks n such that min <= result <= max where max depends on the current -// nesting level. +// of breaks n such that min <= result <= max. // func (p *printer) nlines(n, min int) int { - if n < min { + const max = 2 // max. number of newlines + switch { + case n < min: return min - } - max := 3 // max. number of newlines at the top level (p.nesting == 0) - if p.nesting > 0 { - max = 2 // max. number of newlines everywhere else - } - if n > max { + case n > max: return max } return n } - // write0 writes raw (uninterpreted) data to p.output and handles errors. // write0 does not indent after newlines, and does not HTML-escape or update p.pos. // @@ -165,7 +149,6 @@ func (p *printer) write0(data []byte) { } } - // write interprets data and writes it to p.output. It inserts indentation // after a line break unless in a tabwriter escape sequence. // It updates p.pos as a side-effect. @@ -220,7 +203,6 @@ func (p *printer) write(data []byte) { p.pos.Column += d } - func (p *printer) writeNewlines(n int, useFF bool) { if n > 0 { n = p.nlines(n, 0) @@ -232,7 +214,6 @@ func (p *printer) writeNewlines(n int, useFF bool) { } } - // writeItem writes data at position pos. data is the text corresponding to // a single lexical token, but may also be comment text. pos is the actual // (or at least very accurately estimated) position of the data in the original @@ -261,7 +242,6 @@ func (p *printer) writeItem(pos token.Position, data string) { p.last = p.pos } - // writeCommentPrefix writes the whitespace before a comment. // If there is any pending whitespace, it consumes as much of // it as is likely to help position the comment nicely. @@ -368,7 +348,6 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment } } - // TODO(gri): It should be possible to convert the code below from using // []byte to string and in the process eliminate some conversions. @@ -398,7 +377,6 @@ func split(text []byte) [][]byte { return lines } - func isBlank(s []byte) bool { for _, b := range s { if b > ' ' { @@ -408,7 +386,6 @@ func isBlank(s []byte) bool { return true } - func commonPrefix(a, b []byte) []byte { i := 0 for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') { @@ -417,7 +394,6 @@ func commonPrefix(a, b []byte) []byte { return a[0:i] } - func stripCommonPrefix(lines [][]byte) { if len(lines) < 2 { return // at most one line - nothing to do @@ -545,7 +521,6 @@ func stripCommonPrefix(lines [][]byte) { } } - func (p *printer) writeComment(comment *ast.Comment) { text := comment.Text @@ -575,7 +550,6 @@ func (p *printer) writeComment(comment *ast.Comment) { } } - // writeCommentSuffix writes a line break after a comment if indicated // and processes any leftover indentation information. If a line break // is needed, the kind of break (newline vs formfeed) depends on the @@ -613,7 +587,6 @@ func (p *printer) writeCommentSuffix(needsLinebreak bool) (droppedFF bool) { return } - // intersperseComments consumes all comments that appear before the next token // tok and prints it together with the buffered whitespace (i.e., the whitespace // that needs to be written before the next token). 
A heuristic is used to mix @@ -651,7 +624,6 @@ func (p *printer) intersperseComments(next token.Position, tok token.Token) (dro return false } - // whiteWhitespace writes the first n whitespace entries. func (p *printer) writeWhitespace(n int) { // write entries @@ -701,7 +673,6 @@ func (p *printer) writeWhitespace(n int) { p.wsbuf = p.wsbuf[0:i] } - // ---------------------------------------------------------------------------- // Printing interface @@ -724,7 +695,6 @@ func mayCombine(prev token.Token, next byte) (b bool) { return } - // print prints a list of "items" (roughly corresponding to syntactic // tokens, but also including whitespace and formatting information). // It is the only print function that should be called directly from @@ -812,7 +782,6 @@ func (p *printer) print(args ...interface{}) { } } - // commentBefore returns true iff the current comment occurs // before the next position in the source code. // @@ -820,7 +789,6 @@ func (p *printer) commentBefore(next token.Position) bool { return p.cindex < len(p.comments) && p.fset.Position(p.comments[p.cindex].List[0].Pos()).Offset < next.Offset } - // Flush prints any pending comments and whitespace occurring // textually before the position of the next token tok. Flush // returns true if a pending formfeed character was dropped @@ -838,7 +806,6 @@ func (p *printer) flush(next token.Position, tok token.Token) (droppedFF bool) { return } - // ---------------------------------------------------------------------------- // Trimmer @@ -854,7 +821,6 @@ type trimmer struct { space bytes.Buffer } - // trimmer is implemented as a state machine. // It can be in one of the following states: const ( @@ -863,7 +829,6 @@ const ( inText // inside text ) - // Design note: It is tempting to eliminate extra blanks occurring in // whitespace in this function as it could simplify some // of the blanks logic in the node printing functions. @@ -941,7 +906,6 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) { return } - // ---------------------------------------------------------------------------- // Public interface @@ -952,14 +916,12 @@ const ( UseSpaces // use spaces instead of tabs for alignment ) - // A Config node controls the output of Fprint. type Config struct { Mode uint // default: 0 Tabwidth int // default: 8 } - // fprint implements Fprint and takes a nodesSizes map for setting up the printer state. func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (int, os.Error) { // redirect output through a trimmer to eliminate trailing whitespace @@ -994,11 +956,9 @@ func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{ go func() { switch n := node.(type) { case ast.Expr: - p.nesting = 1 p.useNodeComments = true p.expr(n, ignoreMultiLine) case ast.Stmt: - p.nesting = 1 p.useNodeComments = true // A labeled statement will un-indent to position the // label. Set indent to 1 so we don't get indent "underflow". 
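[Editor's note, not part of the diff: the visible effect of dropping the printer's nesting level. nlines now caps vertical space at two newlines everywhere, so at most one blank line survives between top-level declarations as well as inside function bodies. The sketch runs the current public go/printer API over a file with excessive blank lines.]

// Illustrative sketch only: runs of blank lines collapse to a single blank line.
package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"os"
)

func main() {
	src := "package p\n\n\n\n\nvar a int\n\n\n\nfunc f() {\n\tx := 1\n\n\n\n\t_ = x\n}\n"
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// With the nesting-based limit gone, top-level and nested code are treated
	// alike: the printed output keeps at most one blank line in each gap.
	if err := printer.Fprint(os.Stdout, fset, file); err != nil {
		panic(err)
	}
}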
@@ -1007,15 +967,12 @@ func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{ } p.stmt(n, false, ignoreMultiLine) case ast.Decl: - p.nesting = 1 p.useNodeComments = true p.decl(n, ignoreMultiLine) case ast.Spec: - p.nesting = 1 p.useNodeComments = true p.spec(n, 1, false, ignoreMultiLine) case *ast.File: - p.nesting = 0 p.comments = n.Comments p.useNodeComments = n.Comments == nil p.file(n) @@ -1036,7 +993,6 @@ func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{ return p.written, err } - // Fprint "pretty-prints" an AST node to output and returns the number // of bytes written and an error (if any) for a given configuration cfg. // Position information is interpreted relative to the file set fset. @@ -1047,7 +1003,6 @@ func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{ return cfg.fprint(output, fset, node, make(map[ast.Node]int)) } - // Fprint "pretty-prints" an AST node to output. // It calls Config.Fprint with default settings. // diff --git a/src/pkg/go/printer/printer_test.go b/src/pkg/go/printer/printer_test.go index 090f92af1..ff2d906b5 100644 --- a/src/pkg/go/printer/printer_test.go +++ b/src/pkg/go/printer/printer_test.go @@ -16,19 +16,15 @@ import ( "time" ) - const ( dataDir = "testdata" tabwidth = 8 ) - var update = flag.Bool("update", false, "update golden files") - var fset = token.NewFileSet() - func lineString(text []byte, i int) string { i0 := i for i < len(text) && text[i] != '\n' { @@ -37,7 +33,6 @@ func lineString(text []byte, i int) string { return string(text[i0:i]) } - type checkMode uint const ( @@ -45,7 +40,6 @@ const ( rawFormat ) - func runcheck(t *testing.T, source, golden string, mode checkMode) { // parse source prog, err := parser.ParseFile(fset, source, nil, parser.ParseComments) @@ -109,7 +103,6 @@ func runcheck(t *testing.T, source, golden string, mode checkMode) { } } - func check(t *testing.T, source, golden string, mode checkMode) { // start a timer to produce a time-out signal tc := make(chan int) @@ -135,7 +128,6 @@ func check(t *testing.T, source, golden string, mode checkMode) { } } - type entry struct { source, golden string mode checkMode @@ -154,7 +146,6 @@ var data = []entry{ {"slow.input", "slow.golden", 0}, } - func TestFiles(t *testing.T) { for i, e := range data { source := filepath.Join(dataDir, e.source) @@ -168,7 +159,6 @@ func TestFiles(t *testing.T) { } } - // TestLineComments, using a simple test case, checks that consequtive line // comments are properly terminated with a newline even if the AST position // information is incorrect. diff --git a/src/pkg/go/printer/testdata/comments.golden b/src/pkg/go/printer/testdata/comments.golden index b177c3571..7b332252c 100644 --- a/src/pkg/go/printer/testdata/comments.golden +++ b/src/pkg/go/printer/testdata/comments.golden @@ -106,7 +106,6 @@ type S3 struct { var x int // x var () - // This comment SHOULD be associated with the next declaration. 
func f0() { const pi = 3.14 // pi @@ -128,12 +127,10 @@ func f1() { f0() } - func _() { // this comment should be properly indented } - func _(x int) int { if x < 0 { // the tab printed before this comment's // must not affect the remaining lines return -x // this statement should be properly indented @@ -144,7 +141,6 @@ func _(x int) int { return x } - func typeswitch(x interface{}) { switch v := x.(type) { case bool, int, float: @@ -211,7 +207,6 @@ func _() { aligned line */ } - func _() { /* freestanding comment @@ -292,7 +287,6 @@ func _() { aligned line */ } - func _() { /* freestanding comment @@ -409,7 +403,6 @@ func _() { */ } - // Some interesting interspersed comments func _( /* this */ x /* is */ /* an */ int) { } @@ -434,7 +427,6 @@ func _() { _ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */ } } - // Comments immediately adjacent to punctuation (for which the go/printer // may only have estimated position information) must remain after the punctuation. func _() { @@ -466,7 +458,6 @@ func _() { } } - // Line comments with tabs func _() { var finput *bufio.Reader // input file @@ -479,5 +470,4 @@ func _() { var lflag bool // -l - disable line directives } - /* This comment is the last entry in this file. It must be printed and should be followed by a newline */ diff --git a/src/pkg/go/printer/testdata/comments.x b/src/pkg/go/printer/testdata/comments.x index 30a182f49..ae7729286 100644 --- a/src/pkg/go/printer/testdata/comments.x +++ b/src/pkg/go/printer/testdata/comments.x @@ -2,7 +2,6 @@ // package main - // The SZ struct; it is empty. type SZ struct{} diff --git a/src/pkg/go/printer/testdata/declarations.golden b/src/pkg/go/printer/testdata/declarations.golden index fac72f651..970533e8c 100644 --- a/src/pkg/go/printer/testdata/declarations.golden +++ b/src/pkg/go/printer/testdata/declarations.golden @@ -44,7 +44,6 @@ import _ "os" import _ "os" import _ "os" - import _ "fmt" import _ "fmt" import _ "fmt" @@ -116,7 +115,6 @@ import _ "io" var _ int - // printing of constant literals const ( _ = "foobar" @@ -158,7 +156,6 @@ const ( bar` ) - func _() { type _ int type _ *int @@ -203,7 +200,6 @@ func _() { var _ func() interface{} } - // don't lose blank lines in grouped declarations const ( _ int = 0 @@ -220,7 +216,6 @@ const ( _ ) - type ( _ int _ struct{} @@ -231,7 +226,6 @@ type ( _ map[string]int ) - var ( _ int = 0 _ float = 1 @@ -244,7 +238,6 @@ var ( _ bool ) - // don't lose blank lines in this struct type _ struct { String struct { @@ -293,7 +286,6 @@ type _ struct { } } - // no tabs for single or ungrouped decls func _() { const xxxxxx = 0 @@ -426,7 +418,6 @@ var ( filterDelay delayTime // actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially ) - // formatting of structs type _ struct{} @@ -494,14 +485,12 @@ type _ struct { r, s float // this line should be indented } - // difficult cases type _ struct { bool // comment text []byte // comment } - // formatting of interfaces type EI interface{} @@ -527,7 +516,6 @@ type _ interface { // this comment must not change indentation gggggggggggg(x, y, z int) // hurray } - // formatting of variable declarations func _() { type day struct { @@ -545,7 +533,6 @@ func _() { ) } - // formatting of multi-line variable declarations var a1, b1, c1 int // all on one line @@ -558,7 +545,6 @@ var ( a4, b4, c4 int // this line should be indented ) - func _() { var privateKey2 = &Block{Type: "RSA PRIVATE KEY", Headers: map[string]string{}, @@ 
-570,7 +556,6 @@ func _() { } } - func _() { var Universe = Scope{ Names: map[string]*Ident{ @@ -614,7 +599,6 @@ func _() { } } - // alignment of map composite entries var _ = map[int]int{ // small key sizes: always align even if size ratios are large @@ -638,21 +622,18 @@ var _ = map[int]int{ abcde: a, // align with previous line } - func _() { var _ = T{ a, // must introduce trailing comma } } - // formatting of function results func _() func() {} func _() func(int) { return nil } func _() func(int) int { return nil } func _() func(int) func(int) func() { return nil } - // formatting of consecutive single-line functions func _() {} func _() {} @@ -680,7 +661,6 @@ func _() int { return x } - // making function declarations safe for new semicolon rules func _() { /* multi-line func because of comment */ } @@ -689,7 +669,6 @@ func _() { /* multi-line func because block is on multiple lines */ } - // ellipsis parameters func _(...int) func _(...*int) @@ -711,7 +690,6 @@ func _(x ...func(...int)) func _(x ...map[string]int) func _(x ...chan int) - // these parameter lists must remain multi-line since they are multi-line in the source func _(bool, int) { diff --git a/src/pkg/go/printer/testdata/expressions.golden b/src/pkg/go/printer/testdata/expressions.golden index a5e2fdc3b..d0cf24ad6 100644 --- a/src/pkg/go/printer/testdata/expressions.golden +++ b/src/pkg/go/printer/testdata/expressions.golden @@ -17,7 +17,6 @@ var ( p *int ) - func _() { // no spaces around simple or parenthesized expressions _ = (a + 0) @@ -115,7 +114,6 @@ func _() { x < y || z > 42 } - func _() { _ = a + b _ = a + b + c @@ -187,7 +185,6 @@ func _() { _ = token(matchType + xlength<<lengthShift + xoffset) } - func f(x int, args ...int) { f(0, args...) f(1, args) @@ -226,7 +223,6 @@ func f(x int, args ...int) { _ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x) } - func _() { _ = T{} _ = struct{}{} @@ -236,7 +232,6 @@ func _() { _ = map[int]T{} } - // one-line structs/interfaces in composite literals (up to a threshold) func _() { _ = struct{}{} @@ -246,7 +241,6 @@ func _() { _ = struct{ s struct{ int } }{struct{ int }{0}} } - func _() { // do not modify literals _ = "tab1 tab2 tab3 end" // string contains 3 tabs @@ -261,7 +255,6 @@ func _() { they must not be removed` } - func _() { // smart handling of indentation for multi-line raw strings var _ = `` @@ -332,7 +325,6 @@ bar` } } - func _() { // one-line function literals (body is on a single line) _ = func() {} @@ -361,7 +353,6 @@ func _() { }) } - func _() { _ = [][]int{ []int{1}, @@ -381,7 +372,6 @@ func _() { _ = [][]int{{1}, {1, 2}, {1, 2, 3}} } - // various multi-line expressions func _() { // do not add extra indentation to multi-line string lists @@ -397,25 +387,21 @@ func _() { } } - const _ = F1 + `string = "%s";` + `ptr = *;` + `datafmt.T2 = s ["-" p "-"];` - const _ = `datafmt "datafmt";` + `default = "%v";` + `array = *;` + `datafmt.T3 = s {" " a a / ","};` - const _ = `datafmt "datafmt";` + `default = "%v";` + `array = *;` + `datafmt.T3 = s {" " a a / ","};` - func _() { _ = F1 + `string = "%s";` + @@ -434,7 +420,6 @@ func _() { `datafmt.T3 = s {" " a a / ","};` } - func _() { // respect source lines in multi-line expressions _ = a + @@ -448,7 +433,6 @@ func _() { _ = "170141183460469231731687303715884105727" // prime } - // Alignment after overlong lines const ( _ = "991" @@ -459,7 +443,6 @@ const ( _ = "170141183460469231731687303715884105727" // prime ) - // Correct placement of operators and comments in multi-line expressions func _() { _ = 
a + // comment @@ -471,7 +454,6 @@ func _() { _ = "ba0408" + "7265717569726564" // field 71, encoding 2, string "required" } - // Correct placement of terminating comma/closing parentheses in multi-line calls. func _() { f(1, @@ -497,7 +479,6 @@ func _() { ) } - // Align comments in multi-line lists of single-line expressions. var txpix = [NCOL]draw.Color{ draw.Yellow, // yellow @@ -512,7 +493,6 @@ var txpix = [NCOL]draw.Color{ draw.Color(0xBB005DFF), /* maroon */ } - func same(t, u *Time) bool { // respect source lines in multi-line expressions return t.Year == u.Year && @@ -526,7 +506,6 @@ func same(t, u *Time) bool { t.Zone == u.Zone } - func (p *parser) charClass() { // respect source lines in multi-line expressions if cc.negate && len(cc.ranges) == 2 && @@ -536,7 +515,6 @@ func (p *parser) charClass() { } } - func addState(s []state, inst instr, match []int) { // handle comments correctly in multi-line expressions for i := 0; i < l; i++ { @@ -639,7 +617,6 @@ func _() { c } - // Don't introduce extra newlines in strangely formatted expression lists. func f() { // os.Open parameters should remain on two lines diff --git a/src/pkg/go/printer/testdata/expressions.raw b/src/pkg/go/printer/testdata/expressions.raw index 308d9edff..d7819a3ba 100644 --- a/src/pkg/go/printer/testdata/expressions.raw +++ b/src/pkg/go/printer/testdata/expressions.raw @@ -17,7 +17,6 @@ var ( p *int ) - func _() { // no spaces around simple or parenthesized expressions _ = (a + 0) @@ -115,7 +114,6 @@ func _() { x < y || z > 42 } - func _() { _ = a + b _ = a + b + c @@ -187,7 +185,6 @@ func _() { _ = token(matchType + xlength<<lengthShift + xoffset) } - func f(x int, args ...int) { f(0, args...) f(1, args) @@ -226,7 +223,6 @@ func f(x int, args ...int) { _ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x) } - func _() { _ = T{} _ = struct{}{} @@ -236,7 +232,6 @@ func _() { _ = map[int]T{} } - // one-line structs/interfaces in composite literals (up to a threshold) func _() { _ = struct{}{} @@ -246,7 +241,6 @@ func _() { _ = struct{ s struct{ int } }{struct{ int }{0}} } - func _() { // do not modify literals _ = "tab1 tab2 tab3 end" // string contains 3 tabs @@ -261,7 +255,6 @@ func _() { they must not be removed` } - func _() { // smart handling of indentation for multi-line raw strings var _ = `` @@ -332,7 +325,6 @@ bar` } } - func _() { // one-line function literals (body is on a single line) _ = func() {} @@ -361,7 +353,6 @@ func _() { }) } - func _() { _ = [][]int{ []int{1}, @@ -381,7 +372,6 @@ func _() { _ = [][]int{{1}, {1, 2}, {1, 2, 3}} } - // various multi-line expressions func _() { // do not add extra indentation to multi-line string lists @@ -397,25 +387,21 @@ func _() { } } - const _ = F1 + `string = "%s";` + `ptr = *;` + `datafmt.T2 = s ["-" p "-"];` - const _ = `datafmt "datafmt";` + `default = "%v";` + `array = *;` + `datafmt.T3 = s {" " a a / ","};` - const _ = `datafmt "datafmt";` + `default = "%v";` + `array = *;` + `datafmt.T3 = s {" " a a / ","};` - func _() { _ = F1 + `string = "%s";` + @@ -434,7 +420,6 @@ func _() { `datafmt.T3 = s {" " a a / ","};` } - func _() { // respect source lines in multi-line expressions _ = a + @@ -448,7 +433,6 @@ func _() { _ = "170141183460469231731687303715884105727" // prime } - // Alignment after overlong lines const ( _ = "991" @@ -459,7 +443,6 @@ const ( _ = "170141183460469231731687303715884105727" // prime ) - // Correct placement of operators and comments in multi-line expressions func _() { _ = a + // comment @@ -471,7 +454,6 @@ func _() { _ = 
"ba0408" + "7265717569726564" // field 71, encoding 2, string "required" } - // Correct placement of terminating comma/closing parentheses in multi-line calls. func _() { f(1, @@ -497,7 +479,6 @@ func _() { ) } - // Align comments in multi-line lists of single-line expressions. var txpix = [NCOL]draw.Color{ draw.Yellow, // yellow @@ -512,7 +493,6 @@ var txpix = [NCOL]draw.Color{ draw.Color(0xBB005DFF), /* maroon */ } - func same(t, u *Time) bool { // respect source lines in multi-line expressions return t.Year == u.Year && @@ -526,7 +506,6 @@ func same(t, u *Time) bool { t.Zone == u.Zone } - func (p *parser) charClass() { // respect source lines in multi-line expressions if cc.negate && len(cc.ranges) == 2 && @@ -536,7 +515,6 @@ func (p *parser) charClass() { } } - func addState(s []state, inst instr, match []int) { // handle comments correctly in multi-line expressions for i := 0; i < l; i++ { @@ -639,7 +617,6 @@ func _() { c } - // Don't introduce extra newlines in strangely formatted expression lists. func f() { // os.Open parameters should remain on two lines diff --git a/src/pkg/go/printer/testdata/parser.go b/src/pkg/go/printer/testdata/parser.go index 5c57e41d1..2d27af499 100644 --- a/src/pkg/go/printer/testdata/parser.go +++ b/src/pkg/go/printer/testdata/parser.go @@ -16,7 +16,6 @@ import ( "go/token" ) - // The mode parameter to the Parse* functions is a set of flags (or 0). // They control the amount of source code parsed and other optional // parser functionality. @@ -29,7 +28,6 @@ const ( DeclarationErrors // report declaration errors ) - // The parser structure holds the parser's internal state. type parser struct { file *token.File @@ -66,7 +64,6 @@ type parser struct { targetStack [][]*ast.Ident // stack of unresolved labels } - // scannerMode returns the scanner mode bits given the parser's mode bits. func scannerMode(mode uint) uint { var m uint = scanner.InsertSemis @@ -76,7 +73,6 @@ func scannerMode(mode uint) uint { return m } - func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) { p.file = fset.AddFile(filename, fset.Base(), len(src)) p.scanner.Init(p.file, src, p, scannerMode(mode)) @@ -95,7 +91,6 @@ func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uin p.openLabelScope() } - // ---------------------------------------------------------------------------- // Scoping support @@ -103,18 +98,15 @@ func (p *parser) openScope() { p.topScope = ast.NewScope(p.topScope) } - func (p *parser) closeScope() { p.topScope = p.topScope.Outer } - func (p *parser) openLabelScope() { p.labelScope = ast.NewScope(p.labelScope) p.targetStack = append(p.targetStack, nil) } - func (p *parser) closeLabelScope() { // resolve labels n := len(p.targetStack) - 1 @@ -130,7 +122,6 @@ func (p *parser) closeLabelScope() { p.labelScope = p.labelScope.Outer } - func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) { for _, ident := range idents { assert(ident.Obj == nil, "identifier already declared or resolved") @@ -151,7 +142,6 @@ func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, i } } - func (p *parser) shortVarDecl(idents []*ast.Ident) { // Go spec: A short variable declaration may redeclare variables // provided they were originally declared in the same block with @@ -177,13 +167,11 @@ func (p *parser) shortVarDecl(idents []*ast.Ident) { } } - // The unresolved object is a sentinel to mark identifiers that have been added // to the list of unresolved identifiers. 
The sentinel is only used for verifying // internal consistency. var unresolved = new(ast.Object) - func (p *parser) resolve(x ast.Expr) { // nothing to do if x is not an identifier or the blank identifier ident, _ := x.(*ast.Ident) @@ -209,7 +197,6 @@ func (p *parser) resolve(x ast.Expr) { p.unresolved = append(p.unresolved, ident) } - // ---------------------------------------------------------------------------- // Parsing support @@ -227,21 +214,18 @@ func (p *parser) printTrace(a ...interface{}) { fmt.Println(a...) } - func trace(p *parser, msg string) *parser { p.printTrace(msg, "(") p.indent++ return p } - // Usage pattern: defer un(trace(p, "...")); func un(p *parser) { p.indent-- p.printTrace(")") } - // Advance to the next token. func (p *parser) next0() { // Because of one-token look-ahead, print the previous token @@ -283,7 +267,6 @@ func (p *parser) consumeComment() (comment *ast.Comment, endline int) { return } - // Consume a group of adjacent comments, add it to the parser's // comments list, and return it together with the line at which // the last comment in the group ends. An empty line or non-comment @@ -305,7 +288,6 @@ func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int) return } - // Advance to the next non-comment token. In the process, collect // any comment groups encountered, and remember the last lead and // and line comments. @@ -356,12 +338,10 @@ func (p *parser) next() { } } - func (p *parser) error(pos token.Pos, msg string) { p.Error(p.file.Position(pos), msg) } - func (p *parser) errorExpected(pos token.Pos, msg string) { msg = "expected " + msg if pos == p.pos { @@ -379,7 +359,6 @@ func (p *parser) errorExpected(pos token.Pos, msg string) { p.error(pos, msg) } - func (p *parser) expect(tok token.Token) token.Pos { pos := p.pos if p.tok != tok { @@ -389,21 +368,18 @@ func (p *parser) expect(tok token.Token) token.Pos { return pos } - func (p *parser) expectSemi() { if p.tok != token.RPAREN && p.tok != token.RBRACE { p.expect(token.SEMICOLON) } } - func assert(cond bool, msg string) { if !cond { panic("go/parser internal error: " + msg) } } - // ---------------------------------------------------------------------------- // Identifiers @@ -419,7 +395,6 @@ func (p *parser) parseIdent() *ast.Ident { return &ast.Ident{pos, name, nil} } - func (p *parser) parseIdentList() (list []*ast.Ident) { if p.trace { defer un(trace(p, "IdentList")) @@ -434,7 +409,6 @@ func (p *parser) parseIdentList() (list []*ast.Ident) { return } - // ---------------------------------------------------------------------------- // Common productions @@ -453,7 +427,6 @@ func (p *parser) parseExprList(lhs bool) (list []ast.Expr) { return } - func (p *parser) parseLhsList() []ast.Expr { list := p.parseExprList(true) switch p.tok { @@ -477,12 +450,10 @@ func (p *parser) parseLhsList() []ast.Expr { return list } - func (p *parser) parseRhsList() []ast.Expr { return p.parseExprList(false) } - // ---------------------------------------------------------------------------- // Types @@ -503,7 +474,6 @@ func (p *parser) parseType() ast.Expr { return typ } - // If the result is an identifier, it is not resolved. 
func (p *parser) parseTypeName() ast.Expr { if p.trace { @@ -524,7 +494,6 @@ func (p *parser) parseTypeName() ast.Expr { return ident } - func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr { if p.trace { defer un(trace(p, "ArrayType")) @@ -544,7 +513,6 @@ func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr { return &ast.ArrayType{lbrack, len, elt} } - func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident { idents := make([]*ast.Ident, len(list)) for i, x := range list { @@ -559,7 +527,6 @@ func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident { return idents } - func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field { if p.trace { defer un(trace(p, "FieldDecl")) @@ -601,7 +568,6 @@ func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field { return field } - func (p *parser) parseStructType() *ast.StructType { if p.trace { defer un(trace(p, "StructType")) @@ -623,7 +589,6 @@ func (p *parser) parseStructType() *ast.StructType { return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false} } - func (p *parser) parsePointerType() *ast.StarExpr { if p.trace { defer un(trace(p, "PointerType")) @@ -635,7 +600,6 @@ func (p *parser) parsePointerType() *ast.StarExpr { return &ast.StarExpr{star, base} } - func (p *parser) tryVarType(isParam bool) ast.Expr { if isParam && p.tok == token.ELLIPSIS { pos := p.pos @@ -653,7 +617,6 @@ func (p *parser) tryVarType(isParam bool) ast.Expr { return p.tryIdentOrType(false) } - func (p *parser) parseVarType(isParam bool) ast.Expr { typ := p.tryVarType(isParam) if typ == nil { @@ -665,7 +628,6 @@ func (p *parser) parseVarType(isParam bool) ast.Expr { return typ } - func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) { if p.trace { defer un(trace(p, "VarList")) @@ -693,7 +655,6 @@ func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) { return } - func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) { if p.trace { defer un(trace(p, "ParameterList")) @@ -738,7 +699,6 @@ func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params [ return } - func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList { if p.trace { defer un(trace(p, "Parameters")) @@ -754,7 +714,6 @@ func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldLi return &ast.FieldList{lparen, params, rparen} } - func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList { if p.trace { defer un(trace(p, "Result")) @@ -774,7 +733,6 @@ func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList { return nil } - func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) { if p.trace { defer un(trace(p, "Signature")) @@ -786,7 +744,6 @@ func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldLis return } - func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) { if p.trace { defer un(trace(p, "FuncType")) @@ -799,7 +756,6 @@ func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) { return &ast.FuncType{pos, params, results}, scope } - func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field { if p.trace { defer un(trace(p, "MethodSpec")) @@ -827,7 +783,6 @@ func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field { return spec } - func (p *parser) parseInterfaceType() *ast.InterfaceType { if p.trace { defer un(trace(p, "InterfaceType")) @@ -846,7 +801,6 @@ func (p *parser) parseInterfaceType() *ast.InterfaceType { return &ast.InterfaceType{pos, 
&ast.FieldList{lbrace, list, rbrace}, false} } - func (p *parser) parseMapType() *ast.MapType { if p.trace { defer un(trace(p, "MapType")) @@ -861,7 +815,6 @@ func (p *parser) parseMapType() *ast.MapType { return &ast.MapType{pos, key, value} } - func (p *parser) parseChanType() *ast.ChanType { if p.trace { defer un(trace(p, "ChanType")) @@ -885,7 +838,6 @@ func (p *parser) parseChanType() *ast.ChanType { return &ast.ChanType{pos, dir, value} } - // If the result is an identifier, it is not resolved. func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr { switch p.tok { @@ -918,7 +870,6 @@ func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr { return nil } - func (p *parser) tryType() ast.Expr { typ := p.tryIdentOrType(false) if typ != nil { @@ -927,7 +878,6 @@ func (p *parser) tryType() ast.Expr { return typ } - // ---------------------------------------------------------------------------- // Blocks @@ -943,7 +893,6 @@ func (p *parser) parseStmtList() (list []ast.Stmt) { return } - func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt { if p.trace { defer un(trace(p, "Body")) @@ -960,7 +909,6 @@ func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt { return &ast.BlockStmt{lbrace, list, rbrace} } - func (p *parser) parseBlockStmt() *ast.BlockStmt { if p.trace { defer un(trace(p, "BlockStmt")) @@ -975,7 +923,6 @@ func (p *parser) parseBlockStmt() *ast.BlockStmt { return &ast.BlockStmt{lbrace, list, rbrace} } - // ---------------------------------------------------------------------------- // Expressions @@ -997,7 +944,6 @@ func (p *parser) parseFuncTypeOrLit() ast.Expr { return &ast.FuncLit{typ, body} } - // parseOperand may return an expression or a raw type (incl. array // types of the form [...]T. Callers must verify the result. // If lhs is set and the result is an identifier, it is not resolved. 
@@ -1047,7 +993,6 @@ func (p *parser) parseOperand(lhs bool) ast.Expr { return &ast.BadExpr{pos, p.pos} } - func (p *parser) parseSelector(x ast.Expr) ast.Expr { if p.trace { defer un(trace(p, "Selector")) @@ -1058,7 +1003,6 @@ func (p *parser) parseSelector(x ast.Expr) ast.Expr { return &ast.SelectorExpr{x, sel} } - func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr { if p.trace { defer un(trace(p, "TypeAssertion")) @@ -1077,7 +1021,6 @@ func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr { return &ast.TypeAssertExpr{x, typ} } - func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr { if p.trace { defer un(trace(p, "IndexOrSlice")) @@ -1106,7 +1049,6 @@ func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr { return &ast.IndexExpr{x, lbrack, low, rbrack} } - func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr { if p.trace { defer un(trace(p, "CallOrConversion")) @@ -1133,7 +1075,6 @@ func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr { return &ast.CallExpr{fun, lparen, list, ellipsis, rparen} } - func (p *parser) parseElement(keyOk bool) ast.Expr { if p.trace { defer un(trace(p, "Element")) @@ -1156,7 +1097,6 @@ func (p *parser) parseElement(keyOk bool) ast.Expr { return x } - func (p *parser) parseElementList() (list []ast.Expr) { if p.trace { defer un(trace(p, "ElementList")) @@ -1173,7 +1113,6 @@ func (p *parser) parseElementList() (list []ast.Expr) { return } - func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr { if p.trace { defer un(trace(p, "LiteralValue")) @@ -1190,7 +1129,6 @@ func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr { return &ast.CompositeLit{typ, lbrace, elts, rbrace} } - // checkExpr checks that x is an expression (and not a type). func (p *parser) checkExpr(x ast.Expr) ast.Expr { switch t := unparen(x).(type) { @@ -1227,7 +1165,6 @@ func (p *parser) checkExpr(x ast.Expr) ast.Expr { return x } - // isTypeName returns true iff x is a (qualified) TypeName. func isTypeName(x ast.Expr) bool { switch t := x.(type) { @@ -1242,7 +1179,6 @@ func isTypeName(x ast.Expr) bool { return true } - // isLiteralType returns true iff x is a legal composite literal type. func isLiteralType(x ast.Expr) bool { switch t := x.(type) { @@ -1260,7 +1196,6 @@ func isLiteralType(x ast.Expr) bool { return true } - // If x is of the form *T, deref returns T, otherwise it returns x. func deref(x ast.Expr) ast.Expr { if p, isPtr := x.(*ast.StarExpr); isPtr { @@ -1269,7 +1204,6 @@ func deref(x ast.Expr) ast.Expr { return x } - // If x is of the form (T), unparen returns unparen(T), otherwise it returns x. func unparen(x ast.Expr) ast.Expr { if p, isParen := x.(*ast.ParenExpr); isParen { @@ -1278,7 +1212,6 @@ func unparen(x ast.Expr) ast.Expr { return x } - // checkExprOrType checks that x is an expression or a type // (and not a raw type such as [...]T). // @@ -1303,7 +1236,6 @@ func (p *parser) checkExprOrType(x ast.Expr) ast.Expr { return x } - // If lhs is set and the result is an identifier, it is not resolved. func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr { if p.trace { @@ -1358,7 +1290,6 @@ L: return x } - // If lhs is set and the result is an identifier, it is not resolved. func (p *parser) parseUnaryExpr(lhs bool) ast.Expr { if p.trace { @@ -1396,7 +1327,6 @@ func (p *parser) parseUnaryExpr(lhs bool) ast.Expr { return p.parsePrimaryExpr(lhs) } - // If lhs is set and the result is an identifier, it is not resolved. 
func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr { if p.trace { @@ -1420,7 +1350,6 @@ func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr { return x } - // If lhs is set and the result is an identifier, it is not resolved. // TODO(gri): parseExpr may return a type or even a raw type ([..]int) - // should reject when a type/raw type is obviously not allowed @@ -1432,12 +1361,10 @@ func (p *parser) parseExpr(lhs bool) ast.Expr { return p.parseBinaryExpr(lhs, token.LowestPrec+1) } - func (p *parser) parseRhs() ast.Expr { return p.parseExpr(false) } - // ---------------------------------------------------------------------------- // Statements @@ -1500,7 +1427,6 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { return &ast.ExprStmt{x[0]} } - func (p *parser) parseCallExpr() *ast.CallExpr { x := p.parseRhs() if call, isCall := x.(*ast.CallExpr); isCall { @@ -1510,7 +1436,6 @@ func (p *parser) parseCallExpr() *ast.CallExpr { return nil } - func (p *parser) parseGoStmt() ast.Stmt { if p.trace { defer un(trace(p, "GoStmt")) @@ -1526,7 +1451,6 @@ func (p *parser) parseGoStmt() ast.Stmt { return &ast.GoStmt{pos, call} } - func (p *parser) parseDeferStmt() ast.Stmt { if p.trace { defer un(trace(p, "DeferStmt")) @@ -1542,7 +1466,6 @@ func (p *parser) parseDeferStmt() ast.Stmt { return &ast.DeferStmt{pos, call} } - func (p *parser) parseReturnStmt() *ast.ReturnStmt { if p.trace { defer un(trace(p, "ReturnStmt")) @@ -1559,7 +1482,6 @@ func (p *parser) parseReturnStmt() *ast.ReturnStmt { return &ast.ReturnStmt{pos, x} } - func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt { if p.trace { defer un(trace(p, "BranchStmt")) @@ -1578,7 +1500,6 @@ func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt { return &ast.BranchStmt{pos, tok, label} } - func (p *parser) makeExpr(s ast.Stmt) ast.Expr { if s == nil { return nil @@ -1590,7 +1511,6 @@ func (p *parser) makeExpr(s ast.Stmt) ast.Expr { return &ast.BadExpr{s.Pos(), s.End()} } - func (p *parser) parseIfStmt() *ast.IfStmt { if p.trace { defer un(trace(p, "IfStmt")) @@ -1633,7 +1553,6 @@ func (p *parser) parseIfStmt() *ast.IfStmt { return &ast.IfStmt{pos, s, x, body, else_} } - func (p *parser) parseTypeList() (list []ast.Expr) { if p.trace { defer un(trace(p, "TypeList")) @@ -1648,7 +1567,6 @@ func (p *parser) parseTypeList() (list []ast.Expr) { return } - func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause { if p.trace { defer un(trace(p, "CaseClause")) @@ -1675,7 +1593,6 @@ func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause { return &ast.CaseClause{pos, list, colon, body} } - func isExprSwitch(s ast.Stmt) bool { if s == nil { return true @@ -1689,7 +1606,6 @@ func isExprSwitch(s ast.Stmt) bool { return false } - func (p *parser) parseSwitchStmt() ast.Stmt { if p.trace { defer un(trace(p, "SwitchStmt")) @@ -1735,7 +1651,6 @@ func (p *parser) parseSwitchStmt() ast.Stmt { return &ast.TypeSwitchStmt{pos, s1, s2, body} } - func (p *parser) parseCommClause() *ast.CommClause { if p.trace { defer un(trace(p, "CommClause")) @@ -1801,7 +1716,6 @@ func (p *parser) parseCommClause() *ast.CommClause { return &ast.CommClause{pos, comm, colon, body} } - func (p *parser) parseSelectStmt() *ast.SelectStmt { if p.trace { defer un(trace(p, "SelectStmt")) @@ -1820,7 +1734,6 @@ func (p *parser) parseSelectStmt() *ast.SelectStmt { return &ast.SelectStmt{pos, body} } - func (p *parser) parseForStmt() ast.Stmt { if p.trace { defer un(trace(p, "ForStmt")) @@ -1890,7 +1803,6 @@ func (p *parser) 
parseForStmt() ast.Stmt { return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body} } - func (p *parser) parseStmt() (s ast.Stmt) { if p.trace { defer un(trace(p, "Statement")) @@ -1947,13 +1859,11 @@ func (p *parser) parseStmt() (s ast.Stmt) { return } - // ---------------------------------------------------------------------------- // Declarations type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec - func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { if p.trace { defer un(trace(p, "ImportSpec")) @@ -1984,7 +1894,6 @@ func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { return spec } - func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec { if p.trace { defer un(trace(p, "ConstSpec")) @@ -2009,7 +1918,6 @@ func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec { return spec } - func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { if p.trace { defer un(trace(p, "TypeSpec")) @@ -2031,7 +1939,6 @@ func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { return spec } - func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { if p.trace { defer un(trace(p, "VarSpec")) @@ -2056,7 +1963,6 @@ func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { return spec } - func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl { if p.trace { defer un(trace(p, "GenDecl("+keyword.String()+")")) @@ -2081,7 +1987,6 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.Gen return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen} } - func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList { if p.trace { defer un(trace(p, "Receiver")) @@ -2109,7 +2014,6 @@ func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList { return par } - func (p *parser) parseFuncDecl() *ast.FuncDecl { if p.trace { defer un(trace(p, "FunctionDecl")) @@ -2150,7 +2054,6 @@ func (p *parser) parseFuncDecl() *ast.FuncDecl { return decl } - func (p *parser) parseDecl() ast.Decl { if p.trace { defer un(trace(p, "Declaration")) @@ -2181,7 +2084,6 @@ func (p *parser) parseDecl() ast.Decl { return p.parseGenDecl(p.tok, f) } - func (p *parser) parseDeclList() (list []ast.Decl) { if p.trace { defer un(trace(p, "DeclList")) @@ -2194,7 +2096,6 @@ func (p *parser) parseDeclList() (list []ast.Decl) { return } - // ---------------------------------------------------------------------------- // Source files diff --git a/src/pkg/go/printer/testdata/statements.golden b/src/pkg/go/printer/testdata/statements.golden index 0e4840441..a6d85107f 100644 --- a/src/pkg/go/printer/testdata/statements.golden +++ b/src/pkg/go/printer/testdata/statements.golden @@ -30,7 +30,6 @@ func _() { } } - // Formatting of switch-statement headers. func _() { switch { @@ -56,7 +55,6 @@ func _() { } } - // Formatting of switch statement bodies. func _() { switch { @@ -110,7 +108,6 @@ func _() { } } - // Formatting of selected select statements. func _() { select {} @@ -125,7 +122,6 @@ func _() { } } - // Formatting of for-statement headers. func _() { for { @@ -164,7 +160,6 @@ func _() { } // no parens printed } - // Don't remove mandatory parentheses around composite literals in control clauses. func _() { // strip parentheses - no composite literals or composite literals don't start with a type name @@ -258,7 +253,6 @@ func _() { } } - // Extra empty lines inside functions. 
Do respect source code line // breaks between statement boundaries but print at most one empty // line at a time. @@ -291,19 +285,16 @@ func _() { } } - // Formatting around labels. func _() { L: } - func _() { // this comment should be indented L: // no semicolon needed } - func _() { switch 0 { case 0: @@ -317,7 +308,6 @@ func _() { } } - func _() { f() L1: @@ -327,26 +317,22 @@ L2: L3: } - func _() { // this comment should be indented L: } - func _() { L: _ = 0 } - func _() { // this comment should be indented L: _ = 0 } - func _() { for { L1: @@ -356,7 +342,6 @@ func _() { } } - func _() { // this comment should be indented for { @@ -367,7 +352,6 @@ func _() { } } - func _() { if true { _ = 0 @@ -385,7 +369,6 @@ L: _ = 0 } - func _() { for { goto L @@ -395,7 +378,6 @@ L: MoreCode() } - func _() { for { goto L @@ -408,7 +390,6 @@ L: // A comment on the same line as the label, followed by a single empty line. MoreCode() } - func _() { for { goto L @@ -419,7 +400,6 @@ L: MoreCode() } - func _() { for { goto AVeryLongLabelThatShouldNotAffectFormatting diff --git a/src/pkg/go/scanner/errors.go b/src/pkg/go/scanner/errors.go index 47e35a710..a0927e416 100644 --- a/src/pkg/go/scanner/errors.go +++ b/src/pkg/go/scanner/errors.go @@ -5,7 +5,6 @@ package scanner import ( - "container/vector" "fmt" "go/token" "io" @@ -13,7 +12,6 @@ import ( "sort" ) - // An implementation of an ErrorHandler may be provided to the Scanner. // If a syntax error is encountered and a handler was installed, Error // is called with a position and an error message. The position points @@ -23,7 +21,6 @@ type ErrorHandler interface { Error(pos token.Position, msg string) } - // ErrorVector implements the ErrorHandler interface. It maintains a list // of errors which can be retrieved with GetErrorList and GetError. The // zero value for an ErrorVector is an empty ErrorVector ready to use. @@ -34,17 +31,14 @@ type ErrorHandler interface { // error handling is obtained. // type ErrorVector struct { - errors vector.Vector + errors []*Error } - // Reset resets an ErrorVector to no errors. -func (h *ErrorVector) Reset() { h.errors.Resize(0, 0) } - +func (h *ErrorVector) Reset() { h.errors = h.errors[:0] } // ErrorCount returns the number of errors collected. -func (h *ErrorVector) ErrorCount() int { return h.errors.Len() } - +func (h *ErrorVector) ErrorCount() int { return len(h.errors) } // Within ErrorVector, an error is represented by an Error node. The // position Pos, if valid, points to the beginning of the offending @@ -55,7 +49,6 @@ type Error struct { Msg string } - func (e *Error) String() string { if e.Pos.Filename != "" || e.Pos.IsValid() { // don't print "<unknown position>" @@ -65,16 +58,13 @@ func (e *Error) String() string { return e.Msg } - // An ErrorList is a (possibly sorted) list of Errors. type ErrorList []*Error - // ErrorList implements the sort Interface. func (p ErrorList) Len() int { return len(p) } func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - func (p ErrorList) Less(i, j int) bool { e := &p[i].Pos f := &p[j].Pos @@ -95,7 +85,6 @@ func (p ErrorList) Less(i, j int) bool { return false } - func (p ErrorList) String() string { switch len(p) { case 0: @@ -106,7 +95,6 @@ func (p ErrorList) String() string { return fmt.Sprintf("%s (and %d more errors)", p[0].String(), len(p)-1) } - // These constants control the construction of the ErrorList // returned by GetErrors. 
// @@ -116,20 +104,17 @@ const ( NoMultiples // sort error list and leave only the first error per line ) - // GetErrorList returns the list of errors collected by an ErrorVector. // The construction of the ErrorList returned is controlled by the mode // parameter. If there are no errors, the result is nil. // func (h *ErrorVector) GetErrorList(mode int) ErrorList { - if h.errors.Len() == 0 { + if len(h.errors) == 0 { return nil } - list := make(ErrorList, h.errors.Len()) - for i := 0; i < h.errors.Len(); i++ { - list[i] = h.errors.At(i).(*Error) - } + list := make(ErrorList, len(h.errors)) + copy(list, h.errors) if mode >= Sorted { sort.Sort(list) @@ -151,26 +136,23 @@ func (h *ErrorVector) GetErrorList(mode int) ErrorList { return list } - // GetError is like GetErrorList, but it returns an os.Error instead // so that a nil result can be assigned to an os.Error variable and // remains nil. // func (h *ErrorVector) GetError(mode int) os.Error { - if h.errors.Len() == 0 { + if len(h.errors) == 0 { return nil } return h.GetErrorList(mode) } - // ErrorVector implements the ErrorHandler interface. func (h *ErrorVector) Error(pos token.Position, msg string) { - h.errors.Push(&Error{pos, msg}) + h.errors = append(h.errors, &Error{pos, msg}) } - // PrintError is a utility function that prints a list of errors to w, // one error per line, if the err parameter is an ErrorList. Otherwise // it prints the err string. diff --git a/src/pkg/go/scanner/scanner.go b/src/pkg/go/scanner/scanner.go index 795f0ac15..7f3dd2373 100644 --- a/src/pkg/go/scanner/scanner.go +++ b/src/pkg/go/scanner/scanner.go @@ -30,7 +30,6 @@ import ( "utf8" ) - // A Scanner holds the scanner's internal state while processing // a given text. It can be allocated as part of another data // structure but must be initialized via Init before use. @@ -54,7 +53,6 @@ type Scanner struct { ErrorCount int // number of errors encountered } - // Read the next Unicode char into S.ch. // S.ch < 0 means end-of-file. // @@ -88,7 +86,6 @@ func (S *Scanner) next() { } } - // The mode parameter to the Init function is a set of flags (or 0). // They control scanner behavior. 
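The errors.go hunks above replace container/vector with a plain []*Error, so Reset, ErrorCount, GetErrorList, and Error become ordinary slice operations (reslice, len, make+copy, append). A minimal sketch of that slice-backed collector pattern, using an illustrative collector type rather than the scanner's ErrorVector:

package main

import "fmt"

type Error struct {
	Line int
	Msg  string
}

// collector mirrors the slice-backed ErrorVector: a bare slice of errors.
type collector struct {
	errors []*Error
}

func (c *collector) Reset()          { c.errors = c.errors[:0] } // was errors.Resize(0, 0)
func (c *collector) ErrorCount() int { return len(c.errors) }    // was errors.Len()

func (c *collector) Error(line int, msg string) {
	c.errors = append(c.errors, &Error{line, msg}) // was errors.Push(...)
}

// List returns a copy of the collected errors, like GetErrorList's make+copy.
func (c *collector) List() []*Error {
	if len(c.errors) == 0 {
		return nil
	}
	list := make([]*Error, len(c.errors))
	copy(list, c.errors)
	return list
}

func main() {
	var c collector
	c.Error(1, "unexpected token")
	c.Error(3, "missing ')'")
	fmt.Println(c.ErrorCount(), len(c.List())) // 2 2
}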
// @@ -134,7 +131,6 @@ func (S *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode uint S.next() } - func (S *Scanner) error(offs int, msg string) { if S.err != nil { S.err.Error(S.file.Position(S.file.Pos(offs)), msg) @@ -142,7 +138,6 @@ func (S *Scanner) error(offs int, msg string) { S.ErrorCount++ } - var prefix = []byte("//line ") func (S *Scanner) interpretLineComment(text []byte) { @@ -163,7 +158,6 @@ func (S *Scanner) interpretLineComment(text []byte) { } } - func (S *Scanner) scanComment() { // initial '/' already consumed; S.ch == '/' || S.ch == '*' offs := S.offset - 1 // position of initial '/' @@ -195,7 +189,6 @@ func (S *Scanner) scanComment() { S.error(offs, "comment not terminated") } - func (S *Scanner) findLineEnd() bool { // initial '/' already consumed @@ -240,17 +233,14 @@ func (S *Scanner) findLineEnd() bool { return false } - func isLetter(ch int) bool { return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) } - func isDigit(ch int) bool { return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) } - func (S *Scanner) scanIdentifier() token.Token { offs := S.offset for isLetter(S.ch) || isDigit(S.ch) { @@ -259,7 +249,6 @@ func (S *Scanner) scanIdentifier() token.Token { return token.Lookup(S.src[offs:S.offset]) } - func digitVal(ch int) int { switch { case '0' <= ch && ch <= '9': @@ -272,14 +261,12 @@ func digitVal(ch int) int { return 16 // larger than any legal digit val } - func (S *Scanner) scanMantissa(base int) { for digitVal(S.ch) < base { S.next() } } - func (S *Scanner) scanNumber(seenDecimalPoint bool) token.Token { // digitVal(S.ch) < 10 tok := token.INT @@ -351,7 +338,6 @@ exit: return tok } - func (S *Scanner) scanEscape(quote int) { offs := S.offset @@ -396,7 +382,6 @@ func (S *Scanner) scanEscape(quote int) { } } - func (S *Scanner) scanChar() { // '\'' opening already consumed offs := S.offset - 1 @@ -423,7 +408,6 @@ func (S *Scanner) scanChar() { } } - func (S *Scanner) scanString() { // '"' opening already consumed offs := S.offset - 1 @@ -443,7 +427,6 @@ func (S *Scanner) scanString() { S.next() } - func (S *Scanner) scanRawString() { // '`' opening already consumed offs := S.offset - 1 @@ -460,14 +443,12 @@ func (S *Scanner) scanRawString() { S.next() } - func (S *Scanner) skipWhitespace() { for S.ch == ' ' || S.ch == '\t' || S.ch == '\n' && !S.insertSemi || S.ch == '\r' { S.next() } } - // Helper functions for scanning multi-byte tokens such as >> += >>= . // Different routines recognize different length tok_i based on matches // of ch_i. If a token ends in '=', the result is tok1 or tok3 @@ -482,7 +463,6 @@ func (S *Scanner) switch2(tok0, tok1 token.Token) token.Token { return tok0 } - func (S *Scanner) switch3(tok0, tok1 token.Token, ch2 int, tok2 token.Token) token.Token { if S.ch == '=' { S.next() @@ -495,7 +475,6 @@ func (S *Scanner) switch3(tok0, tok1 token.Token, ch2 int, tok2 token.Token) tok return tok0 } - func (S *Scanner) switch4(tok0, tok1 token.Token, ch2 int, tok2, tok3 token.Token) token.Token { if S.ch == '=' { S.next() @@ -512,7 +491,6 @@ func (S *Scanner) switch4(tok0, tok1 token.Token, ch2 int, tok2, tok3 token.Toke return tok0 } - // Scan scans the next token and returns the token position, // the token, and the literal string corresponding to the // token. The source end is indicated by token.EOF. 
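The switch2/switch3/switch4 helpers near the end of the scanner changes above decide between one-, two-, and three-character operators by peeking at the next character. A stripped-down sketch of the same dispatch, with a toy token type standing in for token.Token:

package main

import "fmt"

type tok int

const (
	ADD        tok = iota // "+"
	ADD_ASSIGN            // "+="
	INC                   // "++"
)

// scanPlus plays the role of a switch3 call after '+' has been consumed:
// '=' yields "+=", a repeated '+' yields "++", anything else is plain "+".
func scanPlus(next byte) tok {
	switch next {
	case '=':
		return ADD_ASSIGN
	case '+':
		return INC
	}
	return ADD
}

func main() {
	fmt.Println(scanPlus('='), scanPlus('+'), scanPlus('x')) // 1 2 0
}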
diff --git a/src/pkg/go/scanner/scanner_test.go b/src/pkg/go/scanner/scanner_test.go index c096e2725..eb9e1cb81 100644 --- a/src/pkg/go/scanner/scanner_test.go +++ b/src/pkg/go/scanner/scanner_test.go @@ -12,10 +12,8 @@ import ( "testing" ) - var fset = token.NewFileSet() - const /* class */ ( special = iota literal @@ -23,7 +21,6 @@ const /* class */ ( keyword ) - func tokenclass(tok token.Token) int { switch { case tok.IsLiteral(): @@ -36,14 +33,12 @@ func tokenclass(tok token.Token) int { return special } - type elt struct { tok token.Token lit string class int } - var tokens = [...]elt{ // Special tokens {token.COMMENT, "/* a comment */", special}, @@ -178,7 +173,6 @@ var tokens = [...]elt{ {token.VAR, "var", keyword}, } - const whitespace = " \t \n\n\n" // to separate tokens type testErrorHandler struct { @@ -189,7 +183,6 @@ func (h *testErrorHandler) Error(pos token.Position, msg string) { h.t.Errorf("Error() called (msg = %s)", msg) } - func newlineCount(s string) int { n := 0 for i := 0; i < len(s); i++ { @@ -200,7 +193,6 @@ func newlineCount(s string) int { return n } - func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) { pos := fset.Position(p) if pos.Filename != expected.Filename { @@ -217,7 +209,6 @@ func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) { } } - // Verify that calling Scan() provides the correct results. func TestScan(t *testing.T) { // make source @@ -271,7 +262,6 @@ func TestScan(t *testing.T) { } } - func checkSemi(t *testing.T, line string, mode uint) { var S Scanner file := fset.AddFile("TestSemis", fset.Base(), len(line)) @@ -305,7 +295,6 @@ func checkSemi(t *testing.T, line string, mode uint) { } } - var lines = []string{ // # indicates a semicolon present in the source // $ indicates an automatically inserted semicolon @@ -429,7 +418,6 @@ var lines = []string{ "package main$", } - func TestSemis(t *testing.T) { for _, line := range lines { checkSemi(t, line, AllowIllegalChars|InsertSemis) @@ -463,25 +451,31 @@ var segments = []segment{ {"\n //line foo:42\n line44", filepath.Join("dir", "foo"), 44}, // bad line comment, ignored {"\n//line foo 42\n line46", filepath.Join("dir", "foo"), 46}, // bad line comment, ignored {"\n//line foo:42 extra text\n line48", filepath.Join("dir", "foo"), 48}, // bad line comment, ignored - {"\n//line /bar:42\n line42", string(filepath.Separator) + "bar", 42}, {"\n//line ./foo:42\n line42", filepath.Join("dir", "foo"), 42}, {"\n//line a/b/c/File1.go:100\n line100", filepath.Join("dir", "a", "b", "c", "File1.go"), 100}, } +var unixsegments = []segment{ + {"\n//line /bar:42\n line42", "/bar", 42}, +} + var winsegments = []segment{ + {"\n//line c:\\bar:42\n line42", "c:\\bar", 42}, {"\n//line c:\\dir\\File1.go:100\n line100", "c:\\dir\\File1.go", 100}, } - // Verify that comments of the form "//line filename:line" are interpreted correctly. func TestLineComments(t *testing.T) { + segs := segments if runtime.GOOS == "windows" { - segments = append(segments, winsegments...) + segs = append(segs, winsegments...) + } else { + segs = append(segs, unixsegments...) 
} // make source var src string - for _, e := range segments { + for _, e := range segs { src += e.srcline } @@ -489,7 +483,7 @@ func TestLineComments(t *testing.T) { var S Scanner file := fset.AddFile(filepath.Join("dir", "TestLineComments"), fset.Base(), len(src)) S.Init(file, []byte(src), nil, 0) - for _, s := range segments { + for _, s := range segs { p, _, lit := S.Scan() pos := file.Position(p) checkPos(t, lit, p, token.Position{s.filename, pos.Offset, s.line, pos.Column}) @@ -500,7 +494,6 @@ func TestLineComments(t *testing.T) { } } - // Verify that initializing the same scanner more then once works correctly. func TestInit(t *testing.T) { var s Scanner @@ -536,7 +529,6 @@ func TestInit(t *testing.T) { } } - func TestIllegalChars(t *testing.T) { var s Scanner @@ -558,7 +550,6 @@ func TestIllegalChars(t *testing.T) { } } - func TestStdErrorHander(t *testing.T) { const src = "@\n" + // illegal character, cause an error "@ @\n" + // two errors on the same line @@ -601,21 +592,18 @@ func TestStdErrorHander(t *testing.T) { } } - type errorCollector struct { cnt int // number of errors encountered msg string // last error message encountered pos token.Position // last error position encountered } - func (h *errorCollector) Error(pos token.Position, msg string) { h.cnt++ h.msg = msg h.pos = pos } - func checkError(t *testing.T, src string, tok token.Token, pos int, err string) { var s Scanner var h errorCollector @@ -643,7 +631,6 @@ func checkError(t *testing.T, src string, tok token.Token, pos int, err string) } } - var errors = []struct { src string tok token.Token @@ -678,7 +665,6 @@ var errors = []struct { {"\"abc\x80def\"", token.STRING, 4, "illegal UTF-8 encoding"}, } - func TestScanErrors(t *testing.T) { for _, e := range errors { checkError(t, e.src, e.tok, e.pos, e.err) diff --git a/src/pkg/go/token/position.go b/src/pkg/go/token/position.go index 23a3cc00f..c559e19f8 100644 --- a/src/pkg/go/token/position.go +++ b/src/pkg/go/token/position.go @@ -12,7 +12,6 @@ import ( "sync" ) - // Position describes an arbitrary source position // including the file, line, and column location. // A Position is valid if the line number is > 0. @@ -24,11 +23,9 @@ type Position struct { Column int // column number, starting at 1 (character count) } - // IsValid returns true if the position is valid. func (pos *Position) IsValid() bool { return pos.Line > 0 } - // String returns a string in one of several forms: // // file:line:column valid position with file name @@ -50,7 +47,6 @@ func (pos Position) String() string { return s } - // Pos is a compact encoding of a source position within a file set. // It can be converted into a Position for a more convenient, but much // larger, representation. @@ -73,7 +69,6 @@ func (pos Position) String() string { // type Pos int - // The zero value for Pos is NoPos; there is no file and line information // associated with it, and NoPos().IsValid() is false. NoPos is always // smaller than any other Pos value. The corresponding Position value @@ -81,18 +76,15 @@ type Pos int // const NoPos Pos = 0 - // IsValid returns true if the position is valid. func (p Pos) IsValid() bool { return p != NoPos } - func searchFiles(a []*File, x int) int { return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 } - func (s *FileSet) file(p Pos) *File { if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size { return f @@ -108,7 +100,6 @@ func (s *FileSet) file(p Pos) *File { return nil } - // File returns the file which contains the position p. 
// If no such file is found (for instance for p == NoPos), // the result is nil. @@ -122,7 +113,6 @@ func (s *FileSet) File(p Pos) (f *File) { return } - func (f *File) position(p Pos) (pos Position) { offset := int(p) - f.base pos.Offset = offset @@ -130,7 +120,6 @@ func (f *File) position(p Pos) (pos Position) { return } - // Position converts a Pos in the fileset into a general Position. func (s *FileSet) Position(p Pos) (pos Position) { if p != NoPos { @@ -147,14 +136,12 @@ func (s *FileSet) Position(p Pos) (pos Position) { return } - type lineInfo struct { offset int filename string line int } - // AddLineInfo adds alternative file and line number information for // a given file offset. The offset must be larger than the offset for // the previously added alternative line info and smaller than the @@ -171,7 +158,6 @@ func (f *File) AddLineInfo(offset int, filename string, line int) { f.set.mutex.Unlock() } - // A File is a handle for a file belonging to a FileSet. // A File has a name, size, and line offset table. // @@ -186,25 +172,21 @@ type File struct { infos []lineInfo } - // Name returns the file name of file f as registered with AddFile. func (f *File) Name() string { return f.name } - // Base returns the base offset of file f as registered with AddFile. func (f *File) Base() int { return f.base } - // Size returns the size of file f as registered with AddFile. func (f *File) Size() int { return f.size } - // LineCount returns the number of lines in file f. func (f *File) LineCount() int { f.set.mutex.RLock() @@ -213,7 +195,6 @@ func (f *File) LineCount() int { return n } - // AddLine adds the line offset for a new line. // The line offset must be larger than the offset for the previous line // and smaller than the file size; otherwise the line offset is ignored. @@ -226,7 +207,6 @@ func (f *File) AddLine(offset int) { f.set.mutex.Unlock() } - // SetLines sets the line offsets for a file and returns true if successful. // The line offsets are the offsets of the first character of each line; // for instance for the content "ab\nc\n" the line offsets are {0, 3}. @@ -251,7 +231,6 @@ func (f *File) SetLines(lines []int) bool { return true } - // SetLinesForContent sets the line offsets for the given file content. func (f *File) SetLinesForContent(content []byte) { var lines []int @@ -272,7 +251,6 @@ func (f *File) SetLinesForContent(content []byte) { f.set.mutex.Unlock() } - // Pos returns the Pos value for the given file offset; // the offset must be <= f.Size(). // f.Pos(f.Offset(p)) == p. @@ -284,7 +262,6 @@ func (f *File) Pos(offset int) Pos { return Pos(f.base + offset) } - // Offset returns the offset for the given file position p; // p must be a valid Pos value in that file. // f.Offset(f.Pos(offset)) == offset. @@ -296,7 +273,6 @@ func (f *File) Offset(p Pos) int { return int(p) - f.base } - // Line returns the line number for the given file position p; // p must be a Pos value in that file or NoPos. // @@ -305,7 +281,6 @@ func (f *File) Line(p Pos) int { return f.Position(p).Line } - // Position returns the Position value for the given file position p; // p must be a Pos value in that file or NoPos. 
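The position.go hunks above are whitespace-only, but they show where File, Pos, and Position fit together. A short, self-contained usage sketch (the file name and source bytes are invented for the example):

package main

import (
	"fmt"
	"go/token"
)

func main() {
	src := []byte("package p\n\nvar x int\n")

	fset := token.NewFileSet()
	f := fset.AddFile("p.go", fset.Base(), len(src)) // register the file in the set
	f.SetLinesForContent(src)                        // compute line offsets from the bytes

	p := f.Pos(11)          // compact Pos for byte offset 11 (start of "var")
	pos := fset.Position(p) // expand into file:line:column
	fmt.Println(pos)        // p.go:3:1
}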
// @@ -319,7 +294,6 @@ func (f *File) Position(p Pos) (pos Position) { return } - func searchInts(a []int, x int) int { // This function body is a manually inlined version of: // @@ -342,12 +316,10 @@ func searchInts(a []int, x int) int { return i - 1 } - func searchLineInfos(a []lineInfo, x int) int { return sort.Search(len(a), func(i int) bool { return a[i].offset > x }) - 1 } - // info returns the file name, line, and column number for a file offset. func (f *File) info(offset int) (filename string, line, column int) { filename = f.name @@ -367,7 +339,6 @@ func (f *File) info(offset int) (filename string, line, column int) { return } - // A FileSet represents a set of source files. // Methods of file sets are synchronized; multiple goroutines // may invoke them concurrently. @@ -379,7 +350,6 @@ type FileSet struct { last *File // cache of last file looked up } - // NewFileSet creates a new file set. func NewFileSet() *FileSet { s := new(FileSet) @@ -387,7 +357,6 @@ func NewFileSet() *FileSet { return s } - // Base returns the minimum base offset that must be provided to // AddFile when adding the next file. // @@ -399,7 +368,6 @@ func (s *FileSet) Base() int { } - // AddFile adds a new file with a given filename, base offset, and file size // to the file set s and returns the file. Multiple files may have the same // name. The base offset must not be smaller than the FileSet's Base(), and @@ -434,7 +402,6 @@ func (s *FileSet) AddFile(filename string, base, size int) *File { return f } - // Files returns the files added to the file set. func (s *FileSet) Files() <-chan *File { ch := make(chan *File) diff --git a/src/pkg/go/token/position_test.go b/src/pkg/go/token/position_test.go index 979c9b1e8..30bec5991 100644 --- a/src/pkg/go/token/position_test.go +++ b/src/pkg/go/token/position_test.go @@ -9,7 +9,6 @@ import ( "testing" ) - func checkPos(t *testing.T, msg string, p, q Position) { if p.Filename != q.Filename { t.Errorf("%s: expected filename = %q; got %q", msg, q.Filename, p.Filename) @@ -25,7 +24,6 @@ func checkPos(t *testing.T, msg string, p, q Position) { } } - func TestNoPos(t *testing.T) { if NoPos.IsValid() { t.Errorf("NoPos should not be valid") @@ -36,7 +34,6 @@ func TestNoPos(t *testing.T) { checkPos(t, "fset NoPos", fset.Position(NoPos), Position{}) } - var tests = []struct { filename string source []byte // may be nil @@ -53,7 +50,6 @@ var tests = []struct { {"h", []byte("package p\n\nimport \"fmt\"\n "), 25, []int{0, 10, 11, 24}}, } - func linecol(lines []int, offs int) (int, int) { prevLineOffs := 0 for line, lineOffs := range lines { @@ -65,7 +61,6 @@ func linecol(lines []int, offs int) (int, int) { return len(lines), offs - prevLineOffs + 1 } - func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) { for offs := 0; offs < f.Size(); offs++ { p := f.Pos(offs) @@ -80,7 +75,6 @@ func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) { } } - func makeTestSource(size int, lines []int) []byte { src := make([]byte, size) for _, offs := range lines { @@ -91,7 +85,6 @@ func makeTestSource(size int, lines []int) []byte { return src } - func TestPositions(t *testing.T) { const delta = 7 // a non-zero base offset increment fset := NewFileSet() @@ -150,7 +143,6 @@ func TestPositions(t *testing.T) { } } - func TestLineInfo(t *testing.T) { fset := NewFileSet() f := fset.AddFile("foo", fset.Base(), 500) @@ -170,7 +162,6 @@ func TestLineInfo(t *testing.T) { } } - func TestFiles(t *testing.T) { fset := NewFileSet() for i, test := range tests { diff 
--git a/src/pkg/go/token/token.go b/src/pkg/go/token/token.go index c2ec80ae1..557374052 100644 --- a/src/pkg/go/token/token.go +++ b/src/pkg/go/token/token.go @@ -9,7 +9,6 @@ package token import "strconv" - // Token is the set of lexical tokens of the Go programming language. type Token int @@ -124,7 +123,6 @@ const ( keyword_end ) - var tokens = [...]string{ ILLEGAL: "ILLEGAL", @@ -225,7 +223,6 @@ var tokens = [...]string{ VAR: "var", } - // String returns the string corresponding to the token tok. // For operators, delimiters, and keywords the string is the actual // token character sequence (e.g., for the token ADD, the string is @@ -243,7 +240,6 @@ func (tok Token) String() string { return s } - // A set of constants for precedence-based expression parsing. // Non-operators have lowest precedence, followed by operators // starting with precedence 1 up to unary operators. The highest @@ -256,7 +252,6 @@ const ( HighestPrec = 7 ) - // Precedence returns the operator precedence of the binary // operator op. If op is not a binary operator, the result // is LowestPrecedence. @@ -277,7 +272,6 @@ func (op Token) Precedence() int { return LowestPrec } - var keywords map[string]Token func init() { @@ -287,7 +281,6 @@ func init() { } } - // Lookup maps an identifier to its keyword token or IDENT (if not a keyword). // func Lookup(ident []byte) Token { @@ -299,7 +292,6 @@ func Lookup(ident []byte) Token { return IDENT } - // Predicates // IsLiteral returns true for tokens corresponding to identifiers diff --git a/src/pkg/go/typechecker/scope.go b/src/pkg/go/typechecker/scope.go index a4bee6e69..d73d1a450 100644 --- a/src/pkg/go/typechecker/scope.go +++ b/src/pkg/go/typechecker/scope.go @@ -12,18 +12,15 @@ package typechecker import "go/ast" - func (tc *typechecker) openScope() *ast.Scope { tc.topScope = ast.NewScope(tc.topScope) return tc.topScope } - func (tc *typechecker) closeScope() { tc.topScope = tc.topScope.Outer } - // declInScope declares an object of a given kind and name in scope and sets the object's Decl and N fields. // It returns the newly allocated object. If an object with the same name already exists in scope, an error // is reported and the object is not inserted. @@ -40,13 +37,11 @@ func (tc *typechecker) declInScope(scope *ast.Scope, kind ast.ObjKind, name *ast return obj } - // decl is the same as declInScope(tc.topScope, ...) func (tc *typechecker) decl(kind ast.ObjKind, name *ast.Ident, decl interface{}, n int) *ast.Object { return tc.declInScope(tc.topScope, kind, name, decl, n) } - // find returns the object with the given name if visible in the current scope hierarchy. // If no such object is found, an error is reported and a bad object is returned instead. func (tc *typechecker) find(name *ast.Ident) (obj *ast.Object) { @@ -61,7 +56,6 @@ func (tc *typechecker) find(name *ast.Ident) (obj *ast.Object) { return } - // findField returns the object with the given name if visible in the type's scope. // If no such object is found, an error is reported and a bad object is returned instead. func (tc *typechecker) findField(typ *Type, name *ast.Ident) (obj *ast.Object) { diff --git a/src/pkg/go/typechecker/type.go b/src/pkg/go/typechecker/type.go index 62b4e9d3e..1b88eb54b 100644 --- a/src/pkg/go/typechecker/type.go +++ b/src/pkg/go/typechecker/type.go @@ -6,7 +6,6 @@ package typechecker import "go/ast" - // A Type represents a Go type. 
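For the token.go changes above: Precedence is what parseBinaryExpr climbs on, and Lookup separates keywords from identifiers (at this point in the tree Lookup still takes a []byte; it later changed to take a string). A small sketch against that API:

package main

import (
	"fmt"
	"go/token"
)

func main() {
	// Binary operator precedences used by the precedence-climbing parser.
	fmt.Println(token.LOR.Precedence()) // 1  (||)
	fmt.Println(token.ADD.Precedence()) // 4  (+)
	fmt.Println(token.MUL.Precedence()) // 5  (*)

	// Keyword lookup: "func" maps to the FUNC token, "foo" stays an IDENT.
	fmt.Println(token.Lookup([]byte("func"))) // func
	fmt.Println(token.Lookup([]byte("foo")))  // IDENT
}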
type Type struct { Form Form @@ -18,13 +17,11 @@ type Type struct { Expr ast.Expr // corresponding AST expression } - // NewType creates a new type of a given form. func NewType(form Form) *Type { return &Type{Form: form, Scope: ast.NewScope(nil)} } - // Form describes the form of a type. type Form int @@ -45,7 +42,6 @@ const ( Tuple ) - var formStrings = [...]string{ BadType: "badType", Unresolved: "unresolved", @@ -62,10 +58,8 @@ var formStrings = [...]string{ Tuple: "tuple", } - func (form Form) String() string { return formStrings[form] } - // The list of basic type id's. const ( Bool = iota @@ -96,7 +90,6 @@ const ( // TODO(gri) ideal types are missing ) - var BasicTypes = map[uint]string{ Bool: "bool", Byte: "byte", diff --git a/src/pkg/go/typechecker/typechecker.go b/src/pkg/go/typechecker/typechecker.go index b151f5834..24480165b 100644 --- a/src/pkg/go/typechecker/typechecker.go +++ b/src/pkg/go/typechecker/typechecker.go @@ -17,19 +17,16 @@ import ( "os" ) - // TODO(gri) don't report errors for objects/types that are marked as bad. const debug = true // set for debugging output - // An importer takes an import path and returns the data describing the // respective package's exported interface. The data format is TBD. // type Importer func(path string) ([]byte, os.Error) - // CheckPackage typechecks a package and augments the AST by setting // *ast.Object, *ast.Type, and *ast.Scope fields accordingly. If an // importer is provided, it is used to handle imports, otherwise they @@ -46,7 +43,6 @@ func CheckPackage(fset *token.FileSet, pkg *ast.Package, importer Importer) os.E return tc.GetError(scanner.Sorted) } - // CheckFile typechecks a single file, but otherwise behaves like // CheckPackage. If the complete package consists of more than just // one file, the file may not typecheck without errors. @@ -57,7 +53,6 @@ func CheckFile(fset *token.FileSet, file *ast.File, importer Importer) os.Error return CheckPackage(fset, pkg, importer) } - // ---------------------------------------------------------------------------- // Typechecker state @@ -71,19 +66,16 @@ type typechecker struct { iota int // current value of iota } - func (tc *typechecker) Errorf(pos token.Pos, format string, args ...interface{}) { tc.Error(tc.fset.Position(pos), fmt.Sprintf(format, args...)) } - func assert(pred bool) { if !pred { panic("internal error") } } - /* Typechecking is done in several phases: @@ -158,7 +150,6 @@ func (tc *typechecker) checkPackage(pkg *ast.Package) { pkg.Scope = tc.topScope } - func (tc *typechecker) declGlobal(global ast.Decl) { switch d := global.(type) { case *ast.BadDecl: @@ -218,7 +209,6 @@ func (tc *typechecker) declGlobal(global ast.Decl) { } } - // If x is of the form *T, deref returns T, otherwise it returns x. 
func deref(x ast.Expr) ast.Expr { if p, isPtr := x.(*ast.StarExpr); isPtr { @@ -227,7 +217,6 @@ func deref(x ast.Expr) ast.Expr { return x } - func (tc *typechecker) bindMethod(method *ast.FuncDecl) { // a method is declared in the receiver base type's scope var scope *ast.Scope @@ -259,7 +248,6 @@ func (tc *typechecker) bindMethod(method *ast.FuncDecl) { tc.declInScope(scope, ast.Fun, method.Name, method, 0) } - func (tc *typechecker) resolve(obj *ast.Object) { // check for declaration cycles if tc.cyclemap[obj] { @@ -318,7 +306,6 @@ func (tc *typechecker) resolve(obj *ast.Object) { } } - func (tc *typechecker) checkBlock(body []ast.Stmt, ftype *Type) { tc.openScope() defer tc.closeScope() @@ -338,7 +325,6 @@ func (tc *typechecker) checkBlock(body []ast.Stmt, ftype *Type) { } } - // ---------------------------------------------------------------------------- // Types @@ -350,7 +336,6 @@ func unparen(x ast.Expr) ast.Expr { return x } - func (tc *typechecker) declFields(scope *ast.Scope, fields *ast.FieldList, ref bool) (n uint) { if fields != nil { for _, f := range fields.List { @@ -365,7 +350,6 @@ func (tc *typechecker) declFields(scope *ast.Scope, fields *ast.FieldList, ref b return n } - func (tc *typechecker) declSignature(typ *Type, recv, params, results *ast.FieldList) { assert((typ.Form == Method) == (recv != nil)) typ.Params = ast.NewScope(nil) @@ -374,7 +358,6 @@ func (tc *typechecker) declSignature(typ *Type, recv, params, results *ast.Field typ.N = tc.declFields(typ.Params, results, true) } - func (tc *typechecker) typeFor(def *Type, x ast.Expr, ref bool) (typ *Type) { x = unparen(x) @@ -472,17 +455,14 @@ func (tc *typechecker) typeFor(def *Type, x ast.Expr, ref bool) (typ *Type) { return } - // ---------------------------------------------------------------------------- // TODO(gri) implement these place holders func (tc *typechecker) declConst(*ast.Object) { } - func (tc *typechecker) declVar(*ast.Object) { } - func (tc *typechecker) checkStmt(ast.Stmt) { } diff --git a/src/pkg/go/typechecker/typechecker_test.go b/src/pkg/go/typechecker/typechecker_test.go index d16e06921..4bad4499a 100644 --- a/src/pkg/go/typechecker/typechecker_test.go +++ b/src/pkg/go/typechecker/typechecker_test.go @@ -41,7 +41,6 @@ import ( "testing" ) - const testDir = "./testdata" // location of test packages var fset = token.NewFileSet() @@ -51,7 +50,6 @@ var ( trace = flag.Bool("trace", false, "print package names") ) - // ERROR comments must be of the form /* ERROR "rx" */ and rx is // a regular expression that matches the expected error message. var errRx = regexp.MustCompile(`^/\* *ERROR *"([^"]*)" *\*/$`) @@ -91,12 +89,10 @@ func expectedErrors(t *testing.T, pkg *ast.Package) (list scanner.ErrorList) { return } - func testFilter(f *os.FileInfo) bool { return strings.HasSuffix(f.Name, ".src") && f.Name[0] != '.' } - func checkError(t *testing.T, expected, found *scanner.Error) { rx, err := regexp.Compile(expected.Msg) if err != nil { @@ -120,7 +116,6 @@ func checkError(t *testing.T, expected, found *scanner.Error) { } } - func TestTypeCheck(t *testing.T) { flag.Parse() pkgRx, err := regexp.Compile(*pkgPat) diff --git a/src/pkg/go/typechecker/universe.go b/src/pkg/go/typechecker/universe.go index abc8bbbd4..81c14a05e 100644 --- a/src/pkg/go/typechecker/universe.go +++ b/src/pkg/go/typechecker/universe.go @@ -11,7 +11,6 @@ import "go/ast" // The Universe scope contains all predeclared identifiers. 
var Universe *ast.Scope - func def(obj *ast.Object) { alt := Universe.Insert(obj) if alt != nil { @@ -19,7 +18,6 @@ func def(obj *ast.Object) { } } - func init() { Universe = ast.NewScope(nil) diff --git a/src/pkg/go/types/check.go b/src/pkg/go/types/check.go index 02d662926..87e3e93da 100644 --- a/src/pkg/go/types/check.go +++ b/src/pkg/go/types/check.go @@ -15,24 +15,20 @@ import ( "strconv" ) - const debug = false - type checker struct { fset *token.FileSet scanner.ErrorVector types map[ast.Expr]Type } - func (c *checker) errorf(pos token.Pos, format string, args ...interface{}) string { msg := fmt.Sprintf(format, args...) c.Error(c.fset.Position(pos), msg) return msg } - // collectFields collects struct fields tok = token.STRUCT), interface methods // (tok = token.INTERFACE), and function arguments/results (tok = token.FUNC). func (c *checker) collectFields(tok token.Token, list *ast.FieldList, cycleOk bool) (fields ObjList, tags []string, isVariadic bool) { @@ -87,7 +83,6 @@ func (c *checker) collectFields(tok token.Token, list *ast.FieldList, cycleOk bo return } - // makeType makes a new type for an AST type specification x or returns // the type referred to by a type name x. If cycleOk is set, a type may // refer to itself directly or indirectly; otherwise cycles are errors. @@ -183,7 +178,6 @@ func (c *checker) makeType(x ast.Expr, cycleOk bool) (typ Type) { panic(fmt.Sprintf("unreachable (%T)", x)) } - // checkObj type checks an object. func (c *checker) checkObj(obj *ast.Object, ref bool) { if obj.Type != nil { @@ -214,7 +208,6 @@ func (c *checker) checkObj(obj *ast.Object, ref bool) { } } - // Check typechecks a package. // It augments the AST by assigning types to all ast.Objects and returns a map // of types for all expression nodes in statements, and a scanner.ErrorList if diff --git a/src/pkg/go/types/check_test.go b/src/pkg/go/types/check_test.go index 6ecb12b1e..8be653fcb 100644 --- a/src/pkg/go/types/check_test.go +++ b/src/pkg/go/types/check_test.go @@ -34,7 +34,6 @@ import ( "testing" ) - // The test filenames do not end in .go so that they are invisible // to gofmt since they contain comments that must not change their // positions relative to surrounding tokens. @@ -46,10 +45,8 @@ var tests = []struct { {"test0", []string{"testdata/test0.src"}}, } - var fset = token.NewFileSet() - // TODO(gri) This functionality should be in token.Fileset. func getFile(filename string) *token.File { for f := range fset.Files() { @@ -60,7 +57,6 @@ func getFile(filename string) *token.File { return nil } - // TODO(gri) This functionality should be in token.Fileset. func getPos(filename string, offset int) token.Pos { if f := getFile(filename); f != nil { @@ -69,7 +65,6 @@ func getPos(filename string, offset int) token.Pos { return token.NoPos } - // TODO(gri) Need to revisit parser interface. We should be able to use parser.ParseFiles // or a similar function instead. func parseFiles(t *testing.T, testname string, filenames []string) (map[string]*ast.File, os.Error) { @@ -95,7 +90,6 @@ func parseFiles(t *testing.T, testname string, filenames []string) (map[string]* return files, errors } - // ERROR comments must be of the form /* ERROR "rx" */ and rx is // a regular expression that matches the expected error message. 
// @@ -138,7 +132,6 @@ func expectedErrors(t *testing.T, testname string, files map[string]*ast.File) m return errors } - func eliminate(t *testing.T, expected map[token.Pos]string, errors os.Error) { if errors == nil { return @@ -172,7 +165,6 @@ func eliminate(t *testing.T, expected map[token.Pos]string, errors os.Error) { } } - func check(t *testing.T, testname string, testfiles []string) { // TODO(gri) Eventually all these different phases should be // subsumed into a single function call that takes @@ -206,7 +198,6 @@ func check(t *testing.T, testname string, testfiles []string) { } } - func TestCheck(t *testing.T) { // For easy debugging w/o changing the testing code, // if there is a local test file, only test that file. diff --git a/src/pkg/go/types/const.go b/src/pkg/go/types/const.go index 6fdc22f6b..1ef95d9f9 100644 --- a/src/pkg/go/types/const.go +++ b/src/pkg/go/types/const.go @@ -12,7 +12,6 @@ import ( "strconv" ) - // TODO(gri) Consider changing the API so Const is an interface // and operations on consts don't have to type switch. @@ -28,20 +27,17 @@ type Const struct { val interface{} } - // Representation of complex values. type cmplx struct { re, im *big.Rat } - func assert(cond bool) { if !cond { panic("go/types internal error: assertion failed") } } - // MakeConst makes an ideal constant from a literal // token and the corresponding literal string. func MakeConst(tok token.Token, lit string) Const { @@ -75,14 +71,12 @@ func MakeConst(tok token.Token, lit string) Const { panic("unreachable") } - // MakeZero returns the zero constant for the given type. func MakeZero(typ *Type) Const { // TODO(gri) fix this return Const{0} } - // Match attempts to match the internal constant representations of x and y. // If the attempt is successful, the result is the values of x and y, // if necessary converted to have the same internal representation; otherwise @@ -132,7 +126,6 @@ func (x Const) Match(y Const) (u, v Const) { return } - // Convert attempts to convert the constant x to a given type. // If the attempt is successful, the result is the new constant; // otherwise the result is invalid. 
@@ -148,7 +141,6 @@ func (x Const) Convert(typ *Type) Const { return x } - func (x Const) String() string { switch x := x.val.(type) { case bool: @@ -169,12 +161,10 @@ func (x Const) String() string { panic("unreachable") } - func (x Const) UnaryOp(op token.Token) Const { panic("unimplemented") } - func (x Const) BinaryOp(op token.Token, y Const) Const { var z interface{} switch x := x.val.(type) { @@ -194,7 +184,6 @@ func (x Const) BinaryOp(op token.Token, y Const) Const { return Const{z} } - func binaryBoolOp(x bool, op token.Token, y bool) interface{} { switch op { case token.EQL: @@ -205,7 +194,6 @@ func binaryBoolOp(x bool, op token.Token, y bool) interface{} { panic("unreachable") } - func binaryIntOp(x *big.Int, op token.Token, y *big.Int) interface{} { var z big.Int switch op { @@ -247,7 +235,6 @@ func binaryIntOp(x *big.Int, op token.Token, y *big.Int) interface{} { panic("unreachable") } - func binaryFloatOp(x *big.Rat, op token.Token, y *big.Rat) interface{} { var z big.Rat switch op { @@ -275,7 +262,6 @@ func binaryFloatOp(x *big.Rat, op token.Token, y *big.Rat) interface{} { panic("unreachable") } - func binaryCmplxOp(x cmplx, op token.Token, y cmplx) interface{} { a, b := x.re, x.im c, d := y.re, y.im @@ -325,7 +311,6 @@ func binaryCmplxOp(x cmplx, op token.Token, y cmplx) interface{} { panic("unreachable") } - func binaryStringOp(x string, op token.Token, y string) interface{} { switch op { case token.ADD: diff --git a/src/pkg/go/types/exportdata.go b/src/pkg/go/types/exportdata.go index f68133761..383520320 100644 --- a/src/pkg/go/types/exportdata.go +++ b/src/pkg/go/types/exportdata.go @@ -15,7 +15,6 @@ import ( "strings" ) - func readGopackHeader(buf *bufio.Reader) (name string, size int, err os.Error) { // See $GOROOT/include/ar.h. hdr := make([]byte, 64+12+6+6+8+10+2) @@ -36,13 +35,11 @@ func readGopackHeader(buf *bufio.Reader) (name string, size int, err os.Error) { return } - type dataReader struct { *bufio.Reader io.Closer } - // ExportData returns a readCloser positioned at the beginning of the // export data section of the given object/archive file, or an error. // It is the caller's responsibility to close the readCloser. diff --git a/src/pkg/go/types/gcimporter.go b/src/pkg/go/types/gcimporter.go index aa0bb9160..6ab1806b6 100644 --- a/src/pkg/go/types/gcimporter.go +++ b/src/pkg/go/types/gcimporter.go @@ -20,7 +20,6 @@ import ( "strconv" ) - const trace = false // set to true for debugging var ( @@ -28,7 +27,6 @@ var ( pkgExts = [...]string{".a", ".5", ".6", ".8"} ) - // findPkg returns the filename and package id for an import path. // If no file was found, an empty filename is returned. func findPkg(path string) (filename, id string) { @@ -69,7 +67,6 @@ func findPkg(path string) (filename, id string) { return } - // gcParser parses the exports inside a gc compiler-produced // object/archive file and populates its scope with the results. type gcParser struct { @@ -80,7 +77,6 @@ type gcParser struct { imports map[string]*ast.Object // package id -> package object } - func (p *gcParser) init(filename, id string, src io.Reader, imports map[string]*ast.Object) { p.scanner.Init(src) p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } @@ -92,7 +88,6 @@ func (p *gcParser) init(filename, id string, src io.Reader, imports map[string]* p.imports = imports } - func (p *gcParser) next() { p.tok = p.scanner.Scan() switch p.tok { @@ -106,7 +101,6 @@ func (p *gcParser) next() { } } - // GcImporter implements the ast.Importer signature. 
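The const.go arithmetic above (binaryIntOp, binaryFloatOp, and friends) is a thin switch over math/big operations; the package was imported as plain "big" in the tree at the time of this diff. A minimal sketch of the *big.Rat calls behind the token.ADD and token.MUL cases of binaryFloatOp:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	x := big.NewRat(1, 3) // 1/3
	y := big.NewRat(1, 6) // 1/6

	var sum, prod big.Rat
	sum.Add(x, y)  // token.ADD case: 1/3 + 1/6 = 1/2
	prod.Mul(x, y) // token.MUL case: 1/3 * 1/6 = 1/18

	fmt.Println(sum.String(), prod.String()) // 1/2 1/18
}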
func GcImporter(imports map[string]*ast.Object, path string) (pkg *ast.Object, err os.Error) { if path == "unsafe" { @@ -148,7 +142,6 @@ func GcImporter(imports map[string]*ast.Object, path string) (pkg *ast.Object, e return } - // ---------------------------------------------------------------------------- // Error handling @@ -158,12 +151,10 @@ type importError struct { err os.Error } - func (e importError) String() string { return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) } - func (p *gcParser) error(err interface{}) { if s, ok := err.(string); ok { err = os.NewError(s) @@ -172,12 +163,10 @@ func (p *gcParser) error(err interface{}) { panic(importError{p.scanner.Pos(), err.(os.Error)}) } - func (p *gcParser) errorf(format string, args ...interface{}) { p.error(fmt.Sprintf(format, args...)) } - func (p *gcParser) expect(tok int) string { lit := p.lit if p.tok != tok { @@ -187,7 +176,6 @@ func (p *gcParser) expect(tok int) string { return lit } - func (p *gcParser) expectSpecial(tok string) { sep := 'x' // not white space i := 0 @@ -201,7 +189,6 @@ func (p *gcParser) expectSpecial(tok string) { } } - func (p *gcParser) expectKeyword(keyword string) { lit := p.expect(scanner.Ident) if lit != keyword { @@ -209,7 +196,6 @@ func (p *gcParser) expectKeyword(keyword string) { } } - // ---------------------------------------------------------------------------- // Import declarations @@ -242,7 +228,6 @@ func (p *gcParser) parsePkgId() *ast.Object { return pkg } - // dotIdentifier = ( ident | '·' ) { ident | int | '·' } . func (p *gcParser) parseDotIdent() string { ident := "" @@ -260,7 +245,6 @@ func (p *gcParser) parseDotIdent() string { return ident } - // ExportedName = ImportPath "." dotIdentifier . // func (p *gcParser) parseExportedName(kind ast.ObjKind) *ast.Object { @@ -295,7 +279,6 @@ func (p *gcParser) parseExportedName(kind ast.ObjKind) *ast.Object { return obj } - // ---------------------------------------------------------------------------- // Types @@ -309,7 +292,6 @@ func (p *gcParser) parseBasicType() Type { return obj.Type.(Type) } - // ArrayType = "[" int_lit "]" Type . // func (p *gcParser) parseArrayType() Type { @@ -324,7 +306,6 @@ func (p *gcParser) parseArrayType() Type { return &Array{Len: n, Elt: elt} } - // MapType = "map" "[" Type "]" Type . // func (p *gcParser) parseMapType() Type { @@ -336,7 +317,6 @@ func (p *gcParser) parseMapType() Type { return &Map{Key: key, Elt: elt} } - // Name = identifier | "?" . // func (p *gcParser) parseName() (name string) { @@ -353,7 +333,6 @@ func (p *gcParser) parseName() (name string) { return } - // Field = Name Type [ ":" string_lit ] . // func (p *gcParser) parseField() (fld *ast.Object, tag string) { @@ -374,7 +353,6 @@ func (p *gcParser) parseField() (fld *ast.Object, tag string) { return } - // StructType = "struct" "{" [ FieldList ] "}" . // FieldList = Field { ";" Field } . // @@ -402,7 +380,6 @@ func (p *gcParser) parseStructType() Type { return &Struct{Fields: fields, Tags: tags} } - // Parameter = ( identifier | "?" ) [ "..." ] Type [ ":" string_lit ] . // func (p *gcParser) parseParameter() (par *ast.Object, isVariadic bool) { @@ -425,7 +402,6 @@ func (p *gcParser) parseParameter() (par *ast.Object, isVariadic bool) { return } - // Parameters = "(" [ ParameterList ] ")" . // ParameterList = { Parameter "," } Parameter . // @@ -454,7 +430,6 @@ func (p *gcParser) parseParameters() (list []*ast.Object, isVariadic bool) { return } - // Signature = Parameters [ Result ] . 
// Result = Type | Parameters . // @@ -481,7 +456,6 @@ func (p *gcParser) parseSignature() *Func { return &Func{Params: params, Results: results, IsVariadic: isVariadic} } - // MethodSpec = identifier Signature . // func (p *gcParser) parseMethodSpec() *ast.Object { @@ -499,7 +473,6 @@ func (p *gcParser) parseMethodSpec() *ast.Object { return ast.NewObj(ast.Fun, "_") } - // InterfaceType = "interface" "{" [ MethodList ] "}" . // MethodList = MethodSpec { ";" MethodSpec } . // @@ -526,7 +499,6 @@ func (p *gcParser) parseInterfaceType() Type { return &Interface{Methods: methods} } - // ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . // func (p *gcParser) parseChanType() Type { @@ -546,7 +518,6 @@ func (p *gcParser) parseChanType() Type { return &Chan{Dir: dir, Elt: elt} } - // Type = // BasicType | TypeName | ArrayType | SliceType | StructType | // PointerType | FuncType | InterfaceType | MapType | ChanType | @@ -604,7 +575,6 @@ func (p *gcParser) parseType() Type { return nil } - // ---------------------------------------------------------------------------- // Declarations @@ -621,7 +591,6 @@ func (p *gcParser) parseImportDecl() { pkg.Name = name } - // int_lit = [ "+" | "-" ] { "0" ... "9" } . // func (p *gcParser) parseInt() (sign, val string) { @@ -636,7 +605,6 @@ func (p *gcParser) parseInt() (sign, val string) { return } - // number = int_lit [ "p" int_lit ] . // func (p *gcParser) parseNumber() Const { @@ -667,7 +635,6 @@ func (p *gcParser) parseNumber() Const { return Const{mant} } - // ConstDecl = "const" ExportedName [ Type ] "=" Literal . // Literal = bool_lit | int_lit | float_lit | complex_lit | string_lit . // bool_lit = "true" | "false" . @@ -722,7 +689,6 @@ func (p *gcParser) parseConstDecl() { obj.Data = x } - // TypeDecl = "type" ExportedName Type . // func (p *gcParser) parseTypeDecl() { @@ -742,7 +708,6 @@ func (p *gcParser) parseTypeDecl() { } } - // VarDecl = "var" ExportedName Type . // func (p *gcParser) parseVarDecl() { @@ -751,7 +716,6 @@ func (p *gcParser) parseVarDecl() { obj.Type = p.parseType() } - // FuncDecl = "func" ExportedName Signature . // func (p *gcParser) parseFuncDecl() { @@ -760,7 +724,6 @@ func (p *gcParser) parseFuncDecl() { obj.Type = p.parseSignature() } - // MethodDecl = "func" Receiver identifier Signature . // Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . // @@ -773,7 +736,6 @@ func (p *gcParser) parseMethodDecl() { p.parseSignature() } - // Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . // func (p *gcParser) parseDecl() { @@ -797,7 +759,6 @@ func (p *gcParser) parseDecl() { p.expect('\n') } - // ---------------------------------------------------------------------------- // Export diff --git a/src/pkg/go/types/gcimporter_test.go b/src/pkg/go/types/gcimporter_test.go index 10240add5..ec87f5d51 100644 --- a/src/pkg/go/types/gcimporter_test.go +++ b/src/pkg/go/types/gcimporter_test.go @@ -15,7 +15,6 @@ import ( "time" ) - var gcName, gcPath string // compiler name and path func init() { @@ -35,7 +34,6 @@ func init() { gcPath, _ = exec.LookPath(gcName) } - func compile(t *testing.T, dirname, filename string) { cmd := exec.Command(gcPath, filename) cmd.Dir = dirname @@ -47,7 +45,6 @@ func compile(t *testing.T, dirname, filename string) { t.Logf("%s", string(out)) } - // Use the same global imports map for all tests. The effect is // as if all tested packages were imported into a single package. 
var imports = make(map[string]*ast.Object)
@@ -61,7 +58,6 @@ func testPath(t *testing.T, path string) bool {
	return true
}
-
const maxTime = 3e9 // maximum allotted testing time in ns
func testDir(t *testing.T, dir string, endTime int64) (nimports int) {
@@ -93,7 +89,6 @@ func testDir(t *testing.T, dir string, endTime int64) (nimports int) {
	return
}
-
func TestGcImport(t *testing.T) {
	compile(t, "testdata", "exports.go")
diff --git a/src/pkg/go/types/testdata/exports.go b/src/pkg/go/types/testdata/exports.go
index 035a13fb7..ed63bf9ad 100644
--- a/src/pkg/go/types/testdata/exports.go
+++ b/src/pkg/go/types/testdata/exports.go
@@ -11,7 +11,6 @@ import (
	"go/ast"
)
-
const (
	C0 int = 0
	C1 = 3.14159265
@@ -23,7 +22,6 @@ const (
	C7 = `bar\n`
)
-
type (
	T1 int
	T2 [10]int
@@ -72,18 +70,15 @@ type (
	T28 func(T28) T28
)
-
var (
	V0 int
	V1 = -991.0
)
-
func F1() {}
func F2(x int) {}
func F3() int { return 0 }
func F4() float32 { return 0 }
func F5(a, b, c int, u, v, w struct{ x, y T1 }, more ...interface{}) (p, q, r chan<- T10)
-
func (p *T1) M1()
diff --git a/src/pkg/go/types/types.go b/src/pkg/go/types/types.go
index 10b0145b8..3aa896892 100644
--- a/src/pkg/go/types/types.go
+++ b/src/pkg/go/types/types.go
@@ -12,34 +12,29 @@ import (
	"sort"
)
-
// All types implement the Type interface.
type Type interface {
	isType()
}
-
// All concrete types embed ImplementsType which
// ensures that all types implement the Type interface.
type ImplementsType struct{}
func (t *ImplementsType) isType() {}
-
// A Bad type is a non-nil placeholder type when we don't know a type.
type Bad struct {
	ImplementsType
	Msg string // for better error reporting/debugging
}
-
// A Basic represents a (unnamed) basic type.
type Basic struct {
	ImplementsType
	// TODO(gri) need a field specifying the exact basic type
}
-
// An Array represents an array type [Len]Elt.
type Array struct {
	ImplementsType
@@ -47,14 +42,12 @@ type Array struct {
	Elt Type
}
-
// A Slice represents a slice type []Elt.
type Slice struct {
	ImplementsType
	Elt Type
}
-
// A Struct represents a struct type struct{...}.
// Anonymous fields are represented by objects with empty names.
type Struct struct {
@@ -67,14 +60,12 @@ type Struct struct {
	// - there is no scope for fast lookup (but the parser creates one)
}
-
// A Pointer represents a pointer type *Base.
type Pointer struct {
	ImplementsType
	Base Type
}
-
// A Func represents a function type func(...) (...).
// Unnamed parameters are represented by objects with empty names.
type Func struct {
@@ -85,21 +76,18 @@ type Func struct {
	IsVariadic bool // true if the last parameter's type is of the form ...T
}
-
// An Interface represents an interface type interface{...}.
type Interface struct {
	ImplementsType
	Methods ObjList // interface methods sorted by name; or nil
}
-
// A Map represents a map type map[Key]Elt.
type Map struct {
	ImplementsType
	Key, Elt Type
}
-
// A Chan represents a channel type chan Elt, <-chan Elt, or chan<-Elt.
type Chan struct {
	ImplementsType
@@ -107,7 +95,6 @@ type Chan struct {
	Elt Type
}
-
// A Name represents a named type as declared in a type declaration.
type Name struct {
	ImplementsType
@@ -116,7 +103,6 @@ type Name struct {
	// TODO(gri) need to remember fields and methods.
}
-
// If typ is a pointer type, Deref returns the pointer's base type;
// otherwise it returns typ.
func Deref(typ Type) Type {
@@ -126,7 +112,6 @@ func Deref(typ Type) Type {
	return typ
}
-
// Underlying returns the underlying type of a type.
func Underlying(typ Type) Type {
	if typ, ok := typ.(*Name); ok {
@@ -141,7 +126,6 @@ func Underlying(typ Type) Type {
	return typ
}
-
// An ObjList represents an ordered (in some fashion) list of objects.
type ObjList []*ast.Object
@@ -153,7 +137,6 @@ func (list ObjList) Swap(i, j int) { list[i], list[j] = list[j], list[i] }
// Sort sorts an object list by object name.
func (list ObjList) Sort() { sort.Sort(list) }
-
// identicalTypes returns true if both lists a and b have the
// same length and corresponding objects have identical types.
func identicalTypes(a, b ObjList) bool {
@@ -169,7 +152,6 @@ func identicalTypes(a, b ObjList) bool {
	return false
}
-
// Identical returns true if two types are identical.
func Identical(x, y Type) bool {
	if x == y {
diff --git a/src/pkg/go/types/universe.go b/src/pkg/go/types/universe.go
index 96005cff5..6ae88e5f9 100644
--- a/src/pkg/go/types/universe.go
+++ b/src/pkg/go/types/universe.go
@@ -9,14 +9,12 @@ package types
import "go/ast"
-
var (
	scope *ast.Scope // current scope to use for initialization
	Universe *ast.Scope
	Unsafe *ast.Object // package unsafe
)
-
func define(kind ast.ObjKind, name string) *ast.Object {
	obj := ast.NewObj(kind, name)
	if scope.Insert(obj) != nil {
@@ -25,7 +23,6 @@ func define(kind ast.ObjKind, name string) *ast.Object {
	return obj
}
-
func defType(name string) *Name {
	obj := define(ast.Typ, name)
	typ := &Name{Underlying: &Basic{}, Obj: obj}
@@ -33,19 +30,16 @@ func defType(name string) *Name {
	return typ
}
-
func defConst(name string) {
	obj := define(ast.Con, name)
	_ = obj // TODO(gri) fill in other properties
}
-
func defFun(name string) {
	obj := define(ast.Fun, name)
	_ = obj // TODO(gri) fill in other properties
}
-
var (
	Bool,
	Int,
@@ -54,7 +48,6 @@ var (
	String *Name
)
-
func init() {
	scope = ast.NewScope(nil)
	Universe = scope
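For orientation, GcImporter is the public entry point touched above: it resolves a package path to gc-generated export data and records the imported package in a caller-supplied map, the same pattern gcimporter_test.go uses with its global imports map. The following is a minimal usage sketch against the pre-Go 1 APIs in this tree; the "go/types" import path, the "mypkg" package path, and the error handling are illustrative assumptions, not part of this change.

package main

import (
	"fmt"
	"go/ast"
	"go/types" // assumed import path for src/pkg/go/types in this tree
)

// As in gcimporter_test.go, one shared map lets already-imported
// packages be reused across GcImporter calls.
var imports = make(map[string]*ast.Object)

func main() {
	// "mypkg" is a placeholder path; GcImporter locates the
	// gc-compiled export data for it and returns the package object.
	pkg, err := types.GcImporter(imports, "mypkg")
	if err != nil {
		fmt.Println("import failed:", err)
		return
	}
	fmt.Println("imported package:", pkg.Name)
}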
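The types.go and universe.go parts of the diff define the type representation the importer produces: every concrete type embeds ImplementsType, Deref strips one level of pointer, and Underlying resolves a *Name to the type it was declared over. A small sketch of those helpers in use follows, again assuming the package is importable as "go/types"; the named type N built here is hypothetical and mirrors what defType constructs.

package main

import (
	"fmt"
	"go/ast"
	"go/types" // assumed import path for src/pkg/go/types in this tree
)

func main() {
	// A named type N whose underlying type is a basic type,
	// the same shape defType in universe.go builds.
	obj := ast.NewObj(ast.Typ, "N")
	named := &types.Name{Underlying: &types.Basic{}, Obj: obj}
	ptr := &types.Pointer{Base: named}

	// Deref peels exactly one pointer; Underlying follows the Name
	// to its underlying (non-Name) type.
	fmt.Printf("%T\n", types.Deref(ptr))        // *types.Name
	fmt.Printf("%T\n", types.Underlying(named)) // *types.Basic
}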