Diffstat (limited to 'src/pkg/go')
-rw-r--r-- | src/pkg/go/ast/ast.go                          |   4
-rw-r--r-- | src/pkg/go/ast/filter.go                       |  28
-rw-r--r-- | src/pkg/go/ast/scope.go                        |   4
-rw-r--r-- | src/pkg/go/ast/walk.go                         |  66
-rw-r--r-- | src/pkg/go/doc/comment.go                      |  36
-rw-r--r-- | src/pkg/go/doc/doc.go                          |  60
-rw-r--r-- | src/pkg/go/parser/interface.go                 |  34
-rw-r--r-- | src/pkg/go/parser/parser.go                    | 374
-rw-r--r-- | src/pkg/go/parser/parser_test.go               |  14
-rw-r--r-- | src/pkg/go/printer/nodes.go                    | 200
-rw-r--r-- | src/pkg/go/printer/printer.go                  | 140
-rw-r--r-- | src/pkg/go/printer/printer_test.go             |  10
-rw-r--r-- | src/pkg/go/printer/testdata/comments.golden    |   4
-rw-r--r-- | src/pkg/go/printer/testdata/expressions.golden |   4
-rw-r--r-- | src/pkg/go/printer/testdata/expressions.raw    |   4
-rw-r--r-- | src/pkg/go/printer/testdata/linebreaks.golden  |   4
-rw-r--r-- | src/pkg/go/printer/testdata/statements.golden  |  48
-rw-r--r-- | src/pkg/go/scanner/errors.go                   |  26
-rw-r--r-- | src/pkg/go/scanner/scanner.go                  | 120
-rw-r--r-- | src/pkg/go/scanner/scanner_test.go             |  50
-rw-r--r-- | src/pkg/go/token/token.go                      |  22
21 files changed, 626 insertions, 626 deletions
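The change below is mechanical: it flips the go/printer formatting flag fewerSemis from false to true (visible in the nodes.go hunk) and reformats the listed files in that style, so a statement that is the only one in its block — typically the body of an if, a for/range loop, or a case clause — loses its terminating semicolon, while statements that share a block with others keep theirs. A minimal before/after sketch of that pattern; the identifiers are invented for illustration and do not come from this commit:

```go
// Sketch only: absBefore/absAfter are invented examples, not code from this
// commit; they show the formatting difference the fewerSemis flag produces.
package semidemo

// Old style: every statement carries an explicit terminating semicolon.
func absBefore(x int) int {
	if x < 0 {
		return -x;
	}
	return x;
}

// New style: the sole statement of the if-block drops its semicolon;
// statements that share a block with others (here, the final return,
// which follows the if statement) keep theirs.
func absAfter(x int) int {
	if x < 0 {
		return -x
	}
	return x;
}
```

Both forms compile; the diff changes only how the printer lays the code out, which is why every file shows equal insertion and deletion counts.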
diff --git a/src/pkg/go/ast/ast.go b/src/pkg/go/ast/ast.go index ec299c5f7..b501247cb 100644 --- a/src/pkg/go/ast/ast.go +++ b/src/pkg/go/ast/ast.go @@ -97,7 +97,7 @@ type Field struct { func (f *Field) Pos() token.Position { if len(f.Names) > 0 { - return f.Names[0].Pos(); + return f.Names[0].Pos() } return f.Type.Pos(); } @@ -600,7 +600,7 @@ type ( // func (s *ImportSpec) Pos() token.Position { if s.Name != nil { - return s.Name.Pos(); + return s.Name.Pos() } return s.Path[0].Pos(); } diff --git a/src/pkg/go/ast/filter.go b/src/pkg/go/ast/filter.go index ee6747f65..2e86435bf 100644 --- a/src/pkg/go/ast/filter.go +++ b/src/pkg/go/ast/filter.go @@ -23,14 +23,14 @@ func filterIdentList(list []*Ident) []*Ident { func isExportedType(typ Expr) bool { switch t := typ.(type) { case *Ident: - return t.IsExported(); + return t.IsExported() case *ParenExpr: - return isExportedType(t.X); + return isExportedType(t.X) case *SelectorExpr: // assume t.X is a typename - return t.Sel.IsExported(); + return t.Sel.IsExported() case *StarExpr: - return isExportedType(t.X); + return isExportedType(t.X) } return false; } @@ -47,12 +47,12 @@ func filterFieldList(list []*Field, incomplete *bool) []*Field { // fields, so this is not absolutely correct. // However, this cannot be done w/o complete // type information.) - exported = isExportedType(f.Type); + exported = isExportedType(f.Type) } else { n := len(f.Names); f.Names = filterIdentList(f.Names); if len(f.Names) < n { - *incomplete = true; + *incomplete = true } exported = len(f.Names) > 0; } @@ -63,7 +63,7 @@ func filterFieldList(list []*Field, incomplete *bool) []*Field { } } if j < len(list) { - *incomplete = true; + *incomplete = true } return list[0:j]; } @@ -71,7 +71,7 @@ func filterFieldList(list []*Field, incomplete *bool) []*Field { func filterParamList(list []*Field) { for _, f := range list { - filterType(f.Type); + filterType(f.Type) } } @@ -81,19 +81,19 @@ var noPos token.Position func filterType(typ Expr) { switch t := typ.(type) { case *ArrayType: - filterType(t.Elt); + filterType(t.Elt) case *StructType: - t.Fields = filterFieldList(t.Fields, &t.Incomplete); + t.Fields = filterFieldList(t.Fields, &t.Incomplete) case *FuncType: filterParamList(t.Params); filterParamList(t.Results); case *InterfaceType: - t.Methods = filterFieldList(t.Methods, &t.Incomplete); + t.Methods = filterFieldList(t.Methods, &t.Incomplete) case *MapType: filterType(t.Key); filterType(t.Value); case *ChanType: - filterType(t.Value); + filterType(t.Value) } } @@ -180,7 +180,7 @@ func PackageExports(pkg *Package) bool { hasExports := false; for _, f := range pkg.Files { if FileExports(f) { - hasExports = true; + hasExports = true } } return hasExports; @@ -203,7 +203,7 @@ func MergePackageFiles(pkg *Package) *File { ndecls := 0; for _, f := range pkg.Files { if f.Doc != nil { - ncomments += len(f.Doc.List)+1; // +1 for separator + ncomments += len(f.Doc.List)+1 // +1 for separator } ndecls += len(f.Decls); } diff --git a/src/pkg/go/ast/scope.go b/src/pkg/go/ast/scope.go index 301630de6..240953673 100644 --- a/src/pkg/go/ast/scope.go +++ b/src/pkg/go/ast/scope.go @@ -26,7 +26,7 @@ func NewScope(outer *Scope) *Scope { return &Scope{outer, make(map[string]*Ident // func (s *Scope) Declare(ident *Ident) bool { if _, found := s.Names[ident.Value]; found { - return false; + return false } s.Names[ident.Value] = ident; return true; @@ -40,7 +40,7 @@ func (s *Scope) Declare(ident *Ident) bool { func (s *Scope) Lookup(name string) *Ident { for ; s != nil; s = s.Outer { if ident, 
found := s.Names[name]; found { - return ident; + return ident } } return nil; diff --git a/src/pkg/go/ast/walk.go b/src/pkg/go/ast/walk.go index 379cc9012..c1ed36621 100644 --- a/src/pkg/go/ast/walk.go +++ b/src/pkg/go/ast/walk.go @@ -17,49 +17,49 @@ type Visitor interface { func walkIdent(v Visitor, x *Ident) { if x != nil { - Walk(v, x); + Walk(v, x) } } func walkCommentGroup(v Visitor, g *CommentGroup) { if g != nil { - Walk(v, g); + Walk(v, g) } } func walkFieldList(v Visitor, list []*Field) { for _, x := range list { - Walk(v, x); + Walk(v, x) } } func walkIdentList(v Visitor, list []*Ident) { for _, x := range list { - Walk(v, x); + Walk(v, x) } } func walkExprList(v Visitor, list []Expr) { for _, x := range list { - Walk(v, x); + Walk(v, x) } } func walkStmtList(v Visitor, list []Stmt) { for _, s := range list { - Walk(v, s); + Walk(v, s) } } func walkBlockStmt(v Visitor, b *BlockStmt) { if b != nil { - Walk(v, b); + Walk(v, b) } } @@ -70,7 +70,7 @@ func walkBlockStmt(v Visitor, b *BlockStmt) { // func Walk(v Visitor, node interface{}) { if node == nil || !v.Visit(node) { - return; + return } // walk children @@ -83,7 +83,7 @@ func Walk(v Visitor, node interface{}) { case *CommentGroup: for _, c := range n.List { - Walk(v, c); + Walk(v, c) } // TODO(gri): Keep comments in a list/vector instead // of linking them via Next. Following next will lead @@ -96,7 +96,7 @@ func Walk(v Visitor, node interface{}) { walkIdentList(v, n.Names); Walk(v, n.Type); for _, x := range n.Tag { - Walk(v, x); + Walk(v, x) } walkCommentGroup(v, n.Comment); @@ -106,12 +106,12 @@ func Walk(v Visitor, node interface{}) { case *StringList: for _, x := range n.Strings { - Walk(v, x); + Walk(v, x) } case *FuncLit: if n != nil { - Walk(v, n.Type); + Walk(v, n.Type) } walkBlockStmt(v, n.Body); @@ -120,7 +120,7 @@ func Walk(v Visitor, node interface{}) { walkExprList(v, n.Elts); case *ParenExpr: - Walk(v, n.X); + Walk(v, n.X) case *SelectorExpr: Walk(v, n.X); @@ -140,10 +140,10 @@ func Walk(v Visitor, node interface{}) { walkExprList(v, n.Args); case *StarExpr: - Walk(v, n.X); + Walk(v, n.X) case *UnaryExpr: - Walk(v, n.X); + Walk(v, n.X) case *BinaryExpr: Walk(v, n.X); @@ -159,28 +159,28 @@ func Walk(v Visitor, node interface{}) { Walk(v, n.Elt); case *StructType: - walkFieldList(v, n.Fields); + walkFieldList(v, n.Fields) case *FuncType: walkFieldList(v, n.Params); walkFieldList(v, n.Results); case *InterfaceType: - walkFieldList(v, n.Methods); + walkFieldList(v, n.Methods) case *MapType: Walk(v, n.Key); Walk(v, n.Value); case *ChanType: - Walk(v, n.Value); + Walk(v, n.Value) // Statements case *BadStmt: // nothing to do case *DeclStmt: - Walk(v, n.Decl); + Walk(v, n.Decl) case *EmptyStmt: // nothing to do @@ -190,10 +190,10 @@ func Walk(v Visitor, node interface{}) { Walk(v, n.Stmt); case *ExprStmt: - Walk(v, n.X); + Walk(v, n.X) case *IncDecStmt: - Walk(v, n.X); + Walk(v, n.X) case *AssignStmt: walkExprList(v, n.Lhs); @@ -201,22 +201,22 @@ func Walk(v Visitor, node interface{}) { case *GoStmt: if n.Call != nil { - Walk(v, n.Call); + Walk(v, n.Call) } case *DeferStmt: if n.Call != nil { - Walk(v, n.Call); + Walk(v, n.Call) } case *ReturnStmt: - walkExprList(v, n.Results); + walkExprList(v, n.Results) case *BranchStmt: - walkIdent(v, n.Label); + walkIdent(v, n.Label) case *BlockStmt: - walkStmtList(v, n.List); + walkStmtList(v, n.List) case *IfStmt: Walk(v, n.Init); @@ -248,7 +248,7 @@ func Walk(v Visitor, node interface{}) { walkStmtList(v, n.Body); case *SelectStmt: - walkBlockStmt(v, n.Body); + 
walkBlockStmt(v, n.Body) case *ForStmt: Walk(v, n.Init); @@ -267,7 +267,7 @@ func Walk(v Visitor, node interface{}) { walkCommentGroup(v, n.Doc); walkIdent(v, n.Name); for _, x := range n.Path { - Walk(v, x); + Walk(v, x) } walkCommentGroup(v, n.Comment); @@ -290,17 +290,17 @@ func Walk(v Visitor, node interface{}) { case *GenDecl: walkCommentGroup(v, n.Doc); for _, s := range n.Specs { - Walk(v, s); + Walk(v, s) } case *FuncDecl: walkCommentGroup(v, n.Doc); if n.Recv != nil { - Walk(v, n.Recv); + Walk(v, n.Recv) } walkIdent(v, n.Name); if n.Type != nil { - Walk(v, n.Type); + Walk(v, n.Type) } walkBlockStmt(v, n.Body); @@ -309,13 +309,13 @@ func Walk(v Visitor, node interface{}) { walkCommentGroup(v, n.Doc); walkIdent(v, n.Name); for _, d := range n.Decls { - Walk(v, d); + Walk(v, d) } walkCommentGroup(v, n.Comments); case *Package: for _, f := range n.Files { - Walk(v, f); + Walk(v, f) } default: diff --git a/src/pkg/go/doc/comment.go b/src/pkg/go/doc/comment.go index 3d04c349c..ba9371439 100644 --- a/src/pkg/go/doc/comment.go +++ b/src/pkg/go/doc/comment.go @@ -19,11 +19,11 @@ import ( // with the comment markers - //, /*, and */ - removed. func CommentText(comment *ast.CommentGroup) string { if comment == nil { - return ""; + return "" } comments := make([]string, len(comment.List)); for i, c := range comment.List { - comments[i] = string(c.Text); + comments[i] = string(c.Text) } lines := make([]string, 0, 20); @@ -32,12 +32,12 @@ func CommentText(comment *ast.CommentGroup) string { // The parser has given us exactly the comment text. switch n := len(c); { case n >= 4 && c[0:2] == "/*" && c[n-2 : n] == "*/": - c = c[2 : n-2]; + c = c[2 : n-2] case n >= 2 && c[0:2] == "//": c = c[2:n]; // Remove leading space after //, if there is one. if len(c) > 0 && c[0] == ' ' { - c = c[1:len(c)]; + c = c[1:len(c)] } } @@ -49,7 +49,7 @@ func CommentText(comment *ast.CommentGroup) string { // Strip trailing white space m := len(l); for m > 0 && (l[m-1] == ' ' || l[m-1] == '\n' || l[m-1] == '\t' || l[m-1] == '\r') { - m--; + m-- } l = l[0:m]; @@ -58,7 +58,7 @@ func CommentText(comment *ast.CommentGroup) string { if n+1 >= cap(lines) { newlines := make([]string, n, 2*cap(lines)); for k := range newlines { - newlines[k] = lines[k]; + newlines[k] = lines[k] } lines = newlines; } @@ -100,7 +100,7 @@ func split(text []byte) [][]byte { } } if last < len(text) { - n++; + n++ } // split @@ -115,7 +115,7 @@ func split(text []byte) [][]byte { } } if last < len(text) { - out[n] = text[last:len(text)]; + out[n] = text[last:len(text)] } return out; @@ -137,9 +137,9 @@ func commentEscape(w io.Writer, s []byte) { last = i+2; switch s[i] { case '`': - w.Write(ldquo); + w.Write(ldquo) case '\'': - w.Write(rdquo); + w.Write(rdquo) } i++; // loop will add one more } @@ -159,7 +159,7 @@ var ( func indentLen(s []byte) int { i := 0; for i < len(s) && (s[i] == ' ' || s[i] == '\t') { - i++; + i++ } return i; } @@ -171,7 +171,7 @@ func isBlank(s []byte) bool { return len(s) == 0 || (len(s) == 1 && s[0] == '\n' func commonPrefix(a, b []byte) []byte { i := 0; for i < len(a) && i < len(b) && a[i] == b[i] { - i++; + i++ } return a[0:i]; } @@ -179,14 +179,14 @@ func commonPrefix(a, b []byte) []byte { func unindent(block [][]byte) { if len(block) == 0 { - return; + return } // compute maximum common white prefix prefix := block[0][0 : indentLen(block[0])]; for _, line := range block { if !isBlank(line) { - prefix = commonPrefix(prefix, line[0 : indentLen(line)]); + prefix = commonPrefix(prefix, line[0 : indentLen(line)]) } } n := 
len(prefix); @@ -194,7 +194,7 @@ func unindent(block [][]byte) { // remove for i, line := range block { if !isBlank(line) { - block[i] = line[n:len(line)]; + block[i] = line[n:len(line)] } } } @@ -244,11 +244,11 @@ func ToHTML(w io.Writer, s []byte) { // count indented or blank lines j := i+1; for j < len(lines) && (isBlank(lines[j]) || indentLen(lines[j]) > 0) { - j++; + j++ } // but not trailing blank lines for j > i && isBlank(lines[j-1]) { - j--; + j-- } block := lines[i:j]; i = j; @@ -260,7 +260,7 @@ func ToHTML(w io.Writer, s []byte) { // just html escaping w.Write(html_pre); for _, line := range block { - template.HTMLEscape(w, line); + template.HTMLEscape(w, line) } w.Write(html_endpre); continue; diff --git a/src/pkg/go/doc/doc.go b/src/pkg/go/doc/doc.go index 865f52ec4..9c85c20be 100644 --- a/src/pkg/go/doc/doc.go +++ b/src/pkg/go/doc/doc.go @@ -61,17 +61,17 @@ func (doc *docReader) addType(decl *ast.GenDecl) { if typ != nil { // a type should be added at most once, so typ.decl // should be nil - if it isn't, simply overwrite it - typ.decl = decl; + typ.decl = decl } } func (doc *docReader) lookupTypeDoc(name string) *typeDoc { if name == "" { - return nil; // no type docs for anonymous types + return nil // no type docs for anonymous types } if tdoc, found := doc.types[name]; found { - return tdoc; + return tdoc } // type wasn't found - add one without declaration tdoc := &typeDoc{nil, vector.New(0), make(map[string]*ast.FuncDecl), make(map[string]*ast.FuncDecl)}; @@ -86,10 +86,10 @@ func baseTypeName(typ ast.Expr) string { // if the type is not exported, the effect to // a client is as if there were no type name if t.IsExported() { - return string(t.Value); + return string(t.Value) } case *ast.StarExpr: - return baseTypeName(t.X); + return baseTypeName(t.X) } return ""; } @@ -109,13 +109,13 @@ func (doc *docReader) addValue(decl *ast.GenDecl) { switch { case v.Type != nil: // a type is present; determine it's name - name = baseTypeName(v.Type); + name = baseTypeName(v.Type) case decl.Tok == token.CONST: // no type is present but we have a constant declaration; // use the previous type name (w/o more type information // we cannot handle the case of unnamed variables with // initializer expressions except for some trivial cases) - name = prev; + name = prev } if name != "" { // entry has a named type @@ -139,7 +139,7 @@ func (doc *docReader) addValue(decl *ast.GenDecl) { // typed entries are sufficiently frequent typ := doc.lookupTypeDoc(domName); if typ != nil { - values = typ.values; // associate with that type + values = typ.values // associate with that type } } @@ -156,7 +156,7 @@ func (doc *docReader) addFunc(fun *ast.FuncDecl) { typ := doc.lookupTypeDoc(baseTypeName(fun.Recv.Type)); if typ != nil { // exported receiver type - typ.methods[name] = fun; + typ.methods[name] = fun } // otherwise don't show the method // TODO(gri): There may be exported methods of non-exported types @@ -209,7 +209,7 @@ func (doc *docReader) addDecl(decl ast.Decl) { switch d.Tok { case token.CONST, token.VAR: // constants and variables are always handled as a group - doc.addValue(d); + doc.addValue(d) case token.TYPE: // types are handled individually var noPos token.Position; @@ -225,13 +225,13 @@ func (doc *docReader) addDecl(decl ast.Decl) { // makeTypeDocs below). Simpler data structures, but // would lose GenDecl documentation if the TypeSpec // has documentation as well. 
- doc.addType(&ast.GenDecl{d.Doc, d.Pos(), token.TYPE, noPos, []ast.Spec{spec}, noPos}); + doc.addType(&ast.GenDecl{d.Doc, d.Pos(), token.TYPE, noPos, []ast.Spec{spec}, noPos}) // A new GenDecl node is created, no need to nil out d.Doc. } } } case *ast.FuncDecl: - doc.addFunc(d); + doc.addFunc(d) } } @@ -239,7 +239,7 @@ func (doc *docReader) addDecl(decl ast.Decl) { func copyCommentList(list []*ast.Comment) []*ast.Comment { copy := make([]*ast.Comment, len(list)); for i, c := range list { - copy[i] = c; + copy[i] = c } return copy; } @@ -268,7 +268,7 @@ func (doc *docReader) addFile(src *ast.File) { // add all declarations for _, decl := range src.Decls { - doc.addDecl(decl); + doc.addDecl(decl) } // collect BUG(...) comments @@ -331,14 +331,14 @@ func (p sortValueDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func declName(d *ast.GenDecl) string { if len(d.Specs) != 1 { - return ""; + return "" } switch v := d.Specs[0].(type) { case *ast.ValueSpec: - return v.Names[0].Value; + return v.Names[0].Value case *ast.TypeSpec: - return v.Name.Value; + return v.Name.Value } return ""; @@ -350,7 +350,7 @@ func (p sortValueDoc) Less(i, j int) bool { // pull blocks (name = "") up to top // in original order if ni, nj := declName(p[i].Decl), declName(p[j].Decl); ni != nj { - return ni < nj; + return ni < nj } return p[i].order < p[j].order; } @@ -398,7 +398,7 @@ func makeFuncDocs(m map[string]*ast.FuncDecl) []*FuncDoc { doc.Doc = CommentText(f.Doc); f.Doc = nil; // doc consumed - remove from ast.FuncDecl node if f.Recv != nil { - doc.Recv = f.Recv.Type; + doc.Recv = f.Recv.Type } doc.Name = f.Name.Value; doc.Decl = f; @@ -434,7 +434,7 @@ func (p sortTypeDoc) Less(i, j int) bool { // pull blocks (name = "") up to top // in original order if ni, nj := p[i].Type.Name.Value, p[j].Type.Name.Value; ni != nj { - return ni < nj; + return ni < nj } return p[i].order < p[j].order; } @@ -457,7 +457,7 @@ func (doc *docReader) makeTypeDocs(m map[string]*typeDoc) []*TypeDoc { typespec.Doc = nil; // doc consumed - remove from ast.TypeSpec node if doc == nil { // no doc associated with the spec, use the declaration doc, if any - doc = decl.Doc; + doc = decl.Doc } decl.Doc = nil; // doc consumed - remove from ast.Decl node t.Doc = CommentText(doc); @@ -480,13 +480,13 @@ func (doc *docReader) makeTypeDocs(m map[string]*typeDoc) []*TypeDoc { doc.values.AppendVector(old.values); // 2) move factory functions for name, f := range old.factories { - doc.funcs[name] = f; + doc.funcs[name] = f } // 3) move methods for name, f := range old.methods { // don't overwrite functions with the same name if _, found := doc.funcs[name]; !found { - doc.funcs[name] = f; + doc.funcs[name] = f } } } @@ -500,7 +500,7 @@ func (doc *docReader) makeTypeDocs(m map[string]*typeDoc) []*TypeDoc { func makeBugDocs(v *vector.Vector) []string { d := make([]string, v.Len()); for i := 0; i < v.Len(); i++ { - d[i] = CommentText(v.At(i).(*ast.CommentGroup)); + d[i] = CommentText(v.At(i).(*ast.CommentGroup)) } return d; } @@ -553,7 +553,7 @@ func isRegexp(s string) bool { for _, c := range s { for _, m := range metachars { if c == m { - return true; + return true } } } @@ -565,11 +565,11 @@ func match(s string, a []string) bool { for _, t := range a { if isRegexp(t) { if matched, _ := regexp.MatchString(t, s); matched { - return true; + return true } } if s == t { - return true; + return true } } return false; @@ -582,12 +582,12 @@ func matchDecl(d *ast.GenDecl, names []string) bool { case *ast.ValueSpec: for _, name := range v.Names { if 
match(name.Value, names) { - return true; + return true } } case *ast.TypeSpec: if match(v.Name.Value, names) { - return true; + return true } } } @@ -624,7 +624,7 @@ func filterTypeDocs(a []*TypeDoc, names []string) []*TypeDoc { for _, td := range a { match := false; if matchDecl(td.Decl, names) { - match = true; + match = true } else { // type name doesn't match, but we may have matching factories or methods td.Factories = filterFuncDocs(td.Factories, names); diff --git a/src/pkg/go/parser/interface.go b/src/pkg/go/parser/interface.go index 3c9be7532..50d7dff65 100644 --- a/src/pkg/go/parser/interface.go +++ b/src/pkg/go/parser/interface.go @@ -26,23 +26,23 @@ func readSource(filename string, src interface{}) ([]byte, os.Error) { if src != nil { switch s := src.(type) { case string: - return strings.Bytes(s), nil; + return strings.Bytes(s), nil case []byte: - return s, nil; + return s, nil case *bytes.Buffer: // is io.Reader, but src is already available in []byte form if s != nil { - return s.Bytes(), nil; + return s.Bytes(), nil } case io.Reader: var buf bytes.Buffer; _, err := io.Copy(&buf, s); if err != nil { - return nil, err; + return nil, err } return buf.Bytes(), nil; default: - return nil, os.ErrorString("invalid source"); + return nil, os.ErrorString("invalid source") } } @@ -58,7 +58,7 @@ func readSource(filename string, src interface{}) ([]byte, os.Error) { func ParseExpr(filename string, src interface{}) (ast.Expr, os.Error) { data, err := readSource(filename, src); if err != nil { - return nil, err; + return nil, err } var p parser; @@ -75,7 +75,7 @@ func ParseExpr(filename string, src interface{}) (ast.Expr, os.Error) { func ParseStmtList(filename string, src interface{}) ([]ast.Stmt, os.Error) { data, err := readSource(filename, src); if err != nil { - return nil, err; + return nil, err } var p parser; @@ -92,7 +92,7 @@ func ParseStmtList(filename string, src interface{}) ([]ast.Stmt, os.Error) { func ParseDeclList(filename string, src interface{}) ([]ast.Decl, os.Error) { data, err := readSource(filename, src); if err != nil { - return nil, err; + return nil, err } var p parser; @@ -122,7 +122,7 @@ func ParseDeclList(filename string, src interface{}) ([]ast.Decl, os.Error) { func ParseFile(filename string, src interface{}, mode uint) (*ast.File, os.Error) { data, err := readSource(filename, src); if err != nil { - return nil, err; + return nil, err } var p parser; @@ -140,16 +140,16 @@ func ParseFile(filename string, src interface{}, mode uint) (*ast.File, os.Error func ParsePkgFile(pkgname, filename string, mode uint) (*ast.File, os.Error) { src, err := io.ReadFile(filename); if err != nil { - return nil, err; + return nil, err } if pkgname != "" { prog, err := ParseFile(filename, src, PackageClauseOnly); if err != nil { - return nil, err; + return nil, err } if prog.Name.Value != pkgname { - return nil, os.NewError(fmt.Sprintf("multiple packages found: %s, %s", prog.Name.Value, pkgname)); + return nil, os.NewError(fmt.Sprintf("multiple packages found: %s, %s", prog.Name.Value, pkgname)) } } @@ -168,13 +168,13 @@ func ParsePkgFile(pkgname, filename string, mode uint) (*ast.File, os.Error) { func ParsePackage(path string, filter func(*os.Dir) bool, mode uint) (*ast.Package, os.Error) { fd, err := os.Open(path, os.O_RDONLY, 0); if err != nil { - return nil, err; + return nil, err } defer fd.Close(); list, err := fd.Readdir(-1); if err != nil { - return nil, err; + return nil, err } name := ""; @@ -184,17 +184,17 @@ func ParsePackage(path string, filter func(*os.Dir) bool, 
mode uint) (*ast.Packa if filter == nil || filter(entry) { src, err := ParsePkgFile(name, pathutil.Join(path, entry.Name), mode); if err != nil { - return nil, err; + return nil, err } files[entry.Name] = src; if name == "" { - name = src.Name.Value; + name = src.Name.Value } } } if len(files) == 0 { - return nil, os.NewError(path + ": no package found"); + return nil, os.NewError(path + ": no package found") } return &ast.Package{name, path, files}, nil; diff --git a/src/pkg/go/parser/parser.go b/src/pkg/go/parser/parser.go index 53fff192d..5e0c7307b 100644 --- a/src/pkg/go/parser/parser.go +++ b/src/pkg/go/parser/parser.go @@ -69,7 +69,7 @@ type parser struct { // scannerMode returns the scanner mode bits given the parser's mode bits. func scannerMode(mode uint) uint { if mode & ParseComments != 0 { - return scanner.ScanComments; + return scanner.ScanComments } return 0; } @@ -94,7 +94,7 @@ func (p *parser) printTrace(a ...) { fmt.Printf("%5d:%3d: ", p.pos.Line, p.pos.Column); i := 2 * p.indent; for ; i > n; i -= n { - fmt.Print(dots); + fmt.Print(dots) } fmt.Print(dots[0:i]); fmt.Println(a); @@ -125,11 +125,11 @@ func (p *parser) next0() { s := p.tok.String(); switch { case p.tok.IsLiteral(): - p.printTrace(s, string(p.lit)); + p.printTrace(s, string(p.lit)) case p.tok.IsOperator(), p.tok.IsKeyword(): - p.printTrace("\""+s+"\""); + p.printTrace("\""+s+"\"") default: - p.printTrace(s); + p.printTrace(s) } } @@ -146,7 +146,7 @@ func (p *parser) consumeComment() (comment *ast.Comment, endline int) { if p.lit[1] == '*' { for _, b := range p.lit { if b == '\n' { - endline++; + endline++ } } } @@ -175,15 +175,15 @@ func (p *parser) consumeCommentGroup() int { // convert list group := make([]*ast.Comment, list.Len()); for i := 0; i < list.Len(); i++ { - group[i] = list.At(i).(*ast.Comment); + group[i] = list.At(i).(*ast.Comment) } // add comment group to the comments list g := &ast.CommentGroup{group, nil}; if p.lastComment != nil { - p.lastComment.Next = g; + p.lastComment.Next = g } else { - p.comments = g; + p.comments = g } p.lastComment = g; @@ -220,20 +220,20 @@ func (p *parser) next() { if p.pos.Line != endline { // The next token is on a different line, thus // the last comment group is a line comment. - p.lineComment = p.lastComment; + p.lineComment = p.lastComment } } // consume successor comments, if any endline := -1; for p.tok == token.COMMENT { - endline = p.consumeCommentGroup(); + endline = p.consumeCommentGroup() } if endline >= 0 && endline+1 == p.pos.Line { // The next token is following on the line immediately after the // comment group, thus the last comment group is a lead comment. 
- p.leadComment = p.lastComment; + p.leadComment = p.lastComment } } } @@ -246,7 +246,7 @@ func (p *parser) errorExpected(pos token.Position, msg string) { // make the error message more specific msg += ", found '" + p.tok.String() + "'"; if p.tok.IsLiteral() { - msg += " "+string(p.lit); + msg += " "+string(p.lit) } } p.Error(pos, msg); @@ -256,7 +256,7 @@ func (p *parser) errorExpected(pos token.Position, msg string) { func (p *parser) expect(tok token.Token) token.Position { pos := p.pos; if p.tok != tok { - p.errorExpected(pos, "'" + tok.String() + "'"); + p.errorExpected(pos, "'" + tok.String() + "'") } p.next(); // make progress in any case return pos; @@ -278,14 +278,14 @@ func close(p *parser) { p.topScope = p.topScope.Outer } func (p *parser) declare(ident *ast.Ident) { if !p.topScope.Declare(ident) { - p.Error(p.pos, "'" + ident.Value + "' declared already"); + p.Error(p.pos, "'" + ident.Value + "' declared already") } } func (p *parser) declareList(idents []*ast.Ident) { for _, ident := range idents { - p.declare(ident); + p.declare(ident) } } @@ -306,7 +306,7 @@ func (p *parser) parseIdent() *ast.Ident { func (p *parser) parseIdentList() []*ast.Ident { if p.trace { - defer un(trace(p, "IdentList")); + defer un(trace(p, "IdentList")) } list := vector.New(0); @@ -319,7 +319,7 @@ func (p *parser) parseIdentList() []*ast.Ident { // convert vector idents := make([]*ast.Ident, list.Len()); for i := 0; i < list.Len(); i++ { - idents[i] = list.At(i).(*ast.Ident); + idents[i] = list.At(i).(*ast.Ident) } return idents; @@ -328,7 +328,7 @@ func (p *parser) parseIdentList() []*ast.Ident { func (p *parser) parseExprList() []ast.Expr { if p.trace { - defer un(trace(p, "ExpressionList")); + defer un(trace(p, "ExpressionList")) } list := vector.New(0); @@ -341,7 +341,7 @@ func (p *parser) parseExprList() []ast.Expr { // convert list exprs := make([]ast.Expr, list.Len()); for i := 0; i < list.Len(); i++ { - exprs[i] = list.At(i).(ast.Expr); + exprs[i] = list.At(i).(ast.Expr) } return exprs; @@ -353,7 +353,7 @@ func (p *parser) parseExprList() []ast.Expr { func (p *parser) parseType() ast.Expr { if p.trace { - defer un(trace(p, "Type")); + defer un(trace(p, "Type")) } typ := p.tryType(); @@ -370,7 +370,7 @@ func (p *parser) parseType() ast.Expr { func (p *parser) parseQualifiedIdent() ast.Expr { if p.trace { - defer un(trace(p, "QualifiedIdent")); + defer un(trace(p, "QualifiedIdent")) } var x ast.Expr = p.parseIdent(); @@ -386,7 +386,7 @@ func (p *parser) parseQualifiedIdent() ast.Expr { func (p *parser) parseTypeName() ast.Expr { if p.trace { - defer un(trace(p, "TypeName")); + defer un(trace(p, "TypeName")) } return p.parseQualifiedIdent(); @@ -395,7 +395,7 @@ func (p *parser) parseTypeName() ast.Expr { func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr { if p.trace { - defer un(trace(p, "ArrayType")); + defer un(trace(p, "ArrayType")) } lbrack := p.expect(token.LBRACK); @@ -404,7 +404,7 @@ func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr { len = &ast.Ellipsis{p.pos}; p.next(); } else if p.tok != token.RBRACK { - len = p.parseExpr(); + len = p.parseExpr() } p.expect(token.RBRACK); elt := p.parseType(); @@ -430,7 +430,7 @@ func (p *parser) makeIdentList(list *vector.Vector) []*ast.Ident { func (p *parser) parseFieldDecl() *ast.Field { if p.trace { - defer un(trace(p, "FieldDecl")); + defer un(trace(p, "FieldDecl")) } doc := p.leadComment; @@ -441,9 +441,9 @@ func (p *parser) parseFieldDecl() *ast.Field { // TODO(gri): do not allow ()'s here list.Push(p.parseType()); if p.tok == 
token.COMMA { - p.next(); + p.next() } else { - break; + break } } @@ -453,19 +453,19 @@ func (p *parser) parseFieldDecl() *ast.Field { // optional tag var tag []*ast.BasicLit; if p.tok == token.STRING { - tag = p.parseStringList(nil); + tag = p.parseStringList(nil) } // analyze case var idents []*ast.Ident; if typ != nil { // IdentifierList Type - idents = p.makeIdentList(list); + idents = p.makeIdentList(list) } else { // Type (anonymous field) if list.Len() == 1 { // TODO(gri): check that this looks like a type - typ = list.At(0).(ast.Expr); + typ = list.At(0).(ast.Expr) } else { p.errorExpected(p.pos, "anonymous field"); typ = &ast.BadExpr{p.pos}; @@ -478,7 +478,7 @@ func (p *parser) parseFieldDecl() *ast.Field { func (p *parser) parseStructType() *ast.StructType { if p.trace { - defer un(trace(p, "StructType")); + defer un(trace(p, "StructType")) } pos := p.expect(token.STRUCT); @@ -487,7 +487,7 @@ func (p *parser) parseStructType() *ast.StructType { for p.tok == token.IDENT || p.tok == token.MUL { f := p.parseFieldDecl(); if p.tok != token.RBRACE { - p.expect(token.SEMICOLON); + p.expect(token.SEMICOLON) } f.Comment = p.lineComment; list.Push(f); @@ -498,7 +498,7 @@ func (p *parser) parseStructType() *ast.StructType { // convert vector fields := make([]*ast.Field, list.Len()); for i := list.Len() - 1; i >= 0; i-- { - fields[i] = list.At(i).(*ast.Field); + fields[i] = list.At(i).(*ast.Field) } return &ast.StructType{pos, lbrace, fields, rbrace, false}; @@ -507,7 +507,7 @@ func (p *parser) parseStructType() *ast.StructType { func (p *parser) parsePointerType() *ast.StarExpr { if p.trace { - defer un(trace(p, "PointerType")); + defer un(trace(p, "PointerType")) } star := p.expect(token.MUL); @@ -523,7 +523,7 @@ func (p *parser) tryParameterType(ellipsisOk bool) ast.Expr { p.next(); if p.tok != token.RPAREN { // "..." 
always must be at the very end of a parameter list - p.Error(pos, "expected type, found '...'"); + p.Error(pos, "expected type, found '...'") } return &ast.Ellipsis{pos}; } @@ -544,7 +544,7 @@ func (p *parser) parseParameterType(ellipsisOk bool) ast.Expr { func (p *parser) parseParameterDecl(ellipsisOk bool) (*vector.Vector, ast.Expr) { if p.trace { - defer un(trace(p, "ParameterDecl")); + defer un(trace(p, "ParameterDecl")) } // a list of identifiers looks like a list of type names @@ -553,9 +553,9 @@ func (p *parser) parseParameterDecl(ellipsisOk bool) (*vector.Vector, ast.Expr) // TODO(gri): do not allow ()'s here list.Push(p.parseParameterType(ellipsisOk)); if p.tok == token.COMMA { - p.next(); + p.next() } else { - break; + break } } @@ -568,7 +568,7 @@ func (p *parser) parseParameterDecl(ellipsisOk bool) (*vector.Vector, ast.Expr) func (p *parser) parseParameterList(ellipsisOk bool) []*ast.Field { if p.trace { - defer un(trace(p, "ParameterList")); + defer un(trace(p, "ParameterList")) } list, typ := p.parseParameterDecl(ellipsisOk); @@ -589,14 +589,14 @@ func (p *parser) parseParameterList(ellipsisOk bool) []*ast.Field { // Type { "," Type } (anonymous parameters) // convert list of types into list of *Param for i := 0; i < list.Len(); i++ { - list.Set(i, &ast.Field{Type: list.At(i).(ast.Expr)}); + list.Set(i, &ast.Field{Type: list.At(i).(ast.Expr)}) } } // convert list params := make([]*ast.Field, list.Len()); for i := 0; i < list.Len(); i++ { - params[i] = list.At(i).(*ast.Field); + params[i] = list.At(i).(*ast.Field) } return params; @@ -605,13 +605,13 @@ func (p *parser) parseParameterList(ellipsisOk bool) []*ast.Field { func (p *parser) parseParameters(ellipsisOk bool) []*ast.Field { if p.trace { - defer un(trace(p, "Parameters")); + defer un(trace(p, "Parameters")) } var params []*ast.Field; p.expect(token.LPAREN); if p.tok != token.RPAREN { - params = p.parseParameterList(ellipsisOk); + params = p.parseParameterList(ellipsisOk) } p.expect(token.RPAREN); @@ -621,12 +621,12 @@ func (p *parser) parseParameters(ellipsisOk bool) []*ast.Field { func (p *parser) parseResult() []*ast.Field { if p.trace { - defer un(trace(p, "Result")); + defer un(trace(p, "Result")) } var results []*ast.Field; if p.tok == token.LPAREN { - results = p.parseParameters(false); + results = p.parseParameters(false) } else if p.tok != token.FUNC { typ := p.tryType(); if typ != nil { @@ -641,7 +641,7 @@ func (p *parser) parseResult() []*ast.Field { func (p *parser) parseSignature() (params []*ast.Field, results []*ast.Field) { if p.trace { - defer un(trace(p, "Signature")); + defer un(trace(p, "Signature")) } params = p.parseParameters(true); @@ -653,7 +653,7 @@ func (p *parser) parseSignature() (params []*ast.Field, results []*ast.Field) { func (p *parser) parseFuncType() *ast.FuncType { if p.trace { - defer un(trace(p, "FuncType")); + defer un(trace(p, "FuncType")) } pos := p.expect(token.FUNC); @@ -665,7 +665,7 @@ func (p *parser) parseFuncType() *ast.FuncType { func (p *parser) parseMethodSpec() *ast.Field { if p.trace { - defer un(trace(p, "MethodSpec")); + defer un(trace(p, "MethodSpec")) } doc := p.leadComment; @@ -679,7 +679,7 @@ func (p *parser) parseMethodSpec() *ast.Field { typ = &ast.FuncType{noPos, params, results}; } else { // embedded interface - typ = x; + typ = x } return &ast.Field{doc, idents, typ, nil, nil}; @@ -688,7 +688,7 @@ func (p *parser) parseMethodSpec() *ast.Field { func (p *parser) parseInterfaceType() *ast.InterfaceType { if p.trace { - defer un(trace(p, "InterfaceType")); + 
defer un(trace(p, "InterfaceType")) } pos := p.expect(token.INTERFACE); @@ -697,7 +697,7 @@ func (p *parser) parseInterfaceType() *ast.InterfaceType { for p.tok == token.IDENT { m := p.parseMethodSpec(); if p.tok != token.RBRACE { - p.expect(token.SEMICOLON); + p.expect(token.SEMICOLON) } m.Comment = p.lineComment; list.Push(m); @@ -708,7 +708,7 @@ func (p *parser) parseInterfaceType() *ast.InterfaceType { // convert vector methods := make([]*ast.Field, list.Len()); for i := list.Len() - 1; i >= 0; i-- { - methods[i] = list.At(i).(*ast.Field); + methods[i] = list.At(i).(*ast.Field) } return &ast.InterfaceType{pos, lbrace, methods, rbrace, false}; @@ -717,7 +717,7 @@ func (p *parser) parseInterfaceType() *ast.InterfaceType { func (p *parser) parseMapType() *ast.MapType { if p.trace { - defer un(trace(p, "MapType")); + defer un(trace(p, "MapType")) } pos := p.expect(token.MAP); @@ -732,7 +732,7 @@ func (p *parser) parseMapType() *ast.MapType { func (p *parser) parseChanType() *ast.ChanType { if p.trace { - defer un(trace(p, "ChanType")); + defer un(trace(p, "ChanType")) } pos := p.pos; @@ -757,21 +757,21 @@ func (p *parser) parseChanType() *ast.ChanType { func (p *parser) tryRawType(ellipsisOk bool) ast.Expr { switch p.tok { case token.IDENT: - return p.parseTypeName(); + return p.parseTypeName() case token.LBRACK: - return p.parseArrayType(ellipsisOk); + return p.parseArrayType(ellipsisOk) case token.STRUCT: - return p.parseStructType(); + return p.parseStructType() case token.MUL: - return p.parsePointerType(); + return p.parsePointerType() case token.FUNC: - return p.parseFuncType(); + return p.parseFuncType() case token.INTERFACE: - return p.parseInterfaceType(); + return p.parseInterfaceType() case token.MAP: - return p.parseMapType(); + return p.parseMapType() case token.CHAN, token.ARROW: - return p.parseChanType(); + return p.parseChanType() case token.LPAREN: lparen := p.pos; p.next(); @@ -794,7 +794,7 @@ func (p *parser) tryType() ast.Expr { return p.tryRawType(false) } func makeStmtList(list *vector.Vector) []ast.Stmt { stats := make([]ast.Stmt, list.Len()); for i := 0; i < list.Len(); i++ { - stats[i] = list.At(i).(ast.Stmt); + stats[i] = list.At(i).(ast.Stmt) } return stats; } @@ -802,7 +802,7 @@ func makeStmtList(list *vector.Vector) []ast.Stmt { func (p *parser) parseStmtList() []ast.Stmt { if p.trace { - defer un(trace(p, "StatementList")); + defer un(trace(p, "StatementList")) } list := vector.New(0); @@ -814,11 +814,11 @@ func (p *parser) parseStmtList() []ast.Stmt { } list.Push(p.parseStmt()); if p.tok == token.SEMICOLON { - p.next(); + p.next() } else if p.optSemi { - p.optSemi = false; // "consume" optional semicolon + p.optSemi = false // "consume" optional semicolon } else { - expectSemi = true; + expectSemi = true } } @@ -828,7 +828,7 @@ func (p *parser) parseStmtList() []ast.Stmt { func (p *parser) parseBlockStmt(idents []*ast.Ident) *ast.BlockStmt { if p.trace { - defer un(trace(p, "BlockStmt")); + defer un(trace(p, "BlockStmt")) } defer close(openScope(p)); @@ -847,12 +847,12 @@ func (p *parser) parseBlockStmt(idents []*ast.Ident) *ast.BlockStmt { func (p *parser) parseStringList(x *ast.BasicLit) []*ast.BasicLit { if p.trace { - defer un(trace(p, "StringList")); + defer un(trace(p, "StringList")) } list := vector.New(0); if x != nil { - list.Push(x); + list.Push(x) } for p.tok == token.STRING { @@ -863,7 +863,7 @@ func (p *parser) parseStringList(x *ast.BasicLit) []*ast.BasicLit { // convert list strings := make([]*ast.BasicLit, list.Len()); for i := 0; i < 
list.Len(); i++ { - strings[i] = list.At(i).(*ast.BasicLit); + strings[i] = list.At(i).(*ast.BasicLit) } return strings; @@ -872,13 +872,13 @@ func (p *parser) parseStringList(x *ast.BasicLit) []*ast.BasicLit { func (p *parser) parseFuncTypeOrLit() ast.Expr { if p.trace { - defer un(trace(p, "FuncTypeOrLit")); + defer un(trace(p, "FuncTypeOrLit")) } typ := p.parseFuncType(); if p.tok != token.LBRACE { // function type only - return typ; + return typ } p.exprLev++; @@ -895,18 +895,18 @@ func (p *parser) parseFuncTypeOrLit() ast.Expr { // func (p *parser) parseOperand() ast.Expr { if p.trace { - defer un(trace(p, "Operand")); + defer un(trace(p, "Operand")) } switch p.tok { case token.IDENT: - return p.parseIdent(); + return p.parseIdent() case token.INT, token.FLOAT, token.CHAR, token.STRING: x := &ast.BasicLit{p.pos, p.tok, p.lit}; p.next(); if p.tok == token.STRING && p.tok == token.STRING { - return &ast.StringList{p.parseStringList(x)}; + return &ast.StringList{p.parseStringList(x)} } return x; @@ -920,12 +920,12 @@ func (p *parser) parseOperand() ast.Expr { return &ast.ParenExpr{lparen, x, rparen}; case token.FUNC: - return p.parseFuncTypeOrLit(); + return p.parseFuncTypeOrLit() default: t := p.tryRawType(true); // could be type for composite literal or conversion if t != nil { - return t; + return t } } @@ -937,7 +937,7 @@ func (p *parser) parseOperand() ast.Expr { func (p *parser) parseSelectorOrTypeAssertion(x ast.Expr) ast.Expr { if p.trace { - defer un(trace(p, "SelectorOrTypeAssertion")); + defer un(trace(p, "SelectorOrTypeAssertion")) } p.expect(token.PERIOD); @@ -952,9 +952,9 @@ func (p *parser) parseSelectorOrTypeAssertion(x ast.Expr) ast.Expr { var typ ast.Expr; if p.tok == token.TYPE { // type switch: typ == nil - p.next(); + p.next() } else { - typ = p.parseType(); + typ = p.parseType() } p.expect(token.RPAREN); @@ -964,7 +964,7 @@ func (p *parser) parseSelectorOrTypeAssertion(x ast.Expr) ast.Expr { func (p *parser) parseIndex(x ast.Expr) ast.Expr { if p.trace { - defer un(trace(p, "Index")); + defer un(trace(p, "Index")) } p.expect(token.LBRACK); @@ -984,13 +984,13 @@ func (p *parser) parseIndex(x ast.Expr) ast.Expr { func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr { if p.trace { - defer un(trace(p, "CallOrConversion")); + defer un(trace(p, "CallOrConversion")) } lparen := p.expect(token.LPAREN); var args []ast.Expr; if p.tok != token.RPAREN { - args = p.parseExprList(); + args = p.parseExprList() } rparen := p.expect(token.RPAREN); @@ -1000,7 +1000,7 @@ func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr { func (p *parser) parseElement() ast.Expr { if p.trace { - defer un(trace(p, "Element")); + defer un(trace(p, "Element")) } x := p.parseExpr(); @@ -1016,23 +1016,23 @@ func (p *parser) parseElement() ast.Expr { func (p *parser) parseElementList() []ast.Expr { if p.trace { - defer un(trace(p, "ElementList")); + defer un(trace(p, "ElementList")) } list := vector.New(0); for p.tok != token.RBRACE && p.tok != token.EOF { list.Push(p.parseElement()); if p.tok == token.COMMA { - p.next(); + p.next() } else { - break; + break } } // convert list elts := make([]ast.Expr, list.Len()); for i := 0; i < list.Len(); i++ { - elts[i] = list.At(i).(ast.Expr); + elts[i] = list.At(i).(ast.Expr) } return elts; @@ -1041,13 +1041,13 @@ func (p *parser) parseElementList() []ast.Expr { func (p *parser) parseCompositeLit(typ ast.Expr) ast.Expr { if p.trace { - defer un(trace(p, "CompositeLit")); + defer un(trace(p, "CompositeLit")) } lbrace := 
p.expect(token.LBRACE); var elts []ast.Expr; if p.tok != token.RBRACE { - elts = p.parseElementList(); + elts = p.parseElementList() } rbrace := p.expect(token.RBRACE); return &ast.CompositeLit{typ, lbrace, elts, rbrace}; @@ -1103,11 +1103,11 @@ func isTypeName(x ast.Expr) bool { case *ast.BadExpr: case *ast.Ident: case *ast.ParenExpr: - return isTypeName(t.X); // TODO(gri): should (TypeName) be illegal? + return isTypeName(t.X) // TODO(gri): should (TypeName) be illegal? case *ast.SelectorExpr: - return isTypeName(t.X); + return isTypeName(t.X) default: - return false; // all other nodes are not type names + return false // all other nodes are not type names } return true; } @@ -1120,14 +1120,14 @@ func isCompositeLitType(x ast.Expr) bool { case *ast.BadExpr: case *ast.Ident: case *ast.ParenExpr: - return isCompositeLitType(t.X); + return isCompositeLitType(t.X) case *ast.SelectorExpr: - return isTypeName(t.X); + return isTypeName(t.X) case *ast.ArrayType: case *ast.StructType: case *ast.MapType: default: - return false; // all other nodes are not legal composite literal types + return false // all other nodes are not legal composite literal types } return true; } @@ -1159,26 +1159,26 @@ func (p *parser) checkExprOrType(x ast.Expr) ast.Expr { func (p *parser) parsePrimaryExpr() ast.Expr { if p.trace { - defer un(trace(p, "PrimaryExpr")); + defer un(trace(p, "PrimaryExpr")) } x := p.parseOperand(); L: for { switch p.tok { case token.PERIOD: - x = p.parseSelectorOrTypeAssertion(p.checkExpr(x)); + x = p.parseSelectorOrTypeAssertion(p.checkExpr(x)) case token.LBRACK: - x = p.parseIndex(p.checkExpr(x)); + x = p.parseIndex(p.checkExpr(x)) case token.LPAREN: - x = p.parseCallOrConversion(p.checkExprOrType(x)); + x = p.parseCallOrConversion(p.checkExprOrType(x)) case token.LBRACE: if isCompositeLitType(x) && (p.exprLev >= 0 || !isTypeName(x)) { - x = p.parseCompositeLit(x); + x = p.parseCompositeLit(x) } else { - break L; + break L } default: - break L; + break L } } @@ -1188,7 +1188,7 @@ L: for { func (p *parser) parseUnaryExpr() ast.Expr { if p.trace { - defer un(trace(p, "UnaryExpr")); + defer un(trace(p, "UnaryExpr")) } switch p.tok { @@ -1212,7 +1212,7 @@ func (p *parser) parseUnaryExpr() ast.Expr { func (p *parser) parseBinaryExpr(prec1 int) ast.Expr { if p.trace { - defer un(trace(p, "BinaryExpr")); + defer un(trace(p, "BinaryExpr")) } x := p.parseUnaryExpr(); @@ -1233,7 +1233,7 @@ func (p *parser) parseBinaryExpr(prec1 int) ast.Expr { // should reject when a type/raw type is obviously not allowed func (p *parser) parseExpr() ast.Expr { if p.trace { - defer un(trace(p, "Expression")); + defer un(trace(p, "Expression")) } return p.parseBinaryExpr(token.LowestPrec + 1); @@ -1246,7 +1246,7 @@ func (p *parser) parseExpr() ast.Expr { func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { if p.trace { - defer un(trace(p, "SimpleStmt")); + defer un(trace(p, "SimpleStmt")) } x := p.parseExprList(); @@ -1257,7 +1257,7 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { p.next(); if labelOk && len(x) == 1 { if label, isIdent := x[0].(*ast.Ident); isIdent { - return &ast.LabeledStmt{label, p.parseStmt()}; + return &ast.LabeledStmt{label, p.parseStmt()} } } p.Error(x[0].Pos(), "illegal label declaration"); @@ -1273,13 +1273,13 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { p.next(); y := p.parseExprList(); if len(x) > 1 && len(y) > 1 && len(x) != len(y) { - p.Error(x[0].Pos(), "arity of lhs doesn't match rhs"); + p.Error(x[0].Pos(), "arity of lhs doesn't match rhs") } return 
&ast.AssignStmt{x, pos, tok, y}; } if len(x) > 1 { - p.Error(x[0].Pos(), "only one expression allowed"); + p.Error(x[0].Pos(), "only one expression allowed") // continue with first expression } @@ -1298,7 +1298,7 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { func (p *parser) parseCallExpr() *ast.CallExpr { x := p.parseExpr(); if call, isCall := x.(*ast.CallExpr); isCall { - return call; + return call } p.errorExpected(x.Pos(), "function/method call"); return nil; @@ -1307,13 +1307,13 @@ func (p *parser) parseCallExpr() *ast.CallExpr { func (p *parser) parseGoStmt() ast.Stmt { if p.trace { - defer un(trace(p, "GoStmt")); + defer un(trace(p, "GoStmt")) } pos := p.expect(token.GO); call := p.parseCallExpr(); if call != nil { - return &ast.GoStmt{pos, call}; + return &ast.GoStmt{pos, call} } return &ast.BadStmt{pos}; } @@ -1321,13 +1321,13 @@ func (p *parser) parseGoStmt() ast.Stmt { func (p *parser) parseDeferStmt() ast.Stmt { if p.trace { - defer un(trace(p, "DeferStmt")); + defer un(trace(p, "DeferStmt")) } pos := p.expect(token.DEFER); call := p.parseCallExpr(); if call != nil { - return &ast.DeferStmt{pos, call}; + return &ast.DeferStmt{pos, call} } return &ast.BadStmt{pos}; } @@ -1335,14 +1335,14 @@ func (p *parser) parseDeferStmt() ast.Stmt { func (p *parser) parseReturnStmt() *ast.ReturnStmt { if p.trace { - defer un(trace(p, "ReturnStmt")); + defer un(trace(p, "ReturnStmt")) } pos := p.pos; p.expect(token.RETURN); var x []ast.Expr; if p.tok != token.SEMICOLON && p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE { - x = p.parseExprList(); + x = p.parseExprList() } return &ast.ReturnStmt{pos, x}; @@ -1351,13 +1351,13 @@ func (p *parser) parseReturnStmt() *ast.ReturnStmt { func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt { if p.trace { - defer un(trace(p, "BranchStmt")); + defer un(trace(p, "BranchStmt")) } s := &ast.BranchStmt{p.pos, tok, nil}; p.expect(tok); if tok != token.FALLTHROUGH && p.tok == token.IDENT { - s.Label = p.parseIdent(); + s.Label = p.parseIdent() } return s; @@ -1366,10 +1366,10 @@ func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt { func (p *parser) makeExpr(s ast.Stmt) ast.Expr { if s == nil { - return nil; + return nil } if es, isExpr := s.(*ast.ExprStmt); isExpr { - return p.checkExpr(es.X); + return p.checkExpr(es.X) } p.Error(s.Pos(), "expected condition, found simple statement"); return &ast.BadExpr{s.Pos()}; @@ -1382,22 +1382,22 @@ func (p *parser) parseControlClause(isForStmt bool) (s1, s2, s3 ast.Stmt) { p.exprLev = -1; if p.tok != token.SEMICOLON { - s1 = p.parseSimpleStmt(false); + s1 = p.parseSimpleStmt(false) } if p.tok == token.SEMICOLON { p.next(); if p.tok != token.LBRACE && p.tok != token.SEMICOLON { - s2 = p.parseSimpleStmt(false); + s2 = p.parseSimpleStmt(false) } if isForStmt { // for statements have a 3rd section p.expect(token.SEMICOLON); if p.tok != token.LBRACE { - s3 = p.parseSimpleStmt(false); + s3 = p.parseSimpleStmt(false) } } } else { - s1, s2 = nil, s1; + s1, s2 = nil, s1 } p.exprLev = prevLev; @@ -1409,7 +1409,7 @@ func (p *parser) parseControlClause(isForStmt bool) (s1, s2, s3 ast.Stmt) { func (p *parser) parseIfStmt() *ast.IfStmt { if p.trace { - defer un(trace(p, "IfStmt")); + defer un(trace(p, "IfStmt")) } // IfStmt block @@ -1430,7 +1430,7 @@ func (p *parser) parseIfStmt() *ast.IfStmt { func (p *parser) parseCaseClause() *ast.CaseClause { if p.trace { - defer un(trace(p, "CaseClause")); + defer un(trace(p, "CaseClause")) } // CaseClause block @@ -1443,7 +1443,7 @@ 
func (p *parser) parseCaseClause() *ast.CaseClause { p.next(); x = p.parseExprList(); } else { - p.expect(token.DEFAULT); + p.expect(token.DEFAULT) } colon := p.expect(token.COLON); @@ -1455,7 +1455,7 @@ func (p *parser) parseCaseClause() *ast.CaseClause { func (p *parser) parseTypeList() []ast.Expr { if p.trace { - defer un(trace(p, "TypeList")); + defer un(trace(p, "TypeList")) } list := vector.New(0); @@ -1468,7 +1468,7 @@ func (p *parser) parseTypeList() []ast.Expr { // convert list exprs := make([]ast.Expr, list.Len()); for i := 0; i < list.Len(); i++ { - exprs[i] = list.At(i).(ast.Expr); + exprs[i] = list.At(i).(ast.Expr) } return exprs; @@ -1477,7 +1477,7 @@ func (p *parser) parseTypeList() []ast.Expr { func (p *parser) parseTypeCaseClause() *ast.TypeCaseClause { if p.trace { - defer un(trace(p, "TypeCaseClause")); + defer un(trace(p, "TypeCaseClause")) } // TypeCaseClause block @@ -1490,7 +1490,7 @@ func (p *parser) parseTypeCaseClause() *ast.TypeCaseClause { p.next(); types = p.parseTypeList(); } else { - p.expect(token.DEFAULT); + p.expect(token.DEFAULT) } colon := p.expect(token.COLON); @@ -1502,11 +1502,11 @@ func (p *parser) parseTypeCaseClause() *ast.TypeCaseClause { func isExprSwitch(s ast.Stmt) bool { if s == nil { - return true; + return true } if e, ok := s.(*ast.ExprStmt); ok { if a, ok := e.X.(*ast.TypeAssertExpr); ok { - return a.Type != nil; // regular type assertion + return a.Type != nil // regular type assertion } return true; } @@ -1516,7 +1516,7 @@ func isExprSwitch(s ast.Stmt) bool { func (p *parser) parseSwitchStmt() ast.Stmt { if p.trace { - defer un(trace(p, "SwitchStmt")); + defer un(trace(p, "SwitchStmt")) } // SwitchStmt block @@ -1529,7 +1529,7 @@ func (p *parser) parseSwitchStmt() ast.Stmt { lbrace := p.expect(token.LBRACE); cases := vector.New(0); for p.tok == token.CASE || p.tok == token.DEFAULT { - cases.Push(p.parseCaseClause()); + cases.Push(p.parseCaseClause()) } rbrace := p.expect(token.RBRACE); p.optSemi = true; @@ -1542,7 +1542,7 @@ func (p *parser) parseSwitchStmt() ast.Stmt { lbrace := p.expect(token.LBRACE); cases := vector.New(0); for p.tok == token.CASE || p.tok == token.DEFAULT { - cases.Push(p.parseTypeCaseClause()); + cases.Push(p.parseTypeCaseClause()) } rbrace := p.expect(token.RBRACE); p.optSemi = true; @@ -1553,7 +1553,7 @@ func (p *parser) parseSwitchStmt() ast.Stmt { func (p *parser) parseCommClause() *ast.CommClause { if p.trace { - defer un(trace(p, "CommClause")); + defer un(trace(p, "CommClause")) } // CommClause block @@ -1567,7 +1567,7 @@ func (p *parser) parseCommClause() *ast.CommClause { p.next(); if p.tok == token.ARROW { // RecvExpr without assignment - rhs = p.parseExpr(); + rhs = p.parseExpr() } else { // SendExpr or RecvExpr rhs = p.parseExpr(); @@ -1577,15 +1577,15 @@ func (p *parser) parseCommClause() *ast.CommClause { p.next(); lhs = rhs; if p.tok == token.ARROW { - rhs = p.parseExpr(); + rhs = p.parseExpr() } else { - p.expect(token.ARROW); // use expect() error handling + p.expect(token.ARROW) // use expect() error handling } } // else SendExpr } } else { - p.expect(token.DEFAULT); + p.expect(token.DEFAULT) } colon := p.expect(token.COLON); @@ -1597,14 +1597,14 @@ func (p *parser) parseCommClause() *ast.CommClause { func (p *parser) parseSelectStmt() *ast.SelectStmt { if p.trace { - defer un(trace(p, "SelectStmt")); + defer un(trace(p, "SelectStmt")) } pos := p.expect(token.SELECT); lbrace := p.expect(token.LBRACE); cases := vector.New(0); for p.tok == token.CASE || p.tok == token.DEFAULT { - 
cases.Push(p.parseCommClause()); + cases.Push(p.parseCommClause()) } rbrace := p.expect(token.RBRACE); p.optSemi = true; @@ -1616,7 +1616,7 @@ func (p *parser) parseSelectStmt() *ast.SelectStmt { func (p *parser) parseForStmt() ast.Stmt { if p.trace { - defer un(trace(p, "ForStmt")); + defer un(trace(p, "ForStmt")) } // ForStmt block @@ -1639,7 +1639,7 @@ func (p *parser) parseForStmt() ast.Stmt { value = as.Lhs[1]; fallthrough; case 1: - key = as.Lhs[0]; + key = as.Lhs[0] default: p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions"); return &ast.BadStmt{pos}; @@ -1651,14 +1651,14 @@ func (p *parser) parseForStmt() ast.Stmt { } if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE { // rhs is range expression; check lhs - return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}; + return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body} } else { p.errorExpected(s2.Pos(), "range clause"); return &ast.BadStmt{pos}; } } else { // regular for statement - return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}; + return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body} } panic(); // unreachable @@ -1668,7 +1668,7 @@ func (p *parser) parseForStmt() ast.Stmt { func (p *parser) parseStmt() ast.Stmt { if p.trace { - defer un(trace(p, "Statement")); + defer un(trace(p, "Statement")) } switch p.tok { @@ -1680,28 +1680,28 @@ func (p *parser) parseStmt() ast.Stmt { token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand token.LBRACK, token.STRUCT, // composite type token.MUL, token.AND, token.ARROW, token.ADD, token.SUB, token.XOR: // unary operators - return p.parseSimpleStmt(true); + return p.parseSimpleStmt(true) case token.GO: - return p.parseGoStmt(); + return p.parseGoStmt() case token.DEFER: - return p.parseDeferStmt(); + return p.parseDeferStmt() case token.RETURN: - return p.parseReturnStmt(); + return p.parseReturnStmt() case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH: - return p.parseBranchStmt(p.tok); + return p.parseBranchStmt(p.tok) case token.LBRACE: - return p.parseBlockStmt(nil); + return p.parseBlockStmt(nil) case token.IF: - return p.parseIfStmt(); + return p.parseIfStmt() case token.SWITCH: - return p.parseSwitchStmt(); + return p.parseSwitchStmt() case token.SELECT: - return p.parseSelectStmt(); + return p.parseSelectStmt() case token.FOR: - return p.parseForStmt(); + return p.parseForStmt() case token.SEMICOLON, token.RBRACE: // don't consume the ";", it is the separator following the empty statement - return &ast.EmptyStmt{p.pos}; + return &ast.EmptyStmt{p.pos} } // no statement found @@ -1731,7 +1731,7 @@ func (p *parser) parseComment(getSemi bool) (comment *ast.CommentGroup, gotSemi func parseImportSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) { if p.trace { - defer un(trace(p, "ImportSpec")); + defer un(trace(p, "ImportSpec")) } var ident *ast.Ident; @@ -1739,14 +1739,14 @@ func parseImportSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.S ident = &ast.Ident{p.pos, "."}; p.next(); } else if p.tok == token.IDENT { - ident = p.parseIdent(); + ident = p.parseIdent() } var path []*ast.BasicLit; if p.tok == token.STRING { - path = p.parseStringList(nil); + path = p.parseStringList(nil) } else { - p.expect(token.STRING); // use expect() error handling + p.expect(token.STRING) // use expect() error handling } comment, gotSemi := p.parseComment(getSemi); @@ -1757,7 +1757,7 @@ func parseImportSpec(p *parser, doc 
*ast.CommentGroup, getSemi bool) (spec ast.S func parseConstSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) { if p.trace { - defer un(trace(p, "ConstSpec")); + defer un(trace(p, "ConstSpec")) } idents := p.parseIdentList(); @@ -1775,7 +1775,7 @@ func parseConstSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Sp func parseTypeSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) { if p.trace { - defer un(trace(p, "TypeSpec")); + defer un(trace(p, "TypeSpec")) } ident := p.parseIdent(); @@ -1788,7 +1788,7 @@ func parseTypeSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spe func parseVarSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec, gotSemi bool) { if p.trace { - defer un(trace(p, "VarSpec")); + defer un(trace(p, "VarSpec")) } idents := p.parseIdentList(); @@ -1806,7 +1806,7 @@ func parseVarSpec(p *parser, doc *ast.CommentGroup, getSemi bool) (spec ast.Spec func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction, getSemi bool) (decl *ast.GenDecl, gotSemi bool) { if p.trace { - defer un(trace(p, keyword.String() + "Decl")); + defer un(trace(p, keyword.String() + "Decl")) } doc := p.leadComment; @@ -1821,7 +1821,7 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction, getSemi spec, semi := f(p, doc, true); // consume semicolon if any list.Push(spec); if !semi { - break; + break } } rparen = p.expect(token.RPAREN); @@ -1830,7 +1830,7 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction, getSemi p.next(); gotSemi = true; } else { - p.optSemi = true; + p.optSemi = true } } else { spec, semi := f(p, nil, getSemi); @@ -1841,7 +1841,7 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction, getSemi // convert vector specs := make([]ast.Spec, list.Len()); for i := 0; i < list.Len(); i++ { - specs[i] = list.At(i).(ast.Spec); + specs[i] = list.At(i).(ast.Spec) } return &ast.GenDecl{doc, pos, keyword, lparen, specs, rparen}, gotSemi; @@ -1850,7 +1850,7 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction, getSemi func (p *parser) parseReceiver() *ast.Field { if p.trace { - defer un(trace(p, "Receiver")); + defer un(trace(p, "Receiver")) } pos := p.pos; @@ -1867,10 +1867,10 @@ func (p *parser) parseReceiver() *ast.Field { // recv type must be TypeName or *TypeName base := recv.Type; if ptr, isPtr := base.(*ast.StarExpr); isPtr { - base = ptr.X; + base = ptr.X } if !isTypeName(base) { - p.errorExpected(base.Pos(), "type name"); + p.errorExpected(base.Pos(), "type name") } return recv; @@ -1879,7 +1879,7 @@ func (p *parser) parseReceiver() *ast.Field { func (p *parser) parseFunctionDecl() *ast.FuncDecl { if p.trace { - defer un(trace(p, "FunctionDecl")); + defer un(trace(p, "FunctionDecl")) } doc := p.leadComment; @@ -1887,7 +1887,7 @@ func (p *parser) parseFunctionDecl() *ast.FuncDecl { var recv *ast.Field; if p.tok == token.LPAREN { - recv = p.parseReceiver(); + recv = p.parseReceiver() } ident := p.parseIdent(); @@ -1895,7 +1895,7 @@ func (p *parser) parseFunctionDecl() *ast.FuncDecl { var body *ast.BlockStmt; if p.tok == token.LBRACE { - body = p.parseBlockStmt(nil); + body = p.parseBlockStmt(nil) } return &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}; @@ -1904,19 +1904,19 @@ func (p *parser) parseFunctionDecl() *ast.FuncDecl { func (p *parser) parseDecl(getSemi bool) (decl ast.Decl, gotSemi bool) { if p.trace { - defer un(trace(p, "Declaration")); + defer un(trace(p, 
"Declaration")) } var f parseSpecFunction; switch p.tok { case token.CONST: - f = parseConstSpec; + f = parseConstSpec case token.TYPE: - f = parseTypeSpec; + f = parseTypeSpec case token.VAR: - f = parseVarSpec; + f = parseVarSpec case token.FUNC: decl = p.parseFunctionDecl(); @@ -1938,7 +1938,7 @@ func (p *parser) parseDecl(getSemi bool) (decl ast.Decl, gotSemi bool) { func (p *parser) parseDeclList() []ast.Decl { if p.trace { - defer un(trace(p, "DeclList")); + defer un(trace(p, "DeclList")) } list := vector.New(0); @@ -1950,7 +1950,7 @@ func (p *parser) parseDeclList() []ast.Decl { // convert vector decls := make([]ast.Decl, list.Len()); for i := 0; i < list.Len(); i++ { - decls[i] = list.At(i).(ast.Decl); + decls[i] = list.At(i).(ast.Decl) } return decls; @@ -1962,7 +1962,7 @@ func (p *parser) parseDeclList() []ast.Decl { func (p *parser) parseFile() *ast.File { if p.trace { - defer un(trace(p, "File")); + defer un(trace(p, "File")) } // file block @@ -1996,7 +1996,7 @@ func (p *parser) parseFile() *ast.File { // convert declaration list decls = make([]ast.Decl, list.Len()); for i := 0; i < list.Len(); i++ { - decls[i] = list.At(i).(ast.Decl); + decls[i] = list.At(i).(ast.Decl) } } diff --git a/src/pkg/go/parser/parser_test.go b/src/pkg/go/parser/parser_test.go index 257ef9a7f..10a750c59 100644 --- a/src/pkg/go/parser/parser_test.go +++ b/src/pkg/go/parser/parser_test.go @@ -22,7 +22,7 @@ func TestParseIllegalInputs(t *testing.T) { for _, src := range illegalInputs { _, err := ParseFile("", src, 0); if err == nil { - t.Errorf("ParseFile(%v) should have failed", src); + t.Errorf("ParseFile(%v) should have failed", src) } } } @@ -38,7 +38,7 @@ func TestParseValidPrograms(t *testing.T) { for _, src := range validPrograms { _, err := ParseFile("", src, 0); if err != nil { - t.Errorf("ParseFile(%q): %v", src, err); + t.Errorf("ParseFile(%q): %v", src, err) } } } @@ -54,7 +54,7 @@ func TestParse3(t *testing.T) { for _, filename := range validFiles { _, err := ParseFile(filename, nil, 0); if err != nil { - t.Errorf("ParseFile(%s): %v", filename, err); + t.Errorf("ParseFile(%s): %v", filename, err) } } } @@ -66,7 +66,7 @@ func nameFilter(filename string) bool { case "interface.go": case "parser_test.go": default: - return false; + return false } return true; } @@ -79,14 +79,14 @@ func TestParse4(t *testing.T) { path := "."; pkg, err := ParsePackage(path, dirFilter, 0); if err != nil { - t.Fatalf("ParsePackage(%s): %v", path, err); + t.Fatalf("ParsePackage(%s): %v", path, err) } if pkg.Name != "parser" { - t.Errorf("incorrect package name: %s", pkg.Name); + t.Errorf("incorrect package name: %s", pkg.Name) } for filename, _ := range pkg.Files { if !nameFilter(filename) { - t.Errorf("unexpected package file: %s", filename); + t.Errorf("unexpected package file: %s", filename) } } } diff --git a/src/pkg/go/printer/nodes.go b/src/pkg/go/printer/nodes.go index b89bb2b0c..639779690 100644 --- a/src/pkg/go/printer/nodes.go +++ b/src/pkg/go/printer/nodes.go @@ -19,7 +19,7 @@ import ( // Disabled formatting - enable eventually and remove the flag. 
const ( compositeLitBlank = false; - fewerSemis = false; + fewerSemis = true; stringListMode = exprListMode(0); // previously: noIndent ) @@ -49,9 +49,9 @@ func (p *printer) linebreak(line, min, max int, ws whiteSpace, newSection bool) n := line - p.pos.Line; switch { case n < min: - n = min; + n = min case n > max: - n = max; + n = max } if n > 0 { p.print(ws); @@ -81,7 +81,7 @@ func (p *printer) commentList(list []*ast.Comment) { p.print(c.Pos(), t); if t[1] == '/' && i+1 < len(list) { //-style comment which is not at the end; print a newline - p.print(newline); + p.print(newline) } } } @@ -114,7 +114,7 @@ func (p *printer) identList(list []*ast.Ident, multiLine *bool) { // convert into an expression list xlist := make([]ast.Expr, len(list)); for i, x := range list { - xlist[i] = x; + xlist[i] = x } p.exprList(noPos, xlist, commaSep, multiLine); } @@ -125,7 +125,7 @@ func (p *printer) stringList(list []*ast.BasicLit, multiLine *bool) { // convert into an expression list xlist := make([]ast.Expr, len(list)); for i, x := range list { - xlist[i] = x; + xlist[i] = x } p.exprList(noPos, xlist, stringListMode, multiLine); } @@ -148,11 +148,11 @@ const ( // lines. func (p *printer) exprList(prev token.Position, list []ast.Expr, mode exprListMode, multiLine *bool) { if len(list) == 0 { - return; + return } if mode & blankStart != 0 { - p.print(blank); + p.print(blank) } // TODO(gri): endLine may be incorrect as it is really the beginning @@ -166,14 +166,14 @@ func (p *printer) exprList(prev token.Position, list []ast.Expr, mode exprListMo for i, x := range list { if i > 0 { if mode&commaSep != 0 { - p.print(token.COMMA); + p.print(token.COMMA) } p.print(blank); } p.expr(x, multiLine); } if mode&blankEnd != 0 { - p.print(blank); + p.print(blank) } return; } @@ -185,7 +185,7 @@ func (p *printer) exprList(prev token.Position, list []ast.Expr, mode exprListMo // i.e., pretend that the first line is already indented ws := ignore; if mode&noIndent == 0 { - ws = indent; + ws = indent } if prev.IsValid() && prev.Line < line && p.linebreak(line, 1, 2, ws, true) { @@ -198,7 +198,7 @@ func (p *printer) exprList(prev token.Position, list []ast.Expr, mode exprListMo line = x.Pos().Line; if i > 0 { if mode&commaSep != 0 { - p.print(token.COMMA); + p.print(token.COMMA) } if prev < line { if p.linebreak(line, 1, 2, ws, true) { @@ -206,7 +206,7 @@ func (p *printer) exprList(prev token.Position, list []ast.Expr, mode exprListMo *multiLine = true; } } else { - p.print(blank); + p.print(blank) } } p.expr(x, multiLine); @@ -216,19 +216,19 @@ func (p *printer) exprList(prev token.Position, list []ast.Expr, mode exprListMo p.print(token.COMMA); if ws == ignore && mode&noIndent == 0 { // unindent if we indented - p.print(unindent); + p.print(unindent) } p.print(formfeed); // terminating comma needs a line break to look good return; } if mode&blankEnd != 0 { - p.print(blank); + p.print(blank) } if ws == ignore && mode&noIndent == 0 { // unindent if we indented - p.print(unindent); + p.print(unindent) } } @@ -239,7 +239,7 @@ func (p *printer) parameters(list []*ast.Field, multiLine *bool) { if len(list) > 0 { for i, par := range list { if i > 0 { - p.print(token.COMMA, blank); + p.print(token.COMMA, blank) } if len(par.Names) > 0 { p.identList(par.Names, multiLine); @@ -277,13 +277,13 @@ func (p *printer) signature(params, result []*ast.Field, multiLine *bool) (optSe func identListSize(list []*ast.Ident, maxSize int) (size int) { for i, x := range list { if i > 0 { - size += 2 // ", " - ; + size += 2 // ", " + } size += 
len(x.Value); if size >= maxSize { - break; + break } } return; @@ -292,18 +292,18 @@ func identListSize(list []*ast.Ident, maxSize int) (size int) { func (p *printer) isOneLineFieldList(list []*ast.Field) bool { if len(list) != 1 { - return false; // allow only one field + return false // allow only one field } f := list[0]; if f.Tag != nil || f.Comment != nil { - return false; // don't allow tags or comments + return false // don't allow tags or comments } // only name(s) and type const maxSize = 30; // adjust as appropriate, this is an approximate value namesSize := identListSize(f.Names, maxSize); if namesSize > 0 { - namesSize = 1 // blank between names and types - ; + namesSize = 1 // blank between names and types + } typeSize := p.nodeSize(f.Type, maxSize); @@ -326,12 +326,12 @@ func (p *printer) fieldList(lbrace token.Position, list []*ast.Field, rbrace tok f := list[0]; for i, x := range f.Names { if i > 0 { - p.print(token.COMMA, blank); + p.print(token.COMMA, blank) } p.expr(x, ignoreMultiLine); } if len(f.Names) > 0 { - p.print(blank); + p.print(blank) } p.expr(f.Type, ignoreMultiLine); p.print(blank, rbrace, token.RBRACE); @@ -345,12 +345,12 @@ func (p *printer) fieldList(lbrace token.Position, list []*ast.Field, rbrace tok sep := vtab; if len(list) == 1 { - sep = blank; + sep = blank } var ml bool; for i, f := range list { if i > 0 { - p.linebreak(f.Pos().Line, 1, 2, ignore, ml); + p.linebreak(f.Pos().Line, 1, 2, ignore, ml) } ml = false; extraTabs := 0; @@ -368,7 +368,7 @@ func (p *printer) fieldList(lbrace token.Position, list []*ast.Field, rbrace tok } if f.Tag != nil { if len(f.Names) > 0 && sep == vtab { - p.print(sep); + p.print(sep) } p.print(sep); p.expr(&ast.StringList{f.Tag}, &ml); @@ -377,14 +377,14 @@ func (p *printer) fieldList(lbrace token.Position, list []*ast.Field, rbrace tok p.print(token.SEMICOLON); if f.Comment != nil { for ; extraTabs > 0; extraTabs-- { - p.print(vtab); + p.print(vtab) } p.lineComment(f.Comment); } } if isIncomplete { if len(list) > 0 { - p.print(formfeed); + p.print(formfeed) } // TODO(gri): this needs to be styled like normal comments p.print("// contains unexported fields"); @@ -395,7 +395,7 @@ func (p *printer) fieldList(lbrace token.Position, list []*ast.Field, rbrace tok var ml bool; for i, f := range list { if i > 0 { - p.linebreak(f.Pos().Line, 1, 2, ignore, ml); + p.linebreak(f.Pos().Line, 1, 2, ignore, ml) } ml = false; p.leadComment(f.Doc); @@ -405,14 +405,14 @@ func (p *printer) fieldList(lbrace token.Position, list []*ast.Field, rbrace tok p.signature(ftyp.Params, ftyp.Results, &ml); } else { // embedded interface - p.expr(f.Type, &ml); + p.expr(f.Type, &ml) } p.print(token.SEMICOLON); p.lineComment(f.Comment); } if isIncomplete { if len(list) > 0 { - p.print(formfeed); + p.print(formfeed) } // TODO(gri): this needs to be styled like normal comments p.print("// contains unexported methods"); @@ -439,20 +439,20 @@ func needsBlanks(expr ast.Expr) bool { switch x := expr.(type) { case *ast.Ident: // "long" identifiers look better with blanks around them - return len(x.Value) > 8; + return len(x.Value) > 8 case *ast.BasicLit: // "long" literals look better with blanks around them - return len(x.Value) > 8; + return len(x.Value) > 8 case *ast.ParenExpr: // parenthesized expressions don't need blanks around them - return false; + return false case *ast.IndexExpr: // index expressions don't need blanks if the indexed expressions are simple - return needsBlanks(x.X); + return needsBlanks(x.X) case *ast.CallExpr: // call expressions 
need blanks if they have more than one // argument or if the function expression needs blanks - return len(x.Args) > 1 || needsBlanks(x.Fun); + return len(x.Args) > 1 || needsBlanks(x.Fun) } return true; } @@ -487,16 +487,16 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1 int, multiLine *bool) { prev := line; line = x.Y.Pos().Line; if needsBlanks(x.Y) || prev != line { - printBlanks = true; + printBlanks = true } } else { - break; + break } } prev := line; line = x.X.Pos().Line; if needsBlanks(x.X) || prev != line { - printBlanks = true; + printBlanks = true } // Print collected operations left-to-right, with blanks if necessary. @@ -516,18 +516,18 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1 int, multiLine *bool) { *multiLine = true; } } else { - p.print(blank, x.OpPos, x.Op, blank); + p.print(blank, x.OpPos, x.Op, blank) } } else { if prev != line { - panic("internal error"); + panic("internal error") } p.print(x.OpPos, x.Op); } p.expr1(x.Y, prec, 0, multiLine); } if ws == ignore { - p.print(unindent); + p.print(unindent) } } @@ -539,13 +539,13 @@ func (p *printer) expr1(expr ast.Expr, prec1 int, ctxt exprContext, multiLine *b switch x := expr.(type) { case *ast.BadExpr: - p.print("BadExpr"); + p.print("BadExpr") case *ast.Ident: - p.print(x); + p.print(x) case *ast.BinaryExpr: - p.binaryExpr(x, prec1, multiLine); + p.binaryExpr(x, prec1, multiLine) case *ast.KeyValueExpr: p.expr(x.Key, multiLine); @@ -567,16 +567,16 @@ func (p *printer) expr1(expr ast.Expr, prec1 int, ctxt exprContext, multiLine *b // no parenthesis needed p.print(x.Op); if x.Op == token.RANGE { - p.print(blank); + p.print(blank) } p.expr1(x.X, prec, 0, multiLine); } case *ast.BasicLit: - p.print(x); + p.print(x) case *ast.StringList: - p.stringList(x.Strings, multiLine); + p.stringList(x.Strings, multiLine) case *ast.FuncLit: p.expr(x.Type, multiLine); @@ -596,9 +596,9 @@ func (p *printer) expr1(expr ast.Expr, prec1 int, ctxt exprContext, multiLine *b p.expr1(x.X, token.HighestPrec, 0, multiLine); p.print(token.PERIOD, token.LPAREN); if x.Type != nil { - p.expr(x.Type, multiLine); + p.expr(x.Type, multiLine) } else { - p.print(token.TYPE); + p.print(token.TYPE) } p.print(token.RPAREN); @@ -609,10 +609,10 @@ func (p *printer) expr1(expr ast.Expr, prec1 int, ctxt exprContext, multiLine *b if x.End != nil { if needsBlanks(x.Index) || needsBlanks(x.End) { // blanks around ":" - p.print(blank, token.COLON, blank); + p.print(blank, token.COLON, blank) } else { // no blanks around ":" - p.print(token.COLON); + p.print(token.COLON) } p.expr(x.End, multiLine); } @@ -636,7 +636,7 @@ func (p *printer) expr1(expr ast.Expr, prec1 int, ctxt exprContext, multiLine *b // TODO(gri): for now this decision is made by looking at the // source code - it may not be correct if the source // code was badly misformatted in the first place - p.print(blank); + p.print(blank) } } p.print(x.Lbrace, token.LBRACE); @@ -644,12 +644,12 @@ func (p *printer) expr1(expr ast.Expr, prec1 int, ctxt exprContext, multiLine *b p.print(x.Rbrace, token.RBRACE); case *ast.Ellipsis: - p.print(token.ELLIPSIS); + p.print(token.ELLIPSIS) case *ast.ArrayType: p.print(token.LBRACK); if x.Len != nil { - p.expr(x.Len, multiLine); + p.expr(x.Len, multiLine) } p.print(token.RBRACK); optSemi = p.expr(x.Elt, multiLine); @@ -677,17 +677,17 @@ func (p *printer) expr1(expr ast.Expr, prec1 int, ctxt exprContext, multiLine *b case *ast.ChanType: switch x.Dir { case ast.SEND | ast.RECV: - p.print(token.CHAN); + p.print(token.CHAN) case ast.RECV: - 
p.print(token.ARROW, token.CHAN); + p.print(token.ARROW, token.CHAN) case ast.SEND: - p.print(token.CHAN, token.ARROW); + p.print(token.CHAN, token.ARROW) } p.print(blank); optSemi = p.expr(x.Value, multiLine); default: - panic("unreachable"); + panic("unreachable") } return; @@ -697,7 +697,7 @@ func (p *printer) expr1(expr ast.Expr, prec1 int, ctxt exprContext, multiLine *b // Returns true if a separating semicolon is optional. // Sets multiLine to true if the expression spans multiple lines. func (p *printer) expr(x ast.Expr, multiLine *bool) (optSemi bool) { - return p.expr1(x, token.LowestPrec, 0, multiLine); + return p.expr1(x, token.LowestPrec, 0, multiLine) } @@ -712,7 +712,7 @@ const maxStmtNewlines = 2 // maximum number of newlines between statements func (p *printer) stmtList(list []ast.Stmt, _indent int) { // TODO(gri): fix _indent code if _indent > 0 { - p.print(indent); + p.print(indent) } var multiLine bool; for i, s := range list { @@ -721,11 +721,11 @@ func (p *printer) stmtList(list []ast.Stmt, _indent int) { p.linebreak(s.Pos().Line, 1, maxStmtNewlines, ignore, i == 0 || _indent == 0 || multiLine); multiLine = false; if !p.stmt(s, &multiLine) && (!fewerSemis || len(list) > 1) { - p.print(token.SEMICOLON); + p.print(token.SEMICOLON) } } if _indent > 0 { - p.print(unindent); + p.print(unindent) } } @@ -744,7 +744,7 @@ func (p *printer) block(s *ast.BlockStmt, indent int) { // need to be careful to keep them around type expressions. func stripParens(x ast.Expr) ast.Expr { if px, hasParens := x.(*ast.ParenExpr); hasParens { - return stripParens(px.X); + return stripParens(px.X) } return x; } @@ -763,7 +763,7 @@ func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, po // all semicolons required // (they are not separators, print them explicitly) if init != nil { - p.stmt(init, ignoreMultiLine); + p.stmt(init, ignoreMultiLine) } p.print(token.SEMICOLON, blank); if expr != nil { @@ -780,7 +780,7 @@ func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, po } } if needsBlank { - p.print(blank); + p.print(blank) } } @@ -792,7 +792,7 @@ func (p *printer) stmt(stmt ast.Stmt, multiLine *bool) (optSemi bool) { switch s := stmt.(type) { case *ast.BadStmt: - p.print("BadStmt"); + p.print("BadStmt") case *ast.DeclStmt: p.decl(s.Decl, inStmtList, multiLine); @@ -812,7 +812,7 @@ func (p *printer) stmt(stmt ast.Stmt, multiLine *bool) (optSemi bool) { optSemi = p.stmt(s.Stmt, multiLine); case *ast.ExprStmt: - p.expr(s.X, multiLine); + p.expr(s.X, multiLine) case *ast.IncDecStmt: p.expr(s.X, multiLine); @@ -834,7 +834,7 @@ func (p *printer) stmt(stmt ast.Stmt, multiLine *bool) (optSemi bool) { case *ast.ReturnStmt: p.print(token.RETURN); if s.Results != nil { - p.exprList(s.Pos(), s.Results, blankStart | commaSep, multiLine); + p.exprList(s.Pos(), s.Results, blankStart | commaSep, multiLine) } case *ast.BranchStmt: @@ -859,7 +859,7 @@ func (p *printer) stmt(stmt ast.Stmt, multiLine *bool) (optSemi bool) { p.print(blank, token.ELSE, blank); switch s.Else.(type) { case *ast.BlockStmt, *ast.IfStmt: - optSemi = p.stmt(s.Else, ignoreMultiLine); + optSemi = p.stmt(s.Else, ignoreMultiLine) default: p.print(token.LBRACE, indent, formfeed); p.stmt(s.Else, ignoreMultiLine); @@ -872,7 +872,7 @@ func (p *printer) stmt(stmt ast.Stmt, multiLine *bool) (optSemi bool) { p.print(token.CASE); p.exprList(s.Pos(), s.Values, blankStart | commaSep, multiLine); } else { - p.print(token.DEFAULT); + p.print(token.DEFAULT) } p.print(s.Colon, token.COLON); 
p.stmtList(s.Body, 1); @@ -890,7 +890,7 @@ func (p *printer) stmt(stmt ast.Stmt, multiLine *bool) (optSemi bool) { p.print(token.CASE); p.exprList(s.Pos(), s.Types, blankStart | commaSep, multiLine); } else { - p.print(token.DEFAULT); + p.print(token.DEFAULT) } p.print(s.Colon, token.COLON); p.stmtList(s.Body, 1); @@ -919,7 +919,7 @@ func (p *printer) stmt(stmt ast.Stmt, multiLine *bool) (optSemi bool) { } p.expr(s.Rhs, multiLine); } else { - p.print(token.DEFAULT); + p.print(token.DEFAULT) } p.print(s.Colon, token.COLON); p.stmtList(s.Body, 1); @@ -953,7 +953,7 @@ func (p *printer) stmt(stmt ast.Stmt, multiLine *bool) (optSemi bool) { optSemi = true; default: - panic("unreachable"); + panic("unreachable") } return; @@ -1009,7 +1009,7 @@ func (p *printer) spec(spec ast.Spec, n int, context declContext, multiLine *boo } else { extraTabs = 2; if s.Type != nil || s.Values != nil { - p.print(vtab); + p.print(vtab) } if s.Type != nil { optSemi = p.expr(s.Type, multiLine); @@ -1029,24 +1029,24 @@ func (p *printer) spec(spec ast.Spec, n int, context declContext, multiLine *boo p.leadComment(s.Doc); p.expr(s.Name, multiLine); if n == 1 { - p.print(blank); + p.print(blank) } else { - p.print(vtab); + p.print(vtab) } optSemi = p.expr(s.Type, multiLine); comment = s.Comment; default: - panic("unreachable"); + panic("unreachable") } if context == inGroup || context == inStmtList && !optSemi { - p.print(token.SEMICOLON); + p.print(token.SEMICOLON) } if comment != nil { for ; extraTabs > 0; extraTabs-- { - p.print(vtab); + p.print(vtab) } p.lineComment(comment); } @@ -1066,7 +1066,7 @@ func (p *printer) genDecl(d *ast.GenDecl, context declContext, multiLine *bool) var ml bool; for i, s := range d.Specs { if i > 0 { - p.linebreak(s.Pos().Line, 1, 2, ignore, ml); + p.linebreak(s.Pos().Line, 1, 2, ignore, ml) } ml = false; p.spec(s, len(d.Specs), inGroup, &ml); @@ -1078,7 +1078,7 @@ func (p *printer) genDecl(d *ast.GenDecl, context declContext, multiLine *bool) } else { // single declaration - p.spec(d.Specs[0], 1, context, multiLine); + p.spec(d.Specs[0], 1, context, multiLine) } } @@ -1096,12 +1096,12 @@ func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) { cfg := Config{Mode: RawFormat}; var buf bytes.Buffer; if _, err := cfg.Fprint(&buf, n); err != nil { - return; + return } if buf.Len() <= maxSize { for _, ch := range buf.Bytes() { if ch < ' ' { - return; + return } } size = buf.Len(); // n fits @@ -1115,9 +1115,9 @@ func (p *printer) isOneLineFunc(b *ast.BlockStmt, headerSize int) bool { bodySize := 0; switch { case len(b.List) > 1 || p.commentBefore(b.Rbrace): - return false; // too many statements or there is a comment - all bets are off + return false // too many statements or there is a comment - all bets are off case len(b.List) == 1: - bodySize = p.nodeSize(b.List[0], maxSize); + bodySize = p.nodeSize(b.List[0], maxSize) } // require both headers and overall size to be not "too large" return headerSize <= maxSize/2 && headerSize + bodySize <= maxSize; @@ -1127,20 +1127,20 @@ func (p *printer) isOneLineFunc(b *ast.BlockStmt, headerSize int) bool { // Sets multiLine to true if the function body spans multiple lines. 
func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLine *bool) { if b == nil { - return; + return } if p.isOneLineFunc(b, headerSize) { sep := vtab; if isLit { - sep = blank; + sep = blank } if len(b.List) > 0 { p.print(sep, b.Pos(), token.LBRACE, blank); p.stmt(b.List[0], ignoreMultiLine); p.print(blank, b.Rbrace, token.RBRACE); } else { - p.print(sep, b.Pos(), token.LBRACE, b.Rbrace, token.RBRACE); + p.print(sep, b.Pos(), token.LBRACE, b.Rbrace, token.RBRACE) } return; } @@ -1156,7 +1156,7 @@ func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLi // the result is infinity (1<<30). func distance(from, to token.Position) int { if from.IsValid() && to.IsValid() && from.Line == to.Line { - return to.Column - from.Column; + return to.Column - from.Column } return 1<<30; } @@ -1186,13 +1186,13 @@ func (p *printer) funcDecl(d *ast.FuncDecl, multiLine *bool) { func (p *printer) decl(decl ast.Decl, context declContext, multiLine *bool) { switch d := decl.(type) { case *ast.BadDecl: - p.print(d.Pos(), "BadDecl"); + p.print(d.Pos(), "BadDecl") case *ast.GenDecl: - p.genDecl(d, context, multiLine); + p.genDecl(d, context, multiLine) case *ast.FuncDecl: - p.funcDecl(d, multiLine); + p.funcDecl(d, multiLine) default: - panic("unreachable"); + panic("unreachable") } } @@ -1206,9 +1206,9 @@ func declToken(decl ast.Decl) (tok token.Token) { tok = token.ILLEGAL; switch d := decl.(type) { case *ast.GenDecl: - tok = d.Tok; + tok = d.Tok case *ast.FuncDecl: - tok = token.FUNC; + tok = token.FUNC } return; } @@ -1228,7 +1228,7 @@ func (p *printer) file(src *ast.File) { // print an empty line between top-level declarations min := 1; if prev != tok { - min = 2; + min = 2 } p.linebreak(d.Pos().Line, min, maxDeclNewlines, ignore, false); p.decl(d, atTop, ignoreMultiLine); diff --git a/src/pkg/go/printer/printer.go b/src/pkg/go/printer/printer.go index 9c68da227..4c3511c61 100644 --- a/src/pkg/go/printer/printer.go +++ b/src/pkg/go/printer/printer.go @@ -137,7 +137,7 @@ func (p *printer) write(data []byte) { // must not be discarded by the tabwriter j := p.indent; for ; j > len(htabs); j -= len(htabs) { - p.write0(&htabs); + p.write0(&htabs) } p.write0(htabs[0:j]); @@ -158,15 +158,15 @@ func (p *printer) write(data []byte) { var esc []byte; switch b { case '"': - esc = esc_quot; + esc = esc_quot case '\'': - esc = esc_apos; + esc = esc_apos case '&': - esc = esc_amp; + esc = esc_amp case '<': - esc = esc_lt; + esc = esc_lt case '>': - esc = esc_gt; + esc = esc_gt } p.write0(esc); @@ -180,7 +180,7 @@ func (p *printer) write(data []byte) { } case tabwriter.Escape: - p.escape = !p.escape; + p.escape = !p.escape } } @@ -197,7 +197,7 @@ func (p *printer) write(data []byte) { func (p *printer) writeNewlines(n int) { if n > 0 { if n > maxNewlines { - n = maxNewlines; + n = maxNewlines } p.write(newlines[0:n]); } @@ -207,7 +207,7 @@ func (p *printer) writeNewlines(n int) { func (p *printer) writeFormfeeds(n int) { if n > 0 { if n > maxNewlines { - n = maxNewlines; + n = maxNewlines } p.write(formfeeds[0:n]); } @@ -218,12 +218,12 @@ func (p *printer) writeTaggedItem(data []byte, tag HTMLTag) { // write start tag, if any // (no html-escaping and no p.pos update for tags - use write0) if tag.Start != "" { - p.write0(strings.Bytes(tag.Start)); + p.write0(strings.Bytes(tag.Start)) } p.write(data); // write end tag, if any if tag.End != "" { - p.write0(strings.Bytes(tag.End)); + p.write0(strings.Bytes(tag.End)) } } @@ -239,7 +239,7 @@ func (p *printer) writeItem(pos 
token.Position, data []byte, tag HTMLTag) { p.pos = pos; if debug { // do not update p.pos - use write0 - p.write0(strings.Bytes(fmt.Sprintf("[%d:%d]", pos.Line, pos.Column))); + p.write0(strings.Bytes(fmt.Sprintf("[%d:%d]", pos.Line, pos.Column))) } if p.Mode & GenHTML != 0 { // write line tag if on a new line @@ -251,7 +251,7 @@ func (p *printer) writeItem(pos token.Position, data []byte, tag HTMLTag) { } p.writeTaggedItem(data, tag); } else { - p.write(data); + p.write(data) } p.last = p.pos; } @@ -269,7 +269,7 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, isFirst, isKeywor if !p.last.IsValid() { // there was no preceeding item and the comment is the // first item to be printed - don't write any whitespace - return; + return } if pos.Line == p.last.Line { @@ -291,7 +291,7 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, isFirst, isKeywor continue; case indent: // apply pending indentation - continue; + continue } j = i; break; @@ -304,9 +304,9 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, isFirst, isKeywor // next item is on the same line as the comment // (which must be a /*-style comment): separate // with a blank instead of a tab - p.write([]byte{' '}); + p.write([]byte{' '}) } else { - p.write(htab); + p.write(htab) } } @@ -323,7 +323,7 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, isFirst, isKeywor continue; case indent: // apply pending indentation - continue; + continue case unindent: // if the next token is a keyword, apply the outdent // if it appears that the comment is aligned with the @@ -332,11 +332,11 @@ func (p *printer) writeCommentPrefix(pos, next token.Position, isFirst, isKeywor // comments before a case label where the comments // apply to the next case instead of the current one) if isKeyword && pos.Column == next.Column { - continue; + continue } case newline, formfeed: // TODO(gri): may want to keep formfeed info in some cases - p.buffer[i] = ignore; + p.buffer[i] = ignore } j = i; break; @@ -359,7 +359,7 @@ func (p *printer) writeCommentLine(comment *ast.Comment, pos token.Position, lin // apply styler, if any var tag HTMLTag; if p.Styler != nil { - line, tag = p.Styler.Comment(comment, line); + line, tag = p.Styler.Comment(comment, line) } p.writeItem(pos, line, tag); @@ -376,7 +376,7 @@ func split(text []byte) [][]byte { n := 1; for _, c := range text { if c == '\n' { - n++; + n++ } } @@ -400,7 +400,7 @@ func split(text []byte) [][]byte { func isBlank(s []byte) bool { for _, b := range s { if b > ' ' { - return false; + return false } } return true; @@ -410,7 +410,7 @@ func isBlank(s []byte) bool { func commonPrefix(a, b []byte) []byte { i := 0; for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') { - i++; + i++ } return a[0:i]; } @@ -418,7 +418,7 @@ func commonPrefix(a, b []byte) []byte { func stripCommonPrefix(lines [][]byte) { if len(lines) < 2 { - return; // at most one line - nothing to do + return // at most one line - nothing to do } // The heuristic in this function tries to handle a few @@ -438,11 +438,11 @@ func stripCommonPrefix(lines [][]byte) { case i == 0 || i == len(lines)-1: // ignore case isBlank(line): - lines[i] = nil; + lines[i] = nil case prefix == nil: - prefix = commonPrefix(line, line); + prefix = commonPrefix(line, line) default: - prefix = commonPrefix(prefix, line); + prefix = commonPrefix(prefix, line) } } @@ -453,7 +453,7 @@ func stripCommonPrefix(lines [][]byte) { if i := bytes.Index(prefix, []byte{'*'}); i >= 0 { // Line of stars 
present. if i > 0 && prefix[i-1] == ' ' { - i--; // remove trailing blank from prefix so stars remain aligned + i-- // remove trailing blank from prefix so stars remain aligned } prefix = prefix[0:i]; lineOfStars = true; @@ -474,10 +474,10 @@ func stripCommonPrefix(lines [][]byte) { // in the first place i := len(prefix); for n := 0; n < 3 && i > 0 && prefix[i-1] == ' '; n++ { - i--; + i-- } if i == len(prefix) && i > 0 && prefix[i-1] == '\t' { - i--; + i-- } prefix = prefix[0:i]; } else { @@ -490,7 +490,7 @@ func stripCommonPrefix(lines [][]byte) { } if n > 2 && suffix[2] == '\t' { // assume the '\t' compensates for the /* - suffix = suffix[2:n]; + suffix = suffix[2:n] } else { // otherwise assume two blanks suffix[0], suffix[1] = ' ', ' '; @@ -499,7 +499,7 @@ func stripCommonPrefix(lines [][]byte) { // Shorten the computed common prefix by the length of // suffix, if it is found as suffix of the prefix. if bytes.HasSuffix(prefix, suffix) { - prefix = prefix[0 : len(prefix)-len(suffix)]; + prefix = prefix[0 : len(prefix)-len(suffix)] } } } @@ -515,19 +515,19 @@ func stripCommonPrefix(lines [][]byte) { var sep []byte; if lineOfStars { // insert an aligning blank - sep = []byte{' '}; + sep = []byte{' '} } lines[len(lines)-1] = bytes.Join([][]byte{prefix, closing}, sep); } else { // last line contains more comment text - assume // it is aligned like the other lines - prefix = commonPrefix(prefix, last); + prefix = commonPrefix(prefix, last) } // Remove the common prefix from all but the first and empty lines. for i, line := range lines { if i > 0 && len(line) != 0 { - lines[i] = line[len(prefix):len(line)]; + lines[i] = line[len(prefix):len(line)] } } } @@ -557,7 +557,7 @@ func (p *printer) writeComment(comment *ast.Comment) { pos = p.pos; } if len(line) > 0 { - p.writeCommentLine(comment, pos, line); + p.writeCommentLine(comment, pos, line) } } } @@ -573,15 +573,15 @@ func (p *printer) writeCommentSuffix(needsLinebreak bool) { switch ch { case blank, vtab: // ignore trailing whitespace - p.buffer[i] = ignore; + p.buffer[i] = ignore case indent, unindent: // don't loose indentation information case newline, formfeed: // if we need a line break, keep exactly one if needsLinebreak { - needsLinebreak = false; + needsLinebreak = false } else { - p.buffer[i] = ignore; + p.buffer[i] = ignore } } } @@ -589,7 +589,7 @@ func (p *printer) writeCommentSuffix(needsLinebreak bool) { // make sure we have a line break if needsLinebreak { - p.write([]byte{'\n'}); + p.write([]byte{'\n'}) } } @@ -616,7 +616,7 @@ func (p *printer) intersperseComments(next token.Position, isKeyword bool) { if last != nil && !needsLinebreak && last.Pos().Line == next.Line { // the last comment is a /*-style comment and the next item // follows on the same line: separate with an extra blank - p.write([]byte{' '}); + p.write([]byte{' '}) } p.writeCommentSuffix(needsLinebreak); } @@ -631,13 +631,13 @@ func (p *printer) writeWhitespace(n int) { case ignore: // ignore! case indent: - p.indent++; + p.indent++ case unindent: p.indent--; if p.indent < 0 { // handle gracefully unless in debug mode if debug { - panicln("negative indentation:", p.indent); + panicln("negative indentation:", p.indent) } p.indent = 0; } @@ -704,7 +704,7 @@ func (p *printer) print(args ...) { // don't add ignore's to the buffer; they // may screw up "correcting" unindents (see // LabeledStmt) - break; + break } i := len(p.buffer); if i == cap(p.buffer) { @@ -719,22 +719,22 @@ func (p *printer) print(args ...) 
{ case []byte: // TODO(gri): remove this case once commentList // handles comments correctly - data = x; + data = x case string: // TODO(gri): remove this case once fieldList // handles comments correctly - data = strings.Bytes(x); + data = strings.Bytes(x) case *ast.Ident: if p.Styler != nil { - data, tag = p.Styler.Ident(x); + data, tag = p.Styler.Ident(x) } else { - data = strings.Bytes(x.Value); + data = strings.Bytes(x.Value) } case *ast.BasicLit: if p.Styler != nil { - data, tag = p.Styler.BasicLit(x); + data, tag = p.Styler.BasicLit(x) } else { - data = x.Value; + data = x.Value } // escape all literals so they pass through unchanged // (note that valid Go programs cannot contain esc ('\xff') @@ -743,17 +743,17 @@ func (p *printer) print(args ...) { data = strings.Bytes("\xff"+string(data)+"\xff"); case token.Token: if p.Styler != nil { - data, tag = p.Styler.Token(x); + data, tag = p.Styler.Token(x) } else { - data = strings.Bytes(x.String()); + data = strings.Bytes(x.String()) } isKeyword = x.IsKeyword(); case token.Position: if x.IsValid() { - next = x; // accurate position of next item + next = x // accurate position of next item } default: - panicln("print: unsupported argument type", f.Type().String()); + panicln("print: unsupported argument type", f.Type().String()) } p.pos = next; @@ -775,7 +775,7 @@ func (p *printer) print(args ...) { // before the next position in the source code. // func (p *printer) commentBefore(next token.Position) bool { - return p.comment != nil && p.comment.List[0].Pos().Offset < next.Offset; + return p.comment != nil && p.comment.List[0].Pos().Offset < next.Offset } @@ -785,7 +785,7 @@ func (p *printer) commentBefore(next token.Position) bool { func (p *printer) flush(next token.Position, isKeyword bool) { // if there are comments before the next item, intersperse them if p.commentBefore(next) { - p.intersperseComments(next, isKeyword); + p.intersperseComments(next, isKeyword) } // write any leftover whitespace p.writeWhitespace(len(p.buffer)); @@ -817,7 +817,7 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) { // m >= 0: data[m:n] unwritten and no whitespace m := 0; if p.buf.Len() > 0 { - m = -1; + m = -1 } var b byte; @@ -827,7 +827,7 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) { // write any pending whitespace if m < 0 { if _, err = p.output.Write(p.buf.Bytes()); err != nil { - return; + return } p.buf.Reset(); m = n; @@ -841,13 +841,13 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) { // write any pending (non-whitespace) data if m >= 0 { if _, err = p.output.Write(data[m:n]); err != nil { - return; + return } m = -1; } // collect whitespace but discard tabrwiter.Escapes. 
if b != tabwriter.Escape { - p.buf.WriteByte(b); // WriteByte returns no errors + p.buf.WriteByte(b) // WriteByte returns no errors } case '\f', '\n': @@ -856,13 +856,13 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) { // write any pending (non-whitespace) data if m >= 0 { if _, err = p.output.Write(data[m:n]); err != nil { - return; + return } m = -1; } // convert formfeed into newline if _, err = p.output.Write(newlines[0:1]); err != nil { - return; + return } } } @@ -871,7 +871,7 @@ func (p *trimmer) Write(data []byte) (n int, err os.Error) { // write any pending non-whitespace if m >= 0 { if _, err = p.output.Write(data[m:n]); err != nil { - return; + return } } @@ -933,11 +933,11 @@ func (cfg *Config) Fprint(output io.Writer, node interface{}) (int, os.Error) { if cfg.Mode & RawFormat == 0 { padchar := byte('\t'); if cfg.Mode & UseSpaces != 0 { - padchar = ' '; + padchar = ' ' } twmode := tabwriter.DiscardEmptyColumns; if cfg.Mode & GenHTML != 0 { - twmode |= tabwriter.FilterHTML; + twmode |= tabwriter.FilterHTML } tw = tabwriter.NewWriter(output, cfg.Tabwidth, 1, padchar, twmode); output = tw; @@ -949,11 +949,11 @@ func (cfg *Config) Fprint(output io.Writer, node interface{}) (int, os.Error) { go func() { switch n := node.(type) { case ast.Expr: - p.expr(n, ignoreMultiLine); + p.expr(n, ignoreMultiLine) case ast.Stmt: - p.stmt(n, ignoreMultiLine); + p.stmt(n, ignoreMultiLine) case ast.Decl: - p.decl(n, atTop, ignoreMultiLine); + p.decl(n, atTop, ignoreMultiLine) case *ast.File: p.comment = n.Comments; p.file(n); @@ -968,7 +968,7 @@ func (cfg *Config) Fprint(output io.Writer, node interface{}) (int, os.Error) { // flush tabwriter, if any if tw != nil { - tw.Flush(); // ignore errors + tw.Flush() // ignore errors } return p.written, err; diff --git a/src/pkg/go/printer/printer_test.go b/src/pkg/go/printer/printer_test.go index ebb170550..6cafc5820 100644 --- a/src/pkg/go/printer/printer_test.go +++ b/src/pkg/go/printer/printer_test.go @@ -27,7 +27,7 @@ var update = flag.Bool("update", false, "update golden files") func lineString(text []byte, i int) string { i0 := i; for i < len(text) && text[i] != '\n' { - i++; + i++ } return string(text[i0:i]); } @@ -58,20 +58,20 @@ func check(t *testing.T, source, golden string, mode checkMode) { // determine printer configuration cfg := Config{Tabwidth: tabwidth}; if mode & rawFormat != 0 { - cfg.Mode |= RawFormat; + cfg.Mode |= RawFormat } // format source var buf bytes.Buffer; if _, err := cfg.Fprint(&buf, prog); err != nil { - t.Error(err); + t.Error(err) } res := buf.Bytes(); // update golden files if necessary if *update { if err := io.WriteFile(golden, res, 0644); err != nil { - t.Error(err); + t.Error(err) } return; } @@ -85,7 +85,7 @@ func check(t *testing.T, source, golden string, mode checkMode) { // compare lengths if len(res) != len(gld) { - t.Errorf("len = %d, expected %d (= len(%s))", len(res), len(gld), golden); + t.Errorf("len = %d, expected %d (= len(%s))", len(res), len(gld), golden) } // compare contents diff --git a/src/pkg/go/printer/testdata/comments.golden b/src/pkg/go/printer/testdata/comments.golden index cb2827476..04f87a6c4 100644 --- a/src/pkg/go/printer/testdata/comments.golden +++ b/src/pkg/go/printer/testdata/comments.golden @@ -98,7 +98,7 @@ func _() { func abs(x int) int { if x < 0 { // the tab printed before this comment's // must not affect the remaining lines - return -x; // this statement should be properly indented + return -x // this statement should be properly indented } return x; } @@ -120,7 
+120,7 @@ func typeswitch(x interface{}) { switch v0, ok := x.(int); x.(type) { case byte: // this comment should be on the same line as the keyword // this comment should be normally indented - _ = 0; + _ = 0 case bool, int, float: // this comment should be indented case string: diff --git a/src/pkg/go/printer/testdata/expressions.golden b/src/pkg/go/printer/testdata/expressions.golden index 4d105f7d3..efca110ca 100644 --- a/src/pkg/go/printer/testdata/expressions.golden +++ b/src/pkg/go/printer/testdata/expressions.golden @@ -230,7 +230,7 @@ func same(t, u *Time) bool { t.Second == u.Second && t.Weekday == u.Weekday && t.ZoneOffset == u.ZoneOffset && - t.Zone == u.Zone; + t.Zone == u.Zone } @@ -249,7 +249,7 @@ func addState(s []state, inst instr, match []int) { for i := 0; i < l; i++ { if s[i].inst.index() == index && // same instruction s[i].match[0] < pos { // earlier match already going; leftmost wins - return s; + return s } } } diff --git a/src/pkg/go/printer/testdata/expressions.raw b/src/pkg/go/printer/testdata/expressions.raw index ae11b7b16..29109ba61 100644 --- a/src/pkg/go/printer/testdata/expressions.raw +++ b/src/pkg/go/printer/testdata/expressions.raw @@ -230,7 +230,7 @@ func same(t, u *Time) bool { t.Second == u.Second && t.Weekday == u.Weekday && t.ZoneOffset == u.ZoneOffset && - t.Zone == u.Zone; + t.Zone == u.Zone } @@ -249,7 +249,7 @@ func addState(s []state, inst instr, match []int) { for i := 0; i < l; i++ { if s[i].inst.index() == index && // same instruction s[i].match[0] < pos { // earlier match already going; leftmost wins - return s; + return s } } } diff --git a/src/pkg/go/printer/testdata/linebreaks.golden b/src/pkg/go/printer/testdata/linebreaks.golden index 9f92a998c..afc5e7b4f 100644 --- a/src/pkg/go/printer/testdata/linebreaks.golden +++ b/src/pkg/go/printer/testdata/linebreaks.golden @@ -209,12 +209,12 @@ testLoop: } if !reflect.DeepEqual(hdr, header) { t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v", - i, j, *hdr, *header); + i, j, *hdr, *header) } } hdr, err := tr.Next(); if hdr != nil || err != nil { - t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, err); + t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, err) } f.Close(); } diff --git a/src/pkg/go/printer/testdata/statements.golden b/src/pkg/go/printer/testdata/statements.golden index 875bac40b..b09352c4c 100644 --- a/src/pkg/go/printer/testdata/statements.golden +++ b/src/pkg/go/printer/testdata/statements.golden @@ -23,10 +23,10 @@ func _() { if expr { } // no semicolon and parens printed if x := expr; { - use(x); + use(x) } if x := expr; expr { - use(x); + use(x) } } @@ -48,11 +48,11 @@ func _() { switch x := expr; { default: use( - x); + x) } switch x := expr; expr { default: - use(x); + use(x) } } @@ -69,7 +69,7 @@ func _() { case 2: // followed by an empty line - use(x); // followed by an empty line + use(x) // followed by an empty line case 3: // no empty lines use(x); @@ -78,20 +78,20 @@ func _() { switch x { case 0: - use(x); + use(x) case 1: // this comment should have no effect on the previous or next line - use(x); + use(x) } switch x := 0; x { case 1: - x = 0; + x = 0 // this comment should be indented case 2: - x = 0; + x = 0 // this comment should not be indented, it is aligned with the next case case 3: - x = 0; + x = 0 /* indented comment aligned aligned @@ -99,7 +99,7 @@ func _() { // bla /* and more */ case 4: - x = 0; + x = 0 /* not indented comment aligned aligned @@ -122,7 +122,7 @@ func _() { for { } // no semicolons printed 
for x := expr; ; { - use(x); + use(x) } for expr { } // no semicolons printed @@ -131,18 +131,18 @@ func _() { for ; ; expr = false { } for x := expr; expr; { - use(x); + use(x) } for x := expr; ; expr = false { - use(x); + use(x) } for ; expr; expr = false { } for x := expr; expr; expr = false { - use(x); + use(x) } for x := range []int{} { - use(x); + use(x) } } @@ -169,11 +169,11 @@ func _() { if x < x { - use(x); + use(x) } else { - use(x); + use(x) } } @@ -182,25 +182,25 @@ func _() { // Formatting around labels. func _() { L: - ; + } func _() { // this comment should be indented L: - ; + } func _() { -L: _ = 0; +L: _ = 0 } func _() { // this comment should be indented -L: _ = 0; +L: _ = 0 } @@ -225,14 +225,14 @@ func _() { func _() { if { - _ = 0; + _ = 0 } _ = 0; // the indentation here should not be affected by the long label name AnOverlongLabel: _ = 0; if { - _ = 0; + _ = 0 } _ = 0; diff --git a/src/pkg/go/scanner/errors.go b/src/pkg/go/scanner/errors.go index a8219b190..16ad53260 100644 --- a/src/pkg/go/scanner/errors.go +++ b/src/pkg/go/scanner/errors.go @@ -68,7 +68,7 @@ func (e *Error) String() string { if e.Pos.Filename != "" || e.Pos.IsValid() { // don't print "<unknown position>" // TODO(gri) reconsider the semantics of Position.IsValid - return e.Pos.String() + ": " + e.Msg; + return e.Pos.String() + ": " + e.Msg } return e.Msg; } @@ -90,14 +90,14 @@ func (p ErrorList) Less(i, j int) bool { // the offsets do not reflect modified line information (through //line // comments). if e.Filename < f.Filename { - return true; + return true } if e.Filename == f.Filename { if e.Line < f.Line { - return true; + return true } if e.Line == f.Line { - return e.Column < f.Column; + return e.Column < f.Column } } return false; @@ -107,9 +107,9 @@ func (p ErrorList) Less(i, j int) bool { func (p ErrorList) String() string { switch len(p) { case 0: - return "unspecified error"; + return "unspecified error" case 1: - return p[0].String(); + return p[0].String() } return fmt.Sprintf("%s (and %d more errors)", p[0].String(), len(p)-1); } @@ -131,16 +131,16 @@ const ( // func (h *ErrorVector) GetErrorList(mode int) ErrorList { if h.errors.Len() == 0 { - return nil; + return nil } list := make(ErrorList, h.errors.Len()); for i := 0; i < h.errors.Len(); i++ { - list[i] = h.errors.At(i).(*Error); + list[i] = h.errors.At(i).(*Error) } if mode >= Sorted { - sort.Sort(list); + sort.Sort(list) } if mode >= NoMultiples { @@ -166,7 +166,7 @@ func (h *ErrorVector) GetErrorList(mode int) ErrorList { // func (h *ErrorVector) GetError(mode int) os.Error { if h.errors.Len() == 0 { - return nil; + return nil } return h.GetErrorList(mode); @@ -175,7 +175,7 @@ func (h *ErrorVector) GetError(mode int) os.Error { // ErrorVector implements the ErrorHandler interface. 
func (h *ErrorVector) Error(pos token.Position, msg string) { - h.errors.Push(&Error{pos, msg}); + h.errors.Push(&Error{pos, msg}) } @@ -186,9 +186,9 @@ func (h *ErrorVector) Error(pos token.Position, msg string) { func PrintError(w io.Writer, err os.Error) { if list, ok := err.(ErrorList); ok { for _, e := range list { - fmt.Fprintf(w, "%s\n", e); + fmt.Fprintf(w, "%s\n", e) } } else { - fmt.Fprintf(w, "%s\n", err); + fmt.Fprintf(w, "%s\n", err) } } diff --git a/src/pkg/go/scanner/scanner.go b/src/pkg/go/scanner/scanner.go index 7f707296e..cc4ff9cc4 100644 --- a/src/pkg/go/scanner/scanner.go +++ b/src/pkg/go/scanner/scanner.go @@ -52,7 +52,7 @@ func (S *Scanner) next() { S.pos.Column = 0; case r >= 0x80: // not ASCII - r, w = utf8.DecodeRune(S.src[S.offset : len(S.src)]); + r, w = utf8.DecodeRune(S.src[S.offset : len(S.src)]) } S.offset += w; S.ch = r; @@ -96,27 +96,27 @@ func charString(ch int) string { var s string; switch ch { case -1: - return `EOF`; + return `EOF` case '\a': - s = `\a`; + s = `\a` case '\b': - s = `\b`; + s = `\b` case '\f': - s = `\f`; + s = `\f` case '\n': - s = `\n`; + s = `\n` case '\r': - s = `\r`; + s = `\r` case '\t': - s = `\t`; + s = `\t` case '\v': - s = `\v`; + s = `\v` case '\\': - s = `\\`; + s = `\\` case '\'': - s = `\'`; + s = `\'` default: - s = string(ch); + s = string(ch) } return "'" + s + "' (U+" + strconv.Itob(ch, 16) + ")"; } @@ -124,7 +124,7 @@ func charString(ch int) string { func (S *Scanner) error(pos token.Position, msg string) { if S.err != nil { - S.err.Error(pos, msg); + S.err.Error(pos, msg) } S.ErrorCount++; } @@ -132,7 +132,7 @@ func (S *Scanner) error(pos token.Position, msg string) { func (S *Scanner) expect(ch int) { if S.ch != ch { - S.error(S.pos, "expected " + charString(ch) + ", found " + charString(S.ch)); + S.error(S.pos, "expected " + charString(ch) + ", found " + charString(S.ch)) } S.next(); // always make progress } @@ -188,19 +188,19 @@ func (S *Scanner) scanComment(pos token.Position) { func isLetter(ch int) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch); + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) } func isDigit(ch int) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch); + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) } func (S *Scanner) scanIdentifier() token.Token { pos := S.pos.Offset; for isLetter(S.ch) || isDigit(S.ch) { - S.next(); + S.next() } return token.Lookup(S.src[pos : S.pos.Offset]); } @@ -209,11 +209,11 @@ func (S *Scanner) scanIdentifier() token.Token { func digitVal(ch int) int { switch { case '0' <= ch && ch <= '9': - return ch-'0'; + return ch-'0' case 'a' <= ch && ch <= 'f': - return ch-'a'+10; + return ch-'a'+10 case 'A' <= ch && ch <= 'F': - return ch-'A'+10; + return ch-'A'+10 } return 16; // larger than any legal digit val } @@ -221,7 +221,7 @@ func digitVal(ch int) int { func (S *Scanner) scanMantissa(base int) { for digitVal(S.ch) < base { - S.next(); + S.next() } } @@ -272,7 +272,7 @@ exponent: tok = token.FLOAT; S.next(); if S.ch == '-' || S.ch == '+' { - S.next(); + S.next() } S.scanMantissa(10); } @@ -288,7 +288,7 @@ func (S *Scanner) scanDigits(base, length int) { length--; } if length > 0 { - S.error(S.pos, "illegal char escape"); + S.error(S.pos, "illegal char escape") } } @@ -301,15 +301,15 @@ func (S *Scanner) scanEscape(quote int) { case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: // nothing to do case 
'0', '1', '2', '3', '4', '5', '6', '7': - S.scanDigits(8, 3-1); // 1 char read already + S.scanDigits(8, 3-1) // 1 char read already case 'x': - S.scanDigits(16, 2); + S.scanDigits(16, 2) case 'u': - S.scanDigits(16, 4); + S.scanDigits(16, 4) case 'U': - S.scanDigits(16, 8); + S.scanDigits(16, 8) default: - S.error(pos, "illegal char escape"); + S.error(pos, "illegal char escape") } } @@ -328,14 +328,14 @@ func (S *Scanner) scanChar(pos token.Position) { break; } if ch == '\\' { - S.scanEscape('\''); + S.scanEscape('\'') } } S.next(); if n != 1 { - S.error(pos, "illegal character literal"); + S.error(pos, "illegal character literal") } } @@ -351,7 +351,7 @@ func (S *Scanner) scanString(pos token.Position) { break; } if ch == '\\' { - S.scanEscape('"'); + S.scanEscape('"') } } @@ -435,7 +435,7 @@ func (S *Scanner) Scan() (pos token.Position, tok token.Token, lit []byte) { scan_again: // skip white space for S.ch == ' ' || S.ch == '\t' || S.ch == '\n' || S.ch == '\r' { - S.next(); + S.next() } // current token start @@ -444,14 +444,14 @@ scan_again: // determine token value switch ch := S.ch; { case isLetter(ch): - tok = S.scanIdentifier(); + tok = S.scanIdentifier() case digitVal(ch) < 10: - tok = S.scanNumber(false); + tok = S.scanNumber(false) default: S.next(); // always make progress switch ch { case -1: - tok = token.EOF; + tok = token.EOF case '"': tok = token.STRING; S.scanString(pos); @@ -462,10 +462,10 @@ scan_again: tok = token.STRING; S.scanRawString(pos); case ':': - tok = S.switch2(token.COLON, token.DEFINE); + tok = S.switch2(token.COLON, token.DEFINE) case '.': if digitVal(S.ch) < 10 { - tok = S.scanNumber(true); + tok = S.scanNumber(true) } else if S.ch == '.' { S.next(); if S.ch == '.' { @@ -473,69 +473,69 @@ scan_again: tok = token.ELLIPSIS; } } else { - tok = token.PERIOD; + tok = token.PERIOD } case ',': - tok = token.COMMA; + tok = token.COMMA case ';': - tok = token.SEMICOLON; + tok = token.SEMICOLON case '(': - tok = token.LPAREN; + tok = token.LPAREN case ')': - tok = token.RPAREN; + tok = token.RPAREN case '[': - tok = token.LBRACK; + tok = token.LBRACK case ']': - tok = token.RBRACK; + tok = token.RBRACK case '{': - tok = token.LBRACE; + tok = token.LBRACE case '}': - tok = token.RBRACE; + tok = token.RBRACE case '+': - tok = S.switch3(token.ADD, token.ADD_ASSIGN, '+', token.INC); + tok = S.switch3(token.ADD, token.ADD_ASSIGN, '+', token.INC) case '-': - tok = S.switch3(token.SUB, token.SUB_ASSIGN, '-', token.DEC); + tok = S.switch3(token.SUB, token.SUB_ASSIGN, '-', token.DEC) case '*': - tok = S.switch2(token.MUL, token.MUL_ASSIGN); + tok = S.switch2(token.MUL, token.MUL_ASSIGN) case '/': if S.ch == '/' || S.ch == '*' { S.scanComment(pos); tok = token.COMMENT; if S.mode & ScanComments == 0 { - goto scan_again; + goto scan_again } } else { - tok = S.switch2(token.QUO, token.QUO_ASSIGN); + tok = S.switch2(token.QUO, token.QUO_ASSIGN) } case '%': - tok = S.switch2(token.REM, token.REM_ASSIGN); + tok = S.switch2(token.REM, token.REM_ASSIGN) case '^': - tok = S.switch2(token.XOR, token.XOR_ASSIGN); + tok = S.switch2(token.XOR, token.XOR_ASSIGN) case '<': if S.ch == '-' { S.next(); tok = token.ARROW; } else { - tok = S.switch4(token.LSS, token.LEQ, '<', token.SHL, token.SHL_ASSIGN); + tok = S.switch4(token.LSS, token.LEQ, '<', token.SHL, token.SHL_ASSIGN) } case '>': - tok = S.switch4(token.GTR, token.GEQ, '>', token.SHR, token.SHR_ASSIGN); + tok = S.switch4(token.GTR, token.GEQ, '>', token.SHR, token.SHR_ASSIGN) case '=': - tok = S.switch2(token.ASSIGN, token.EQL); + 
tok = S.switch2(token.ASSIGN, token.EQL) case '!': - tok = S.switch2(token.NOT, token.NEQ); + tok = S.switch2(token.NOT, token.NEQ) case '&': if S.ch == '^' { S.next(); tok = S.switch2(token.AND_NOT, token.AND_NOT_ASSIGN); } else { - tok = S.switch3(token.AND, token.AND_ASSIGN, '&', token.LAND); + tok = S.switch3(token.AND, token.AND_ASSIGN, '&', token.LAND) } case '|': - tok = S.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR); + tok = S.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR) default: if S.mode & AllowIllegalChars == 0 { - S.error(pos, "illegal character " + charString(ch)); + S.error(pos, "illegal character " + charString(ch)) } } } diff --git a/src/pkg/go/scanner/scanner_test.go b/src/pkg/go/scanner/scanner_test.go index 0b036e8b4..3bdd71e64 100644 --- a/src/pkg/go/scanner/scanner_test.go +++ b/src/pkg/go/scanner/scanner_test.go @@ -23,11 +23,11 @@ const /* class */ ( func tokenclass(tok token.Token) int { switch { case tok.IsLiteral(): - return literal; + return literal case tok.IsOperator(): - return operator; + return operator case tok.IsKeyword(): - return keyword; + return keyword } return special; } @@ -169,7 +169,7 @@ type TestErrorHandler struct { } func (h *TestErrorHandler) Error(pos token.Position, msg string) { - h.t.Errorf("Error() called (msg = %s)", msg); + h.t.Errorf("Error() called (msg = %s)", msg) } @@ -177,7 +177,7 @@ func NewlineCount(s string) int { n := 0; for i := 0; i < len(s); i++ { if s[i] == '\n' { - n++; + n++ } } return n; @@ -186,16 +186,16 @@ func NewlineCount(s string) int { func checkPos(t *testing.T, lit string, pos, expected token.Position) { if pos.Filename != expected.Filename { - t.Errorf("bad filename for %s: got %s, expected %s", lit, pos.Filename, expected.Filename); + t.Errorf("bad filename for %s: got %s, expected %s", lit, pos.Filename, expected.Filename) } if pos.Offset != expected.Offset { - t.Errorf("bad position for %s: got %d, expected %d", lit, pos.Offset, expected.Offset); + t.Errorf("bad position for %s: got %d, expected %d", lit, pos.Offset, expected.Offset) } if pos.Line != expected.Line { - t.Errorf("bad line for %s: got %d, expected %d", lit, pos.Line, expected.Line); + t.Errorf("bad line for %s: got %d, expected %d", lit, pos.Line, expected.Line) } if pos.Column != expected.Column { - t.Errorf("bad column for %s: got %d, expected %d", lit, pos.Column, expected.Column); + t.Errorf("bad column for %s: got %d, expected %d", lit, pos.Column, expected.Column) } } @@ -205,7 +205,7 @@ func TestScan(t *testing.T) { // make source var src string; for _, e := range tokens { - src += e.lit + whitespace; + src += e.lit + whitespace } whitespace_linecount := NewlineCount(whitespace); @@ -216,7 +216,7 @@ func TestScan(t *testing.T) { func(pos token.Position, tok token.Token, litb []byte) bool { e := elt{token.EOF, "", special}; if index < len(tokens) { - e = tokens[index]; + e = tokens[index] } lit := string(litb); if tok == token.EOF { @@ -225,13 +225,13 @@ func TestScan(t *testing.T) { } checkPos(t, lit, pos, epos); if tok != e.tok { - t.Errorf("bad token for %s: got %s, expected %s", lit, tok.String(), e.tok.String()); + t.Errorf("bad token for %s: got %s, expected %s", lit, tok.String(), e.tok.String()) } if e.tok.IsLiteral() && lit != e.lit { - t.Errorf("bad literal for %s: got %s, expected %s", lit, lit, e.lit); + t.Errorf("bad literal for %s: got %s, expected %s", lit, lit, e.lit) } if tokenclass(tok) != e.class { - t.Errorf("bad class for %s: got %d, expected %d", lit, tokenclass(tok), e.class); + t.Errorf("bad class for 
%s: got %d, expected %d", lit, tokenclass(tok), e.class) } epos.Offset += len(lit)+len(whitespace); epos.Line += NewlineCount(lit) + whitespace_linecount; @@ -244,7 +244,7 @@ func TestScan(t *testing.T) { return tok != token.EOF; }); if nerrors != 0 { - t.Errorf("found %d errors", nerrors); + t.Errorf("found %d errors", nerrors) } } @@ -280,7 +280,7 @@ func TestLineComments(t *testing.T) { // make source var src string; for _, e := range segments { - src += e.srcline; + src += e.srcline } // verify scan @@ -292,7 +292,7 @@ func TestLineComments(t *testing.T) { } if S.ErrorCount != 0 { - t.Errorf("found %d errors", S.ErrorCount); + t.Errorf("found %d errors", S.ErrorCount) } } @@ -307,18 +307,18 @@ func TestInit(t *testing.T) { s.Scan(); // true _, tok, _ := s.Scan(); // { if tok != token.LBRACE { - t.Errorf("bad token: got %s, expected %s", tok.String(), token.LBRACE); + t.Errorf("bad token: got %s, expected %s", tok.String(), token.LBRACE) } // 2nd init s.Init("", strings.Bytes("go true { ]"), nil, 0); _, tok, _ = s.Scan(); // go if tok != token.GO { - t.Errorf("bad token: got %s, expected %s", tok.String(), token.GO); + t.Errorf("bad token: got %s, expected %s", tok.String(), token.GO) } if s.ErrorCount != 0 { - t.Errorf("found %d errors", s.ErrorCount); + t.Errorf("found %d errors", s.ErrorCount) } } @@ -331,15 +331,15 @@ func TestIllegalChars(t *testing.T) { for offs, ch := range src { pos, tok, lit := s.Scan(); if pos.Offset != offs { - t.Errorf("bad position for %s: got %d, expected %d", string(lit), pos.Offset, offs); + t.Errorf("bad position for %s: got %d, expected %d", string(lit), pos.Offset, offs) } if tok == token.ILLEGAL && string(lit) != string(ch) { - t.Errorf("bad token: got %s, expected %s", string(lit), string(ch)); + t.Errorf("bad token: got %s, expected %s", string(lit), string(ch)) } } if s.ErrorCount != 0 { - t.Errorf("found %d errors", s.ErrorCount); + t.Errorf("found %d errors", s.ErrorCount) } } @@ -358,7 +358,7 @@ func TestStdErrorHander(t *testing.T) { v := NewErrorVector(); nerrors := Tokenize("File1", strings.Bytes(src), v, 0, func(pos token.Position, tok token.Token, litb []byte) bool { - return tok != token.EOF; + return tok != token.EOF }); list := v.GetErrorList(Raw); @@ -380,6 +380,6 @@ func TestStdErrorHander(t *testing.T) { } if v.ErrorCount() != nerrors { - t.Errorf("found %d errors, expected %d", v.ErrorCount(), nerrors); + t.Errorf("found %d errors, expected %d", v.ErrorCount(), nerrors) } } diff --git a/src/pkg/go/token/token.go b/src/pkg/go/token/token.go index 28c335461..10097efbd 100644 --- a/src/pkg/go/token/token.go +++ b/src/pkg/go/token/token.go @@ -239,7 +239,7 @@ var tokens = map[Token]string{ // func (tok Token) String() string { if str, exists := tokens[tok]; exists { - return str; + return str } return "token(" + strconv.Itoa(int(tok)) + ")"; } @@ -265,17 +265,17 @@ const ( func (op Token) Precedence() int { switch op { case LOR: - return 1; + return 1 case LAND: - return 2; + return 2 case ARROW: - return 3; + return 3 case EQL, NEQ, LSS, LEQ, GTR, GEQ: - return 4; + return 4 case ADD, SUB, OR, XOR: - return 5; + return 5 case MUL, QUO, REM, SHL, SHR, AND, AND_NOT: - return 6; + return 6 } return LowestPrec; } @@ -286,7 +286,7 @@ var keywords map[string]Token func init() { keywords = make(map[string]Token); for i := keyword_beg + 1; i < keyword_end; i++ { - keywords[tokens[i]] = i; + keywords[tokens[i]] = i } } @@ -297,7 +297,7 @@ func Lookup(ident []byte) Token { // TODO Maps with []byte key are illegal because []byte does not // 
support == . Should find a more efficient solution eventually. if tok, is_keyword := keywords[string(ident)]; is_keyword { - return tok; + return tok } return IDENT; } @@ -346,12 +346,12 @@ func (pos Position) String() string { s := pos.Filename; if pos.IsValid() { if s != "" { - s += ":"; + s += ":" } s += fmt.Sprintf("%d:%d", pos.Line, pos.Column); } if s == "" { - s = "???"; + s = "???" } return s; } |
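The pattern repeated throughout these hunks is a single mechanical style change: the statement-terminating semicolon before a closing brace (in single-statement if bodies, case clauses, and the like) is dropped, the printer's fewerSemis mode is switched on, and the golden test data is regenerated to match. A minimal before/after sketch of that change, mirroring the abs example from testdata/comments.golden above (illustrative only, not part of the patch; both forms are syntactically valid Go, the semicolon is simply no longer emitted):

	// Before: the printer emitted an explicit semicolon at the end
	// of each statement, even immediately before a closing brace.
	func abs(x int) int {
		if x < 0 {
			return -x;
		}
		return x;
	}

	// After: with fewerSemis enabled, the semicolon before the closing
	// brace is elided, which is the form the updated golden files expect.
	func abs(x int) int {
		if x < 0 {
			return -x
		}
		return x
	}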