Diffstat (limited to 'src/pkg/go')
-rw-r--r--  src/pkg/go/build/build_test.go      |   9
-rw-r--r--  src/pkg/go/build/dir.go             | 446
-rw-r--r--  src/pkg/go/build/syslist_test.go    |   4
-rw-r--r--  src/pkg/go/printer/printer.go       |  81
-rw-r--r--  src/pkg/go/printer/printer_test.go  |  14
-rw-r--r--  src/pkg/go/token/Makefile           |   1
-rw-r--r--  src/pkg/go/token/position.go        |  20
-rw-r--r--  src/pkg/go/token/serialize.go       |  62
-rw-r--r--  src/pkg/go/token/serialize_test.go  | 105
9 files changed, 635 insertions, 107 deletions
diff --git a/src/pkg/go/build/build_test.go b/src/pkg/go/build/build_test.go
index 592ebbd9e..68a4180c9 100644
--- a/src/pkg/go/build/build_test.go
+++ b/src/pkg/go/build/build_test.go
@@ -27,7 +27,7 @@ var buildPkgs = []struct {
&DirInfo{
GoFiles: []string{"pkgtest.go"},
SFiles: []string{"sqrt_" + runtime.GOARCH + ".s"},
- PkgName: "pkgtest",
+ Package: "pkgtest",
Imports: []string{"os"},
TestImports: []string{"fmt", "pkgtest"},
TestGoFiles: sortstr([]string{"sqrt_test.go", "sqrt_" + runtime.GOARCH + "_test.go"}),
@@ -38,7 +38,7 @@ var buildPkgs = []struct {
"go/build/cmdtest",
&DirInfo{
GoFiles: []string{"main.go"},
- PkgName: "main",
+ Package: "main",
Imports: []string{"go/build/pkgtest"},
},
},
@@ -48,7 +48,7 @@ var buildPkgs = []struct {
CgoFiles: []string{"cgotest.go"},
CFiles: []string{"cgotest.c"},
Imports: []string{"C", "unsafe"},
- PkgName: "cgotest",
+ Package: "cgotest",
},
},
}
@@ -59,8 +59,7 @@ func TestBuild(t *testing.T) {
for _, tt := range buildPkgs {
tree := Path[0] // Goroot
dir := filepath.Join(tree.SrcDir(), tt.dir)
-
- info, err := ScanDir(dir, true)
+ info, err := ScanDir(dir)
if err != nil {
t.Errorf("ScanDir(%#q): %v", tt.dir, err)
continue
diff --git a/src/pkg/go/build/dir.go b/src/pkg/go/build/dir.go
index fa4d9e913..3ee10ab34 100644
--- a/src/pkg/go/build/dir.go
+++ b/src/pkg/go/build/dir.go
@@ -5,16 +5,22 @@
package build
import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/doc"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
+ "path"
"path/filepath"
+ "runtime"
"sort"
"strconv"
"strings"
- "runtime"
+ "unicode"
)
// A Context specifies the supporting context for a build.
@@ -22,14 +28,55 @@ type Context struct {
GOARCH string // target architecture
GOOS string // target operating system
// TODO(rsc,adg): GOPATH
+
+ // By default, ScanDir uses the operating system's
+ // file system calls to read directories and files.
+ // Callers can override those calls to provide other
+ // ways to read data by setting ReadDir and ReadFile.
+ // ScanDir does not make any assumptions about the
+ // format of the strings dir and file: they can be
+ // slash-separated, backslash-separated, even URLs.
+
+ // ReadDir returns a slice of *os.FileInfo, sorted by Name,
+ // describing the content of the named directory.
+ // The dir argument is the argument to ScanDir.
+ // If ReadDir is nil, ScanDir uses ioutil.ReadDir.
+ ReadDir func(dir string) (fi []*os.FileInfo, err os.Error)
+
+ // ReadFile returns the content of the file named file
+ // in the directory named dir. The dir argument is the
+ // argument to ScanDir, and the file argument is the
+ // Name field from an *os.FileInfo returned by ReadDir.
+ // The returned path is the full name of the file, to be
+ // used in error messages.
+ //
+ // If ReadFile is nil, ScanDir uses filepath.Join(dir, file)
+ // as the path and ioutil.ReadFile to read the data.
+ ReadFile func(dir, file string) (path string, content []byte, err os.Error)
+}
+
+func (ctxt *Context) readDir(dir string) ([]*os.FileInfo, os.Error) {
+ if f := ctxt.ReadDir; f != nil {
+ return f(dir)
+ }
+ return ioutil.ReadDir(dir)
+}
+
+func (ctxt *Context) readFile(dir, file string) (string, []byte, os.Error) {
+ if f := ctxt.ReadFile; f != nil {
+ return f(dir, file)
+ }
+ p := filepath.Join(dir, file)
+ content, err := ioutil.ReadFile(p)
+ return p, content, err
}
// The DefaultContext is the default Context for builds.
// It uses the GOARCH and GOOS environment variables
// if set, or else the compiled code's GOARCH and GOOS.
var DefaultContext = Context{
- envOr("GOARCH", runtime.GOARCH),
- envOr("GOOS", runtime.GOOS),
+ GOARCH: envOr("GOARCH", runtime.GOARCH),
+ GOOS: envOr("GOOS", runtime.GOOS),
}
func envOr(name, def string) string {
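
The two hooks above are independent: a caller can copy DefaultContext and override just one of them. As a rough usage sketch against the pre-Go 1 library used in this change (os.Error, ioutil.ReadFile); the logging is purely illustrative and not part of the patch:

	package main

	import (
		"fmt"
		"go/build"
		"io/ioutil"
		"log"
		"os"
		"path/filepath"
	)

	func main() {
		ctxt := build.DefaultContext
		// Intercept file reads only; ReadDir stays nil, so ScanDir keeps
		// using ioutil.ReadDir for the directory listing.
		ctxt.ReadFile = func(dir, file string) (string, []byte, os.Error) {
			p := filepath.Join(dir, file)
			log.Println("ScanDir reads", p)
			data, err := ioutil.ReadFile(p)
			return p, data, err
		}
		info, err := ctxt.ScanDir(".")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(info.Package, info.GoFiles)
	}
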
@@ -41,36 +88,48 @@ func envOr(name, def string) string {
}
type DirInfo struct {
- GoFiles []string // .go files in dir (excluding CgoFiles)
- CgoFiles []string // .go files that import "C"
- CFiles []string // .c files in dir
- SFiles []string // .s files in dir
- Imports []string // All packages imported by GoFiles
- TestImports []string // All packages imported by (X)TestGoFiles
- PkgName string // Name of package in dir
+ Package string // Name of package in dir
+ PackageComment *ast.CommentGroup // Package comments from GoFiles
+ ImportPath string // Import path of package in dir
+ Imports []string // All packages imported by GoFiles
+
+ // Source files
+ GoFiles []string // .go files in dir (excluding CgoFiles)
+ CFiles []string // .c files in dir
+ SFiles []string // .s files in dir
+ CgoFiles []string // .go files that import "C"
+
+ // Cgo directives
+ CgoPkgConfig []string // Cgo pkg-config directives
+ CgoCFLAGS []string // Cgo CFLAGS directives
+ CgoLDFLAGS []string // Cgo LDFLAGS directives
+
+ // Test information
TestGoFiles []string // _test.go files in package
XTestGoFiles []string // _test.go files outside package
+ TestImports []string // All packages imported by (X)TestGoFiles
}
func (d *DirInfo) IsCommand() bool {
- return d.PkgName == "main"
+ // TODO(rsc): This is at least a little bogus.
+ return d.Package == "main"
}
// ScanDir calls DefaultContext.ScanDir.
-func ScanDir(dir string, allowMain bool) (info *DirInfo, err os.Error) {
- return DefaultContext.ScanDir(dir, allowMain)
+func ScanDir(dir string) (info *DirInfo, err os.Error) {
+ return DefaultContext.ScanDir(dir)
}
// ScanDir returns a structure with details about the Go content found
// in the given directory. The file lists exclude:
//
-// - files in package main (unless allowMain is true)
+// - files in package main (unless no other package is found)
// - files in package documentation
// - files ending in _test.go
-// - files starting with _ or .
+// - files starting with _ or .
//
-func (ctxt *Context) ScanDir(dir string, allowMain bool) (info *DirInfo, err os.Error) {
- dirs, err := ioutil.ReadDir(dir)
+func (ctxt *Context) ScanDir(dir string) (info *DirInfo, err os.Error) {
+ dirs, err := ctxt.readDir(dir)
if err != nil {
return nil, err
}
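
Putting the reworked DirInfo together, a caller of the simplified ScanDir might inspect it as in the sketch below; the directory path is made up, and the error text reflects the new failure modes now that allowMain is gone:

	package main

	import (
		"fmt"
		"go/build"
		"log"
	)

	func main() {
		info, err := build.ScanDir("src/pkg/go/build/cgotest") // hypothetical dir
		if err != nil {
			// e.g. "...: found packages foo and bar" or "...: no Go source files"
			log.Fatal(err)
		}
		fmt.Println("package:   ", info.Package)
		fmt.Println("go files:  ", info.GoFiles)
		fmt.Println("cgo files: ", info.CgoFiles, info.CgoCFLAGS, info.CgoLDFLAGS)
		fmt.Println("test files:", info.TestGoFiles, info.XTestGoFiles)
		if info.IsCommand() {
			fmt.Println("directory builds a command, not a library")
		}
	}
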
@@ -80,72 +139,118 @@ func (ctxt *Context) ScanDir(dir string, allowMain bool) (info *DirInfo, err os.
testImported := make(map[string]bool)
fset := token.NewFileSet()
for _, d := range dirs {
+ if !d.IsRegular() {
+ continue
+ }
if strings.HasPrefix(d.Name, "_") ||
strings.HasPrefix(d.Name, ".") {
continue
}
- if !ctxt.goodOSArch(d.Name) {
+ if !ctxt.goodOSArchFile(d.Name) {
+ continue
+ }
+
+ ext := path.Ext(d.Name)
+ switch ext {
+ case ".go", ".c", ".s":
+ // tentatively okay
+ default:
+ // skip
continue
}
- isTest := false
- switch filepath.Ext(d.Name) {
- case ".go":
- isTest = strings.HasSuffix(d.Name, "_test.go")
+ // Look for +build comments to accept or reject the file.
+ filename, data, err := ctxt.readFile(dir, d.Name)
+ if err != nil {
+ return nil, err
+ }
+ if !ctxt.shouldBuild(data) {
+ continue
+ }
+
+ // Going to save the file. For non-Go files, can stop here.
+ switch ext {
case ".c":
di.CFiles = append(di.CFiles, d.Name)
continue
case ".s":
di.SFiles = append(di.SFiles, d.Name)
continue
- default:
- continue
}
- filename := filepath.Join(dir, d.Name)
- pf, err := parser.ParseFile(fset, filename, nil, parser.ImportsOnly)
+ pf, err := parser.ParseFile(fset, filename, data, parser.ImportsOnly|parser.ParseComments)
if err != nil {
return nil, err
}
+
pkg := string(pf.Name.Name)
- if pkg == "main" && !allowMain {
+ if pkg == "main" && di.Package != "" && di.Package != "main" {
continue
}
if pkg == "documentation" {
continue
}
+
+ isTest := strings.HasSuffix(d.Name, "_test.go")
if isTest && strings.HasSuffix(pkg, "_test") {
pkg = pkg[:len(pkg)-len("_test")]
}
- if di.PkgName == "" {
- di.PkgName = pkg
- } else if di.PkgName != pkg {
- // Only if all files in the directory are in package main
- // do we return PkgName=="main".
- // A mix of main and another package reverts
- // to the original (allowMain=false) behaviour.
- if pkg == "main" || di.PkgName == "main" {
- return ScanDir(dir, false)
+
+ if pkg != di.Package && di.Package == "main" {
+ // Found non-main package but was recording
+ // information about package main. Reset.
+ di = DirInfo{}
+ }
+ if di.Package == "" {
+ di.Package = pkg
+ } else if pkg != di.Package {
+ return nil, fmt.Errorf("%s: found packages %s and %s", dir, pkg, di.Package)
+ }
+ if pf.Doc != nil {
+ if di.PackageComment != nil {
+ di.PackageComment.List = append(di.PackageComment.List, pf.Doc.List...)
+ } else {
+ di.PackageComment = pf.Doc
}
- return nil, os.NewError("multiple package names in " + dir)
}
+
+ // Record imports and information about cgo.
isCgo := false
- for _, spec := range pf.Imports {
- quoted := string(spec.Path.Value)
- path, err := strconv.Unquote(quoted)
- if err != nil {
- log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
+ for _, decl := range pf.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
}
- if isTest {
- testImported[path] = true
- } else {
- imported[path] = true
- }
- if path == "C" {
+ for _, dspec := range d.Specs {
+ spec, ok := dspec.(*ast.ImportSpec)
+ if !ok {
+ continue
+ }
+ quoted := string(spec.Path.Value)
+ path, err := strconv.Unquote(quoted)
+ if err != nil {
+ log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
+ }
if isTest {
- return nil, os.NewError("use of cgo in test " + filename)
+ testImported[path] = true
+ } else {
+ imported[path] = true
+ }
+ if path == "C" {
+ if isTest {
+ return nil, fmt.Errorf("%s: use of cgo in test not supported", filename)
+ }
+ cg := spec.Doc
+ if cg == nil && len(d.Specs) == 1 {
+ cg = d.Doc
+ }
+ if cg != nil {
+ if err := ctxt.saveCgo(filename, &di, cg); err != nil {
+ return nil, err
+ }
+ }
+ isCgo = true
}
- isCgo = true
}
}
if isCgo {
@@ -160,6 +265,9 @@ func (ctxt *Context) ScanDir(dir string, allowMain bool) (info *DirInfo, err os.
di.GoFiles = append(di.GoFiles, d.Name)
}
}
+ if di.Package == "" {
+ return nil, fmt.Errorf("%s: no Go source files", dir)
+ }
di.Imports = make([]string, len(imported))
i := 0
for p := range imported {
@@ -172,13 +280,245 @@ func (ctxt *Context) ScanDir(dir string, allowMain bool) (info *DirInfo, err os.
di.TestImports[i] = p
i++
}
- // File name lists are sorted because ioutil.ReadDir sorts.
+ // File name lists are sorted because ReadDir sorts.
sort.Strings(di.Imports)
sort.Strings(di.TestImports)
return &di, nil
}
-// goodOSArch returns false if the name contains a $GOOS or $GOARCH
+var slashslash = []byte("//")
+var plusBuild = []byte("+build")
+
+// shouldBuild reports whether it is okay to use this file.
+// The rule is that in the file's leading run of // comments
+// and blank lines, which must be followed by a blank line
+// (to avoid including a Go package clause doc comment),
+// lines beginning with '// +build' are taken as build directives.
+//
+// The file is accepted only if each such line lists something
+// matching the file. For example:
+//
+// // +build windows linux
+//
+// marks the file as applicable only on Windows and Linux.
+//
+func (ctxt *Context) shouldBuild(content []byte) bool {
+ // Pass 1. Identify leading run of // comments and blank lines,
+ // which must be followed by a blank line.
+ end := 0
+ p := content
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if len(line) == 0 { // Blank line
+ end = cap(content) - cap(line) // &line[0] - &content[0]
+ continue
+ }
+ if !bytes.HasPrefix(line, slashslash) { // Not comment line
+ break
+ }
+ }
+ content = content[:end]
+
+ // Pass 2. Process each line in the run.
+ p = content
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if bytes.HasPrefix(line, slashslash) {
+ line = bytes.TrimSpace(line[len(slashslash):])
+ if len(line) > 0 && line[0] == '+' {
+ // Looks like a comment +line.
+ f := strings.Fields(string(line))
+ if f[0] == "+build" {
+ ok := false
+ for _, tok := range f[1:] {
+ if ctxt.matchOSArch(tok) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return false // this one doesn't match
+ }
+ }
+ }
+ }
+ }
+ return true // everything matches
+}
+
+// saveCgo saves the information from the #cgo lines in the import "C" comment.
+// These lines set CFLAGS and LDFLAGS and pkg-config directives that affect
+// the way cgo's C code is built.
+//
+// TODO(rsc): This duplicates code in cgo.
+// Once the dust settles, remove this code from cgo.
+func (ctxt *Context) saveCgo(filename string, di *DirInfo, cg *ast.CommentGroup) os.Error {
+ text := doc.CommentText(cg)
+ for _, line := range strings.Split(text, "\n") {
+ orig := line
+
+ // Line is
+ // #cgo [GOOS/GOARCH...] LDFLAGS: stuff
+ //
+ line = strings.TrimSpace(line)
+ if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
+ continue
+ }
+
+ // Split at colon.
+ line = strings.TrimSpace(line[4:])
+ i := strings.Index(line, ":")
+ if i < 0 {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+ line, argstr := line[:i], line[i+1:]
+
+ // Parse GOOS/GOARCH stuff.
+ f := strings.Fields(line)
+ if len(f) < 1 {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+
+ cond, verb := f[:len(f)-1], f[len(f)-1]
+ if len(cond) > 0 {
+ ok := false
+ for _, c := range cond {
+ if ctxt.matchOSArch(c) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ continue
+ }
+ }
+
+ args, err := splitQuoted(argstr)
+ if err != nil {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+ for _, arg := range args {
+ if !safeName(arg) {
+ return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg)
+ }
+ }
+
+ switch verb {
+ case "CFLAGS":
+ di.CgoCFLAGS = append(di.CgoCFLAGS, args...)
+ case "LDFLAGS":
+ di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...)
+ case "pkg-config":
+ di.CgoPkgConfig = append(di.CgoPkgConfig, args...)
+ default:
+ return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig)
+ }
+ }
+ return nil
+}
+
+var safeBytes = []byte("+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz")
+
+func safeName(s string) bool {
+ if s == "" {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; c < 0x80 && bytes.IndexByte(safeBytes, c) < 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// splitQuoted splits the string s around each instance of one or more consecutive
+// white space characters while taking into account quotes and escaping, and
+// returns an array of substrings of s or an empty list if s contains only white space.
+// Single quotes and double quotes are recognized to prevent splitting within the
+// quoted region, and are removed from the resulting substrings. If a quote in s
+// isn't closed err will be set and r will have the unclosed argument as the
+// last element. The backslash is used for escaping.
+//
+// For example, the following string:
+//
+// a b:"c d" 'e''f' "g\""
+//
+// Would be parsed as:
+//
+// []string{"a", "b:c d", "ef", `g"`}
+//
+func splitQuoted(s string) (r []string, err os.Error) {
+ var args []string
+ arg := make([]int, len(s))
+ escaped := false
+ quoted := false
+ quote := 0
+ i := 0
+ for _, rune := range s {
+ switch {
+ case escaped:
+ escaped = false
+ case rune == '\\':
+ escaped = true
+ continue
+ case quote != 0:
+ if rune == quote {
+ quote = 0
+ continue
+ }
+ case rune == '"' || rune == '\'':
+ quoted = true
+ quote = rune
+ continue
+ case unicode.IsSpace(rune):
+ if quoted || i > 0 {
+ quoted = false
+ args = append(args, string(arg[:i]))
+ i = 0
+ }
+ continue
+ }
+ arg[i] = rune
+ i++
+ }
+ if quoted || i > 0 {
+ args = append(args, string(arg[:i]))
+ }
+ if quote != 0 {
+ err = os.NewError("unclosed quote")
+ } else if escaped {
+ err = os.NewError("unfinished escaping")
+ }
+ return args, err
+}
+
+// matchOSArch returns true if the name is one of:
+//
+// $GOOS
+// $GOARCH
+// $GOOS/$GOARCH
+//
+func (ctxt *Context) matchOSArch(name string) bool {
+ if name == ctxt.GOOS || name == ctxt.GOARCH {
+ return true
+ }
+ i := strings.Index(name, "/")
+ return i >= 0 && name[:i] == ctxt.GOOS && name[i+1:] == ctxt.GOARCH
+}
+
+// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH
// suffix which does not match the current system.
// The recognized name formats are:
//
@@ -189,7 +529,7 @@ func (ctxt *Context) ScanDir(dir string, allowMain bool) (info *DirInfo, err os.
// name_$(GOARCH)_test.*
// name_$(GOOS)_$(GOARCH)_test.*
//
-func (ctxt *Context) goodOSArch(name string) bool {
+func (ctxt *Context) goodOSArchFile(name string) bool {
if dot := strings.Index(name, "."); dot != -1 {
name = name[:dot]
}
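
For reference, a source file that exercises both of the new pieces of machinery (shouldBuild's // +build lines and saveCgo's #cgo directives) could look like the hypothetical file below. The package name and flags are invented, but the layout follows the rules in the code above: the +build line must sit in the leading comment block and be separated from the package clause's doc comment by a blank line.

	// Keep this file only on Linux, or on Darwin when GOARCH is amd64.
	// +build linux darwin/amd64

	// Package demo is a hypothetical cgo-using package.
	package demo

	// #cgo CFLAGS: -I/opt/demo/include
	// #cgo linux LDFLAGS: -lm
	// #cgo pkg-config: cairo
	import "C"
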
diff --git a/src/pkg/go/build/syslist_test.go b/src/pkg/go/build/syslist_test.go
index 2e8b4c865..d27630d75 100644
--- a/src/pkg/go/build/syslist_test.go
+++ b/src/pkg/go/build/syslist_test.go
@@ -55,8 +55,8 @@ var tests = []GoodFileTest{
func TestGoodOSArch(t *testing.T) {
for _, test := range tests {
- if DefaultContext.goodOSArch(test.name) != test.result {
- t.Fatalf("goodOSArch(%q) != %v", test.name, test.result)
+ if DefaultContext.goodOSArchFile(test.name) != test.result {
+ t.Fatalf("goodOSArchFile(%q) != %v", test.name, test.result)
}
}
}
diff --git a/src/pkg/go/printer/printer.go b/src/pkg/go/printer/printer.go
index 871fefa0c..bfabd749a 100644
--- a/src/pkg/go/printer/printer.go
+++ b/src/pkg/go/printer/printer.go
@@ -13,7 +13,6 @@ import (
"io"
"os"
"path/filepath"
- "runtime"
"tabwriter"
)
@@ -55,12 +54,17 @@ const (
noExtraLinebreak
)
+// local error wrapper so we can distinguish os.Errors we want to return
+// as errors from genuine panics (which we don't want to return as errors)
+type osError struct {
+ err os.Error
+}
+
type printer struct {
// Configuration (does not change after initialization)
output io.Writer
Config
- fset *token.FileSet
- errors chan os.Error
+ fset *token.FileSet
// Current state
written int // number of bytes written
@@ -95,7 +99,6 @@ func (p *printer) init(output io.Writer, cfg *Config, fset *token.FileSet, nodeS
p.output = output
p.Config = *cfg
p.fset = fset
- p.errors = make(chan os.Error)
p.wsbuf = make([]whiteSpace, 0, 16) // whitespace sequences are short
p.nodeSizes = nodeSizes
}
@@ -143,8 +146,7 @@ func (p *printer) write0(data []byte) {
n, err := p.output.Write(data)
p.written += n
if err != nil {
- p.errors <- err
- runtime.Goexit()
+ panic(osError{err})
}
}
}
@@ -923,7 +925,7 @@ type Config struct {
}
// fprint implements Fprint and takes a nodesSizes map for setting up the printer state.
-func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (int, os.Error) {
+func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (written int, err os.Error) {
// redirect output through a trimmer to eliminate trailing whitespace
// (Input to a tabwriter must be untrimmed since trailing tabs provide
// formatting information. The tabwriter could provide trimming
@@ -950,47 +952,50 @@ func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{
output = tw
}
- // setup printer and print node
+ // setup printer
var p printer
p.init(output, cfg, fset, nodeSizes)
- go func() {
- switch n := node.(type) {
- case ast.Expr:
- p.useNodeComments = true
- p.expr(n, ignoreMultiLine)
- case ast.Stmt:
- p.useNodeComments = true
- // A labeled statement will un-indent to position the
- // label. Set indent to 1 so we don't get indent "underflow".
- if _, labeledStmt := n.(*ast.LabeledStmt); labeledStmt {
- p.indent = 1
- }
- p.stmt(n, false, ignoreMultiLine)
- case ast.Decl:
- p.useNodeComments = true
- p.decl(n, ignoreMultiLine)
- case ast.Spec:
- p.useNodeComments = true
- p.spec(n, 1, false, ignoreMultiLine)
- case *ast.File:
- p.comments = n.Comments
- p.useNodeComments = n.Comments == nil
- p.file(n)
- default:
- p.errors <- fmt.Errorf("printer.Fprint: unsupported node type %T", n)
- runtime.Goexit()
+ defer func() {
+ written = p.written
+ if e := recover(); e != nil {
+ err = e.(osError).err // re-panics if it's not a local osError
}
- p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF)
- p.errors <- nil // no errors
}()
- err := <-p.errors // wait for completion of goroutine
+
+ // print node
+ switch n := node.(type) {
+ case ast.Expr:
+ p.useNodeComments = true
+ p.expr(n, ignoreMultiLine)
+ case ast.Stmt:
+ p.useNodeComments = true
+ // A labeled statement will un-indent to position the
+ // label. Set indent to 1 so we don't get indent "underflow".
+ if _, labeledStmt := n.(*ast.LabeledStmt); labeledStmt {
+ p.indent = 1
+ }
+ p.stmt(n, false, ignoreMultiLine)
+ case ast.Decl:
+ p.useNodeComments = true
+ p.decl(n, ignoreMultiLine)
+ case ast.Spec:
+ p.useNodeComments = true
+ p.spec(n, 1, false, ignoreMultiLine)
+ case *ast.File:
+ p.comments = n.Comments
+ p.useNodeComments = n.Comments == nil
+ p.file(n)
+ default:
+ panic(osError{fmt.Errorf("printer.Fprint: unsupported node type %T", n)})
+ }
+ p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF)
// flush tabwriter, if any
if tw != nil {
tw.Flush() // ignore errors
}
- return p.written, err
+ return
}
// Fprint "pretty-prints" an AST node to output and returns the number
diff --git a/src/pkg/go/printer/printer_test.go b/src/pkg/go/printer/printer_test.go
index ff2d906b5..a644aa383 100644
--- a/src/pkg/go/printer/printer_test.go
+++ b/src/pkg/go/printer/printer_test.go
@@ -7,10 +7,10 @@ package printer
import (
"bytes"
"flag"
- "io/ioutil"
"go/ast"
"go/parser"
"go/token"
+ "io/ioutil"
"path/filepath"
"testing"
"time"
@@ -192,3 +192,15 @@ func TestLineComments(t *testing.T) {
t.Errorf("got %d, expected %d\n", nlines, expected)
}
}
+
+// Verify that the printer can be invoked during initialization.
+func init() {
+ const name = "foobar"
+ var buf bytes.Buffer
+ if err := Fprint(&buf, fset, &ast.Ident{Name: name}); err != nil {
+ panic(err)
+ }
+ if s := buf.String(); s != name {
+ panic("got " + s + ", want " + name)
+ }
+}
diff --git a/src/pkg/go/token/Makefile b/src/pkg/go/token/Makefile
index 4a4e64dc8..b13b0442b 100644
--- a/src/pkg/go/token/Makefile
+++ b/src/pkg/go/token/Makefile
@@ -7,6 +7,7 @@ include ../../../Make.inc
TARG=go/token
GOFILES=\
position.go\
+ serialize.go\
token.go\
include ../../../Make.pkg
diff --git a/src/pkg/go/token/position.go b/src/pkg/go/token/position.go
index c559e19f8..9155b501d 100644
--- a/src/pkg/go/token/position.go
+++ b/src/pkg/go/token/position.go
@@ -136,10 +136,14 @@ func (s *FileSet) Position(p Pos) (pos Position) {
return
}
+// A lineInfo object describes alternative file and line number
+// information (such as provided via a //line comment in a .go
+// file) for a given file offset.
type lineInfo struct {
- offset int
- filename string
- line int
+ // fields are exported to make them accessible to gob
+ Offset int
+ Filename string
+ Line int
}
// AddLineInfo adds alternative file and line number information for
@@ -152,7 +156,7 @@ type lineInfo struct {
//
func (f *File) AddLineInfo(offset int, filename string, line int) {
f.set.mutex.Lock()
- if i := len(f.infos); i == 0 || f.infos[i-1].offset < offset && offset < f.size {
+ if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
f.infos = append(f.infos, lineInfo{offset, filename, line})
}
f.set.mutex.Unlock()
@@ -317,7 +321,7 @@ func searchInts(a []int, x int) int {
}
func searchLineInfos(a []lineInfo, x int) int {
- return sort.Search(len(a), func(i int) bool { return a[i].offset > x }) - 1
+ return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
}
// info returns the file name, line, and column number for a file offset.
@@ -330,9 +334,9 @@ func (f *File) info(offset int) (filename string, line, column int) {
// almost no files have extra line infos
if i := searchLineInfos(f.infos, offset); i >= 0 {
alt := &f.infos[i]
- filename = alt.filename
- if i := searchInts(f.lines, alt.offset); i >= 0 {
- line += alt.line - i - 1
+ filename = alt.Filename
+ if i := searchInts(f.lines, alt.Offset); i >= 0 {
+ line += alt.Line - i - 1
}
}
}
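
The lineInfo fields are exported only so the new serialize.go below can gob-encode them; the alternative-position mechanism itself is unchanged. A sketch of how such entries come about, with invented file names and offsets, mirroring roughly what a //line comment in a generated file produces:

	package main

	import (
		"fmt"
		"go/token"
	)

	func main() {
		fset := token.NewFileSet()
		f := fset.AddFile("y.go", fset.Base(), 200) // hypothetical generated file

		// Line starts in the generated file.
		f.AddLine(0)
		f.AddLine(40)

		// Roughly what a "//line parser.y:10" comment does: positions at or
		// after offset 40 are reported in terms of parser.y instead of y.go.
		f.AddLineInfo(40, "parser.y", 10)

		fmt.Println(fset.Position(f.Pos(60))) // prints a parser.y position
	}
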
diff --git a/src/pkg/go/token/serialize.go b/src/pkg/go/token/serialize.go
new file mode 100644
index 000000000..80a3323f9
--- /dev/null
+++ b/src/pkg/go/token/serialize.go
@@ -0,0 +1,62 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+ "gob"
+ "io"
+ "os"
+)
+
+type serializedFile struct {
+ // fields correspond 1:1 to fields with same (lower-case) name in File
+ Name string
+ Base int
+ Size int
+ Lines []int
+ Infos []lineInfo
+}
+
+type serializedFileSet struct {
+ Base int
+ Files []serializedFile
+}
+
+// Read reads the fileset from r into s; s must not be nil.
+func (s *FileSet) Read(r io.Reader) os.Error {
+ var ss serializedFileSet
+ if err := gob.NewDecoder(r).Decode(&ss); err != nil {
+ return err
+ }
+
+ s.mutex.Lock()
+ s.base = ss.Base
+ files := make([]*File, len(ss.Files))
+ for i := 0; i < len(ss.Files); i++ {
+ f := &ss.Files[i]
+ files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
+ }
+ s.files = files
+ s.last = nil
+ s.mutex.Unlock()
+
+ return nil
+}
+
+// Write writes the fileset s to w.
+func (s *FileSet) Write(w io.Writer) os.Error {
+ var ss serializedFileSet
+
+ s.mutex.Lock()
+ ss.Base = s.base
+ files := make([]serializedFile, len(s.files))
+ for i, f := range s.files {
+ files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
+ }
+ ss.Files = files
+ s.mutex.Unlock()
+
+ return gob.NewEncoder(w).Encode(ss)
+}
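
A minimal round trip through the new methods, much as the test file below does, using a hypothetical file set and an in-memory buffer:

	package main

	import (
		"bytes"
		"fmt"
		"go/token"
		"log"
	)

	func main() {
		fset := token.NewFileSet()
		fset.AddFile("a.go", fset.Base(), 100) // hypothetical file

		var buf bytes.Buffer
		if err := fset.Write(&buf); err != nil { // gob-encode the set
			log.Fatal(err)
		}

		restored := token.NewFileSet()
		if err := restored.Read(&buf); err != nil { // decode into a fresh set
			log.Fatal(err)
		}
		fmt.Println("restored base:", restored.Base())
	}
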
diff --git a/src/pkg/go/token/serialize_test.go b/src/pkg/go/token/serialize_test.go
new file mode 100644
index 000000000..24e419abf
--- /dev/null
+++ b/src/pkg/go/token/serialize_test.go
@@ -0,0 +1,105 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "testing"
+)
+
+// equal returns nil if p and q describe the same file set;
+// otherwise it returns an error describing the discrepancy.
+func equal(p, q *FileSet) os.Error {
+ if p == q {
+ // avoid deadlock if p == q
+ return nil
+ }
+
+ // not strictly needed for the test
+ p.mutex.Lock()
+ q.mutex.Lock()
+ defer q.mutex.Unlock()
+ defer p.mutex.Unlock()
+
+ if p.base != q.base {
+ return fmt.Errorf("different bases: %d != %d", p.base, q.base)
+ }
+
+ if len(p.files) != len(q.files) {
+ return fmt.Errorf("different number of files: %d != %d", len(p.files), len(q.files))
+ }
+
+ for i, f := range p.files {
+ g := q.files[i]
+ if f.set != p {
+ return fmt.Errorf("wrong fileset for %q", f.name)
+ }
+ if g.set != q {
+ return fmt.Errorf("wrong fileset for %q", g.name)
+ }
+ if f.name != g.name {
+ return fmt.Errorf("different filenames: %q != %q", f.name, g.name)
+ }
+ if f.base != g.base {
+ return fmt.Errorf("different base for %q: %d != %d", f.name, f.base, g.base)
+ }
+ if f.size != g.size {
+ return fmt.Errorf("different size for %q: %d != %d", f.name, f.size, g.size)
+ }
+ for j, l := range f.lines {
+ m := g.lines[j]
+ if l != m {
+ return fmt.Errorf("different offsets for %q", f.name)
+ }
+ }
+ for j, l := range f.infos {
+ m := g.infos[j]
+ if l.Offset != m.Offset || l.Filename != m.Filename || l.Line != m.Line {
+ return fmt.Errorf("different infos for %q", f.name)
+ }
+ }
+ }
+
+ // we don't care about .last - it's just a cache
+ return nil
+}
+
+func checkSerialize(t *testing.T, p *FileSet) {
+ var buf bytes.Buffer
+ if err := p.Write(&buf); err != nil {
+ t.Errorf("writing fileset failed: %s", err)
+ return
+ }
+ q := NewFileSet()
+ if err := q.Read(&buf); err != nil {
+ t.Errorf("reading fileset failed: %s", err)
+ return
+ }
+ if err := equal(p, q); err != nil {
+ t.Errorf("filesets not identical: %s", err)
+ }
+}
+
+func TestSerialization(t *testing.T) {
+ p := NewFileSet()
+ checkSerialize(t, p)
+ // add some files
+ for i := 0; i < 10; i++ {
+ f := p.AddFile(fmt.Sprintf("file%d", i), p.Base()+i, i*100)
+ checkSerialize(t, p)
+ // add some lines and alternative file infos
+ line := 1000
+ for offs := 0; offs < f.Size(); offs += 40 + i {
+ f.AddLine(offs)
+ if offs%7 == 0 {
+ f.AddLineInfo(offs, fmt.Sprintf("file%d", offs), line)
+ line += 33
+ }
+ }
+ checkSerialize(t, p)
+ }
+}