Diffstat (limited to 'src/pkg/go/doc')
33 files changed, 3760 insertions, 743 deletions
diff --git a/src/pkg/go/doc/Makefile b/src/pkg/go/doc/Makefile index a5152c793..ac2eeb962 100644 --- a/src/pkg/go/doc/Makefile +++ b/src/pkg/go/doc/Makefile @@ -8,5 +8,15 @@ TARG=go/doc GOFILES=\ comment.go\ doc.go\ + example.go\ + exports.go\ + filter.go\ + reader.go\ include ../../../Make.pkg + +# Script to test heading detection heuristic +CLEANFILES+=headscan +headscan: headscan.go + $(GC) $(GCFLAGS) $(GCIMPORTS) headscan.go + $(LD) -o headscan headscan.$(O) diff --git a/src/pkg/go/doc/comment.go b/src/pkg/go/doc/comment.go index e1989226b..d73b13159 100644 --- a/src/pkg/go/doc/comment.go +++ b/src/pkg/go/doc/comment.go @@ -7,114 +7,14 @@ package doc import ( - "go/ast" "io" "regexp" "strings" - "template" // for HTMLEscape + "text/template" // for HTMLEscape + "unicode" + "unicode/utf8" ) -func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' } - -func stripTrailingWhitespace(s string) string { - i := len(s) - for i > 0 && isWhitespace(s[i-1]) { - i-- - } - return s[0:i] -} - -// CommentText returns the text of comment, -// with the comment markers - //, /*, and */ - removed. -func CommentText(comment *ast.CommentGroup) string { - if comment == nil { - return "" - } - comments := make([]string, len(comment.List)) - for i, c := range comment.List { - comments[i] = string(c.Text) - } - - lines := make([]string, 0, 10) // most comments are less than 10 lines - for _, c := range comments { - // Remove comment markers. - // The parser has given us exactly the comment text. - switch c[1] { - case '/': - //-style comment - c = c[2:] - // Remove leading space after //, if there is one. - // TODO(gri) This appears to be necessary in isolated - // cases (bignum.RatFromString) - why? - if len(c) > 0 && c[0] == ' ' { - c = c[1:] - } - case '*': - /*-style comment */ - c = c[2 : len(c)-2] - } - - // Split on newlines. - cl := strings.Split(c, "\n") - - // Walk lines, stripping trailing white space and adding to list. - for _, l := range cl { - lines = append(lines, stripTrailingWhitespace(l)) - } - } - - // Remove leading blank lines; convert runs of - // interior blank lines to a single blank line. - n := 0 - for _, line := range lines { - if line != "" || n > 0 && lines[n-1] != "" { - lines[n] = line - n++ - } - } - lines = lines[0:n] - - // Add final "" entry to get trailing newline from Join. - if n > 0 && lines[n-1] != "" { - lines = append(lines, "") - } - - return strings.Join(lines, "\n") -} - -// Split bytes into lines. -func split(text []byte) [][]byte { - // count lines - n := 0 - last := 0 - for i, c := range text { - if c == '\n' { - last = i + 1 - n++ - } - } - if last < len(text) { - n++ - } - - // split - out := make([][]byte, n) - last = 0 - n = 0 - for i, c := range text { - if c == '\n' { - out[n] = text[last : i+1] - last = i + 1 - n++ - } - } - if last < len(text) { - out[n] = text[last:] - } - - return out -} - var ( ldquo = []byte("“") rdquo = []byte("”") @@ -122,13 +22,13 @@ var ( // Escape comment text for HTML. If nice is set, // also turn `` into “ and '' into ”. 
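The hunk above converts comment.go from []byte to string and keeps commentEscape's "nice" mode, which HTML-escapes a span of text while turning `` and '' into typographic quotes. A minimal standalone sketch of that behavior, assuming only text/template.HTMLEscape; the helper name below is illustrative and not part of the package:

package main

import (
	"bytes"
	"fmt"
	"text/template" // for HTMLEscape, as in the patched comment.go
)

// escapeNice HTML-escapes s and converts `` / '' pairs into “ and ”,
// mirroring the behavior described for commentEscape's nice mode.
func escapeNice(w *bytes.Buffer, s string) {
	last := 0
	for i := 0; i < len(s)-1; i++ {
		if s[i] == s[i+1] && (s[i] == '`' || s[i] == '\'') {
			template.HTMLEscape(w, []byte(s[last:i]))
			if s[i] == '`' {
				w.WriteString("“")
			} else {
				w.WriteString("”")
			}
			i++ // skip the second quote character
			last = i + 1
		}
	}
	template.HTMLEscape(w, []byte(s[last:]))
}

func main() {
	var buf bytes.Buffer
	escapeNice(&buf, "``quoted'' text with <markup>")
	fmt.Println(buf.String()) // “quoted” text with &lt;markup&gt;
}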
-func commentEscape(w io.Writer, s []byte, nice bool) { +func commentEscape(w io.Writer, text string, nice bool) { last := 0 if nice { - for i := 0; i < len(s)-1; i++ { - ch := s[i] - if ch == s[i+1] && (ch == '`' || ch == '\'') { - template.HTMLEscape(w, s[last:i]) + for i := 0; i < len(text)-1; i++ { + ch := text[i] + if ch == text[i+1] && (ch == '`' || ch == '\'') { + template.HTMLEscape(w, []byte(text[last:i])) last = i + 2 switch ch { case '`': @@ -140,7 +40,7 @@ func commentEscape(w io.Writer, s []byte, nice bool) { } } } - template.HTMLEscape(w, s[last:]) + template.HTMLEscape(w, []byte(text[last:])) } const ( @@ -168,6 +68,9 @@ var ( html_endp = []byte("</p>\n") html_pre = []byte("<pre>") html_endpre = []byte("</pre>\n") + html_h = []byte(`<h3 id="`) + html_hq = []byte(`">`) + html_endh = []byte("</h3>\n") ) // Emphasize and escape a line of text for HTML. URLs are converted into links; @@ -178,9 +81,9 @@ var ( // and the word is converted into a link. If nice is set, the remaining text's // appearance is improved where it makes sense (e.g., `` is turned into “ // and '' into ”). -func emphasize(w io.Writer, line []byte, words map[string]string, nice bool) { +func emphasize(w io.Writer, line string, words map[string]string, nice bool) { for { - m := matchRx.FindSubmatchIndex(line) + m := matchRx.FindStringSubmatchIndex(line) if m == nil { break } @@ -228,7 +131,7 @@ func emphasize(w io.Writer, line []byte, words map[string]string, nice bool) { commentEscape(w, line, nice) } -func indentLen(s []byte) int { +func indentLen(s string) int { i := 0 for i < len(s) && (s[i] == ' ' || s[i] == '\t') { i++ @@ -236,9 +139,11 @@ func indentLen(s []byte) int { return i } -func isBlank(s []byte) bool { return len(s) == 0 || (len(s) == 1 && s[0] == '\n') } +func isBlank(s string) bool { + return len(s) == 0 || (len(s) == 1 && s[0] == '\n') +} -func commonPrefix(a, b []byte) []byte { +func commonPrefix(a, b string) string { i := 0 for i < len(a) && i < len(b) && a[i] == b[i] { i++ @@ -246,7 +151,7 @@ func commonPrefix(a, b []byte) []byte { return a[0:i] } -func unindent(block [][]byte) { +func unindent(block []string) { if len(block) == 0 { return } @@ -268,7 +173,66 @@ func unindent(block [][]byte) { } } -// Convert comment text to formatted HTML. +// heading returns the trimmed line if it passes as a section heading; +// otherwise it returns the empty string. +func heading(line string) string { + line = strings.TrimSpace(line) + if len(line) == 0 { + return "" + } + + // a heading must start with an uppercase letter + r, _ := utf8.DecodeRuneInString(line) + if !unicode.IsLetter(r) || !unicode.IsUpper(r) { + return "" + } + + // it must end in a letter or digit: + r, _ = utf8.DecodeLastRuneInString(line) + if !unicode.IsLetter(r) && !unicode.IsDigit(r) { + return "" + } + + // exclude lines with illegal characters + if strings.IndexAny(line, ",.;:!?+*/=()[]{}_^°&§~%#@<\">\\") >= 0 { + return "" + } + + // allow "'" for possessive "'s" only + for b := line; ; { + i := strings.IndexRune(b, '\'') + if i < 0 { + break + } + if i+1 >= len(b) || b[i+1] != 's' || (i+2 < len(b) && b[i+2] != ' ') { + return "" // not followed by "s " + } + b = b[i+2:] + } + + return line +} + +type op int + +const ( + opPara op = iota + opHead + opPre +) + +type block struct { + op op + lines []string +} + +var nonAlphaNumRx = regexp.MustCompile(`[^a-zA-Z0-9]`) + +func anchorID(line string) string { + return nonAlphaNumRx.ReplaceAllString(line, "_") +} + +// ToHTML converts comment text to formatted HTML. 
// The comment was prepared by DocReader, // so it is known not to have leading, trailing blank lines // nor to have trailing spaces at the end of lines. @@ -276,6 +240,7 @@ func unindent(block [][]byte) { // // Turn each run of multiple \n into </p><p>. // Turn each run of indented lines into a <pre> block without indent. +// Enclose headings with header tags. // // URLs in the comment text are converted into links; if the URL also appears // in the words map, the link is taken from the map (if the corresponding map @@ -284,23 +249,57 @@ func unindent(block [][]byte) { // Go identifiers that appear in the words map are italicized; if the corresponding // map value is not the empty string, it is considered a URL and the word is converted // into a link. -func ToHTML(w io.Writer, s []byte, words map[string]string) { - inpara := false - - close := func() { - if inpara { +func ToHTML(w io.Writer, text string, words map[string]string) { + for _, b := range blocks(text) { + switch b.op { + case opPara: + w.Write(html_p) + for _, line := range b.lines { + emphasize(w, line, words, true) + } w.Write(html_endp) - inpara = false + case opHead: + w.Write(html_h) + id := "" + for _, line := range b.lines { + if id == "" { + id = anchorID(line) + w.Write([]byte(id)) + w.Write(html_hq) + } + commentEscape(w, line, true) + } + if id == "" { + w.Write(html_hq) + } + w.Write(html_endh) + case opPre: + w.Write(html_pre) + for _, line := range b.lines { + emphasize(w, line, nil, false) + } + w.Write(html_endpre) } } - open := func() { - if !inpara { - w.Write(html_p) - inpara = true +} + +func blocks(text string) []block { + var ( + out []block + para []string + + lastWasBlank = false + lastWasHeading = false + ) + + close := func() { + if para != nil { + out = append(out, block{opPara, para}) + para = nil } } - lines := split(s) + lines := strings.SplitAfter(text, "\n") unindent(lines) for i := 0; i < len(lines); { line := lines[i] @@ -308,6 +307,7 @@ func ToHTML(w io.Writer, s []byte, words map[string]string) { // close paragraph close() i++ + lastWasBlank = true continue } if indentLen(line) > 0 { @@ -323,23 +323,119 @@ func ToHTML(w io.Writer, s []byte, words map[string]string) { for j > i && isBlank(lines[j-1]) { j-- } - block := lines[i:j] + pre := lines[i:j] i = j - unindent(block) + unindent(pre) // put those lines in a pre block - w.Write(html_pre) - for _, line := range block { - emphasize(w, line, nil, false) // no nice text formatting - } - w.Write(html_endpre) + out = append(out, block{opPre, pre}) + lastWasHeading = false continue } + + if lastWasBlank && !lastWasHeading && i+2 < len(lines) && + isBlank(lines[i+1]) && !isBlank(lines[i+2]) && indentLen(lines[i+2]) == 0 { + // current line is non-blank, sourounded by blank lines + // and the next non-blank line is not indented: this + // might be a heading. + if head := heading(line); head != "" { + close() + out = append(out, block{opHead, []string{head}}) + i += 2 + lastWasHeading = true + continue + } + } + // open paragraph - open() - emphasize(w, lines[i], words, true) // nice text formatting + lastWasBlank = false + lastWasHeading = false + para = append(para, lines[i]) i++ } close() + + return out +} + +// ToText prepares comment text for presentation in textual output. +// It wraps paragraphs of text to width or fewer Unicode code points +// and then prefixes each line with the indent. In preformatted sections +// (such as program text), it prefixes each non-blank line with preIndent. 
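ToHTML (above) and ToText (next hunk) are the two public entry points after this change; both now take the comment text as a string. A short usage sketch against the signatures shown in this patch, ToHTML(w, text, words) and ToText(w, text, indent, preIndent, width):

package main

import (
	"go/doc"
	"os"
)

func main() {
	// A comment as the parser would hand it to go/doc: a paragraph,
	// a one-line section heading, and an indented block.
	comment := "Package demo does things.\n" +
		"\n" +
		"Usage\n" +
		"\n" +
		"Call Run to start. Indented lines become preformatted text:\n" +
		"\n" +
		"\tdemo.Run()\n"

	// HTML rendering: "Usage" should be detected by the new heading
	// heuristic and wrapped in <h3>, the indented line in <pre>.
	doc.ToHTML(os.Stdout, comment, nil)

	// Plain-text rendering: wrap paragraphs at 40 columns, indent body
	// text with one tab and preformatted lines with two tabs.
	doc.ToText(os.Stdout, comment, "\t", "\t\t", 40)
}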
+func ToText(w io.Writer, text string, indent, preIndent string, width int) { + l := lineWrapper{ + out: w, + width: width, + indent: indent, + } + for _, b := range blocks(text) { + switch b.op { + case opPara: + // l.write will add leading newline if required + for _, line := range b.lines { + l.write(line) + } + l.flush() + case opHead: + w.Write(nl) + for _, line := range b.lines { + l.write(line + "\n") + } + l.flush() + case opPre: + w.Write(nl) + for _, line := range b.lines { + if !isBlank(line) { + w.Write([]byte(preIndent)) + w.Write([]byte(line)) + } + } + } + } +} + +type lineWrapper struct { + out io.Writer + printed bool + width int + indent string + n int + pendSpace int +} + +var nl = []byte("\n") +var space = []byte(" ") + +func (l *lineWrapper) write(text string) { + if l.n == 0 && l.printed { + l.out.Write(nl) // blank line before new paragraph + } + l.printed = true + + for _, f := range strings.Fields(text) { + w := utf8.RuneCountInString(f) + // wrap if line is too long + if l.n > 0 && l.n+l.pendSpace+w > l.width { + l.out.Write(nl) + l.n = 0 + l.pendSpace = 0 + } + if l.n == 0 { + l.out.Write([]byte(l.indent)) + } + l.out.Write(space[:l.pendSpace]) + l.out.Write([]byte(f)) + l.n += l.pendSpace + w + l.pendSpace = 1 + } +} + +func (l *lineWrapper) flush() { + if l.n == 0 { + return + } + l.out.Write(nl) + l.pendSpace = 0 + l.n = 0 } diff --git a/src/pkg/go/doc/comment_test.go b/src/pkg/go/doc/comment_test.go new file mode 100644 index 000000000..e8d7f2e4b --- /dev/null +++ b/src/pkg/go/doc/comment_test.go @@ -0,0 +1,83 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package doc + +import ( + "reflect" + "testing" +) + +var headingTests = []struct { + line string + ok bool +}{ + {"Section", true}, + {"A typical usage", true}, + {"ΔΛΞ is Greek", true}, + {"Foo 42", true}, + {"", false}, + {"section", false}, + {"A typical usage:", false}, + {"This code:", false}, + {"δ is Greek", false}, + {"Foo §", false}, + {"Fermat's Last Sentence", true}, + {"Fermat's", true}, + {"'sX", false}, + {"Ted 'Too' Bar", false}, + {"Use n+m", false}, + {"Scanning:", false}, + {"N:M", false}, +} + +func TestIsHeading(t *testing.T) { + for _, tt := range headingTests { + if h := heading(tt.line); (len(h) > 0) != tt.ok { + t.Errorf("isHeading(%q) = %v, want %v", tt.line, h, tt.ok) + } + } +} + +var blocksTests = []struct { + in string + out []block +}{ + { + in: `Para 1. +Para 1 line 2. + +Para 2. + +Section + +Para 3. + + pre + pre1 + +Para 4. + pre + pre2 +`, + out: []block{ + {opPara, []string{"Para 1.\n", "Para 1 line 2.\n"}}, + {opPara, []string{"Para 2.\n"}}, + {opHead, []string{"Section"}}, + {opPara, []string{"Para 3.\n"}}, + {opPre, []string{"pre\n", "pre1\n"}}, + {opPara, []string{"Para 4.\n"}}, + {opPre, []string{"pre\n", "pre2\n"}}, + }, + }, +} + +func TestBlocks(t *testing.T) { + for i, tt := range blocksTests { + b := blocks(tt.in) + if !reflect.DeepEqual(b, tt.out) { + t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, b, tt.out) + } + } +} diff --git a/src/pkg/go/doc/doc.go b/src/pkg/go/doc/doc.go index c7fed9784..d4aae8ff0 100644 --- a/src/pkg/go/doc/doc.go +++ b/src/pkg/go/doc/doc.go @@ -8,634 +8,86 @@ package doc import ( "go/ast" "go/token" - "regexp" - "sort" ) -// ---------------------------------------------------------------------------- +// Package is the documentation for an entire package. 
+type Package struct { + Doc string + Name string + ImportPath string + Imports []string + Filenames []string + Bugs []string -type typeDoc struct { - // len(decl.Specs) == 1, and the element type is *ast.TypeSpec - // if the type declaration hasn't been seen yet, decl is nil - decl *ast.GenDecl - // values, factory functions, and methods associated with the type - values []*ast.GenDecl // consts and vars - factories map[string]*ast.FuncDecl - methods map[string]*ast.FuncDecl + // declarations + Consts []*Value + Types []*Type + Vars []*Value + Funcs []*Func } -// docReader accumulates documentation for a single package. -// It modifies the AST: Comments (declaration documentation) -// that have been collected by the DocReader are set to nil -// in the respective AST nodes so that they are not printed -// twice (once when printing the documentation and once when -// printing the corresponding AST node). -// -type docReader struct { - doc *ast.CommentGroup // package documentation, if any - pkgName string - values []*ast.GenDecl // consts and vars - types map[string]*typeDoc - funcs map[string]*ast.FuncDecl - bugs []*ast.CommentGroup -} - -func (doc *docReader) init(pkgName string) { - doc.pkgName = pkgName - doc.types = make(map[string]*typeDoc) - doc.funcs = make(map[string]*ast.FuncDecl) -} - -func (doc *docReader) addDoc(comments *ast.CommentGroup) { - if doc.doc == nil { - // common case: just one package comment - doc.doc = comments - return - } - - // More than one package comment: Usually there will be only - // one file with a package comment, but it's better to collect - // all comments than drop them on the floor. - // (This code isn't particularly clever - no amortized doubling is - // used - but this situation occurs rarely and is not time-critical.) - n1 := len(doc.doc.List) - n2 := len(comments.List) - list := make([]*ast.Comment, n1+1+n2) // + 1 for separator line - copy(list, doc.doc.List) - list[n1] = &ast.Comment{token.NoPos, "//"} // separator line - copy(list[n1+1:], comments.List) - doc.doc = &ast.CommentGroup{list} -} - -func (doc *docReader) addType(decl *ast.GenDecl) { - spec := decl.Specs[0].(*ast.TypeSpec) - typ := doc.lookupTypeDoc(spec.Name.Name) - // typ should always be != nil since declared types - // are always named - be conservative and check - if typ != nil { - // a type should be added at most once, so typ.decl - // should be nil - if it isn't, simply overwrite it - typ.decl = decl - } -} - -func (doc *docReader) lookupTypeDoc(name string) *typeDoc { - if name == "" { - return nil // no type docs for anonymous types - } - if tdoc, found := doc.types[name]; found { - return tdoc - } - // type wasn't found - add one without declaration - tdoc := &typeDoc{nil, nil, make(map[string]*ast.FuncDecl), make(map[string]*ast.FuncDecl)} - doc.types[name] = tdoc - return tdoc -} - -func baseTypeName(typ ast.Expr) string { - switch t := typ.(type) { - case *ast.Ident: - // if the type is not exported, the effect to - // a client is as if there were no type name - if t.IsExported() { - return t.Name - } - case *ast.StarExpr: - return baseTypeName(t.X) - } - return "" -} - -func (doc *docReader) addValue(decl *ast.GenDecl) { - // determine if decl should be associated with a type - // Heuristic: For each typed entry, determine the type name, if any. - // If there is exactly one type name that is sufficiently - // frequent, associate the decl with the respective type. 
- domName := "" - domFreq := 0 - prev := "" - for _, s := range decl.Specs { - if v, ok := s.(*ast.ValueSpec); ok { - name := "" - switch { - case v.Type != nil: - // a type is present; determine its name - name = baseTypeName(v.Type) - case decl.Tok == token.CONST: - // no type is present but we have a constant declaration; - // use the previous type name (w/o more type information - // we cannot handle the case of unnamed variables with - // initializer expressions except for some trivial cases) - name = prev - } - if name != "" { - // entry has a named type - if domName != "" && domName != name { - // more than one type name - do not associate - // with any type - domName = "" - break - } - domName = name - domFreq++ - } - prev = name - } - } - - // determine values list - const threshold = 0.75 - values := &doc.values - if domName != "" && domFreq >= int(float64(len(decl.Specs))*threshold) { - // typed entries are sufficiently frequent - typ := doc.lookupTypeDoc(domName) - if typ != nil { - values = &typ.values // associate with that type - } - } - - *values = append(*values, decl) -} - -// Helper function to set the table entry for function f. Makes sure that -// at least one f with associated documentation is stored in table, if there -// are multiple f's with the same name. -func setFunc(table map[string]*ast.FuncDecl, f *ast.FuncDecl) { - name := f.Name.Name - if g, exists := table[name]; exists && g.Doc != nil { - // a function with the same name has already been registered; - // since it has documentation, assume f is simply another - // implementation and ignore it - // TODO(gri) consider collecting all functions, or at least - // all comments - return - } - // function doesn't exist or has no documentation; use f - table[name] = f -} - -func (doc *docReader) addFunc(fun *ast.FuncDecl) { - name := fun.Name.Name - - // determine if it should be associated with a type - if fun.Recv != nil { - // method - typ := doc.lookupTypeDoc(baseTypeName(fun.Recv.List[0].Type)) - if typ != nil { - // exported receiver type - setFunc(typ.methods, fun) - } - // otherwise don't show the method - // TODO(gri): There may be exported methods of non-exported types - // that can be called because of exported values (consts, vars, or - // function results) of that type. Could determine if that is the - // case and then show those methods in an appropriate section. - return - } - - // perhaps a factory function - // determine result type, if any - if fun.Type.Results.NumFields() >= 1 { - res := fun.Type.Results.List[0] - if len(res.Names) <= 1 { - // exactly one (named or anonymous) result associated - // with the first type in result signature (there may - // be more than one result) - tname := baseTypeName(res.Type) - typ := doc.lookupTypeDoc(tname) - if typ != nil { - // named and exported result type - - // Work-around for failure of heuristic: In package os - // too many functions are considered factory functions - // for the Error type. Eliminate manually for now as - // this appears to be the only important case in the - // current library where the heuristic fails. 
- if doc.pkgName == "os" && tname == "Error" && - name != "NewError" && name != "NewSyscallError" { - // not a factory function for os.Error - setFunc(doc.funcs, fun) // treat as ordinary function - return - } - - setFunc(typ.factories, fun) - return - } - } - } - - // ordinary function - setFunc(doc.funcs, fun) -} - -func (doc *docReader) addDecl(decl ast.Decl) { - switch d := decl.(type) { - case *ast.GenDecl: - if len(d.Specs) > 0 { - switch d.Tok { - case token.CONST, token.VAR: - // constants and variables are always handled as a group - doc.addValue(d) - case token.TYPE: - // types are handled individually - for _, spec := range d.Specs { - // make a (fake) GenDecl node for this TypeSpec - // (we need to do this here - as opposed to just - // for printing - so we don't lose the GenDecl - // documentation) - // - // TODO(gri): Consider just collecting the TypeSpec - // node (and copy in the GenDecl.doc if there is no - // doc in the TypeSpec - this is currently done in - // makeTypeDocs below). Simpler data structures, but - // would lose GenDecl documentation if the TypeSpec - // has documentation as well. - doc.addType(&ast.GenDecl{d.Doc, d.Pos(), token.TYPE, token.NoPos, []ast.Spec{spec}, token.NoPos}) - // A new GenDecl node is created, no need to nil out d.Doc. - } - } - } - case *ast.FuncDecl: - doc.addFunc(d) - } -} - -func copyCommentList(list []*ast.Comment) []*ast.Comment { - return append([]*ast.Comment(nil), list...) -} - -var ( - bug_markers = regexp.MustCompile("^/[/*][ \t]*BUG\\(.*\\):[ \t]*") // BUG(uid): - bug_content = regexp.MustCompile("[^ \n\r\t]+") // at least one non-whitespace char -) - -// addFile adds the AST for a source file to the docReader. -// Adding the same AST multiple times is a no-op. -// -func (doc *docReader) addFile(src *ast.File) { - // add package documentation - if src.Doc != nil { - doc.addDoc(src.Doc) - src.Doc = nil // doc consumed - remove from ast.File node - } - - // add all declarations - for _, decl := range src.Decls { - doc.addDecl(decl) - } - - // collect BUG(...) comments - for _, c := range src.Comments { - text := c.List[0].Text - if m := bug_markers.FindStringIndex(text); m != nil { - // found a BUG comment; maybe empty - if btxt := text[m[1]:]; bug_content.MatchString(btxt) { - // non-empty BUG comment; collect comment without BUG prefix - list := copyCommentList(c.List) - list[0].Text = text[m[1]:] - doc.bugs = append(doc.bugs, &ast.CommentGroup{list}) - } - } - } - src.Comments = nil // consumed unassociated comments - remove from ast.File node -} - -func NewFileDoc(file *ast.File) *PackageDoc { - var r docReader - r.init(file.Name.Name) - r.addFile(file) - return r.newDoc("", nil) -} - -func NewPackageDoc(pkg *ast.Package, importpath string) *PackageDoc { - var r docReader - r.init(pkg.Name) - filenames := make([]string, len(pkg.Files)) - i := 0 - for filename, f := range pkg.Files { - r.addFile(f) - filenames[i] = filename - i++ - } - return r.newDoc(importpath, filenames) -} - -// ---------------------------------------------------------------------------- -// Conversion to external representation - -// ValueDoc is the documentation for a group of declared -// values, either vars or consts. -// -type ValueDoc struct { +// Value is the documentation for a (possibly grouped) var or const declaration. 
+type Value struct { Doc string + Names []string // var or const names in declaration order Decl *ast.GenDecl - order int -} - -type sortValueDoc []*ValueDoc - -func (p sortValueDoc) Len() int { return len(p) } -func (p sortValueDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func declName(d *ast.GenDecl) string { - if len(d.Specs) != 1 { - return "" - } - - switch v := d.Specs[0].(type) { - case *ast.ValueSpec: - return v.Names[0].Name - case *ast.TypeSpec: - return v.Name.Name - } - - return "" + order int } -func (p sortValueDoc) Less(i, j int) bool { - // sort by name - // pull blocks (name = "") up to top - // in original order - if ni, nj := declName(p[i].Decl), declName(p[j].Decl); ni != nj { - return ni < nj - } - return p[i].order < p[j].order -} +// Type is the documentation for a type declaration. +type Type struct { + Doc string + Name string + Decl *ast.GenDecl -func makeValueDocs(list []*ast.GenDecl, tok token.Token) []*ValueDoc { - d := make([]*ValueDoc, len(list)) // big enough in any case - n := 0 - for i, decl := range list { - if decl.Tok == tok { - d[n] = &ValueDoc{CommentText(decl.Doc), decl, i} - n++ - decl.Doc = nil // doc consumed - removed from AST - } - } - d = d[0:n] - sort.Sort(sortValueDoc(d)) - return d + // associated declarations + Consts []*Value // sorted list of constants of (mostly) this type + Vars []*Value // sorted list of variables of (mostly) this type + Funcs []*Func // sorted list of functions returning this type + Methods []*Func // sorted list of methods (including embedded ones) of this type } -// FuncDoc is the documentation for a func declaration, -// either a top-level function or a method function. -// -type FuncDoc struct { +// Func is the documentation for a func declaration. +type Func struct { Doc string - Recv ast.Expr // TODO(rsc): Would like string here Name string Decl *ast.FuncDecl -} - -type sortFuncDoc []*FuncDoc - -func (p sortFuncDoc) Len() int { return len(p) } -func (p sortFuncDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p sortFuncDoc) Less(i, j int) bool { return p[i].Name < p[j].Name } -func makeFuncDocs(m map[string]*ast.FuncDecl) []*FuncDoc { - d := make([]*FuncDoc, len(m)) - i := 0 - for _, f := range m { - doc := new(FuncDoc) - doc.Doc = CommentText(f.Doc) - f.Doc = nil // doc consumed - remove from ast.FuncDecl node - if f.Recv != nil { - doc.Recv = f.Recv.List[0].Type - } - doc.Name = f.Name.Name - doc.Decl = f - d[i] = doc - i++ - } - sort.Sort(sortFuncDoc(d)) - return d -} - -// TypeDoc is the documentation for a declared type. -// Consts and Vars are sorted lists of constants and variables of (mostly) that type. -// Factories is a sorted list of factory functions that return that type. -// Methods is a sorted list of method functions on that type. -type TypeDoc struct { - Doc string - Type *ast.TypeSpec - Consts []*ValueDoc - Vars []*ValueDoc - Factories []*FuncDoc - Methods []*FuncDoc - Decl *ast.GenDecl - order int + // methods + // (for functions, these fields have the respective zero value) + Recv string // actual receiver "T" or "*T" + Orig string // original receiver "T" or "*T" + Level int // embedding level; 0 means not embedded } -type sortTypeDoc []*TypeDoc +// Mode values control the operation of New. 
+type Mode int -func (p sortTypeDoc) Len() int { return len(p) } -func (p sortTypeDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p sortTypeDoc) Less(i, j int) bool { - // sort by name - // pull blocks (name = "") up to top - // in original order - if ni, nj := p[i].Type.Name.Name, p[j].Type.Name.Name; ni != nj { - return ni < nj - } - return p[i].order < p[j].order -} - -// NOTE(rsc): This would appear not to be correct for type ( ) -// blocks, but the doc extractor above has split them into -// individual declarations. -func (doc *docReader) makeTypeDocs(m map[string]*typeDoc) []*TypeDoc { - d := make([]*TypeDoc, len(m)) - i := 0 - for _, old := range m { - // all typeDocs should have a declaration associated with - // them after processing an entire package - be conservative - // and check - if decl := old.decl; decl != nil { - typespec := decl.Specs[0].(*ast.TypeSpec) - t := new(TypeDoc) - doc := typespec.Doc - typespec.Doc = nil // doc consumed - remove from ast.TypeSpec node - if doc == nil { - // no doc associated with the spec, use the declaration doc, if any - doc = decl.Doc - } - decl.Doc = nil // doc consumed - remove from ast.Decl node - t.Doc = CommentText(doc) - t.Type = typespec - t.Consts = makeValueDocs(old.values, token.CONST) - t.Vars = makeValueDocs(old.values, token.VAR) - t.Factories = makeFuncDocs(old.factories) - t.Methods = makeFuncDocs(old.methods) - t.Decl = old.decl - t.order = i - d[i] = t - i++ - } else { - // no corresponding type declaration found - move any associated - // values, factory functions, and methods back to the top-level - // so that they are not lost (this should only happen if a package - // file containing the explicit type declaration is missing or if - // an unqualified type name was used after a "." import) - // 1) move values - doc.values = append(doc.values, old.values...) - // 2) move factory functions - for name, f := range old.factories { - doc.funcs[name] = f - } - // 3) move methods - for name, f := range old.methods { - // don't overwrite functions with the same name - if _, found := doc.funcs[name]; !found { - doc.funcs[name] = f - } - } - } - } - d = d[0:i] // some types may have been ignored - sort.Sort(sortTypeDoc(d)) - return d -} - -func makeBugDocs(list []*ast.CommentGroup) []string { - d := make([]string, len(list)) - for i, g := range list { - d[i] = CommentText(g) - } - return d -} - -// PackageDoc is the documentation for an entire package. -// -type PackageDoc struct { - PackageName string - ImportPath string - Filenames []string - Doc string - Consts []*ValueDoc - Types []*TypeDoc - Vars []*ValueDoc - Funcs []*FuncDoc - Bugs []string -} +const ( + // extract documentation for all package-level declarations, + // not just exported ones + AllDecls Mode = 1 << iota +) -// newDoc returns the accumulated documentation for the package. +// New computes the package documentation for the given package AST. +// New takes ownership of the AST pkg and may edit or overwrite it. 
// -func (doc *docReader) newDoc(importpath string, filenames []string) *PackageDoc { - p := new(PackageDoc) - p.PackageName = doc.pkgName - p.ImportPath = importpath - sort.Strings(filenames) - p.Filenames = filenames - p.Doc = CommentText(doc.doc) - // makeTypeDocs may extend the list of doc.values and - // doc.funcs and thus must be called before any other - // function consuming those lists - p.Types = doc.makeTypeDocs(doc.types) - p.Consts = makeValueDocs(doc.values, token.CONST) - p.Vars = makeValueDocs(doc.values, token.VAR) - p.Funcs = makeFuncDocs(doc.funcs) - p.Bugs = makeBugDocs(doc.bugs) - return p -} - -// ---------------------------------------------------------------------------- -// Filtering by name - -type Filter func(string) bool - -func matchFields(fields *ast.FieldList, f Filter) bool { - if fields != nil { - for _, field := range fields.List { - for _, name := range field.Names { - if f(name.Name) { - return true - } - } - } +func New(pkg *ast.Package, importPath string, mode Mode) *Package { + var r reader + r.readPackage(pkg, mode) + r.computeMethodSets() + r.cleanupTypes() + return &Package{ + Doc: r.doc, + Name: pkg.Name, + ImportPath: importPath, + Imports: sortedKeys(r.imports), + Filenames: r.filenames, + Bugs: r.bugs, + Consts: sortedValues(r.values, token.CONST), + Types: sortedTypes(r.types), + Vars: sortedValues(r.values, token.VAR), + Funcs: sortedFuncs(r.funcs), } - return false -} - -func matchDecl(d *ast.GenDecl, f Filter) bool { - for _, d := range d.Specs { - switch v := d.(type) { - case *ast.ValueSpec: - for _, name := range v.Names { - if f(name.Name) { - return true - } - } - case *ast.TypeSpec: - if f(v.Name.Name) { - return true - } - switch t := v.Type.(type) { - case *ast.StructType: - if matchFields(t.Fields, f) { - return true - } - case *ast.InterfaceType: - if matchFields(t.Methods, f) { - return true - } - } - } - } - return false -} - -func filterValueDocs(a []*ValueDoc, f Filter) []*ValueDoc { - w := 0 - for _, vd := range a { - if matchDecl(vd.Decl, f) { - a[w] = vd - w++ - } - } - return a[0:w] -} - -func filterFuncDocs(a []*FuncDoc, f Filter) []*FuncDoc { - w := 0 - for _, fd := range a { - if f(fd.Name) { - a[w] = fd - w++ - } - } - return a[0:w] -} - -func filterTypeDocs(a []*TypeDoc, f Filter) []*TypeDoc { - w := 0 - for _, td := range a { - n := 0 // number of matches - if matchDecl(td.Decl, f) { - n = 1 - } else { - // type name doesn't match, but we may have matching consts, vars, factories or methods - td.Consts = filterValueDocs(td.Consts, f) - td.Vars = filterValueDocs(td.Vars, f) - td.Factories = filterFuncDocs(td.Factories, f) - td.Methods = filterFuncDocs(td.Methods, f) - n += len(td.Consts) + len(td.Vars) + len(td.Factories) + len(td.Methods) - } - if n > 0 { - a[w] = td - w++ - } - } - return a[0:w] -} - -// Filter eliminates documentation for names that don't pass through the filter f. -// TODO: Recognize "Type.Method" as a name. -// -func (p *PackageDoc) Filter(f Filter) { - p.Consts = filterValueDocs(p.Consts, f) - p.Vars = filterValueDocs(p.Vars, f) - p.Types = filterTypeDocs(p.Types, f) - p.Funcs = filterFuncDocs(p.Funcs, f) - p.Doc = "" // don't show top-level package doc } diff --git a/src/pkg/go/doc/doc_test.go b/src/pkg/go/doc/doc_test.go new file mode 100644 index 000000000..d9ffe47b6 --- /dev/null +++ b/src/pkg/go/doc/doc_test.go @@ -0,0 +1,121 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
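doc_test.go (next) exercises the new entry point through golden files. For orientation, a hedged sketch of using the API directly: parse a directory with go/parser, hand one *ast.Package to doc.New, and walk the resulting Package. The directory and import path below are placeholders:

package main

import (
	"fmt"
	"go/doc"
	"go/parser"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	// "." is a placeholder; point this at any package directory.
	pkgs, err := parser.ParseDir(fset, ".", nil, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	for _, pkg := range pkgs {
		// New takes ownership of the AST; pass doc.AllDecls instead of 0
		// to keep unexported declarations as well.
		p := doc.New(pkg, "example/path", 0)
		fmt.Println("package", p.Name)
		for _, t := range p.Types {
			fmt.Println("  type", t.Name)
			for _, m := range t.Methods {
				fmt.Println("    method", m.Recv, m.Name)
			}
		}
		for _, f := range p.Funcs {
			fmt.Println("  func", f.Name)
		}
	}
}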
+ +package doc + +import ( + "bytes" + "flag" + "fmt" + "go/parser" + "go/printer" + "go/token" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "text/template" +) + +var update = flag.Bool("update", false, "update golden (.out) files") + +const dataDir = "testdata" + +var templateTxt = readTemplate("template.txt") + +func readTemplate(filename string) *template.Template { + t := template.New(filename) + t.Funcs(template.FuncMap{ + "node": nodeFmt, + "synopsis": synopsisFmt, + }) + return template.Must(t.ParseFiles(filepath.Join(dataDir, filename))) +} + +func nodeFmt(node interface{}, fset *token.FileSet) string { + var buf bytes.Buffer + printer.Fprint(&buf, fset, node) + return strings.Replace(strings.TrimSpace(buf.String()), "\n", "\n\t", -1) +} + +func synopsisFmt(s string) string { + const n = 64 + if len(s) > n { + // cut off excess text and go back to a word boundary + s = s[0:n] + if i := strings.LastIndexAny(s, "\t\n "); i >= 0 { + s = s[0:i] + } + s = strings.TrimSpace(s) + " ..." + } + return "// " + strings.Replace(s, "\n", " ", -1) +} + +func isGoFile(fi os.FileInfo) bool { + name := fi.Name() + return !fi.IsDir() && + len(name) > 0 && name[0] != '.' && // ignore .files + filepath.Ext(name) == ".go" +} + +type bundle struct { + *Package + FSet *token.FileSet +} + +func test(t *testing.T, mode Mode) { + // get all packages + fset := token.NewFileSet() + pkgs, err := parser.ParseDir(fset, dataDir, isGoFile, parser.ParseComments) + if err != nil { + t.Fatal(err) + } + + // test all packages + for _, pkg := range pkgs { + importpath := dataDir + "/" + pkg.Name + doc := New(pkg, importpath, mode) + + // golden files always use / in filenames - canonicalize them + for i, filename := range doc.Filenames { + doc.Filenames[i] = filepath.ToSlash(filename) + } + + // print documentation + var buf bytes.Buffer + if err := templateTxt.Execute(&buf, bundle{doc, fset}); err != nil { + t.Error(err) + continue + } + got := buf.Bytes() + + // update golden file if necessary + golden := filepath.Join(dataDir, fmt.Sprintf("%s.%d.golden", pkg.Name, mode)) + if *update { + err := ioutil.WriteFile(golden, got, 0644) + if err != nil { + t.Error(err) + } + continue + } + + // get golden file + want, err := ioutil.ReadFile(golden) + if err != nil { + t.Error(err) + continue + } + + // compare + if bytes.Compare(got, want) != 0 { + t.Errorf("package %s\n\tgot:\n%s\n\twant:\n%s", pkg.Name, got, want) + } + } +} + +func Test(t *testing.T) { + test(t, 0) + test(t, AllDecls) +} diff --git a/src/pkg/go/doc/example.go b/src/pkg/go/doc/example.go new file mode 100644 index 000000000..7c59bf9bd --- /dev/null +++ b/src/pkg/go/doc/example.go @@ -0,0 +1,57 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Extract example functions from package ASTs. 
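example.go (below) collects ExampleXxx functions from a package's ASTs. A hedged sketch of consuming that, written against the Example shape defined in this patch (Body is a *printer.CommentedNode here; later Go releases changed this API); the directory is a placeholder:

package main

import (
	"fmt"
	"go/doc"
	"go/parser"
	"go/printer"
	"go/token"
	"os"
)

func main() {
	fset := token.NewFileSet()
	// Example functions normally live in _test.go files, so parse the
	// whole directory (nil filter) with comments; "." is a placeholder.
	pkgs, err := parser.ParseDir(fset, ".", nil, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	for _, pkg := range pkgs {
		for _, ex := range doc.Examples(pkg) {
			fmt.Printf("Example%s (expected output %q):\n", ex.Name, ex.Output)
			printer.Fprint(os.Stdout, fset, ex.Body) // prints the body with comments
			fmt.Println()
		}
	}
}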
+ +package doc + +import ( + "go/ast" + "go/printer" + "strings" + "unicode" + "unicode/utf8" +) + +type Example struct { + Name string // name of the item being demonstrated + Body *printer.CommentedNode // code + Output string // expected output +} + +func Examples(pkg *ast.Package) []*Example { + var examples []*Example + for _, src := range pkg.Files { + for _, decl := range src.Decls { + f, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + name := f.Name.Name + if !isTest(name, "Example") { + continue + } + examples = append(examples, &Example{ + Name: name[len("Example"):], + Body: &printer.CommentedNode{f.Body, src.Comments}, + Output: f.Doc.Text(), + }) + } + } + return examples +} + +// isTest tells whether name looks like a test, example, or benchmark. +// It is a Test (say) if there is a character after Test that is not a +// lower-case letter. (We don't want Testiness.) +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} diff --git a/src/pkg/go/doc/exports.go b/src/pkg/go/doc/exports.go new file mode 100644 index 000000000..e6f58ccb3 --- /dev/null +++ b/src/pkg/go/doc/exports.go @@ -0,0 +1,169 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements export filtering of an AST. + +package doc + +import "go/ast" + +// filterIdentList removes unexported names from list in place +// and returns the resulting list. +// +func filterIdentList(list []*ast.Ident) []*ast.Ident { + j := 0 + for _, x := range list { + if ast.IsExported(x.Name) { + list[j] = x + j++ + } + } + return list[0:j] +} + +// filterFieldList removes unexported fields (field names) from the field list +// in place and returns true if fields were removed. Removed fields that are +// anonymous (embedded) fields are added as embedded types to base. filterType +// is called with the types of all remaining fields. +// +func (r *reader) filterFieldList(base *baseType, fields *ast.FieldList) (removedFields bool) { + if fields == nil { + return + } + list := fields.List + j := 0 + for _, field := range list { + keepField := false + if n := len(field.Names); n == 0 { + // anonymous field + name, imp := baseTypeName(field.Type) + if ast.IsExported(name) { + // we keep the field - in this case r.readDecl + // will take care of adding the embedded type + keepField = true + } else if base != nil && !imp { + // we don't keep the field - add it as an embedded + // type so we won't loose its methods, if any + if embedded := r.lookupType(name); embedded != nil { + _, ptr := field.Type.(*ast.StarExpr) + base.addEmbeddedType(embedded, ptr) + } + } + } else { + field.Names = filterIdentList(field.Names) + if len(field.Names) < n { + removedFields = true + } + if len(field.Names) > 0 { + keepField = true + } + } + if keepField { + r.filterType(nil, field.Type) + list[j] = field + j++ + } + } + if j < len(list) { + removedFields = true + } + fields.List = list[0:j] + return +} + +// filterParamList applies filterType to each parameter type in fields. +// +func (r *reader) filterParamList(fields *ast.FieldList) { + if fields != nil { + for _, f := range fields.List { + r.filterType(nil, f.Type) + } + } +} + +// filterType strips any unexported struct fields or method types from typ +// in place. 
If fields (or methods) have been removed, the corresponding +// struct or interface type has the Incomplete field set to true. +// +func (r *reader) filterType(base *baseType, typ ast.Expr) { + switch t := typ.(type) { + case *ast.Ident: + // nothing to do + case *ast.ParenExpr: + r.filterType(nil, t.X) + case *ast.ArrayType: + r.filterType(nil, t.Elt) + case *ast.StructType: + if r.filterFieldList(base, t.Fields) { + t.Incomplete = true + } + case *ast.FuncType: + r.filterParamList(t.Params) + r.filterParamList(t.Results) + case *ast.InterfaceType: + if r.filterFieldList(base, t.Methods) { + t.Incomplete = true + } + case *ast.MapType: + r.filterType(nil, t.Key) + r.filterType(nil, t.Value) + case *ast.ChanType: + r.filterType(nil, t.Value) + } +} + +func (r *reader) filterSpec(spec ast.Spec) bool { + switch s := spec.(type) { + case *ast.ImportSpec: + // always keep imports so we can collect them + return true + case *ast.ValueSpec: + s.Names = filterIdentList(s.Names) + if len(s.Names) > 0 { + r.filterType(nil, s.Type) + return true + } + case *ast.TypeSpec: + if ast.IsExported(s.Name.Name) { + r.filterType(r.lookupType(s.Name.Name), s.Type) + return true + } + } + return false +} + +func (r *reader) filterSpecList(list []ast.Spec) []ast.Spec { + j := 0 + for _, s := range list { + if r.filterSpec(s) { + list[j] = s + j++ + } + } + return list[0:j] +} + +func (r *reader) filterDecl(decl ast.Decl) bool { + switch d := decl.(type) { + case *ast.GenDecl: + d.Specs = r.filterSpecList(d.Specs) + return len(d.Specs) > 0 + case *ast.FuncDecl: + return ast.IsExported(d.Name.Name) + } + return false +} + +// fileExports removes unexported declarations from src in place. +// +func (r *reader) fileExports(src *ast.File) { + j := 0 + for _, d := range src.Decls { + if r.filterDecl(d) { + src.Decls[j] = d + j++ + } + } + src.Decls = src.Decls[0:j] +} diff --git a/src/pkg/go/doc/filter.go b/src/pkg/go/doc/filter.go new file mode 100644 index 000000000..02b66ccef --- /dev/null +++ b/src/pkg/go/doc/filter.go @@ -0,0 +1,105 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
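The export filter above runs before the reader when AllDecls is not set; its visible effect is that unexported names disappear from the published declarations and partially filtered composites are flagged through the AST's Incomplete field. A hedged sketch of observing that; the type assertions and the expected output come from reading the patch, not from verified runs:

package main

import (
	"fmt"
	"go/ast"
	"go/doc"
	"go/parser"
	"go/token"
)

const src = `package demo

// Config has one exported and one unexported field.
type Config struct {
	Name   string
	secret string
}
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	pkg := &ast.Package{Name: "demo", Files: map[string]*ast.File{"demo.go": file}}
	p := doc.New(pkg, "example/demo", 0) // default mode: exported view

	st := p.Types[0].Decl.Specs[0].(*ast.TypeSpec).Type.(*ast.StructType)
	// After filtering, only Name remains and Incomplete is set so that
	// printers can render a "filtered or unexported fields" note.
	fmt.Println(len(st.Fields.List), st.Incomplete) // expected: 1 true
}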
+ +package doc + +import "go/ast" + +type Filter func(string) bool + +func matchFields(fields *ast.FieldList, f Filter) bool { + if fields != nil { + for _, field := range fields.List { + for _, name := range field.Names { + if f(name.Name) { + return true + } + } + } + } + return false +} + +func matchDecl(d *ast.GenDecl, f Filter) bool { + for _, d := range d.Specs { + switch v := d.(type) { + case *ast.ValueSpec: + for _, name := range v.Names { + if f(name.Name) { + return true + } + } + case *ast.TypeSpec: + if f(v.Name.Name) { + return true + } + switch t := v.Type.(type) { + case *ast.StructType: + if matchFields(t.Fields, f) { + return true + } + case *ast.InterfaceType: + if matchFields(t.Methods, f) { + return true + } + } + } + } + return false +} + +func filterValues(a []*Value, f Filter) []*Value { + w := 0 + for _, vd := range a { + if matchDecl(vd.Decl, f) { + a[w] = vd + w++ + } + } + return a[0:w] +} + +func filterFuncs(a []*Func, f Filter) []*Func { + w := 0 + for _, fd := range a { + if f(fd.Name) { + a[w] = fd + w++ + } + } + return a[0:w] +} + +func filterTypes(a []*Type, f Filter) []*Type { + w := 0 + for _, td := range a { + n := 0 // number of matches + if matchDecl(td.Decl, f) { + n = 1 + } else { + // type name doesn't match, but we may have matching consts, vars, factories or methods + td.Consts = filterValues(td.Consts, f) + td.Vars = filterValues(td.Vars, f) + td.Funcs = filterFuncs(td.Funcs, f) + td.Methods = filterFuncs(td.Methods, f) + n += len(td.Consts) + len(td.Vars) + len(td.Funcs) + len(td.Methods) + } + if n > 0 { + a[w] = td + w++ + } + } + return a[0:w] +} + +// Filter eliminates documentation for names that don't pass through the filter f. +// TODO: Recognize "Type.Method" as a name. +// +func (p *Package) Filter(f Filter) { + p.Consts = filterValues(p.Consts, f) + p.Vars = filterValues(p.Vars, f) + p.Types = filterTypes(p.Types, f) + p.Funcs = filterFuncs(p.Funcs, f) + p.Doc = "" // don't show top-level package doc +} diff --git a/src/pkg/go/doc/headscan.go b/src/pkg/go/doc/headscan.go new file mode 100644 index 000000000..37486b126 --- /dev/null +++ b/src/pkg/go/doc/headscan.go @@ -0,0 +1,111 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + The headscan command extracts comment headings from package files; + it is used to detect false positives which may require an adjustment + to the comment formatting heuristics in comment.go. + + Usage: headscan [-root root_directory] + + By default, the $GOROOT/src directory is scanned. 
+*/ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/doc" + "go/parser" + "go/token" + "os" + "path/filepath" + "runtime" + "strings" +) + +var ( + root = flag.String("root", filepath.Join(runtime.GOROOT(), "src"), "root of filesystem tree to scan") + verbose = flag.Bool("v", false, "verbose mode") +) + +const ( + html_h = "<h3>" + html_endh = "</h3>\n" +) + +func isGoFile(fi os.FileInfo) bool { + return strings.HasSuffix(fi.Name(), ".go") && + !strings.HasSuffix(fi.Name(), "_test.go") +} + +func appendHeadings(list []string, comment string) []string { + var buf bytes.Buffer + doc.ToHTML(&buf, comment, nil) + for s := buf.String(); ; { + i := strings.Index(s, html_h) + if i < 0 { + break + } + i += len(html_h) + j := strings.Index(s, html_endh) + if j < 0 { + list = append(list, s[i:]) // incorrect HTML + break + } + list = append(list, s[i:j]) + s = s[j+len(html_endh):] + } + return list +} + +func main() { + flag.Parse() + fset := token.NewFileSet() + nheadings := 0 + err := filepath.Walk(*root, func(path string, fi os.FileInfo, err error) error { + if !fi.IsDir() { + return nil + } + pkgs, err := parser.ParseDir(fset, path, isGoFile, parser.ParseComments) + if err != nil { + if *verbose { + fmt.Fprintln(os.Stderr, err) + } + return nil + } + for _, pkg := range pkgs { + d := doc.New(pkg, path, doc.Mode(0)) + list := appendHeadings(nil, d.Doc) + for _, d := range d.Consts { + list = appendHeadings(list, d.Doc) + } + for _, d := range d.Types { + list = appendHeadings(list, d.Doc) + } + for _, d := range d.Vars { + list = appendHeadings(list, d.Doc) + } + for _, d := range d.Funcs { + list = appendHeadings(list, d.Doc) + } + if len(list) > 0 { + // directories may contain multiple packages; + // print path and package name + fmt.Printf("%s (package %s)\n", path, pkg.Name) + for _, h := range list { + fmt.Printf("\t%s\n", h) + } + nheadings += len(list) + } + } + return nil + }) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Println(nheadings, "headings found") +} diff --git a/src/pkg/go/doc/reader.go b/src/pkg/go/doc/reader.go new file mode 100644 index 000000000..9c6f0816b --- /dev/null +++ b/src/pkg/go/doc/reader.go @@ -0,0 +1,734 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package doc + +import ( + "go/ast" + "go/token" + "regexp" + "sort" + "strconv" +) + +// ---------------------------------------------------------------------------- +// function/method sets +// +// Internally, we treat functions like methods and collect them in method sets. + +// methodSet describes a set of methods. Entries where Decl == nil are conflict +// entries (more then one method with the same name at the same embedding level). +// +type methodSet map[string]*Func + +// recvString returns a string representation of recv of the +// form "T", "*T", or "BADRECV" (if not a proper receiver type). +// +func recvString(recv ast.Expr) string { + switch t := recv.(type) { + case *ast.Ident: + return t.Name + case *ast.StarExpr: + return "*" + recvString(t.X) + } + return "BADRECV" +} + +// set creates the corresponding Func for f and adds it to mset. +// If there are multiple f's with the same name, set keeps the first +// one with documentation; conflicts are ignored. 
+// +func (mset methodSet) set(f *ast.FuncDecl) { + name := f.Name.Name + if g := mset[name]; g != nil && g.Doc != "" { + // A function with the same name has already been registered; + // since it has documentation, assume f is simply another + // implementation and ignore it. This does not happen if the + // caller is using go/build.ScanDir to determine the list of + // files implementing a package. + return + } + // function doesn't exist or has no documentation; use f + recv := "" + if f.Recv != nil { + var typ ast.Expr + // be careful in case of incorrect ASTs + if list := f.Recv.List; len(list) == 1 { + typ = list[0].Type + } + recv = recvString(typ) + } + mset[name] = &Func{ + Doc: f.Doc.Text(), + Name: name, + Decl: f, + Recv: recv, + Orig: recv, + } + f.Doc = nil // doc consumed - remove from AST +} + +// add adds method m to the method set; m is ignored if the method set +// already contains a method with the same name at the same or a higher +// level then m. +// +func (mset methodSet) add(m *Func) { + old := mset[m.Name] + if old == nil || m.Level < old.Level { + mset[m.Name] = m + return + } + if old != nil && m.Level == old.Level { + // conflict - mark it using a method with nil Decl + mset[m.Name] = &Func{ + Name: m.Name, + Level: m.Level, + } + } +} + +// ---------------------------------------------------------------------------- +// Base types + +// baseTypeName returns the name of the base type of x (or "") +// and whether the type is imported or not. +// +func baseTypeName(x ast.Expr) (name string, imported bool) { + switch t := x.(type) { + case *ast.Ident: + return t.Name, false + case *ast.SelectorExpr: + if _, ok := t.X.(*ast.Ident); ok { + // only possible for qualified type names; + // assume type is imported + return t.Sel.Name, true + } + case *ast.StarExpr: + return baseTypeName(t.X) + } + return +} + +// embeddedType describes the type of an anonymous field. +// +type embeddedType struct { + typ *baseType // the corresponding base type + ptr bool // if set, the anonymous field type is a pointer +} + +type baseType struct { + doc string // doc comment for type + name string // local type name (excluding package qualifier) + decl *ast.GenDecl // nil if declaration hasn't been seen yet + + // associated declarations + values []*Value // consts and vars + funcs methodSet + methods methodSet + + isEmbedded bool // true if this type is embedded + isStruct bool // true if this type is a struct + embedded []embeddedType // list of embedded types +} + +func (typ *baseType) addEmbeddedType(e *baseType, isPtr bool) { + e.isEmbedded = true + typ.embedded = append(typ.embedded, embeddedType{e, isPtr}) +} + +// ---------------------------------------------------------------------------- +// AST reader + +// reader accumulates documentation for a single package. +// It modifies the AST: Comments (declaration documentation) +// that have been collected by the reader are set to nil +// in the respective AST nodes so that they are not printed +// twice (once when printing the documentation and once when +// printing the corresponding AST node). +// +type reader struct { + mode Mode + + // package properties + doc string // package documentation, if any + filenames []string + bugs []string + + // declarations + imports map[string]int + values []*Value // consts and vars + types map[string]*baseType + funcs methodSet +} + +// isVisible reports whether name is visible in the documentation. 
+// +func (r *reader) isVisible(name string) bool { + return r.mode&AllDecls != 0 || ast.IsExported(name) +} + +// lookupType returns the base type with the given name. +// If the base type has not been encountered yet, a new +// type with the given name but no associated declaration +// is added to the type map. +// +func (r *reader) lookupType(name string) *baseType { + if name == "" || name == "_" { + return nil // no type docs for anonymous types + } + if typ, found := r.types[name]; found { + return typ + } + // type not found - add one without declaration + typ := &baseType{ + name: name, + funcs: make(methodSet), + methods: make(methodSet), + } + r.types[name] = typ + return typ +} + +func (r *reader) readDoc(comment *ast.CommentGroup) { + // By convention there should be only one package comment + // but collect all of them if there are more then one. + text := comment.Text() + if r.doc == "" { + r.doc = text + return + } + r.doc += "\n" + text +} + +func specNames(specs []ast.Spec) []string { + names := make([]string, 0, len(specs)) // reasonable estimate + for _, s := range specs { + // s guaranteed to be an *ast.ValueSpec by readValue + for _, ident := range s.(*ast.ValueSpec).Names { + names = append(names, ident.Name) + } + } + return names +} + +// readValue processes a const or var declaration. +// +func (r *reader) readValue(decl *ast.GenDecl) { + // determine if decl should be associated with a type + // Heuristic: For each typed entry, determine the type name, if any. + // If there is exactly one type name that is sufficiently + // frequent, associate the decl with the respective type. + domName := "" + domFreq := 0 + prev := "" + n := 0 + for _, spec := range decl.Specs { + s, ok := spec.(*ast.ValueSpec) + if !ok { + continue // should not happen, but be conservative + } + name := "" + switch { + case s.Type != nil: + // a type is present; determine its name + if n, imp := baseTypeName(s.Type); !imp && r.isVisible(n) { + name = n + } + case decl.Tok == token.CONST: + // no type is present but we have a constant declaration; + // use the previous type name (w/o more type information + // we cannot handle the case of unnamed variables with + // initializer expressions except for some trivial cases) + name = prev + } + if name != "" { + // entry has a named type + if domName != "" && domName != name { + // more than one type name - do not associate + // with any type + domName = "" + break + } + domName = name + domFreq++ + } + prev = name + n++ + } + + // nothing to do w/o a legal declaration + if n == 0 { + return + } + + // determine values list with which to associate the Value for this decl + values := &r.values + const threshold = 0.75 + if domName != "" && domFreq >= int(float64(len(decl.Specs))*threshold) { + // typed entries are sufficiently frequent + typ := r.lookupType(domName) + if typ != nil { + values = &typ.values // associate with that type + } + } + + *values = append(*values, &Value{ + Doc: decl.Doc.Text(), + Names: specNames(decl.Specs), + Decl: decl, + order: len(*values), + }) + decl.Doc = nil // doc consumed - remove from AST +} + +// fields returns a struct's fields or an interface's methods. +// +func fields(typ ast.Expr) (list []*ast.Field, isStruct bool) { + var fields *ast.FieldList + switch t := typ.(type) { + case *ast.StructType: + fields = t.Fields + isStruct = true + case *ast.InterfaceType: + fields = t.Methods + } + if fields != nil { + list = fields.List + } + return +} + +// readType processes a type declaration. 
+// +func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) { + typ := r.lookupType(spec.Name.Name) + if typ == nil { + return // no name or blank name - ignore the type + } + + // A type should be added at most once, so info.decl + // should be nil - if it is not, simply overwrite it. + typ.decl = decl + + // compute documentation + doc := spec.Doc + spec.Doc = nil // doc consumed - remove from AST + if doc == nil { + // no doc associated with the spec, use the declaration doc, if any + doc = decl.Doc + } + decl.Doc = nil // doc consumed - remove from AST + typ.doc = doc.Text() + + // look for anonymous fields that might contribute methods + var list []*ast.Field + list, typ.isStruct = fields(spec.Type) + for _, field := range list { + if len(field.Names) == 0 { + // anonymous field - add corresponding field type to typ + n, imp := baseTypeName(field.Type) + if imp { + // imported type - we don't handle this case + // at the moment + return + } + if embedded := r.lookupType(n); embedded != nil { + _, ptr := field.Type.(*ast.StarExpr) + typ.addEmbeddedType(embedded, ptr) + } + } + } +} + +// readFunc processes a func or method declaration. +// +func (r *reader) readFunc(fun *ast.FuncDecl) { + // strip function body + fun.Body = nil + + // determine if it should be associated with a type + if fun.Recv != nil { + // method + recvTypeName, imp := baseTypeName(fun.Recv.List[0].Type) + if imp { + // should not happen (incorrect AST); + // don't show this method + return + } + var typ *baseType + if r.isVisible(recvTypeName) { + // visible recv type: if not found, add it to r.types + typ = r.lookupType(recvTypeName) + } else { + // invisible recv type: if not found, do not add it + // (invisible embedded types are added before this + // phase, so if the type doesn't exist yet, we don't + // care about this method) + typ = r.types[recvTypeName] + } + if typ != nil { + // associate method with the type + // (if the type is not exported, it may be embedded + // somewhere so we need to collect the method anyway) + typ.methods.set(fun) + } + // otherwise don't show the method + // TODO(gri): There may be exported methods of non-exported types + // that can be called because of exported values (consts, vars, or + // function results) of that type. Could determine if that is the + // case and then show those methods in an appropriate section. + return + } + + // perhaps a factory function + // determine result type, if any + if fun.Type.Results.NumFields() >= 1 { + res := fun.Type.Results.List[0] + if len(res.Names) <= 1 { + // exactly one (named or anonymous) result associated + // with the first type in result signature (there may + // be more than one result) + if n, imp := baseTypeName(res.Type); !imp && r.isVisible(n) { + if typ := r.lookupType(n); typ != nil { + // associate Func with typ + typ.funcs.set(fun) + return + } + } + } + } + + // just an ordinary function + r.funcs.set(fun) +} + +var ( + bug_markers = regexp.MustCompile("^/[/*][ \t]*BUG\\(.*\\):[ \t]*") // BUG(uid): + bug_content = regexp.MustCompile("[^ \n\r\t]+") // at least one non-whitespace char +) + +// readFile adds the AST for a source file to the reader. 
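readFile (below) also scans a file's unassociated comments for BUG(uid) markers and records them, stripped of the marker, in Package.Bugs. A hedged sketch of the observable result; the uid and the expected output are illustrative, based on reading the patch:

package main

import (
	"fmt"
	"go/ast"
	"go/doc"
	"go/parser"
	"go/token"
)

const src = `package demo

// BUG(uid): This floating comment should end up in Package.Bugs.
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	pkg := &ast.Package{Name: "demo", Files: map[string]*ast.File{"demo.go": file}}
	p := doc.New(pkg, "example/demo", 0)
	// The "BUG(uid):" prefix is stripped; the remaining text is kept.
	fmt.Printf("%q\n", p.Bugs) // expected: ["This floating comment should end up in Package.Bugs.\n"]
}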
+//
+func (r *reader) readFile(src *ast.File) {
+	// add package documentation
+	if src.Doc != nil {
+		r.readDoc(src.Doc)
+		src.Doc = nil // doc consumed - remove from AST
+	}
+
+	// add all declarations
+	for _, decl := range src.Decls {
+		switch d := decl.(type) {
+		case *ast.GenDecl:
+			switch d.Tok {
+			case token.IMPORT:
+				// imports are handled individually
+				for _, spec := range d.Specs {
+					if s, ok := spec.(*ast.ImportSpec); ok {
+						if import_, err := strconv.Unquote(s.Path.Value); err == nil {
+							r.imports[import_] = 1
+						}
+					}
+				}
+			case token.CONST, token.VAR:
+				// constants and variables are always handled as a group
+				r.readValue(d)
+			case token.TYPE:
+				// types are handled individually
+				for _, spec := range d.Specs {
+					if s, ok := spec.(*ast.TypeSpec); ok {
+						// use an individual (possibly fake) declaration
+						// for each type; this also ensures that each type
+						// gets to (re-)use the declaration documentation
+						// if there's none associated with the spec itself
+						fake := &ast.GenDecl{
+							d.Doc, d.Pos(), token.TYPE, token.NoPos,
+							[]ast.Spec{s}, token.NoPos,
+						}
+						r.readType(fake, s)
+					}
+				}
+			}
+		case *ast.FuncDecl:
+			r.readFunc(d)
+		}
+	}
+
+	// collect BUG(...) comments
+	for _, c := range src.Comments {
+		text := c.List[0].Text
+		if m := bug_markers.FindStringIndex(text); m != nil {
+			// found a BUG comment; maybe empty
+			if btxt := text[m[1]:]; bug_content.MatchString(btxt) {
+				// non-empty BUG comment; collect comment without BUG prefix
+				list := append([]*ast.Comment(nil), c.List...) // make a copy
+				list[0].Text = text[m[1]:]
+				r.bugs = append(r.bugs, (&ast.CommentGroup{list}).Text())
+			}
+		}
+	}
+	src.Comments = nil // consumed unassociated comments - remove from AST
+}
+
+func (r *reader) readPackage(pkg *ast.Package, mode Mode) {
+	// initialize reader
+	r.filenames = make([]string, len(pkg.Files))
+	r.imports = make(map[string]int)
+	r.mode = mode
+	r.types = make(map[string]*baseType)
+	r.funcs = make(methodSet)
+
+	// sort package files before reading them so that the
+	// result does not depend on map iteration order
+	i := 0
+	for filename := range pkg.Files {
+		r.filenames[i] = filename
+		i++
+	}
+	sort.Strings(r.filenames)
+
+	// process files in sorted order
+	for _, filename := range r.filenames {
+		f := pkg.Files[filename]
+		if mode&AllDecls == 0 {
+			r.fileExports(f)
+		}
+		r.readFile(f)
+	}
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+var predeclaredTypes = map[string]bool{
+	"bool":       true,
+	"byte":       true,
+	"complex64":  true,
+	"complex128": true,
+	"float32":    true,
+	"float64":    true,
+	"int":        true,
+	"int8":       true,
+	"int16":      true,
+	"int32":      true,
+	"int64":      true,
+	"string":     true,
+	"uint":       true,
+	"uint8":      true,
+	"uint16":     true,
+	"uint32":     true,
+	"uint64":     true,
+	"uintptr":    true,
+}
+
+func customizeRecv(f *Func, recvTypeName string, embeddedIsPtr bool, level int) *Func {
+	if f == nil || f.Decl == nil || f.Decl.Recv == nil || len(f.Decl.Recv.List) != 1 {
+		return f // shouldn't happen, but be safe
+	}
+
+	// copy existing receiver field and set new type
+	newField := *f.Decl.Recv.List[0]
+	_, origRecvIsPtr := newField.Type.(*ast.StarExpr)
+	var typ ast.Expr = ast.NewIdent(recvTypeName)
+	if !embeddedIsPtr && origRecvIsPtr {
+		typ = &ast.StarExpr{token.NoPos, typ}
+	}
+	newField.Type = typ
+
+	// copy existing receiver field list and set new receiver field
+	newFieldList := *f.Decl.Recv
+	newFieldList.List = []*ast.Field{&newField}
+
+	// copy existing function declaration and set new receiver 
field list + newFuncDecl := *f.Decl + newFuncDecl.Recv = &newFieldList + + // copy existing function documentation and set new declaration + newF := *f + newF.Decl = &newFuncDecl + newF.Recv = recvString(typ) + // the Orig field never changes + newF.Level = level + + return &newF +} + +// collectEmbeddedMethods collects the embedded methods from +// all processed embedded types found in info in mset. +// +func collectEmbeddedMethods(mset methodSet, typ *baseType, recvTypeName string, embeddedIsPtr bool, level int) { + for _, e := range typ.embedded { + // Once an embedded type is embedded as a pointer type + // all embedded types in those types are treated like + // pointer types for the purpose of the receiver type + // computation; i.e., embeddedIsPtr is sticky for this + // embedding hierarchy. + thisEmbeddedIsPtr := embeddedIsPtr || e.ptr + for _, m := range e.typ.methods { + // only top-level methods are embedded + if m.Level == 0 { + mset.add(customizeRecv(m, recvTypeName, thisEmbeddedIsPtr, level)) + } + } + collectEmbeddedMethods(mset, e.typ, recvTypeName, thisEmbeddedIsPtr, level+1) + } +} + +// computeMethodSets determines the actual method sets for each type encountered. +// +func (r *reader) computeMethodSets() { + for _, t := range r.types { + // collect embedded methods for t + if t.isStruct { + // struct + collectEmbeddedMethods(t.methods, t, t.name, false, 1) + } else { + // interface + // TODO(gri) fix this + } + } +} + +// cleanupTypes removes the association of functions and methods with +// types that have no declaration. Instead, these functions and methods +// are shown at the package level. It also removes types with missing +// declarations or which are not visible. +// +func (r *reader) cleanupTypes() { + for _, t := range r.types { + visible := r.isVisible(t.name) + if t.decl == nil && (predeclaredTypes[t.name] || t.isEmbedded && visible) { + // t.name is a predeclared type (and was not redeclared in this package), + // or it was embedded somewhere but its declaration is missing (because + // the AST is incomplete): move any associated values, funcs, and methods + // back to the top-level so that they are not lost. + // 1) move values + r.values = append(r.values, t.values...) + // 2) move factory functions + for name, f := range t.funcs { + r.funcs[name] = f + } + // 3) move methods + for name, m := range t.methods { + // don't overwrite functions with the same name - drop them + if _, found := r.funcs[name]; !found { + r.funcs[name] = m + } + } + } + // remove types w/o declaration or which are not visible + if t.decl == nil || !visible { + delete(r.types, t.name) + } + } +} + +// ---------------------------------------------------------------------------- +// Sorting + +type data struct { + n int + swap func(i, j int) + less func(i, j int) bool +} + +func (d *data) Len() int { return d.n } +func (d *data) Swap(i, j int) { d.swap(i, j) } +func (d *data) Less(i, j int) bool { return d.less(i, j) } + +// sortBy is a helper function for sorting +func sortBy(less func(i, j int) bool, swap func(i, j int), n int) { + sort.Sort(&data{n, swap, less}) +} + +func sortedKeys(m map[string]int) []string { + list := make([]string, len(m)) + i := 0 + for key := range m { + list[i] = key + i++ + } + sort.Strings(list) + return list +} + +// sortingName returns the name to use when sorting d into place. 
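
A hedged illustration of the receiver rewriting done by customizeRecv and collectEmbeddedMethods above. The declarations are hypothetical (not from this patch or its testdata) and only show what the documented signature of a promoted method looks like.

	// Hypothetical package, for illustration only.
	type list struct{}

	func (l *list) Len() int { return 0 }

	type Stack struct {
		list // embedded by value
	}

	// computeMethodSets promotes list.Len (a Level-0 method) onto Stack.
	// The embedding is by value (embeddedIsPtr == false) while the original
	// receiver is a pointer, so customizeRecv wraps the new receiver in a
	// StarExpr and the documented method reads:
	//
	//	func (l *Stack) Len() int
	//
	// Had the field been "*list", embeddedIsPtr would be true (and sticky
	// for deeper embeddings) and the promoted receiver would be a plain Stack.
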
+// +func sortingName(d *ast.GenDecl) string { + if len(d.Specs) == 1 { + if s, ok := d.Specs[0].(*ast.ValueSpec); ok { + return s.Names[0].Name + } + } + return "" +} + +func sortedValues(m []*Value, tok token.Token) []*Value { + list := make([]*Value, len(m)) // big enough in any case + i := 0 + for _, val := range m { + if val.Decl.Tok == tok { + list[i] = val + i++ + } + } + list = list[0:i] + + sortBy( + func(i, j int) bool { + if ni, nj := sortingName(list[i].Decl), sortingName(list[j].Decl); ni != nj { + return ni < nj + } + return list[i].order < list[j].order + }, + func(i, j int) { list[i], list[j] = list[j], list[i] }, + len(list), + ) + + return list +} + +func sortedTypes(m map[string]*baseType) []*Type { + list := make([]*Type, len(m)) + i := 0 + for _, t := range m { + list[i] = &Type{ + Doc: t.doc, + Name: t.name, + Decl: t.decl, + Consts: sortedValues(t.values, token.CONST), + Vars: sortedValues(t.values, token.VAR), + Funcs: sortedFuncs(t.funcs), + Methods: sortedFuncs(t.methods), + } + i++ + } + + sortBy( + func(i, j int) bool { return list[i].Name < list[j].Name }, + func(i, j int) { list[i], list[j] = list[j], list[i] }, + len(list), + ) + + return list +} + +func sortedFuncs(m methodSet) []*Func { + list := make([]*Func, len(m)) + i := 0 + for _, m := range m { + // exclude conflict entries + if m.Decl != nil { + list[i] = m + i++ + } + } + list = list[0:i] + sortBy( + func(i, j int) bool { return list[i].Name < list[j].Name }, + func(i, j int) { list[i], list[j] = list[j], list[i] }, + len(list), + ) + return list +} diff --git a/src/pkg/go/doc/testdata/a.0.golden b/src/pkg/go/doc/testdata/a.0.golden new file mode 100644 index 000000000..24db02d34 --- /dev/null +++ b/src/pkg/go/doc/testdata/a.0.golden @@ -0,0 +1,13 @@ +// comment 0 comment 1 +PACKAGE a + +IMPORTPATH + testdata/a + +FILENAMES + testdata/a0.go + testdata/a1.go + +BUGS + // bug0 + // bug1 diff --git a/src/pkg/go/doc/testdata/a.1.golden b/src/pkg/go/doc/testdata/a.1.golden new file mode 100644 index 000000000..24db02d34 --- /dev/null +++ b/src/pkg/go/doc/testdata/a.1.golden @@ -0,0 +1,13 @@ +// comment 0 comment 1 +PACKAGE a + +IMPORTPATH + testdata/a + +FILENAMES + testdata/a0.go + testdata/a1.go + +BUGS + // bug0 + // bug1 diff --git a/src/pkg/go/doc/testdata/a0.go b/src/pkg/go/doc/testdata/a0.go new file mode 100644 index 000000000..dc552989e --- /dev/null +++ b/src/pkg/go/doc/testdata/a0.go @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// comment 0 +package a + +//BUG(uid): bug0 diff --git a/src/pkg/go/doc/testdata/a1.go b/src/pkg/go/doc/testdata/a1.go new file mode 100644 index 000000000..098776c1b --- /dev/null +++ b/src/pkg/go/doc/testdata/a1.go @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// comment 1 +package a + +//BUG(uid): bug1 diff --git a/src/pkg/go/doc/testdata/b.0.golden b/src/pkg/go/doc/testdata/b.0.golden new file mode 100644 index 000000000..7c3330061 --- /dev/null +++ b/src/pkg/go/doc/testdata/b.0.golden @@ -0,0 +1,43 @@ +// +PACKAGE b + +IMPORTPATH + testdata/b + +IMPORTS + a + +FILENAMES + testdata/b.go + +CONSTANTS + // + const Pi = 3.14 // Pi + + +VARIABLES + // + var MaxInt int // MaxInt + + +FUNCTIONS + // + func F(x int) int + + // Always under the package functions list. 
+ func NotAFactory() int + + // Associated with uint type if AllDecls is set. + func UintFactory() uint + + +TYPES + // + type T struct{} // T + + // + var V T // v + + // + func (x *T) M() + diff --git a/src/pkg/go/doc/testdata/b.1.golden b/src/pkg/go/doc/testdata/b.1.golden new file mode 100644 index 000000000..f30380516 --- /dev/null +++ b/src/pkg/go/doc/testdata/b.1.golden @@ -0,0 +1,49 @@ +// +PACKAGE b + +IMPORTPATH + testdata/b + +IMPORTS + a + +FILENAMES + testdata/b.go + +CONSTANTS + // + const Pi = 3.14 // Pi + + +VARIABLES + // + var MaxInt int // MaxInt + + +FUNCTIONS + // + func F(x int) int + + // Always under the package functions list. + func NotAFactory() int + + +TYPES + // + type T struct{} // T + + // + var V T // v + + // + func (x *T) M() + + // Should only appear if AllDecls is set. + type uint struct{} + + // Associated with uint type if AllDecls is set. + func UintFactory() uint + + // Associated with uint type if AllDecls is set. + func uintFactory() uint + diff --git a/src/pkg/go/doc/testdata/b.go b/src/pkg/go/doc/testdata/b.go new file mode 100644 index 000000000..28660f9be --- /dev/null +++ b/src/pkg/go/doc/testdata/b.go @@ -0,0 +1,30 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package b + +import "a" + +// Basic declarations + +const Pi = 3.14 // Pi +var MaxInt int // MaxInt +type T struct{} // T +var V T // v +func F(x int) int {} // F +func (x *T) M() {} // M + +// Corner cases: association with (presumed) predeclared types + +// Always under the package functions list. +func NotAFactory() int {} + +// Associated with uint type if AllDecls is set. +func UintFactory() uint {} + +// Associated with uint type if AllDecls is set. +func uintFactory() uint {} + +// Should only appear if AllDecls is set. +type uint struct{} // overrides a predeclared type uint diff --git a/src/pkg/go/doc/testdata/benchmark.go b/src/pkg/go/doc/testdata/benchmark.go new file mode 100644 index 000000000..0bf567b7c --- /dev/null +++ b/src/pkg/go/doc/testdata/benchmark.go @@ -0,0 +1,293 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "flag" + "fmt" + "os" + "runtime" + "time" +) + +var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run") +var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds") + +// An internal type but exported because it is cross-package; part of the implementation +// of gotest. +type InternalBenchmark struct { + Name string + F func(b *B) +} + +// B is a type passed to Benchmark functions to manage benchmark +// timing and to specify the number of iterations to run. +type B struct { + common + N int + benchmark InternalBenchmark + bytes int64 + timerOn bool + result BenchmarkResult +} + +// StartTimer starts timing a test. This function is called automatically +// before a benchmark starts, but it can also used to resume timing after +// a call to StopTimer. +func (b *B) StartTimer() { + if !b.timerOn { + b.start = time.Now() + b.timerOn = true + } +} + +// StopTimer stops timing a test. This can be used to pause the timer +// while performing complex initialization that you don't +// want to measure. 
+func (b *B) StopTimer() {
+	if b.timerOn {
+		b.duration += time.Now().Sub(b.start)
+		b.timerOn = false
+	}
+}
+
+// ResetTimer sets the elapsed benchmark time to zero.
+// It does not affect whether the timer is running.
+func (b *B) ResetTimer() {
+	if b.timerOn {
+		b.start = time.Now()
+	}
+	b.duration = 0
+}
+
+// SetBytes records the number of bytes processed in a single operation.
+// If this is called, the benchmark will report ns/op and MB/s.
+func (b *B) SetBytes(n int64) { b.bytes = n }
+
+func (b *B) nsPerOp() int64 {
+	if b.N <= 0 {
+		return 0
+	}
+	return b.duration.Nanoseconds() / int64(b.N)
+}
+
+// runN runs a single benchmark for the specified number of iterations.
+func (b *B) runN(n int) {
+	// Try to get a comparable environment for each run
+	// by clearing garbage from previous runs.
+	runtime.GC()
+	b.N = n
+	b.ResetTimer()
+	b.StartTimer()
+	b.benchmark.F(b)
+	b.StopTimer()
+}
+
+func min(x, y int) int {
+	if x > y {
+		return y
+	}
+	return x
+}
+
+func max(x, y int) int {
+	if x < y {
+		return y
+	}
+	return x
+}
+
+// roundDown10 rounds a number down to the nearest power of 10.
+func roundDown10(n int) int {
+	var tens = 0
+	// tens = floor(log_10(n))
+	for n > 10 {
+		n = n / 10
+		tens++
+	}
+	// result = 10^tens
+	result := 1
+	for i := 0; i < tens; i++ {
+		result *= 10
+	}
+	return result
+}
+
+// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
+func roundUp(n int) int {
+	base := roundDown10(n)
+	if n < (2 * base) {
+		return 2 * base
+	}
+	if n < (5 * base) {
+		return 5 * base
+	}
+	return 10 * base
+}
+
+// run times the benchmark function in a separate goroutine.
+func (b *B) run() BenchmarkResult {
+	go b.launch()
+	<-b.signal
+	return b.result
+}
+
+// launch launches the benchmark function. It gradually increases the number
+// of benchmark iterations until the benchmark runs for a second in order
+// to get a reasonable measurement. It prints timing information in this form
+//	testing.BenchmarkHello	100000	19 ns/op
+// launch is run by the run function as a separate goroutine.
+func (b *B) launch() {
+	// Run the benchmark for a single iteration in case it's expensive.
+	n := 1
+
+	// Signal that we're done whether we return normally
+	// or by FailNow's runtime.Goexit.
+	defer func() {
+		b.signal <- b
+	}()
+
+	b.runN(n)
+	// Run the benchmark for at least the specified amount of time.
+	d := time.Duration(*benchTime * float64(time.Second))
+	for !b.failed && b.duration < d && n < 1e9 {
+		last := n
+		// Predict iterations/sec.
+		if b.nsPerOp() == 0 {
+			n = 1e9
+		} else {
+			n = int(d.Nanoseconds() / b.nsPerOp())
+		}
+		// Run more iterations than we think we'll need for a second (1.5x).
+		// Don't grow too fast in case we had timing errors previously.
+		// Be sure to run at least one more than last time.
+		n = max(min(n+n/2, 100*last), last+1)
+		// Round up to something easy to read.
+		n = roundUp(n)
+		b.runN(n)
+	}
+	b.result = BenchmarkResult{b.N, b.duration, b.bytes}
+}
+
+// The results of a benchmark run.
+type BenchmarkResult struct {
+	N     int           // The number of iterations.
+	T     time.Duration // The total time taken.
+	Bytes int64         // Bytes processed in one iteration.
+} + +func (r BenchmarkResult) NsPerOp() int64 { + if r.N <= 0 { + return 0 + } + return r.T.Nanoseconds() / int64(r.N) +} + +func (r BenchmarkResult) mbPerSec() float64 { + if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 { + return 0 + } + return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds() +} + +func (r BenchmarkResult) String() string { + mbs := r.mbPerSec() + mb := "" + if mbs != 0 { + mb = fmt.Sprintf("\t%7.2f MB/s", mbs) + } + nsop := r.NsPerOp() + ns := fmt.Sprintf("%10d ns/op", nsop) + if r.N > 0 && nsop < 100 { + // The format specifiers here make sure that + // the ones digits line up for all three possible formats. + if nsop < 10 { + ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N)) + } else { + ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N)) + } + } + return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb) +} + +// An internal function but exported because it is cross-package; part of the implementation +// of gotest. +func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) { + // If no flag was specified, don't run benchmarks. + if len(*matchBenchmarks) == 0 { + return + } + for _, Benchmark := range benchmarks { + matched, err := matchString(*matchBenchmarks, Benchmark.Name) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.bench: %s\n", err) + os.Exit(1) + } + if !matched { + continue + } + for _, procs := range cpuList { + runtime.GOMAXPROCS(procs) + b := &B{ + common: common{ + signal: make(chan interface{}), + }, + benchmark: Benchmark, + } + benchName := Benchmark.Name + if procs != 1 { + benchName = fmt.Sprintf("%s-%d", Benchmark.Name, procs) + } + fmt.Printf("%s\t", benchName) + r := b.run() + if b.failed { + // The output could be very long here, but probably isn't. + // We print it all, regardless, because we don't want to trim the reason + // the benchmark failed. + fmt.Printf("--- FAIL: %s\n%s", benchName, b.output) + continue + } + fmt.Printf("%v\n", r) + // Unlike with tests, we ignore the -chatty flag and always print output for + // benchmarks since the output generation time will skew the results. + if len(b.output) > 0 { + b.trimOutput() + fmt.Printf("--- BENCH: %s\n%s", benchName, b.output) + } + if p := runtime.GOMAXPROCS(-1); p != procs { + fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p) + } + } + } +} + +// trimOutput shortens the output from a benchmark, which can be very long. +func (b *B) trimOutput() { + // The output is likely to appear multiple times because the benchmark + // is run multiple times, but at least it will be seen. This is not a big deal + // because benchmarks rarely print, but just in case, we trim it if it's too long. + const maxNewlines = 10 + for nlCount, j := 0, 0; j < len(b.output); j++ { + if b.output[j] == '\n' { + nlCount++ + if nlCount >= maxNewlines { + b.output = append(b.output[:j], "\n\t... [output truncated]\n"...) + break + } + } + } +} + +// Benchmark benchmarks a single function. Useful for creating +// custom benchmarks that do not use gotest. 
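
As a usage sketch for the Benchmark helper whose body follows: the snippet below is not part of the patch, assumes the standard testing and sort packages, and simply runs an ad-hoc benchmark outside of gotest, printing the BenchmarkResult through its String method.

	package main

	import (
		"fmt"
		"sort"
		"testing"
	)

	func main() {
		r := testing.Benchmark(func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				sort.Ints([]int{5, 4, 3, 2, 1})
			}
		})
		fmt.Println(r) // e.g. " 5000000	       123 ns/op" (numbers illustrative)
	}
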
+func Benchmark(f func(b *B)) BenchmarkResult { + b := &B{ + common: common{ + signal: make(chan interface{}), + }, + benchmark: InternalBenchmark{"", f}, + } + return b.run() +} diff --git a/src/pkg/go/doc/testdata/c.0.golden b/src/pkg/go/doc/testdata/c.0.golden new file mode 100644 index 000000000..e21959b19 --- /dev/null +++ b/src/pkg/go/doc/testdata/c.0.golden @@ -0,0 +1,48 @@ +// +PACKAGE c + +IMPORTPATH + testdata/c + +IMPORTS + a + +FILENAMES + testdata/c.go + +TYPES + // A (should see this) + type A struct{} + + // B (should see this) + type B struct{} + + // C (should see this) + type C struct{} + + // D (should see this) + type D struct{} + + // E1 (should see this) + type E1 struct{} + + // E (should see this for E2 and E3) + type E2 struct{} + + // E (should see this for E2 and E3) + type E3 struct{} + + // E4 (should see this) + type E4 struct{} + + // + type T1 struct{} + + // + func (t1 *T1) M() + + // T2 must not show methods of local T1 + type T2 struct { + a.T1 // not the same as locally declared T1 + } + diff --git a/src/pkg/go/doc/testdata/c.1.golden b/src/pkg/go/doc/testdata/c.1.golden new file mode 100644 index 000000000..e21959b19 --- /dev/null +++ b/src/pkg/go/doc/testdata/c.1.golden @@ -0,0 +1,48 @@ +// +PACKAGE c + +IMPORTPATH + testdata/c + +IMPORTS + a + +FILENAMES + testdata/c.go + +TYPES + // A (should see this) + type A struct{} + + // B (should see this) + type B struct{} + + // C (should see this) + type C struct{} + + // D (should see this) + type D struct{} + + // E1 (should see this) + type E1 struct{} + + // E (should see this for E2 and E3) + type E2 struct{} + + // E (should see this for E2 and E3) + type E3 struct{} + + // E4 (should see this) + type E4 struct{} + + // + type T1 struct{} + + // + func (t1 *T1) M() + + // T2 must not show methods of local T1 + type T2 struct { + a.T1 // not the same as locally declared T1 + } + diff --git a/src/pkg/go/doc/testdata/c.go b/src/pkg/go/doc/testdata/c.go new file mode 100644 index 000000000..e0f39196d --- /dev/null +++ b/src/pkg/go/doc/testdata/c.go @@ -0,0 +1,62 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package c + +import "a" + +// ---------------------------------------------------------------------------- +// Test that empty declarations don't cause problems + +const () + +type () + +var () + +// ---------------------------------------------------------------------------- +// Test that types with documentation on both, the Decl and the Spec node +// are handled correctly. + +// A (should see this) +type A struct{} + +// B (should see this) +type ( + B struct{} +) + +type ( + // C (should see this) + C struct{} +) + +// D (should not see this) +type ( + // D (should see this) + D struct{} +) + +// E (should see this for E2 and E3) +type ( + // E1 (should see this) + E1 struct{} + E2 struct{} + E3 struct{} + // E4 (should see this) + E4 struct{} +) + +// ---------------------------------------------------------------------------- +// Test that local and imported types are different when +// handling anonymous fields. 
+ +type T1 struct{} + +func (t1 *T1) M() {} + +// T2 must not show methods of local T1 +type T2 struct { + a.T1 // not the same as locally declared T1 +} diff --git a/src/pkg/go/doc/testdata/d.0.golden b/src/pkg/go/doc/testdata/d.0.golden new file mode 100644 index 000000000..c00519953 --- /dev/null +++ b/src/pkg/go/doc/testdata/d.0.golden @@ -0,0 +1,104 @@ +// +PACKAGE d + +IMPORTPATH + testdata/d + +FILENAMES + testdata/d1.go + testdata/d2.go + +CONSTANTS + // CBx constants should appear before CAx constants. + const ( + CB2 = iota // before CB1 + CB1 // before CB0 + CB0 // at end + ) + + // CAx constants should appear after CBx constants. + const ( + CA2 = iota // before CA1 + CA1 // before CA0 + CA0 // at end + ) + + // C0 should be first. + const C0 = 0 + + // C1 should be second. + const C1 = 1 + + // C2 should be third. + const C2 = 2 + + // + const ( + // Single const declarations inside ()'s are considered ungrouped + // and show up in sorted order. + Cungrouped = 0 + ) + + +VARIABLES + // VBx variables should appear before VAx variables. + var ( + VB2 int // before VB1 + VB1 int // before VB0 + VB0 int // at end + ) + + // VAx variables should appear after VBx variables. + var ( + VA2 int // before VA1 + VA1 int // before VA0 + VA0 int // at end + ) + + // V0 should be first. + var V0 uintptr + + // V1 should be second. + var V1 uint + + // V2 should be third. + var V2 int + + // + var ( + // Single var declarations inside ()'s are considered ungrouped + // and show up in sorted order. + Vungrouped = 0 + ) + + +FUNCTIONS + // F0 should be first. + func F0() + + // F1 should be second. + func F1() + + // F2 should be third. + func F2() + + +TYPES + // T0 should be first. + type T0 struct{} + + // T1 should be second. + type T1 struct{} + + // T2 should be third. + type T2 struct{} + + // TG0 should be first. + type TG0 struct{} + + // TG1 should be second. + type TG1 struct{} + + // TG2 should be third. + type TG2 struct{} + diff --git a/src/pkg/go/doc/testdata/d.1.golden b/src/pkg/go/doc/testdata/d.1.golden new file mode 100644 index 000000000..c00519953 --- /dev/null +++ b/src/pkg/go/doc/testdata/d.1.golden @@ -0,0 +1,104 @@ +// +PACKAGE d + +IMPORTPATH + testdata/d + +FILENAMES + testdata/d1.go + testdata/d2.go + +CONSTANTS + // CBx constants should appear before CAx constants. + const ( + CB2 = iota // before CB1 + CB1 // before CB0 + CB0 // at end + ) + + // CAx constants should appear after CBx constants. + const ( + CA2 = iota // before CA1 + CA1 // before CA0 + CA0 // at end + ) + + // C0 should be first. + const C0 = 0 + + // C1 should be second. + const C1 = 1 + + // C2 should be third. + const C2 = 2 + + // + const ( + // Single const declarations inside ()'s are considered ungrouped + // and show up in sorted order. + Cungrouped = 0 + ) + + +VARIABLES + // VBx variables should appear before VAx variables. + var ( + VB2 int // before VB1 + VB1 int // before VB0 + VB0 int // at end + ) + + // VAx variables should appear after VBx variables. + var ( + VA2 int // before VA1 + VA1 int // before VA0 + VA0 int // at end + ) + + // V0 should be first. + var V0 uintptr + + // V1 should be second. + var V1 uint + + // V2 should be third. + var V2 int + + // + var ( + // Single var declarations inside ()'s are considered ungrouped + // and show up in sorted order. + Vungrouped = 0 + ) + + +FUNCTIONS + // F0 should be first. + func F0() + + // F1 should be second. + func F1() + + // F2 should be third. + func F2() + + +TYPES + // T0 should be first. 
+ type T0 struct{} + + // T1 should be second. + type T1 struct{} + + // T2 should be third. + type T2 struct{} + + // TG0 should be first. + type TG0 struct{} + + // TG1 should be second. + type TG1 struct{} + + // TG2 should be third. + type TG2 struct{} + diff --git a/src/pkg/go/doc/testdata/d1.go b/src/pkg/go/doc/testdata/d1.go new file mode 100644 index 000000000..ebd694195 --- /dev/null +++ b/src/pkg/go/doc/testdata/d1.go @@ -0,0 +1,57 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test cases for sort order of declarations. + +package d + +// C2 should be third. +const C2 = 2 + +// V2 should be third. +var V2 int + +// CBx constants should appear before CAx constants. +const ( + CB2 = iota // before CB1 + CB1 // before CB0 + CB0 // at end +) + +// VBx variables should appear before VAx variables. +var ( + VB2 int // before VB1 + VB1 int // before VB0 + VB0 int // at end +) + +const ( + // Single const declarations inside ()'s are considered ungrouped + // and show up in sorted order. + Cungrouped = 0 +) + +var ( + // Single var declarations inside ()'s are considered ungrouped + // and show up in sorted order. + Vungrouped = 0 +) + +// T2 should be third. +type T2 struct{} + +// Grouped types are sorted nevertheless. +type ( + // TG2 should be third. + TG2 struct{} + + // TG1 should be second. + TG1 struct{} + + // TG0 should be first. + TG0 struct{} +) + +// F2 should be third. +func F2() {} diff --git a/src/pkg/go/doc/testdata/d2.go b/src/pkg/go/doc/testdata/d2.go new file mode 100644 index 000000000..2f56f4fa4 --- /dev/null +++ b/src/pkg/go/doc/testdata/d2.go @@ -0,0 +1,45 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test cases for sort order of declarations. + +package d + +// C1 should be second. +const C1 = 1 + +// C0 should be first. +const C0 = 0 + +// V1 should be second. +var V1 uint + +// V0 should be first. +var V0 uintptr + +// CAx constants should appear after CBx constants. +const ( + CA2 = iota // before CA1 + CA1 // before CA0 + CA0 // at end +) + +// VAx variables should appear after VBx variables. +var ( + VA2 int // before VA1 + VA1 int // before VA0 + VA0 int // at end +) + +// T1 should be second. +type T1 struct{} + +// T0 should be first. +type T0 struct{} + +// F1 should be second. +func F1() {} + +// F0 should be first. +func F0() {} diff --git a/src/pkg/go/doc/testdata/e.0.golden b/src/pkg/go/doc/testdata/e.0.golden new file mode 100644 index 000000000..3b128f7be --- /dev/null +++ b/src/pkg/go/doc/testdata/e.0.golden @@ -0,0 +1,31 @@ +// +PACKAGE e + +IMPORTPATH + testdata/e + +FILENAMES + testdata/e.go + +TYPES + // T1 has no (top-level) M method due to conflict. + type T1 struct { + // contains filtered or unexported fields + } + + // T2 has only M as top-level method. + type T2 struct { + // contains filtered or unexported fields + } + + // T2.M should appear as method of T2. + func (T2) M() + + // T3 has only M as top-level method. + type T3 struct { + // contains filtered or unexported fields + } + + // T3.M should appear as method of T3. 
+ func (T3) M() + diff --git a/src/pkg/go/doc/testdata/e.1.golden b/src/pkg/go/doc/testdata/e.1.golden new file mode 100644 index 000000000..d05602d82 --- /dev/null +++ b/src/pkg/go/doc/testdata/e.1.golden @@ -0,0 +1,61 @@ +// +PACKAGE e + +IMPORTPATH + testdata/e + +FILENAMES + testdata/e.go + +TYPES + // T1 has no (top-level) M method due to conflict. + type T1 struct { + t1 + t2 + } + + // T2 has only M as top-level method. + type T2 struct { + t1 + } + + // T2.M should appear as method of T2. + func (T2) M() + + // T3 has only M as top-level method. + type T3 struct { + t1e + t2e + } + + // T3.M should appear as method of T3. + func (T3) M() + + // + type t1 struct{} + + // t1.M should not appear as method in a Tx type. + func (t1) M() + + // + type t1e struct { + t1 + } + + // t1.M should not appear as method in a Tx type. + func (t1e) M() + + // + type t2 struct{} + + // t2.M should not appear as method in a Tx type. + func (t2) M() + + // + type t2e struct { + t2 + } + + // t2.M should not appear as method in a Tx type. + func (t2e) M() + diff --git a/src/pkg/go/doc/testdata/e.go b/src/pkg/go/doc/testdata/e.go new file mode 100644 index 000000000..8ea6a83b6 --- /dev/null +++ b/src/pkg/go/doc/testdata/e.go @@ -0,0 +1,58 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Embedding tests. +// TODO(gri): This should be comprehensive. + +package e + +// ---------------------------------------------------------------------------- +// Conflicting methods M must not show up. + +type t1 struct{} + +// t1.M should not appear as method in a Tx type. +func (t1) M() {} + +type t2 struct{} + +// t2.M should not appear as method in a Tx type. +func (t2) M() {} + +// T1 has no (top-level) M method due to conflict. +type T1 struct { + t1 + t2 +} + +// ---------------------------------------------------------------------------- +// Higher-level method M wins over lower-level method M. + +// T2 has only M as top-level method. +type T2 struct { + t1 +} + +// T2.M should appear as method of T2. +func (T2) M() {} + +// ---------------------------------------------------------------------------- +// Higher-level method M wins over lower-level conflicting methods M. + +type t1e struct { + t1 +} + +type t2e struct { + t2 +} + +// T3 has only M as top-level method. +type T3 struct { + t1e + t2e +} + +// T3.M should appear as method of T3. +func (T3) M() {} diff --git a/src/pkg/go/doc/testdata/example.go b/src/pkg/go/doc/testdata/example.go new file mode 100644 index 000000000..fdeda137e --- /dev/null +++ b/src/pkg/go/doc/testdata/example.go @@ -0,0 +1,81 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testing + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" + "time" +) + +type InternalExample struct { + Name string + F func() + Output string +} + +func RunExamples(examples []InternalExample) (ok bool) { + ok = true + + var eg InternalExample + + stdout, stderr := os.Stdout, os.Stderr + defer func() { + os.Stdout, os.Stderr = stdout, stderr + if e := recover(); e != nil { + fmt.Printf("--- FAIL: %s\npanic: %v\n", eg.Name, e) + os.Exit(1) + } + }() + + for _, eg = range examples { + if *chatty { + fmt.Printf("=== RUN: %s\n", eg.Name) + } + + // capture stdout and stderr + r, w, err := os.Pipe() + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + os.Stdout, os.Stderr = w, w + outC := make(chan string) + go func() { + buf := new(bytes.Buffer) + _, err := io.Copy(buf, r) + if err != nil { + fmt.Fprintf(stderr, "testing: copying pipe: %v\n", err) + os.Exit(1) + } + outC <- buf.String() + }() + + // run example + t0 := time.Now() + eg.F() + dt := time.Now().Sub(t0) + + // close pipe, restore stdout/stderr, get output + w.Close() + os.Stdout, os.Stderr = stdout, stderr + out := <-outC + + // report any errors + tstr := fmt.Sprintf("(%.2f seconds)", dt.Seconds()) + if g, e := strings.TrimSpace(out), strings.TrimSpace(eg.Output); g != e { + fmt.Printf("--- FAIL: %s %s\ngot:\n%s\nwant:\n%s\n", + eg.Name, tstr, g, e) + ok = false + } else if *chatty { + fmt.Printf("--- PASS: %s %s\n", eg.Name, tstr) + } + } + + return +} diff --git a/src/pkg/go/doc/testdata/template.txt b/src/pkg/go/doc/testdata/template.txt new file mode 100644 index 000000000..32e331cdd --- /dev/null +++ b/src/pkg/go/doc/testdata/template.txt @@ -0,0 +1,65 @@ +{{synopsis .Doc}} +PACKAGE {{.Name}} + +IMPORTPATH + {{.ImportPath}} + +{{with .Imports}}IMPORTS +{{range .}} {{.}} +{{end}} +{{end}}{{/* + +*/}}FILENAMES +{{range .Filenames}} {{.}} +{{end}}{{/* + +*/}}{{with .Consts}} +CONSTANTS +{{range .}} {{synopsis .Doc}} + {{node .Decl $.FSet}} + +{{end}}{{end}}{{/* + +*/}}{{with .Vars}} +VARIABLES +{{range .}} {{synopsis .Doc}} + {{node .Decl $.FSet}} + +{{end}}{{end}}{{/* + +*/}}{{with .Funcs}} +FUNCTIONS +{{range .}} {{synopsis .Doc}} + {{node .Decl $.FSet}} + +{{end}}{{end}}{{/* + +*/}}{{with .Types}} +TYPES +{{range .}} {{synopsis .Doc}} + {{node .Decl $.FSet}} + +{{range .Consts}} {{synopsis .Doc}} + {{node .Decl $.FSet}} + +{{end}}{{/* + +*/}}{{range .Vars}} {{synopsis .Doc}} + {{node .Decl $.FSet}} + +{{end}}{{/* + +*/}}{{range .Funcs}} {{synopsis .Doc}} + {{node .Decl $.FSet}} + +{{end}}{{/* + +*/}}{{range .Methods}} {{synopsis .Doc}} + {{node .Decl $.FSet}} + +{{end}}{{end}}{{end}}{{/* + +*/}}{{with .Bugs}} +BUGS +{{range .}} {{synopsis .}} +{{end}}{{end}}
\ No newline at end of file diff --git a/src/pkg/go/doc/testdata/testing.0.golden b/src/pkg/go/doc/testdata/testing.0.golden new file mode 100644 index 000000000..15a903986 --- /dev/null +++ b/src/pkg/go/doc/testdata/testing.0.golden @@ -0,0 +1,156 @@ +// Package testing provides support for automated testing of Go ... +PACKAGE testing + +IMPORTPATH + testdata/testing + +IMPORTS + bytes + flag + fmt + io + os + runtime + runtime/pprof + strconv + strings + time + +FILENAMES + testdata/benchmark.go + testdata/example.go + testdata/testing.go + +FUNCTIONS + // An internal function but exported because it is cross-package; ... + func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) + + // An internal function but exported because it is cross-package; ... + func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) + + // + func RunExamples(examples []InternalExample) (ok bool) + + // + func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) + + // Short reports whether the -test.short flag is set. + func Short() bool + + +TYPES + // B is a type passed to Benchmark functions to manage benchmark ... + type B struct { + N int + // contains filtered or unexported fields + } + + // Error is equivalent to Log() followed by Fail(). + func (c *B) Error(args ...interface{}) + + // Errorf is equivalent to Logf() followed by Fail(). + func (c *B) Errorf(format string, args ...interface{}) + + // Fail marks the function as having failed but continues ... + func (c *B) Fail() + + // FailNow marks the function as having failed and stops its ... + func (c *B) FailNow() + + // Failed returns whether the function has failed. + func (c *B) Failed() bool + + // Fatal is equivalent to Log() followed by FailNow(). + func (c *B) Fatal(args ...interface{}) + + // Fatalf is equivalent to Logf() followed by FailNow(). + func (c *B) Fatalf(format string, args ...interface{}) + + // Log formats its arguments using default formatting, analogous ... + func (c *B) Log(args ...interface{}) + + // Logf formats its arguments according to the format, analogous ... + func (c *B) Logf(format string, args ...interface{}) + + // ResetTimer sets the elapsed benchmark time to zero. It does not ... + func (b *B) ResetTimer() + + // SetBytes records the number of bytes processed in a single ... + func (b *B) SetBytes(n int64) + + // StartTimer starts timing a test. This function is called ... + func (b *B) StartTimer() + + // StopTimer stops timing a test. This can be used to pause the ... + func (b *B) StopTimer() + + // The results of a benchmark run. + type BenchmarkResult struct { + N int // The number of iterations. + T time.Duration // The total time taken. + Bytes int64 // Bytes processed in one iteration. + } + + // Benchmark benchmarks a single function. Useful for creating ... + func Benchmark(f func(b *B)) BenchmarkResult + + // + func (r BenchmarkResult) NsPerOp() int64 + + // + func (r BenchmarkResult) String() string + + // An internal type but exported because it is cross-package; part ... + type InternalBenchmark struct { + Name string + F func(b *B) + } + + // + type InternalExample struct { + Name string + F func() + Output string + } + + // An internal type but exported because it is cross-package; part ... + type InternalTest struct { + Name string + F func(*T) + } + + // T is a type passed to Test functions to manage test state and ... 
+ type T struct { + // contains filtered or unexported fields + } + + // Error is equivalent to Log() followed by Fail(). + func (c *T) Error(args ...interface{}) + + // Errorf is equivalent to Logf() followed by Fail(). + func (c *T) Errorf(format string, args ...interface{}) + + // Fail marks the function as having failed but continues ... + func (c *T) Fail() + + // FailNow marks the function as having failed and stops its ... + func (c *T) FailNow() + + // Failed returns whether the function has failed. + func (c *T) Failed() bool + + // Fatal is equivalent to Log() followed by FailNow(). + func (c *T) Fatal(args ...interface{}) + + // Fatalf is equivalent to Logf() followed by FailNow(). + func (c *T) Fatalf(format string, args ...interface{}) + + // Log formats its arguments using default formatting, analogous ... + func (c *T) Log(args ...interface{}) + + // Logf formats its arguments according to the format, analogous ... + func (c *T) Logf(format string, args ...interface{}) + + // Parallel signals that this test is to be run in parallel with ... + func (t *T) Parallel() + diff --git a/src/pkg/go/doc/testdata/testing.1.golden b/src/pkg/go/doc/testdata/testing.1.golden new file mode 100644 index 000000000..1f92f8fe3 --- /dev/null +++ b/src/pkg/go/doc/testdata/testing.1.golden @@ -0,0 +1,298 @@ +// Package testing provides support for automated testing of Go ... +PACKAGE testing + +IMPORTPATH + testdata/testing + +IMPORTS + bytes + flag + fmt + io + os + runtime + runtime/pprof + strconv + strings + time + +FILENAMES + testdata/benchmark.go + testdata/example.go + testdata/testing.go + +VARIABLES + // + var ( + // The short flag requests that tests run more quickly, but its functionality + // is provided by test writers themselves. The testing package is just its + // home. The all.bash installation script sets it to make installation more + // efficient, but by default the flag is off so a plain "gotest" will do a + // full test of the package. + short = flag.Bool("test.short", false, "run smaller test suite to save time") + + // Report as tests are run; default is silent for success. + chatty = flag.Bool("test.v", false, "verbose: print additional output") + match = flag.String("test.run", "", "regular expression to select tests to run") + memProfile = flag.String("test.memprofile", "", "write a memory profile to the named file after execution") + memProfileRate = flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate") + cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution") + timeout = flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests") + cpuListStr = flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test") + parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism") + + cpuList []int + ) + + // + var benchTime = flag.Float64("test.benchtime", 1, "approximate run time for each benchmark, in seconds") + + // + var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run") + + // + var timer *time.Timer + + +FUNCTIONS + // An internal function but exported because it is cross-package; ... + func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) + + // An internal function but exported because it is cross-package; ... 
+ func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) + + // + func RunExamples(examples []InternalExample) (ok bool) + + // + func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) + + // Short reports whether the -test.short flag is set. + func Short() bool + + // after runs after all testing. + func after() + + // alarm is called if the timeout expires. + func alarm() + + // before runs before all testing. + func before() + + // decorate inserts the final newline if needed and indentation ... + func decorate(s string, addFileLine bool) string + + // + func max(x, y int) int + + // + func min(x, y int) int + + // + func parseCpuList() + + // roundDown10 rounds a number down to the nearest power of 10. + func roundDown10(n int) int + + // roundUp rounds x up to a number of the form [1eX, 2eX, 5eX]. + func roundUp(n int) int + + // startAlarm starts an alarm if requested. + func startAlarm() + + // stopAlarm turns off the alarm. + func stopAlarm() + + // + func tRunner(t *T, test *InternalTest) + + +TYPES + // B is a type passed to Benchmark functions to manage benchmark ... + type B struct { + common + N int + benchmark InternalBenchmark + bytes int64 + timerOn bool + result BenchmarkResult + } + + // Error is equivalent to Log() followed by Fail(). + func (c *B) Error(args ...interface{}) + + // Errorf is equivalent to Logf() followed by Fail(). + func (c *B) Errorf(format string, args ...interface{}) + + // Fail marks the function as having failed but continues ... + func (c *B) Fail() + + // FailNow marks the function as having failed and stops its ... + func (c *B) FailNow() + + // Failed returns whether the function has failed. + func (c *B) Failed() bool + + // Fatal is equivalent to Log() followed by FailNow(). + func (c *B) Fatal(args ...interface{}) + + // Fatalf is equivalent to Logf() followed by FailNow(). + func (c *B) Fatalf(format string, args ...interface{}) + + // Log formats its arguments using default formatting, analogous ... + func (c *B) Log(args ...interface{}) + + // Logf formats its arguments according to the format, analogous ... + func (c *B) Logf(format string, args ...interface{}) + + // ResetTimer sets the elapsed benchmark time to zero. It does not ... + func (b *B) ResetTimer() + + // SetBytes records the number of bytes processed in a single ... + func (b *B) SetBytes(n int64) + + // StartTimer starts timing a test. This function is called ... + func (b *B) StartTimer() + + // StopTimer stops timing a test. This can be used to pause the ... + func (b *B) StopTimer() + + // launch launches the benchmark function. It gradually increases ... + func (b *B) launch() + + // log generates the output. It's always at the same stack depth. + func (c *B) log(s string) + + // + func (b *B) nsPerOp() int64 + + // run times the benchmark function in a separate goroutine. + func (b *B) run() BenchmarkResult + + // runN runs a single benchmark for the specified number of ... + func (b *B) runN(n int) + + // trimOutput shortens the output from a benchmark, which can be ... + func (b *B) trimOutput() + + // The results of a benchmark run. + type BenchmarkResult struct { + N int // The number of iterations. + T time.Duration // The total time taken. + Bytes int64 // Bytes processed in one iteration. + } + + // Benchmark benchmarks a single function. Useful for creating ... 
+ func Benchmark(f func(b *B)) BenchmarkResult + + // + func (r BenchmarkResult) NsPerOp() int64 + + // + func (r BenchmarkResult) String() string + + // + func (r BenchmarkResult) mbPerSec() float64 + + // An internal type but exported because it is cross-package; part ... + type InternalBenchmark struct { + Name string + F func(b *B) + } + + // + type InternalExample struct { + Name string + F func() + Output string + } + + // An internal type but exported because it is cross-package; part ... + type InternalTest struct { + Name string + F func(*T) + } + + // T is a type passed to Test functions to manage test state and ... + type T struct { + common + name string // Name of test. + startParallel chan bool // Parallel tests will wait on this. + } + + // Error is equivalent to Log() followed by Fail(). + func (c *T) Error(args ...interface{}) + + // Errorf is equivalent to Logf() followed by Fail(). + func (c *T) Errorf(format string, args ...interface{}) + + // Fail marks the function as having failed but continues ... + func (c *T) Fail() + + // FailNow marks the function as having failed and stops its ... + func (c *T) FailNow() + + // Failed returns whether the function has failed. + func (c *T) Failed() bool + + // Fatal is equivalent to Log() followed by FailNow(). + func (c *T) Fatal(args ...interface{}) + + // Fatalf is equivalent to Logf() followed by FailNow(). + func (c *T) Fatalf(format string, args ...interface{}) + + // Log formats its arguments using default formatting, analogous ... + func (c *T) Log(args ...interface{}) + + // Logf formats its arguments according to the format, analogous ... + func (c *T) Logf(format string, args ...interface{}) + + // Parallel signals that this test is to be run in parallel with ... + func (t *T) Parallel() + + // log generates the output. It's always at the same stack depth. + func (c *T) log(s string) + + // + func (t *T) report() + + // common holds the elements common between T and B and captures ... + type common struct { + output []byte // Output generated by test or benchmark. + failed bool // Test or benchmark has failed. + start time.Time // Time test or benchmark started + duration time.Duration + self interface{} // To be sent on signal channel when done. + signal chan interface{} // Output for serial tests. + } + + // Error is equivalent to Log() followed by Fail(). + func (c *common) Error(args ...interface{}) + + // Errorf is equivalent to Logf() followed by Fail(). + func (c *common) Errorf(format string, args ...interface{}) + + // Fail marks the function as having failed but continues ... + func (c *common) Fail() + + // FailNow marks the function as having failed and stops its ... + func (c *common) FailNow() + + // Failed returns whether the function has failed. + func (c *common) Failed() bool + + // Fatal is equivalent to Log() followed by FailNow(). + func (c *common) Fatal(args ...interface{}) + + // Fatalf is equivalent to Logf() followed by FailNow(). + func (c *common) Fatalf(format string, args ...interface{}) + + // Log formats its arguments using default formatting, analogous ... + func (c *common) Log(args ...interface{}) + + // Logf formats its arguments according to the format, analogous ... + func (c *common) Logf(format string, args ...interface{}) + + // log generates the output. It's always at the same stack depth. 
+ func (c *common) log(s string) + diff --git a/src/pkg/go/doc/testdata/testing.go b/src/pkg/go/doc/testdata/testing.go new file mode 100644 index 000000000..cfe212dc1 --- /dev/null +++ b/src/pkg/go/doc/testdata/testing.go @@ -0,0 +1,404 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package testing provides support for automated testing of Go packages. +// It is intended to be used in concert with the ``gotest'' utility, which automates +// execution of any function of the form +// func TestXxx(*testing.T) +// where Xxx can be any alphanumeric string (but the first letter must not be in +// [a-z]) and serves to identify the test routine. +// These TestXxx routines should be declared within the package they are testing. +// +// Functions of the form +// func BenchmarkXxx(*testing.B) +// are considered benchmarks, and are executed by gotest when the -test.bench +// flag is provided. +// +// A sample benchmark function looks like this: +// func BenchmarkHello(b *testing.B) { +// for i := 0; i < b.N; i++ { +// fmt.Sprintf("hello") +// } +// } +// The benchmark package will vary b.N until the benchmark function lasts +// long enough to be timed reliably. The output +// testing.BenchmarkHello 10000000 282 ns/op +// means that the loop ran 10000000 times at a speed of 282 ns per loop. +// +// If a benchmark needs some expensive setup before running, the timer +// may be stopped: +// func BenchmarkBigLen(b *testing.B) { +// b.StopTimer() +// big := NewBig() +// b.StartTimer() +// for i := 0; i < b.N; i++ { +// big.Len() +// } +// } +package testing + +import ( + "flag" + "fmt" + "os" + "runtime" + "runtime/pprof" + "strconv" + "strings" + "time" +) + +var ( + // The short flag requests that tests run more quickly, but its functionality + // is provided by test writers themselves. The testing package is just its + // home. The all.bash installation script sets it to make installation more + // efficient, but by default the flag is off so a plain "gotest" will do a + // full test of the package. + short = flag.Bool("test.short", false, "run smaller test suite to save time") + + // Report as tests are run; default is silent for success. + chatty = flag.Bool("test.v", false, "verbose: print additional output") + match = flag.String("test.run", "", "regular expression to select tests to run") + memProfile = flag.String("test.memprofile", "", "write a memory profile to the named file after execution") + memProfileRate = flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate") + cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution") + timeout = flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests") + cpuListStr = flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test") + parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism") + + cpuList []int +) + +// common holds the elements common between T and B and +// captures common methods such as Errorf. +type common struct { + output []byte // Output generated by test or benchmark. + failed bool // Test or benchmark has failed. + start time.Time // Time test or benchmark started + duration time.Duration + self interface{} // To be sent on signal channel when done. + signal chan interface{} // Output for serial tests. 
+} + +// Short reports whether the -test.short flag is set. +func Short() bool { + return *short +} + +// decorate inserts the final newline if needed and indentation tabs for formatting. +// If addFileLine is true, it also prefixes the string with the file and line of the call site. +func decorate(s string, addFileLine bool) string { + if addFileLine { + _, file, line, ok := runtime.Caller(3) // decorate + log + public function. + if ok { + // Truncate file name at last file name separator. + if index := strings.LastIndex(file, "/"); index >= 0 { + file = file[index+1:] + } else if index = strings.LastIndex(file, "\\"); index >= 0 { + file = file[index+1:] + } + } else { + file = "???" + line = 1 + } + s = fmt.Sprintf("%s:%d: %s", file, line, s) + } + s = "\t" + s // Every line is indented at least one tab. + n := len(s) + if n > 0 && s[n-1] != '\n' { + s += "\n" + n++ + } + for i := 0; i < n-1; i++ { // -1 to avoid final newline + if s[i] == '\n' { + // Second and subsequent lines are indented an extra tab. + return s[0:i+1] + "\t" + decorate(s[i+1:n], false) + } + } + return s +} + +// T is a type passed to Test functions to manage test state and support formatted test logs. +// Logs are accumulated during execution and dumped to standard error when done. +type T struct { + common + name string // Name of test. + startParallel chan bool // Parallel tests will wait on this. +} + +// Fail marks the function as having failed but continues execution. +func (c *common) Fail() { c.failed = true } + +// Failed returns whether the function has failed. +func (c *common) Failed() bool { return c.failed } + +// FailNow marks the function as having failed and stops its execution. +// Execution will continue at the next Test. +func (c *common) FailNow() { + c.Fail() + + // Calling runtime.Goexit will exit the goroutine, which + // will run the deferred functions in this goroutine, + // which will eventually run the deferred lines in tRunner, + // which will signal to the test loop that this test is done. + // + // A previous version of this code said: + // + // c.duration = ... + // c.signal <- c.self + // runtime.Goexit() + // + // This previous version duplicated code (those lines are in + // tRunner no matter what), but worse the goroutine teardown + // implicit in runtime.Goexit was not guaranteed to complete + // before the test exited. If a test deferred an important cleanup + // function (like removing temporary files), there was no guarantee + // it would run on a test failure. Because we send on c.signal during + // a top-of-stack deferred function now, we know that the send + // only happens after any other stacked defers have completed. + runtime.Goexit() +} + +// log generates the output. It's always at the same stack depth. +func (c *common) log(s string) { + c.output = append(c.output, decorate(s, true)...) +} + +// Log formats its arguments using default formatting, analogous to Println(), +// and records the text in the error log. +func (c *common) Log(args ...interface{}) { c.log(fmt.Sprintln(args...)) } + +// Logf formats its arguments according to the format, analogous to Printf(), +// and records the text in the error log. +func (c *common) Logf(format string, args ...interface{}) { c.log(fmt.Sprintf(format, args...)) } + +// Error is equivalent to Log() followed by Fail(). +func (c *common) Error(args ...interface{}) { + c.log(fmt.Sprintln(args...)) + c.Fail() +} + +// Errorf is equivalent to Logf() followed by Fail(). 
+func (c *common) Errorf(format string, args ...interface{}) { + c.log(fmt.Sprintf(format, args...)) + c.Fail() +} + +// Fatal is equivalent to Log() followed by FailNow(). +func (c *common) Fatal(args ...interface{}) { + c.log(fmt.Sprintln(args...)) + c.FailNow() +} + +// Fatalf is equivalent to Logf() followed by FailNow(). +func (c *common) Fatalf(format string, args ...interface{}) { + c.log(fmt.Sprintf(format, args...)) + c.FailNow() +} + +// Parallel signals that this test is to be run in parallel with (and only with) +// other parallel tests in this CPU group. +func (t *T) Parallel() { + t.signal <- (*T)(nil) // Release main testing loop + <-t.startParallel // Wait for serial tests to finish +} + +// An internal type but exported because it is cross-package; part of the implementation +// of gotest. +type InternalTest struct { + Name string + F func(*T) +} + +func tRunner(t *T, test *InternalTest) { + t.start = time.Now() + + // When this goroutine is done, either because test.F(t) + // returned normally or because a test failure triggered + // a call to runtime.Goexit, record the duration and send + // a signal saying that the test is done. + defer func() { + t.duration = time.Now().Sub(t.start) + t.signal <- t + }() + + test.F(t) +} + +// An internal function but exported because it is cross-package; part of the implementation +// of gotest. +func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) { + flag.Parse() + parseCpuList() + + before() + startAlarm() + testOk := RunTests(matchString, tests) + exampleOk := RunExamples(examples) + if !testOk || !exampleOk { + fmt.Println("FAIL") + os.Exit(1) + } + fmt.Println("PASS") + stopAlarm() + RunBenchmarks(matchString, benchmarks) + after() +} + +func (t *T) report() { + tstr := fmt.Sprintf("(%.2f seconds)", t.duration.Seconds()) + format := "--- %s: %s %s\n%s" + if t.failed { + fmt.Printf(format, "FAIL", t.name, tstr, t.output) + } else if *chatty { + fmt.Printf(format, "PASS", t.name, tstr, t.output) + } +} + +func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) { + ok = true + if len(tests) == 0 { + fmt.Fprintln(os.Stderr, "testing: warning: no tests to run") + return + } + for _, procs := range cpuList { + runtime.GOMAXPROCS(procs) + // We build a new channel tree for each run of the loop. + // collector merges in one channel all the upstream signals from parallel tests. + // If all tests pump to the same channel, a bug can occur where a test + // kicks off a goroutine that Fails, yet the test still delivers a completion signal, + // which skews the counting. + var collector = make(chan interface{}) + + numParallel := 0 + startParallel := make(chan bool) + + for i := 0; i < len(tests); i++ { + matched, err := matchString(*match, tests[i].Name) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.run: %s\n", err) + os.Exit(1) + } + if !matched { + continue + } + testName := tests[i].Name + if procs != 1 { + testName = fmt.Sprintf("%s-%d", tests[i].Name, procs) + } + t := &T{ + common: common{ + signal: make(chan interface{}), + }, + name: testName, + startParallel: startParallel, + } + t.self = t + if *chatty { + fmt.Printf("=== RUN %s\n", t.name) + } + go tRunner(t, &tests[i]) + out := (<-t.signal).(*T) + if out == nil { // Parallel run. 
+ go func() { + collector <- <-t.signal + }() + numParallel++ + continue + } + t.report() + ok = ok && !out.failed + } + + running := 0 + for numParallel+running > 0 { + if running < *parallel && numParallel > 0 { + startParallel <- true + running++ + numParallel-- + continue + } + t := (<-collector).(*T) + t.report() + ok = ok && !t.failed + running-- + } + } + return +} + +// before runs before all testing. +func before() { + if *memProfileRate > 0 { + runtime.MemProfileRate = *memProfileRate + } + if *cpuProfile != "" { + f, err := os.Create(*cpuProfile) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s", err) + return + } + if err := pprof.StartCPUProfile(f); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s", err) + f.Close() + return + } + // Could save f so after can call f.Close; not worth the effort. + } + +} + +// after runs after all testing. +func after() { + if *cpuProfile != "" { + pprof.StopCPUProfile() // flushes profile to disk + } + if *memProfile != "" { + f, err := os.Create(*memProfile) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s", err) + return + } + if err = pprof.WriteHeapProfile(f); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write %s: %s", *memProfile, err) + } + f.Close() + } +} + +var timer *time.Timer + +// startAlarm starts an alarm if requested. +func startAlarm() { + if *timeout > 0 { + timer = time.AfterFunc(*timeout, alarm) + } +} + +// stopAlarm turns off the alarm. +func stopAlarm() { + if *timeout > 0 { + timer.Stop() + } +} + +// alarm is called if the timeout expires. +func alarm() { + panic("test timed out") +} + +func parseCpuList() { + if len(*cpuListStr) == 0 { + cpuList = append(cpuList, runtime.GOMAXPROCS(-1)) + } else { + for _, val := range strings.Split(*cpuListStr, ",") { + cpu, err := strconv.Atoi(val) + if err != nil || cpu <= 0 { + fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu", val) + os.Exit(1) + } + cpuList = append(cpuList, cpu) + } + } +} |
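
To close, a small standalone sketch (not part of the patch, error handling simplified) of the fan-out that parseCpuList enables: the comma-separated -test.cpu value becomes cpuList, and RunTests/RunBenchmarks above repeat every test and benchmark once per entry, setting GOMAXPROCS for each pass and suffixing the count to the name when it is not 1 (e.g. "BenchmarkHello-4").

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	func main() {
		// Equivalent parsing to parseCpuList for -test.cpu=1,2,4.
		cpuListStr := "1,2,4"
		var cpuList []int
		for _, val := range strings.Split(cpuListStr, ",") {
			if cpu, err := strconv.Atoi(val); err == nil && cpu > 0 {
				cpuList = append(cpuList, cpu)
			}
		}
		fmt.Println(cpuList) // [1 2 4]
	}
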