Diffstat (limited to 'src/cmd/godoc')
-rw-r--r--  src/cmd/godoc/Makefile        24
-rw-r--r--  src/cmd/godoc/appconfig.go    19
-rw-r--r--  src/cmd/godoc/appinit.go      86
-rw-r--r--  src/cmd/godoc/codewalk.go    487
-rw-r--r--  src/cmd/godoc/dirtrees.go    342
-rw-r--r--  src/cmd/godoc/doc.go         130
-rw-r--r--  src/cmd/godoc/filesystem.go  104
-rw-r--r--  src/cmd/godoc/format.go      358
-rw-r--r--  src/cmd/godoc/godoc.go      1159
-rw-r--r--  src/cmd/godoc/httpzip.go     181
-rw-r--r--  src/cmd/godoc/index.go       986
-rw-r--r--  src/cmd/godoc/main.go        423
-rw-r--r--  src/cmd/godoc/mapping.go     200
-rw-r--r--  src/cmd/godoc/parser.go       68
-rwxr-xr-x  src/cmd/godoc/snippet.go     100
-rw-r--r--  src/cmd/godoc/spec.go        198
-rw-r--r--  src/cmd/godoc/utils.go       168
-rw-r--r--  src/cmd/godoc/zip.go         207
18 files changed, 5240 insertions, 0 deletions
diff --git a/src/cmd/godoc/Makefile b/src/cmd/godoc/Makefile
new file mode 100644
index 000000000..f40d71703
--- /dev/null
+++ b/src/cmd/godoc/Makefile
@@ -0,0 +1,24 @@
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+include ../../Make.inc
+
+TARG=godoc
+GOFILES=\
+ codewalk.go\
+ dirtrees.go\
+ filesystem.go\
+ format.go\
+ godoc.go\
+ httpzip.go\
+ index.go\
+ main.go\
+ mapping.go\
+ parser.go\
+ snippet.go\
+ spec.go\
+ utils.go\
+ zip.go\
+
+include ../../Make.cmd
diff --git a/src/cmd/godoc/appconfig.go b/src/cmd/godoc/appconfig.go
new file mode 100644
index 000000000..9cbe7a443
--- /dev/null
+++ b/src/cmd/godoc/appconfig.go
@@ -0,0 +1,19 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains configuration information used by
+// godoc when running on app engine. Adjust as needed
+// (typically when the .zip file changes).
+
+package main
+
+const (
+ // zipFilename is the name of the .zip file
+ // containing the file system served by godoc.
+ zipFilename = "go.zip"
+
+ // zipGoroot is the path of the goroot directory
+ // in the .zip file.
+ zipGoroot = "/home/username/go"
+)
diff --git a/src/cmd/godoc/appinit.go b/src/cmd/godoc/appinit.go
new file mode 100644
index 000000000..9b8987223
--- /dev/null
+++ b/src/cmd/godoc/appinit.go
@@ -0,0 +1,86 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// To run godoc under app engine, substitute main.go with
+// this file (appinit.go), provide a .zip file containing
+// the file system to serve, and adjust the configuration
+// parameters in appconfig.go accordingly.
+//
+// The current app engine SDK may be based on an older Go
+// release version. To correct for version skew, copy newer
+// packages into the alt directory (e.g. alt/strings) and
+// adjust the imports in the godoc source files (e.g. from
+// `import "strings"` to `import "alt/strings"`). Both old
+// and new packages may be used simultaneously as long as
+// there is no package global state that needs to be shared.
+//
+// The directory structure should look as follows:
+//
+// godoc // directory containing the app engine app
+// alt // alternative packages directory to
+// // correct for version skew
+// strings // newer version of the strings package
+// ... //
+// app.yaml // app engine control file
+// go.zip // zip file containing the file system to serve
+// godoc // contains godoc sources
+// appinit.go // this file instead of godoc/main.go
+// appconfig.go // godoc configuration for app engine
+// ... //
+//
+// To run the app engine emulator locally:
+//
+// dev_appserver.py -a 0 godoc
+//
+// godoc is the top-level "goroot" directory.
+// The godoc home page is served at: <hostname>:8080 and localhost:8080.
+
+package main
+
+import (
+ "alt/archive/zip"
+ "http"
+ "log"
+ "os"
+)
+
+func serveError(w http.ResponseWriter, r *http.Request, relpath string, err os.Error) {
+ contents := applyTemplate(errorHTML, "errorHTML", err) // err may contain an absolute path!
+ w.WriteHeader(http.StatusNotFound)
+ servePage(w, "File "+relpath, "", "", contents)
+}
+
+func init() {
+ log.Println("initializing godoc ...")
+ *goroot = path.Join("/", zipGoroot) // fsHttp paths are relative to '/'
+
+ // read .zip file and set up file systems
+ const zipfile = zipFilename
+ rc, err := zip.OpenReader(zipfile)
+ if err != nil {
+ log.Fatalf("%s: %s\n", zipfile, err)
+ }
+ fs = NewZipFS(rc)
+ fsHttp = NewHttpZipFS(rc, *goroot)
+
+ // initialize http handlers
+ initHandlers()
+ readTemplates()
+ registerPublicHandlers(http.DefaultServeMux)
+
+ // initialize default directory tree with corresponding timestamp.
+ initFSTree()
+
+ // initialize directory trees for user-defined file systems (-path flag).
+ initDirTrees()
+
+ // create search index
+ // TODO(gri) Disabled for now as it takes too long. Find a solution for this.
+ /*
+ *indexEnabled = true
+ go indexer()
+ */
+
+ log.Println("godoc initialization complete")
+}
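
As a concrete sketch of the version-skew workaround described in the file
comment above, a godoc source file is adjusted by editing its imports (the
strings package here is just an illustration):

	// before: resolves against the (possibly older) SDK library
	import "strings"

	// after: resolves against the newer copy vendored under alt/;
	// the package is still named "strings", so no other code changes
	import "alt/strings"
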
diff --git a/src/cmd/godoc/codewalk.go b/src/cmd/godoc/codewalk.go
new file mode 100644
index 000000000..e2643e466
--- /dev/null
+++ b/src/cmd/godoc/codewalk.go
@@ -0,0 +1,487 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The /doc/codewalk/ tree is synthesized from codewalk descriptions,
+// files named $GOROOT/doc/codewalk/*.xml.
+// For an example and a description of the format, see
+// http://golang.org/doc/codewalk/codewalk or run godoc -http=:6060
+// and see http://localhost:6060/doc/codewalk/codewalk .
+// That page is itself a codewalk; the source code for it is
+// $GOROOT/doc/codewalk/codewalk.xml.
+
+package main
+
+import (
+ "container/vector"
+ "fmt"
+ "http"
+ "io"
+ "log"
+ "os"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "template"
+ "utf8"
+ "xml"
+)
+
+// Handler for /doc/codewalk/ and below.
+func codewalk(w http.ResponseWriter, r *http.Request) {
+ relpath := r.URL.Path[len("/doc/codewalk/"):]
+ abspath := absolutePath(r.URL.Path[1:], *goroot)
+
+ r.ParseForm()
+ if f := r.FormValue("fileprint"); f != "" {
+ codewalkFileprint(w, r, f)
+ return
+ }
+
+ // If directory exists, serve list of code walks.
+ dir, err := fs.Lstat(abspath)
+ if err == nil && dir.IsDirectory() {
+ codewalkDir(w, r, relpath, abspath)
+ return
+ }
+
+ // If file exists, serve using standard file server.
+ if err == nil {
+ serveFile(w, r)
+ return
+ }
+
+ // Otherwise append .xml and hope to find
+ // a codewalk description.
+ cw, err := loadCodewalk(abspath + ".xml")
+ if err != nil {
+ log.Print(err)
+ serveError(w, r, relpath, err)
+ return
+ }
+
+ // Canonicalize the path and redirect if changed
+ if redirect(w, r) {
+ return
+ }
+
+ b := applyTemplate(codewalkHTML, "codewalk", cw)
+ servePage(w, "Codewalk: "+cw.Title, "", "", b)
+}
+
+// A Codewalk represents a single codewalk read from an XML file.
+type Codewalk struct {
+ Title string `xml:"attr"`
+ File []string
+ Step []*Codestep
+}
+
+// A Codestep is a single step in a codewalk.
+type Codestep struct {
+ // Filled in from XML
+ Src string `xml:"attr"`
+ Title string `xml:"attr"`
+ XML string `xml:"innerxml"`
+
+ // Derived from Src; not in XML.
+ Err os.Error
+ File string
+ Lo int
+ LoByte int
+ Hi int
+ HiByte int
+ Data []byte
+}
+
+// String method for printing in template.
+// Formats file address nicely.
+func (st *Codestep) String() string {
+ s := st.File
+ if st.Lo != 0 || st.Hi != 0 {
+ s += fmt.Sprintf(":%d", st.Lo)
+ if st.Lo != st.Hi {
+ s += fmt.Sprintf(",%d", st.Hi)
+ }
+ }
+ return s
+}
+
+// loadCodewalk reads a codewalk from the named XML file.
+func loadCodewalk(filename string) (*Codewalk, os.Error) {
+ f, err := fs.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ cw := new(Codewalk)
+ p := xml.NewParser(f)
+ p.Entity = xml.HTMLEntity
+ err = p.Unmarshal(cw, nil)
+ if err != nil {
+ return nil, &os.PathError{"parsing", filename, err}
+ }
+
+ // Compute file list, evaluate line numbers for addresses.
+ m := make(map[string]bool)
+ for _, st := range cw.Step {
+ i := strings.Index(st.Src, ":")
+ if i < 0 {
+ i = len(st.Src)
+ }
+ filename := st.Src[0:i]
+ data, err := fs.ReadFile(absolutePath(filename, *goroot))
+ if err != nil {
+ st.Err = err
+ continue
+ }
+ if i < len(st.Src) {
+ lo, hi, err := addrToByteRange(st.Src[i+1:], 0, data)
+ if err != nil {
+ st.Err = err
+ continue
+ }
+ // Expand match to line boundaries.
+ for lo > 0 && data[lo-1] != '\n' {
+ lo--
+ }
+ for hi < len(data) && (hi == 0 || data[hi-1] != '\n') {
+ hi++
+ }
+ st.Lo = byteToLine(data, lo)
+ st.Hi = byteToLine(data, hi-1)
+ }
+ st.Data = data
+ st.File = filename
+ m[filename] = true
+ }
+
+ // Make list of files
+ cw.File = make([]string, len(m))
+ i := 0
+ for f := range m {
+ cw.File[i] = f
+ i++
+ }
+ sort.Strings(cw.File)
+
+ return cw, nil
+}
+
+// codewalkDir serves the codewalk directory listing.
+// It scans the directory for subdirectories or files named *.xml
+// and prepares a table.
+func codewalkDir(w http.ResponseWriter, r *http.Request, relpath, abspath string) {
+ type elem struct {
+ Name string
+ Title string
+ }
+
+ dir, err := fs.ReadDir(abspath)
+ if err != nil {
+ log.Print(err)
+ serveError(w, r, relpath, err)
+ return
+ }
+ var v vector.Vector
+ for _, fi := range dir {
+ name := fi.Name()
+ if fi.IsDirectory() {
+ v.Push(&elem{name + "/", ""})
+ } else if strings.HasSuffix(name, ".xml") {
+ cw, err := loadCodewalk(abspath + "/" + name)
+ if err != nil {
+ continue
+ }
+ v.Push(&elem{name[0 : len(name)-len(".xml")], cw.Title})
+ }
+ }
+
+ b := applyTemplate(codewalkdirHTML, "codewalkdir", v)
+ servePage(w, "Codewalks", "", "", b)
+}
+
+// codewalkFileprint serves requests with ?fileprint=f&lo=lo&hi=hi.
+// The filename f has already been retrieved and is passed as an argument.
+// Lo and hi are the numbers of the first and last line to highlight
+// in the response. This format is used for the middle window pane
+// of the codewalk pages. It is a separate iframe and does not get
+// the usual godoc HTML wrapper.
+func codewalkFileprint(w http.ResponseWriter, r *http.Request, f string) {
+ abspath := absolutePath(f, *goroot)
+ data, err := fs.ReadFile(abspath)
+ if err != nil {
+ log.Print(err)
+ serveError(w, r, f, err)
+ return
+ }
+ lo, _ := strconv.Atoi(r.FormValue("lo"))
+ hi, _ := strconv.Atoi(r.FormValue("hi"))
+ if hi < lo {
+ hi = lo
+ }
+ lo = lineToByte(data, lo)
+ hi = lineToByte(data, hi+1)
+
+ // Put the mark 4 lines before lo, so that the iframe
+ // shows a few lines of context before the highlighted
+ // section.
+ n := 4
+ mark := lo
+ for ; mark > 0 && n > 0; mark-- {
+ if data[mark-1] == '\n' {
+ if n--; n == 0 {
+ break
+ }
+ }
+ }
+
+ io.WriteString(w, `<style type="text/css">@import "/doc/codewalk/codewalk.css";</style><pre>`)
+ template.HTMLEscape(w, data[0:mark])
+ io.WriteString(w, "<a name='mark'></a>")
+ template.HTMLEscape(w, data[mark:lo])
+ if lo < hi {
+ io.WriteString(w, "<div class='codewalkhighlight'>")
+ template.HTMLEscape(w, data[lo:hi])
+ io.WriteString(w, "</div>")
+ }
+ template.HTMLEscape(w, data[hi:])
+ io.WriteString(w, "</pre>")
+}
+
+// addrToByteRange evaluates the given address starting at offset start in data.
+// It returns the lo and hi byte offset of the matched region within data.
+// See http://plan9.bell-labs.com/sys/doc/sam/sam.html Table II
+// for details on the syntax.
+func addrToByteRange(addr string, start int, data []byte) (lo, hi int, err os.Error) {
+ var (
+ dir byte
+ prevc byte
+ charOffset bool
+ )
+ lo = start
+ hi = start
+ for addr != "" && err == nil {
+ c := addr[0]
+ switch c {
+ default:
+ err = os.NewError("invalid address syntax near " + string(c))
+ case ',':
+ if len(addr) == 1 {
+ hi = len(data)
+ } else {
+ _, hi, err = addrToByteRange(addr[1:], hi, data)
+ }
+ return
+
+ case '+', '-':
+ if prevc == '+' || prevc == '-' {
+ lo, hi, err = addrNumber(data, lo, hi, prevc, 1, charOffset)
+ }
+ dir = c
+
+ case '$':
+ lo = len(data)
+ hi = len(data)
+ if len(addr) > 1 {
+ dir = '+'
+ }
+
+ case '#':
+ charOffset = true
+
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ var i int
+ for i = 1; i < len(addr); i++ {
+ if addr[i] < '0' || addr[i] > '9' {
+ break
+ }
+ }
+ var n int
+ n, err = strconv.Atoi(addr[0:i])
+ if err != nil {
+ break
+ }
+ lo, hi, err = addrNumber(data, lo, hi, dir, n, charOffset)
+ dir = 0
+ charOffset = false
+ prevc = c
+ addr = addr[i:]
+ continue
+
+ case '/':
+ var i, j int
+ Regexp:
+ for i = 1; i < len(addr); i++ {
+ switch addr[i] {
+ case '\\':
+ i++
+ case '/':
+ j = i + 1
+ break Regexp
+ }
+ }
+ if j == 0 {
+ j = i
+ }
+ pattern := addr[1:i]
+ lo, hi, err = addrRegexp(data, lo, hi, dir, pattern)
+ prevc = c
+ addr = addr[j:]
+ continue
+ }
+ prevc = c
+ addr = addr[1:]
+ }
+
+ if err == nil && dir != 0 {
+ lo, hi, err = addrNumber(data, lo, hi, dir, 1, charOffset)
+ }
+ if err != nil {
+ return 0, 0, err
+ }
+ return lo, hi, nil
+}
+
+// addrNumber applies the given dir, n, and charOffset to the address lo, hi.
+// dir is '+' or '-', n is the count, and charOffset is true if the syntax
+// used was #n. Applying +n (or +#n) means to advance n lines
+// (or characters) after hi. Applying -n (or -#n) means to back up n lines
+// (or characters) before lo.
+// The return value is the new lo, hi.
+func addrNumber(data []byte, lo, hi int, dir byte, n int, charOffset bool) (int, int, os.Error) {
+ switch dir {
+ case 0:
+ lo = 0
+ hi = 0
+ fallthrough
+
+ case '+':
+ if charOffset {
+ pos := hi
+ for ; n > 0 && pos < len(data); n-- {
+ _, size := utf8.DecodeRune(data[pos:])
+ pos += size
+ }
+ if n == 0 {
+ return pos, pos, nil
+ }
+ break
+ }
+ // find next beginning of line
+ if hi > 0 {
+ for hi < len(data) && data[hi-1] != '\n' {
+ hi++
+ }
+ }
+ lo = hi
+ if n == 0 {
+ return lo, hi, nil
+ }
+ for ; hi < len(data); hi++ {
+ if data[hi] != '\n' {
+ continue
+ }
+ switch n--; n {
+ case 1:
+ lo = hi + 1
+ case 0:
+ return lo, hi + 1, nil
+ }
+ }
+
+ case '-':
+ if charOffset {
+ // Scan backward for bytes that are not UTF-8 continuation bytes.
+ pos := lo
+ for ; pos > 0 && n > 0; pos-- {
+ if data[pos]&0xc0 != 0x80 {
+ n--
+ }
+ }
+ if n == 0 {
+ return pos, pos, nil
+ }
+ break
+ }
+ // find earlier beginning of line
+ for lo > 0 && data[lo-1] != '\n' {
+ lo--
+ }
+ hi = lo
+ if n == 0 {
+ return lo, hi, nil
+ }
+ for ; lo >= 0; lo-- {
+ if lo > 0 && data[lo-1] != '\n' {
+ continue
+ }
+ switch n--; n {
+ case 1:
+ hi = lo
+ case 0:
+ return lo, hi, nil
+ }
+ }
+ }
+
+ return 0, 0, os.NewError("address out of range")
+}
+
+// addrRegexp searches for pattern in the given direction starting at lo, hi.
+// The direction dir is '+' (search forward from hi) or '-' (search backward from lo).
+// Backward searches are unimplemented.
+func addrRegexp(data []byte, lo, hi int, dir byte, pattern string) (int, int, os.Error) {
+ re, err := regexp.Compile(pattern)
+ if err != nil {
+ return 0, 0, err
+ }
+ if dir == '-' {
+ // Could implement reverse search using binary search
+ // through file, but that seems like overkill.
+ return 0, 0, os.NewError("reverse search not implemented")
+ }
+ m := re.FindIndex(data[hi:])
+ if len(m) > 0 {
+ m[0] += hi
+ m[1] += hi
+ } else if hi > 0 {
+ // No match. Wrap to beginning of data.
+ m = re.FindIndex(data)
+ }
+ if len(m) == 0 {
+ return 0, 0, os.NewError("no match for " + pattern)
+ }
+ return m[0], m[1], nil
+}
+
+// lineToByte returns the byte index of the first byte of line n.
+// Line numbers begin at 1.
+func lineToByte(data []byte, n int) int {
+ if n <= 1 {
+ return 0
+ }
+ n--
+ for i, c := range data {
+ if c == '\n' {
+ if n--; n == 0 {
+ return i + 1
+ }
+ }
+ }
+ return len(data)
+}
+
+// byteToLine returns the number of the line containing the byte at index i.
+func byteToLine(data []byte, i int) int {
+ l := 1
+ for j, c := range data {
+ if j == i {
+ return l
+ }
+ if c == '\n' {
+ l++
+ }
+ }
+ return l
+}
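
A short sketch of how the address evaluator above is used. In practice the
address string comes from the src attribute of a codewalk step; the data and
the pattern below are made up for illustration:

	data := []byte("package main\n\nfunc main() {\n\tprintln(\"hi\")\n}\n")
	// "/func main/" selects the first match of the regular expression;
	// loadCodewalk then expands lo and hi to whole line boundaries.
	lo, hi, err := addrToByteRange("/func main/", 0, data)
	if err == nil {
		fmt.Printf("bytes [%d, %d), starting on line %d\n", lo, hi, byteToLine(data, lo))
	}
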
diff --git a/src/cmd/godoc/dirtrees.go b/src/cmd/godoc/dirtrees.go
new file mode 100644
index 000000000..aa590b363
--- /dev/null
+++ b/src/cmd/godoc/dirtrees.go
@@ -0,0 +1,342 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the code dealing with package directory trees.
+
+package main
+
+import (
+ "bytes"
+ "go/doc"
+ "go/parser"
+ "go/token"
+ "log"
+ "path/filepath"
+ "strings"
+ "unicode"
+)
+
+type Directory struct {
+ Depth int
+ Path string // includes Name
+ Name string
+ Text string // package documentation, if any
+ Dirs []*Directory // subdirectories
+}
+
+func isGoFile(fi FileInfo) bool {
+ name := fi.Name()
+ return fi.IsRegular() &&
+ len(name) > 0 && name[0] != '.' && // ignore .files
+ filepath.Ext(name) == ".go"
+}
+
+func isPkgFile(fi FileInfo) bool {
+ return isGoFile(fi) &&
+ !strings.HasSuffix(fi.Name(), "_test.go") // ignore test files
+}
+
+func isPkgDir(fi FileInfo) bool {
+ name := fi.Name()
+ return fi.IsDirectory() && len(name) > 0 &&
+ name[0] != '_' && name[0] != '.' // ignore _files and .files
+}
+
+func firstSentence(s string) string {
+ i := -1 // index+1 of first terminator (punctuation ending a sentence)
+ j := -1 // index+1 of first terminator followed by white space
+ prev := 'A'
+ for k, ch := range s {
+ k1 := k + 1
+ if ch == '.' || ch == '!' || ch == '?' {
+ if i < 0 {
+ i = k1 // first terminator
+ }
+ if k1 < len(s) && s[k1] <= ' ' {
+ if j < 0 {
+ j = k1 // first terminator followed by white space
+ }
+ if !unicode.IsUpper(prev) {
+ j = k1
+ break
+ }
+ }
+ }
+ prev = ch
+ }
+
+ if j < 0 {
+ // use the next best terminator
+ j = i
+ if j < 0 {
+ // no terminator at all, use the entire string
+ j = len(s)
+ }
+ }
+
+ return s[0:j]
+}
+
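
For illustration, firstSentence stops at the first terminator followed by
white space, and the uppercase check lets it read past single-letter
abbreviations (both inputs are made up):

	firstSentence("Package math provides basic constants. More text.")
	// == "Package math provides basic constants."

	firstSentence("Written by M. Smith. More text.")
	// == "Written by M. Smith." (the '.' after the capital M is skipped)
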
+type treeBuilder struct {
+ pathFilter func(string) bool
+ maxDepth int
+}
+
+func (b *treeBuilder) newDirTree(fset *token.FileSet, path, name string, depth int) *Directory {
+ if b.pathFilter != nil && !b.pathFilter(path) {
+ return nil
+ }
+
+ if depth >= b.maxDepth {
+ // return a dummy directory so that the parent directory
+ // doesn't get discarded just because we reached the max
+ // directory depth
+ return &Directory{depth, path, name, "", nil}
+ }
+
+ list, err := fs.ReadDir(path)
+ if err != nil {
+ // newDirTree is called with a path that should be a package
+ // directory; errors here should not happen, but if they do,
+ // we want to know about them
+ log.Printf("ReadDir(%s): %s", path, err)
+ }
+
+ // determine number of subdirectories and if there are package files
+ ndirs := 0
+ hasPkgFiles := false
+ var synopses [4]string // prioritized package documentation (0 == highest priority)
+ for _, d := range list {
+ switch {
+ case isPkgDir(d):
+ ndirs++
+ case isPkgFile(d):
+ // looks like a package file, but may just be a file ending in ".go";
+ // don't just count it yet (otherwise we may end up with hasPkgFiles even
+ // though the directory doesn't contain any real package files - was bug)
+ if synopses[0] == "" {
+ // no "optimal" package synopsis yet; continue to collect synopses
+ file, err := parser.ParseFile(fset, filepath.Join(path, d.Name()), nil,
+ parser.ParseComments|parser.PackageClauseOnly)
+ if err == nil {
+ hasPkgFiles = true
+ if file.Doc != nil {
+ // prioritize documentation
+ i := -1
+ switch file.Name.Name {
+ case name:
+ i = 0 // normal case: directory name matches package name
+ case fakePkgName:
+ i = 1 // synopses for commands
+ case "main":
+ i = 2 // directory contains a main package
+ default:
+ i = 3 // none of the above
+ }
+ if 0 <= i && i < len(synopses) && synopses[i] == "" {
+ synopses[i] = firstSentence(doc.CommentText(file.Doc))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // create subdirectory tree
+ var dirs []*Directory
+ if ndirs > 0 {
+ dirs = make([]*Directory, ndirs)
+ i := 0
+ for _, d := range list {
+ if isPkgDir(d) {
+ name := d.Name()
+ dd := b.newDirTree(fset, filepath.Join(path, name), name, depth+1)
+ if dd != nil {
+ dirs[i] = dd
+ i++
+ }
+ }
+ }
+ dirs = dirs[0:i]
+ }
+
+ // if there are no package files and no subdirectories
+ // containing package files, ignore the directory
+ if !hasPkgFiles && len(dirs) == 0 {
+ return nil
+ }
+
+ // select the highest-priority synopsis for the directory entry, if any
+ synopsis := ""
+ for _, synopsis = range synopses {
+ if synopsis != "" {
+ break
+ }
+ }
+
+ return &Directory{depth, path, name, synopsis, dirs}
+}
+
+// newDirectory creates a new package directory tree with at most maxDepth
+// levels, anchored at root. The result tree is pruned such that it only
+// contains directories that contain package files or that contain
+// subdirectories containing package files (transitively). If a non-nil
+// pathFilter is provided, directory paths additionally must be accepted
+// by the filter (i.e., pathFilter(path) must be true). If a value >= 0 is
+// provided for maxDepth, nodes at larger depths are pruned as well; they
+// are assumed to contain package files even if their contents are not known
+// (i.e., in this case the tree may contain directories w/o any package files).
+//
+func newDirectory(root string, pathFilter func(string) bool, maxDepth int) *Directory {
+ // The root could be a symbolic link so use Stat not Lstat.
+ d, err := fs.Stat(root)
+ // If we fail here, report detailed error messages; otherwise
+ // it is hard to see why a directory tree was not built.
+ switch {
+ case err != nil:
+ log.Printf("newDirectory(%s): %s", root, err)
+ return nil
+ case !isPkgDir(d):
+ log.Printf("newDirectory(%s): not a package directory", root)
+ return nil
+ }
+ if maxDepth < 0 {
+ maxDepth = 1e6 // "infinity"
+ }
+ b := treeBuilder{pathFilter, maxDepth}
+ // the file set provided is only for local parsing, no position
+ // information escapes and thus we don't need to save the set
+ return b.newDirTree(token.NewFileSet(), root, d.Name(), 0)
+}
+
+func (dir *Directory) writeLeafs(buf *bytes.Buffer) {
+ if dir != nil {
+ if len(dir.Dirs) == 0 {
+ buf.WriteString(dir.Path)
+ buf.WriteByte('\n')
+ return
+ }
+
+ for _, d := range dir.Dirs {
+ d.writeLeafs(buf)
+ }
+ }
+}
+
+func (dir *Directory) walk(c chan<- *Directory, skipRoot bool) {
+ if dir != nil {
+ if !skipRoot {
+ c <- dir
+ }
+ for _, d := range dir.Dirs {
+ d.walk(c, false)
+ }
+ }
+}
+
+func (dir *Directory) iter(skipRoot bool) <-chan *Directory {
+ c := make(chan *Directory)
+ go func() {
+ dir.walk(c, skipRoot)
+ close(c)
+ }()
+ return c
+}
+
+func (dir *Directory) lookupLocal(name string) *Directory {
+ for _, d := range dir.Dirs {
+ if d.Name == name {
+ return d
+ }
+ }
+ return nil
+}
+
+// lookup looks for the *Directory for a given path, relative to dir.
+func (dir *Directory) lookup(path string) *Directory {
+ d := strings.Split(dir.Path, string(filepath.Separator))
+ p := strings.Split(path, string(filepath.Separator))
+ i := 0
+ for i < len(d) {
+ if i >= len(p) || d[i] != p[i] {
+ return nil
+ }
+ i++
+ }
+ for dir != nil && i < len(p) {
+ dir = dir.lookupLocal(p[i])
+ i++
+ }
+ return dir
+}
+
+// DirEntry describes a directory entry. The Depth and Height values
+// are useful for presenting an entry in an indented fashion.
+//
+type DirEntry struct {
+ Depth int // >= 0
+ Height int // = DirList.MaxHeight - Depth, > 0
+ Path string // includes Name, relative to DirList root
+ Name string
+ Synopsis string
+}
+
+type DirList struct {
+ MaxHeight int // directory tree height, > 0
+ List []DirEntry
+}
+
+// listing creates a (linear) directory listing from a directory tree.
+// If skipRoot is set, the root directory itself is excluded from the list.
+//
+func (root *Directory) listing(skipRoot bool) *DirList {
+ if root == nil {
+ return nil
+ }
+
+ // determine number of entries n and maximum height
+ n := 0
+ minDepth := 1 << 30 // infinity
+ maxDepth := 0
+ for d := range root.iter(skipRoot) {
+ n++
+ if minDepth > d.Depth {
+ minDepth = d.Depth
+ }
+ if maxDepth < d.Depth {
+ maxDepth = d.Depth
+ }
+ }
+ maxHeight := maxDepth - minDepth + 1
+
+ if n == 0 {
+ return nil
+ }
+
+ // create list
+ list := make([]DirEntry, n)
+ i := 0
+ for d := range root.iter(skipRoot) {
+ p := &list[i]
+ p.Depth = d.Depth - minDepth
+ p.Height = maxHeight - p.Depth
+ // the path is relative to root.Path - remove the root.Path
+ // prefix (the prefix should always be present but avoid
+ // crashes and check)
+ path := d.Path
+ if strings.HasPrefix(d.Path, root.Path) {
+ path = d.Path[len(root.Path):]
+ }
+ // remove trailing separator if any - path must be relative
+ if len(path) > 0 && path[0] == filepath.Separator {
+ path = path[1:]
+ }
+ p.Path = path
+ p.Name = d.Name
+ p.Synopsis = d.Text
+ i++
+ }
+
+ return &DirList{maxHeight, list}
+}
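
To make the Depth and Height bookkeeping concrete: for a tree rooted at
src/pkg containing packages src/pkg/a and src/pkg/a/b, listing(true) skips
the root and yields (a sketch with hypothetical paths):

	MaxHeight == 2
	{Depth: 0, Height: 2, Path: "a", Name: "a"}
	{Depth: 1, Height: 1, Path: "a/b", Name: "b"}

so a template can indent each entry by Depth and size it by Height.
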
diff --git a/src/cmd/godoc/doc.go b/src/cmd/godoc/doc.go
new file mode 100644
index 000000000..dc98b0eca
--- /dev/null
+++ b/src/cmd/godoc/doc.go
@@ -0,0 +1,130 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Godoc extracts and generates documentation for Go programs.
+
+It has two modes.
+
+Without the -http flag, it runs in command-line mode and prints plain text
+documentation to standard output and exits. If the -src flag is specified,
+godoc prints the exported interface of a package in Go source form, or the
+implementation of a specific exported language entity:
+
+ godoc fmt # documentation for package fmt
+ godoc fmt Printf # documentation for fmt.Printf
+ godoc -src fmt # fmt package interface in Go source form
+ godoc -src fmt Printf # implementation of fmt.Printf
+
+In command-line mode, the -q flag enables search queries against a godoc running
+as a webserver. If no explicit server address is specified with the -server flag,
+godoc first tries localhost:6060 and then http://golang.org.
+
+ godoc -q Reader Writer
+ godoc -q math.Sin
+ godoc -server=:6060 -q sin
+
+With the -http flag, it runs as a web server and presents the documentation as a
+web page.
+
+ godoc -http=:6060
+
+Usage:
+ godoc [flag] package [name ...]
+
+The flags are:
+ -v
+ verbose mode
+ -q
+ arguments are considered search queries: a legal query is a
+ single identifier (such as ToLower) or a qualified identifier
+ (such as math.Sin).
+ -src
+ print (exported) source in command-line mode
+ -tabwidth=4
+ width of tabs in units of spaces
+ -timestamps=true
+ show timestamps with directory listings
+ -index
+ enable identifier and full text search index
+ (no search box is shown if -index is not set)
+ -maxresults=10000
+ maximum number of full text search results shown
+ (no full text index is built if maxresults <= 0)
+ -path=""
+ additional package directories (colon-separated)
+ -html
+ print HTML in command-line mode
+ -goroot=$GOROOT
+ Go root directory
+ -http=addr
+ HTTP service address (e.g., '127.0.0.1:6060' or just ':6060')
+ -server=addr
+ webserver address for command line searches
+ -sync="command"
+ if this and -sync_minutes are set, run the argument as a
+ command every sync_minutes; it is intended to update the
+ repository holding the source files.
+ -sync_minutes=0
+ sync interval in minutes; sync is disabled if <= 0
+ -filter=""
+ filter file containing permitted package directory paths
+ -filter_minutes=0
+ filter file update interval in minutes; update is disabled if <= 0
+ -zip=""
+ zip file providing the file system to serve; disabled if empty
+
+The -path flag accepts a list of colon-separated paths; unrooted paths are relative
+to the current working directory. Each path is considered as an additional root for
+packages in order of appearance. The last (absolute) path element is the prefix for
+the package path. For instance, given the flag value:
+
+ path=".:/home/bar:/public"
+
+for a godoc started in /home/user/godoc, absolute paths are mapped to package paths
+as follows:
+
+ /home/user/godoc/x -> godoc/x
+ /home/bar/x -> bar/x
+ /public/x -> public/x
+
+Paths provided via -path may point to very large file systems that contain
+non-Go files. Creating the subtree of directories with Go packages may take
+a long time. A file containing newline-separated directory paths
+may be provided with the -filter flag; if it exists, only directories
+on those paths are considered. If -filter_minutes is set, the filter file is
+updated regularly by walking the entire directory tree.
+
+When godoc runs as a web server and -index is set, a search index is maintained.
+The index is created at startup and is automatically updated every time the
+-sync command terminates with exit status 0, indicating that files have changed.
+
+If the sync exit status is 1, godoc assumes that it succeeded without errors
+but that no files changed; the index is not updated in this case.
+
+In all other cases, sync is assumed to have failed and godoc backs off running
+sync exponentially (up to 1 day). As soon as sync succeeds again (exit status 0
+or 1), the normal sync rhythm is re-established.
+
+The index contains both identifier and full text search information (searchable
+via regular expressions). The maximum number of full text search results shown
+can be set with the -maxresults flag; if set to 0, no full text results are
+shown, and only an identifier index but no full text search index is created.
+
+By default, godoc serves files from the file system of the underlying OS.
+Instead, a .zip file may be provided via the -zip flag, which contains
+the file system to serve. The file paths stored in the .zip file must use
+slash ('/') as path separator, and they must be unrooted. $GOROOT (or -goroot)
+must be set to the .zip file directory path containing the Go root directory.
+For instance, for a .zip file created by the command:
+
+ zip -r go.zip $HOME/go
+
+one may run godoc as follows:
+
+ godoc -http=:6060 -zip=go.zip -goroot=$HOME/go
+
+*/
+package documentation
diff --git a/src/cmd/godoc/filesystem.go b/src/cmd/godoc/filesystem.go
new file mode 100644
index 000000000..a68c08592
--- /dev/null
+++ b/src/cmd/godoc/filesystem.go
@@ -0,0 +1,104 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines types for abstract file system access and
+// provides an implementation accessing the file system of the
+// underlying OS.
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+)
+
+// The FileInfo interface provides access to file information.
+type FileInfo interface {
+ Name() string
+ Size() int64
+ Mtime_ns() int64
+ IsRegular() bool
+ IsDirectory() bool
+}
+
+// The FileSystem interface specifies the methods godoc is using
+// to access the file system for which it serves documentation.
+type FileSystem interface {
+ Open(path string) (io.ReadCloser, os.Error)
+ Lstat(path string) (FileInfo, os.Error)
+ Stat(path string) (FileInfo, os.Error)
+ ReadDir(path string) ([]FileInfo, os.Error)
+ ReadFile(path string) ([]byte, os.Error)
+}
+
+// ----------------------------------------------------------------------------
+// OS-specific FileSystem implementation
+
+var OS FileSystem = osFS{}
+
+// osFI is the OS-specific implementation of FileInfo.
+type osFI struct {
+ *os.FileInfo
+}
+
+func (fi osFI) Name() string {
+ return fi.FileInfo.Name
+}
+
+func (fi osFI) Size() int64 {
+ if fi.IsDirectory() {
+ return 0
+ }
+ return fi.FileInfo.Size
+}
+
+func (fi osFI) Mtime_ns() int64 {
+ return fi.FileInfo.Mtime_ns
+}
+
+// osFS is the OS-specific implementation of FileSystem
+type osFS struct{}
+
+func (osFS) Open(path string) (io.ReadCloser, os.Error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ if fi.IsDirectory() {
+ return nil, fmt.Errorf("Open: %s is a directory", path)
+ }
+ return f, nil
+}
+
+func (osFS) Lstat(path string) (FileInfo, os.Error) {
+ fi, err := os.Lstat(path)
+ return osFI{fi}, err
+}
+
+func (osFS) Stat(path string) (FileInfo, os.Error) {
+ fi, err := os.Stat(path)
+ return osFI{fi}, err
+}
+
+func (osFS) ReadDir(path string) ([]FileInfo, os.Error) {
+ l0, err := ioutil.ReadDir(path) // l0 is sorted
+ if err != nil {
+ return nil, err
+ }
+ l1 := make([]FileInfo, len(l0))
+ for i, e := range l0 {
+ l1[i] = osFI{e}
+ }
+ return l1, nil
+}
+
+func (osFS) ReadFile(path string) ([]byte, os.Error) {
+ return ioutil.ReadFile(path)
+}
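
A minimal sketch of code written against this abstraction, so the same logic
works with the OS back end and with the zip back end (NewZipFS, defined in
zip.go); isGoFile is the helper from dirtrees.go:

	func listGoFiles(fs FileSystem, dir string) ([]string, os.Error) {
		fis, err := fs.ReadDir(dir)
		if err != nil {
			return nil, err
		}
		var names []string
		for _, fi := range fis {
			if isGoFile(fi) {
				names = append(names, fi.Name())
			}
		}
		return names, nil
	}
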
diff --git a/src/cmd/godoc/format.go b/src/cmd/godoc/format.go
new file mode 100644
index 000000000..78dde4166
--- /dev/null
+++ b/src/cmd/godoc/format.go
@@ -0,0 +1,358 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements FormatSelections and FormatText.
+// FormatText is used to HTML-format Go and non-Go source
+// text with line numbers and highlighted sections. It is
+// built on top of FormatSelections, a generic formatter
+// for "selected" text.
+
+package main
+
+import (
+ "fmt"
+ "go/scanner"
+ "go/token"
+ "io"
+ "regexp"
+ "strconv"
+ "template"
+)
+
+// ----------------------------------------------------------------------------
+// Implementation of FormatSelections
+
+// A Selection is a function returning offset pairs []int{a, b}
+// describing consecutive non-overlapping text segments [a, b).
+// If there are no more segments, a Selection must return nil.
+//
+// TODO It's more efficient to return a pair (a, b int) instead
+// of creating lots of slices. Need to determine how to
+// indicate the end of a Selection.
+//
+type Selection func() []int
+
+// A LinkWriter writes some start or end "tag" to w for the text offset offs.
+// It is called by FormatSelections at the start or end of each link segment.
+//
+type LinkWriter func(w io.Writer, offs int, start bool)
+
+// A SegmentWriter formats a text according to selections and writes it to w.
+// The selections parameter is a bit set indicating which selections provided
+// to FormatSelections overlap with the text segment: If the n'th bit is set
+// in selections, the n'th selection provided to FormatSelections is overlapping
+// with the text.
+//
+type SegmentWriter func(w io.Writer, text []byte, selections int)
+
+// FormatSelections takes a text and writes it to w using link and segment
+// writers lw and sw as follows: lw is invoked for consecutive segment starts
+// and ends as specified through the links selection, and sw is invoked for
+// consecutive segments of text overlapped by the same selections as specified
+// by selections. The link writer lw may be nil, in which case the links
+// Selection is ignored.
+//
+func FormatSelections(w io.Writer, text []byte, lw LinkWriter, links Selection, sw SegmentWriter, selections ...Selection) {
+ if lw != nil {
+ selections = append(selections, links)
+ }
+
+ // compute the sequence of consecutive segment changes
+ changes := newMerger(selections)
+
+ // The i'th bit in bitset indicates that the text
+ // at the current offset is covered by selections[i].
+ bitset := 0
+ lastOffs := 0
+
+ // Text segments are written in a delayed fashion
+ // such that consecutive segments belonging to the
+ // same selection can be combined (peephole optimization).
+ // last describes the last segment which has not yet been written.
+ var last struct {
+ begin, end int // valid if begin < end
+ bitset int
+ }
+
+ // flush writes the last delayed text segment
+ flush := func() {
+ if last.begin < last.end {
+ sw(w, text[last.begin:last.end], last.bitset)
+ }
+ last.begin = last.end // invalidate last
+ }
+
+ // segment runs the segment [lastOffs, end) with the selection
+ // indicated by bitset through the segment peephole optimizer.
+ segment := func(end int) {
+ if lastOffs < end { // ignore empty segments
+ if last.end != lastOffs || last.bitset != bitset {
+ // the last segment is not adjacent to or
+ // differs from the new one
+ flush()
+ // start a new segment
+ last.begin = lastOffs
+ }
+ last.end = end
+ last.bitset = bitset
+ }
+ }
+
+ for {
+ // get the next segment change
+ index, offs, start := changes.next()
+ if index < 0 || offs > len(text) {
+ // no more segment changes or the next change
+ // is past the end of the text - we're done
+ break
+ }
+ // determine the kind of segment change
+ if index == len(selections)-1 {
+ // we have a link segment change:
+ // format the previous selection segment, write the
+ // link tag and start a new selection segment
+ segment(offs)
+ flush()
+ lastOffs = offs
+ lw(w, offs, start)
+ } else {
+ // we have a selection change:
+ // format the previous selection segment, determine
+ // the new selection bitset and start a new segment
+ segment(offs)
+ lastOffs = offs
+ mask := 1 << uint(index)
+ if start {
+ bitset |= mask
+ } else {
+ bitset &^= mask
+ }
+ }
+ }
+ segment(len(text))
+ flush()
+}
+
+// A merger merges a slice of Selections and produces a sequence of
+// consecutive segment change events through repeated next() calls.
+//
+type merger struct {
+ selections []Selection
+ segments [][]int // segments[i] is the next segment of selections[i]
+}
+
+const infinity int = 2e9
+
+func newMerger(selections []Selection) *merger {
+ segments := make([][]int, len(selections))
+ for i, sel := range selections {
+ segments[i] = []int{infinity, infinity}
+ if sel != nil {
+ if seg := sel(); seg != nil {
+ segments[i] = seg
+ }
+ }
+ }
+ return &merger{selections, segments}
+}
+
+// next returns the next segment change: index specifies the Selection
+// to which the segment belongs, offs is the segment start or end offset
+// as determined by the start value. If there are no more segment changes,
+// next returns an index value < 0.
+//
+func (m *merger) next() (index, offs int, start bool) {
+ // find the next smallest offset where a segment starts or ends
+ offs = infinity
+ index = -1
+ for i, seg := range m.segments {
+ switch {
+ case seg[0] < offs:
+ offs = seg[0]
+ index = i
+ start = true
+ case seg[1] < offs:
+ offs = seg[1]
+ index = i
+ start = false
+ }
+ }
+ if index < 0 {
+ // no offset found => all selections merged
+ return
+ }
+ // offset found - it's either the start or end offset but
+ // either way it is ok to consume the start offset: set it
+ // to infinity so it won't be considered in the following
+ // next call
+ m.segments[index][0] = infinity
+ if start {
+ return
+ }
+ // end offset found - consume it
+ m.segments[index][1] = infinity
+ // advance to the next segment for that selection
+ seg := m.selections[index]()
+ if seg == nil {
+ return
+ }
+ m.segments[index] = seg
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Implementation of FormatText
+
+// lineSelection returns the line segments for text as a Selection.
+func lineSelection(text []byte) Selection {
+ i, j := 0, 0
+ return func() (seg []int) {
+ // find next newline, if any
+ for j < len(text) {
+ j++
+ if text[j-1] == '\n' {
+ break
+ }
+ }
+ if i < j {
+ // text[i:j] constitutes a line
+ seg = []int{i, j}
+ i = j
+ }
+ return
+ }
+}
+
+// commentSelection returns the sequence of consecutive comments
+// in the Go src text as a Selection.
+//
+func commentSelection(src []byte) Selection {
+ var s scanner.Scanner
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(src))
+ s.Init(file, src, nil, scanner.ScanComments+scanner.InsertSemis)
+ return func() (seg []int) {
+ for {
+ pos, tok, lit := s.Scan()
+ if tok == token.EOF {
+ break
+ }
+ offs := file.Offset(pos)
+ if tok == token.COMMENT {
+ seg = []int{offs, offs + len(lit)}
+ break
+ }
+ }
+ return
+ }
+}
+
+// makeSelection is a helper function to make a Selection from a slice of pairs.
+func makeSelection(matches [][]int) Selection {
+ return func() (seg []int) {
+ if len(matches) > 0 {
+ seg = matches[0]
+ matches = matches[1:]
+ }
+ return
+ }
+}
+
+// regexpSelection computes the Selection for the regular expression expr in text.
+func regexpSelection(text []byte, expr string) Selection {
+ var matches [][]int
+ if rx, err := regexp.Compile(expr); err == nil {
+ matches = rx.FindAllIndex(text, -1)
+ }
+ return makeSelection(matches)
+}
+
+var selRx = regexp.MustCompile(`^([0-9]+):([0-9]+)`)
+
+// rangeSelection computes the Selection for a text range described
+// by the argument str; the range description must match the selRx
+// regular expression.
+//
+func rangeSelection(str string) Selection {
+ m := selRx.FindStringSubmatch(str)
+ if len(m) >= 2 {
+ from, _ := strconv.Atoi(m[1])
+ to, _ := strconv.Atoi(m[2])
+ if from < to {
+ return makeSelection([][]int{{from, to}})
+ }
+ }
+ return nil
+}
+
+// Span tags for all the possible selection combinations that may
+// be generated by FormatText. Selections are indicated by a bitset,
+// and the value of the bitset specifies the tag to be used.
+//
+// bit 0: comments
+// bit 1: highlights
+// bit 2: selections
+//
+var startTags = [][]byte{
+ /* 000 */ []byte(``),
+ /* 001 */ []byte(`<span class="comment">`),
+ /* 010 */ []byte(`<span class="highlight">`),
+ /* 011 */ []byte(`<span class="highlight-comment">`),
+ /* 100 */ []byte(`<span class="selection">`),
+ /* 101 */ []byte(`<span class="selection-comment">`),
+ /* 110 */ []byte(`<span class="selection-highlight">`),
+ /* 111 */ []byte(`<span class="selection-highlight-comment">`),
+}
+
+var endTag = []byte(`</span>`)
+
+func selectionTag(w io.Writer, text []byte, selections int) {
+ if selections < len(startTags) {
+ if tag := startTags[selections]; len(tag) > 0 {
+ w.Write(tag)
+ template.HTMLEscape(w, text)
+ w.Write(endTag)
+ return
+ }
+ }
+ template.HTMLEscape(w, text)
+}
+
+// FormatText HTML-escapes text and writes it to w.
+// Consecutive text segments are wrapped in HTML spans (with tags as
+// defined by startTags and endTag) as follows:
+//
+// - if line >= 0, line number (ln) spans are inserted before each line,
+// starting with the value of line
+// - if the text is Go source, comments get the "comment" span class
+// - each occurrence of the regular expression pattern gets the "highlight"
+// span class
+// - text segments covered by selection get the "selection" span class
+//
+// Comments, highlights, and selections may overlap arbitrarily; the respective
+// HTML span classes are specified in the startTags variable.
+//
+func FormatText(w io.Writer, text []byte, line int, goSource bool, pattern string, selection Selection) {
+ var comments, highlights Selection
+ if goSource {
+ comments = commentSelection(text)
+ }
+ if pattern != "" {
+ highlights = regexpSelection(text, pattern)
+ }
+ if line >= 0 || comments != nil || highlights != nil || selection != nil {
+ var lineTag LinkWriter
+ if line >= 0 {
+ lineTag = func(w io.Writer, _ int, start bool) {
+ if start {
+ fmt.Fprintf(w, "<a id=\"L%d\"></a><span class=\"ln\">%6d</span>\t", line, line)
+ line++
+ }
+ }
+ }
+ FormatSelections(w, text, lineTag, lineSelection(text), selectionTag, comments, highlights, selection)
+ } else {
+ template.HTMLEscape(w, text)
+ }
+}
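
A small usage sketch for FormatText (the input text is made up): format
plain text with line numbers starting at 1 and highlight every occurrence
of "hello":

	var buf bytes.Buffer
	text := []byte("hello, world\nhello again\n")
	FormatText(&buf, text, 1, false, `hello`, nil)
	// buf now holds HTML: each line starts with an
	// <a id="L..."></a><span class="ln">...</span> line-number tag, and
	// each match is wrapped in <span class="highlight">...</span>.
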
diff --git a/src/cmd/godoc/godoc.go b/src/cmd/godoc/godoc.go
new file mode 100644
index 000000000..b8a839404
--- /dev/null
+++ b/src/cmd/godoc/godoc.go
@@ -0,0 +1,1159 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/doc"
+ "go/printer"
+ "go/token"
+ "http"
+ "io"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "sort"
+ "strings"
+ "template"
+ "time"
+)
+
+// ----------------------------------------------------------------------------
+// Globals
+
+type delayTime struct {
+ RWValue
+}
+
+func (dt *delayTime) backoff(max int) {
+ dt.mutex.Lock()
+ v := dt.value.(int) * 2
+ if v > max {
+ v = max
+ }
+ dt.value = v
+ // don't change dt.timestamp - calling backoff indicates an error condition
+ dt.mutex.Unlock()
+}
+
+var (
+ verbose = flag.Bool("v", false, "verbose mode")
+
+ // file system roots
+ // TODO(gri) consider the invariant that goroot always ends in '/'
+ goroot = flag.String("goroot", runtime.GOROOT(), "Go root directory")
+ testDir = flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
+ pkgPath = flag.String("path", "", "additional package directories (colon-separated)")
+ filter = flag.String("filter", "", "filter file containing permitted package directory paths")
+ filterMin = flag.Int("filter_minutes", 0, "filter file update interval in minutes; disabled if <= 0")
+ filterDelay delayTime // actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
+
+ // layout control
+ tabwidth = flag.Int("tabwidth", 4, "tab width")
+ showTimestamps = flag.Bool("timestamps", true, "show timestamps with directory listings")
+ templateDir = flag.String("templates", "", "directory containing alternate template files")
+
+ // search index
+ indexEnabled = flag.Bool("index", false, "enable search index")
+ maxResults = flag.Int("maxresults", 10000, "maximum number of full text search results shown")
+
+ // file system mapping
+ fs FileSystem // the underlying file system for godoc
+ fsHttp http.FileSystem // the underlying file system for http
+ fsMap Mapping // user-defined mapping
+ fsTree RWValue // *Directory tree of packages, updated with each sync
+ pathFilter RWValue // filter used when building fsMap directory trees
+ fsModified RWValue // timestamp of last call to invalidateIndex
+
+ // http handlers
+ fileServer http.Handler // default file server
+ cmdHandler httpHandler
+ pkgHandler httpHandler
+)
+
+func initHandlers() {
+ paths := filepath.SplitList(*pkgPath)
+ for _, t := range build.Path {
+ if t.Goroot {
+ continue
+ }
+ paths = append(paths, t.SrcDir())
+ }
+ fsMap.Init(paths)
+
+ fileServer = http.FileServer(fsHttp)
+ cmdHandler = httpHandler{"/cmd/", filepath.Join(*goroot, "src", "cmd"), false}
+ pkgHandler = httpHandler{"/pkg/", filepath.Join(*goroot, "src", "pkg"), true}
+}
+
+func registerPublicHandlers(mux *http.ServeMux) {
+ mux.Handle(cmdHandler.pattern, &cmdHandler)
+ mux.Handle(pkgHandler.pattern, &pkgHandler)
+ mux.HandleFunc("/doc/codewalk/", codewalk)
+ mux.HandleFunc("/search", search)
+ mux.Handle("/robots.txt", fileServer)
+ mux.HandleFunc("/", serveFile)
+}
+
+func initFSTree() {
+ fsTree.set(newDirectory(filepath.Join(*goroot, *testDir), nil, -1))
+ invalidateIndex()
+}
+
+// ----------------------------------------------------------------------------
+// Directory filters
+
+// isParentOf returns true if p is a parent of (or the same as) q
+// where p and q are directory paths.
+func isParentOf(p, q string) bool {
+ n := len(p)
+ return strings.HasPrefix(q, p) && (len(q) <= n || q[n] == '/')
+}
+
+func setPathFilter(list []string) {
+ if len(list) == 0 {
+ pathFilter.set(nil)
+ return
+ }
+
+ // len(list) > 0
+ pathFilter.set(func(path string) bool {
+ // list is sorted in increasing order and for each path all its children are removed
+ i := sort.Search(len(list), func(i int) bool { return list[i] > path })
+ // Now we have list[i-1] <= path < list[i].
+ // Path may be a child of list[i-1] or a parent of list[i].
+ return i > 0 && isParentOf(list[i-1], path) || i < len(list) && isParentOf(path, list[i])
+ })
+}
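
The binary search relies on the invariant established by readDirList below:
the list is sorted and no entry is a child of another entry. A sketch of the
resulting filter behavior for the hypothetical list {"/home/bar", "/public"}:

	filter("/home/bar/x") == true  // child of a listed directory
	filter("/home")       == true  // parent of a listed directory
	filter("/homestead")  == false // merely shares a string prefix
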
+
+func getPathFilter() func(string) bool {
+ f, _ := pathFilter.get()
+ if f != nil {
+ return f.(func(string) bool)
+ }
+ return nil
+}
+
+// readDirList reads a file containing a newline-separated list
+// of directory paths and returns the list of paths.
+func readDirList(filename string) ([]string, os.Error) {
+ contents, err := fs.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ // create a sorted list of valid directory names
+ filter := func(path string) bool {
+ d, e := fs.Lstat(path)
+ if e != nil && err == nil {
+ // remember first error and return it from readDirList
+ // so we have at least some information if things go bad
+ err = e
+ }
+ return e == nil && isPkgDir(d)
+ }
+ list := canonicalizePaths(strings.Split(string(contents), "\n"), filter)
+ // for each parent path, remove all its children q
+ // (requirement for binary search to work when filtering)
+ i := 0
+ for _, q := range list {
+ if i == 0 || !isParentOf(list[i-1], q) {
+ list[i] = q
+ i++
+ }
+ }
+ return list[0:i], err
+}
+
+// updateMappedDirs computes the directory tree for
+// each user-defined file system mapping. If a filter
+// is provided, it is used to filter directories.
+//
+func updateMappedDirs(filter func(string) bool) {
+ if !fsMap.IsEmpty() {
+ fsMap.Iterate(func(path string, value *RWValue) bool {
+ value.set(newDirectory(path, filter, -1))
+ return true
+ })
+ invalidateIndex()
+ }
+}
+
+func updateFilterFile() {
+ updateMappedDirs(nil) // no filter for accuracy
+
+ // collect directory tree leaf node paths
+ var buf bytes.Buffer
+ fsMap.Iterate(func(_ string, value *RWValue) bool {
+ v, _ := value.get()
+ if v != nil && v.(*Directory) != nil {
+ v.(*Directory).writeLeafs(&buf)
+ }
+ return true
+ })
+
+ // update filter file
+ if err := writeFileAtomically(*filter, buf.Bytes()); err != nil {
+ log.Printf("writeFileAtomically(%s): %s", *filter, err)
+ filterDelay.backoff(24 * 60) // back off exponentially, but try at least once a day
+ } else {
+ filterDelay.set(*filterMin) // revert to regular filter update schedule
+ }
+}
+
+func initDirTrees() {
+ // setup initial path filter
+ if *filter != "" {
+ list, err := readDirList(*filter)
+ if err != nil {
+ log.Printf("readDirList(%s): %s", *filter, err)
+ }
+ if *verbose || len(list) == 0 {
+ log.Printf("found %d directory paths in file %s", len(list), *filter)
+ }
+ setPathFilter(list)
+ }
+
+ go updateMappedDirs(getPathFilter()) // use filter for speed
+
+ // start filter update goroutine, if enabled.
+ if *filter != "" && *filterMin > 0 {
+ filterDelay.set(*filterMin) // initial filter update delay
+ go func() {
+ for {
+ if *verbose {
+ log.Printf("start update of %s", *filter)
+ }
+ updateFilterFile()
+ delay, _ := filterDelay.get()
+ if *verbose {
+ log.Printf("next filter update in %dmin", delay.(int))
+ }
+ time.Sleep(int64(delay.(int)) * 60e9)
+ }
+ }()
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Path mapping
+
+// Absolute paths are file system paths (backslash-separated on Windows),
+// but relative paths are always slash-separated.
+
+func absolutePath(relpath, defaultRoot string) string {
+ abspath := fsMap.ToAbsolute(relpath)
+ if abspath == "" {
+ // no user-defined mapping found; use default mapping
+ abspath = filepath.Join(defaultRoot, filepath.FromSlash(relpath))
+ }
+ return abspath
+}
+
+func relativeURL(abspath string) string {
+ relpath := fsMap.ToRelative(abspath)
+ if relpath == "" {
+ // prefix must end in a path separator
+ prefix := *goroot
+ if len(prefix) > 0 && prefix[len(prefix)-1] != filepath.Separator {
+ prefix += string(filepath.Separator)
+ }
+ if strings.HasPrefix(abspath, prefix) {
+ // no user-defined mapping found; use default mapping
+ relpath = filepath.ToSlash(abspath[len(prefix):])
+ }
+ }
+ // Only if path is an invalid absolute path is relpath == ""
+ // at this point. This should never happen since absolute paths
+ // are only created via godoc for files that do exist. However,
+ // it is ok to return ""; it will simply provide a link to the
+ // top of the pkg or src directories.
+ return relpath
+}
+
+// ----------------------------------------------------------------------------
+// Tab conversion
+
+var spaces = []byte(" ") // 32 spaces seems like a good number
+
+const (
+ indenting = iota
+ collecting
+)
+
+// A tconv is an io.Writer filter for converting leading tabs into spaces.
+type tconv struct {
+ output io.Writer
+ state int // indenting or collecting
+ indent int // valid if state == indenting
+}
+
+func (p *tconv) writeIndent() (err os.Error) {
+ i := p.indent
+ for i >= len(spaces) {
+ i -= len(spaces)
+ if _, err = p.output.Write(spaces); err != nil {
+ return
+ }
+ }
+ // i < len(spaces)
+ if i > 0 {
+ _, err = p.output.Write(spaces[0:i])
+ }
+ return
+}
+
+func (p *tconv) Write(data []byte) (n int, err os.Error) {
+ if len(data) == 0 {
+ return
+ }
+ pos := 0 // valid if p.state == collecting
+ var b byte
+ for n, b = range data {
+ switch p.state {
+ case indenting:
+ switch b {
+ case '\t':
+ p.indent += *tabwidth
+ case '\n':
+ p.indent = 0
+ if _, err = p.output.Write(data[n : n+1]); err != nil {
+ return
+ }
+ case ' ':
+ p.indent++
+ default:
+ p.state = collecting
+ pos = n
+ if err = p.writeIndent(); err != nil {
+ return
+ }
+ }
+ case collecting:
+ if b == '\n' {
+ p.state = indenting
+ p.indent = 0
+ if _, err = p.output.Write(data[pos : n+1]); err != nil {
+ return
+ }
+ }
+ }
+ }
+ n = len(data)
+ if pos < n && p.state == collecting {
+ _, err = p.output.Write(data[pos:])
+ }
+ return
+}
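
For instance, with the default *tabwidth of 4, the filter rewrites only the
leading tabs of each line (a sketch):

	var buf bytes.Buffer
	w := &tconv{output: &buf}
	w.Write([]byte("\tx\ty\n"))
	// buf.String() == "    x\ty\n": the leading tab becomes four spaces,
	// while the tab after x is left alone (state == collecting).
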
+
+// ----------------------------------------------------------------------------
+// Templates
+
+// Write an AST node to w.
+func writeNode(w io.Writer, fset *token.FileSet, x interface{}) {
+ // convert trailing tabs into spaces using a tconv filter
+ // to ensure a good outcome in most browsers (there may still
+ // be tabs in comments and strings, but converting those into
+ // the right number of spaces is much harder)
+ //
+ // TODO(gri) rethink printer flags - perhaps tconv can be eliminated
+ // with an another printer mode (which is more efficiently
+ // implemented in the printer than here with another layer)
+ mode := printer.TabIndent | printer.UseSpaces
+ (&printer.Config{mode, *tabwidth}).Fprint(&tconv{output: w}, fset, x)
+}
+
+func filenameFunc(path string) string {
+ _, localname := filepath.Split(path)
+ return localname
+}
+
+func fileInfoNameFunc(fi FileInfo) string {
+ name := fi.Name()
+ if fi.IsDirectory() {
+ name += "/"
+ }
+ return name
+}
+
+func fileInfoTimeFunc(fi FileInfo) string {
+ if t := fi.Mtime_ns(); t != 0 {
+ return time.SecondsToLocalTime(t / 1e9).String()
+ }
+ return "" // don't return epoch if time is obviously not set
+}
+
+// The strings in infoKinds must be properly html-escaped.
+var infoKinds = [nKinds]string{
+ PackageClause: "package&nbsp;clause",
+ ImportDecl: "import&nbsp;decl",
+ ConstDecl: "const&nbsp;decl",
+ TypeDecl: "type&nbsp;decl",
+ VarDecl: "var&nbsp;decl",
+ FuncDecl: "func&nbsp;decl",
+ MethodDecl: "method&nbsp;decl",
+ Use: "use",
+}
+
+func infoKind_htmlFunc(kind SpotKind) string {
+ return infoKinds[kind] // infoKind entries are html-escaped
+}
+
+func infoLineFunc(info SpotInfo) int {
+ line := info.Lori()
+ if info.IsIndex() {
+ index, _ := searchIndex.get()
+ if index != nil {
+ line = index.(*Index).Snippet(line).Line
+ } else {
+ // no line information available because
+ // we don't have an index - this should
+ // never happen; be conservative and don't
+ // crash
+ line = 0
+ }
+ }
+ return line
+}
+
+func infoSnippet_htmlFunc(info SpotInfo) string {
+ if info.IsIndex() {
+ index, _ := searchIndex.get()
+ // Snippet.Text was HTML-escaped when it was generated
+ return index.(*Index).Snippet(info.Lori()).Text
+ }
+ return `<span class="alert">no snippet text available</span>`
+}
+
+func nodeFunc(node interface{}, fset *token.FileSet) string {
+ var buf bytes.Buffer
+ writeNode(&buf, fset, node)
+ return buf.String()
+}
+
+func node_htmlFunc(node interface{}, fset *token.FileSet) string {
+ var buf1 bytes.Buffer
+ writeNode(&buf1, fset, node)
+ var buf2 bytes.Buffer
+ FormatText(&buf2, buf1.Bytes(), -1, true, "", nil)
+ return buf2.String()
+}
+
+func comment_htmlFunc(comment string) string {
+ var buf bytes.Buffer
+ // TODO(gri) Provide list of words (e.g. function parameters)
+ // to be emphasized by ToHTML.
+ doc.ToHTML(&buf, []byte(comment), nil) // does html-escaping
+ return buf.String()
+}
+
+func pkgLinkFunc(path string) string {
+ relpath := relativeURL(path)
+ // because of the irregular mapping under goroot
+ // we need to correct certain relative paths
+ if strings.HasPrefix(relpath, "src/pkg/") {
+ relpath = relpath[len("src/pkg/"):]
+ }
+ return pkgHandler.pattern[1:] + relpath // remove leading '/' for relative URL
+}
+
+func posLink_urlFunc(node ast.Node, fset *token.FileSet) string {
+ var relpath string
+ var line int
+ var low, high int // selection
+
+ if p := node.Pos(); p.IsValid() {
+ pos := fset.Position(p)
+ relpath = relativeURL(pos.Filename)
+ line = pos.Line
+ low = pos.Offset
+ }
+ if p := node.End(); p.IsValid() {
+ high = fset.Position(p).Offset
+ }
+
+ var buf bytes.Buffer
+ template.HTMLEscape(&buf, []byte(relpath))
+ // selection ranges are of form "s=low:high"
+ if low < high {
+ fmt.Fprintf(&buf, "?s=%d:%d", low, high) // no need for URL escaping
+ // if we have a selection, position the page
+ // such that the selection is a bit below the top
+ line -= 10
+ if line < 1 {
+ line = 1
+ }
+ }
+ // line id's in html-printed source are of the
+ // form "L%d" where %d stands for the line number
+ if line > 0 {
+ fmt.Fprintf(&buf, "#L%d", line) // no need for URL escaping
+ }
+
+ return buf.String()
+}
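+
+// Illustration (hypothetical values, not from a real run): for a node
+// spanning offsets 1234-1260 that starts on line 110 of
+// src/pkg/fmt/print.go, posLink_urlFunc yields
+// "src/pkg/fmt/print.go?s=1234:1260#L100"; the anchor is moved up 10
+// lines so the selection appears a bit below the top of the page.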
+
+// fmap describes the template functions installed with all godoc templates.
+// Convention: template function names ending in "_html" or "_url" produce
+// HTML- or URL-escaped strings; all other function results may
+// require explicit escaping in the template.
+var fmap = template.FuncMap{
+ // various helpers
+ "filename": filenameFunc,
+ "repeat": strings.Repeat,
+
+ // access to FileInfos (directory listings)
+ "fileInfoName": fileInfoNameFunc,
+ "fileInfoTime": fileInfoTimeFunc,
+
+ // access to search result information
+ "infoKind_html": infoKind_htmlFunc,
+ "infoLine": infoLineFunc,
+ "infoSnippet_html": infoSnippet_htmlFunc,
+
+ // formatting of AST nodes
+ "node": nodeFunc,
+ "node_html": node_htmlFunc,
+ "comment_html": comment_htmlFunc,
+
+ // support for URL attributes
+ "pkgLink": pkgLinkFunc,
+ "srcLink": relativeURL,
+ "posLink_url": posLink_urlFunc,
+}
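+
+// Usage sketch (hypothetical template fragment, assuming the standard
+// {{...}} action syntax): {{node_html .PAst .FSet}} can be emitted
+// without further escaping, while the result of {{node .PAst .FSet}}
+// may still require explicit escaping per the convention above.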
+
+func readTemplate(name string) *template.Template {
+ path := filepath.Join(*goroot, "lib", "godoc", name)
+ if *templateDir != "" {
+ defaultpath := path
+ path = filepath.Join(*templateDir, name)
+ if _, err := fs.Stat(path); err != nil {
+ log.Print("readTemplate:", err)
+ path = defaultpath
+ }
+ }
+ return template.Must(template.New(name).Funcs(fmap).ParseFile(path))
+}
+
+var (
+ codewalkHTML,
+ codewalkdirHTML,
+ dirlistHTML,
+ errorHTML,
+ godocHTML,
+ packageHTML,
+ packageText,
+ searchHTML,
+ searchText *template.Template
+)
+
+func readTemplates() {
+ // have to delay until after flags processing since paths depend on goroot
+ codewalkHTML = readTemplate("codewalk.html")
+ codewalkdirHTML = readTemplate("codewalkdir.html")
+ dirlistHTML = readTemplate("dirlist.html")
+ errorHTML = readTemplate("error.html")
+ godocHTML = readTemplate("godoc.html")
+ packageHTML = readTemplate("package.html")
+ packageText = readTemplate("package.txt")
+ searchHTML = readTemplate("search.html")
+ searchText = readTemplate("search.txt")
+}
+
+// ----------------------------------------------------------------------------
+// Generic HTML wrapper
+
+func servePage(w http.ResponseWriter, title, subtitle, query string, content []byte) {
+ d := struct {
+ Title string
+ Subtitle string
+ PkgRoots []string
+ SearchBox bool
+ Query string
+ Version string
+ Menu []byte
+ Content []byte
+ }{
+ title,
+ subtitle,
+ fsMap.PrefixList(),
+ *indexEnabled,
+ query,
+ runtime.Version(),
+ nil,
+ content,
+ }
+
+ if err := godocHTML.Execute(w, &d); err != nil {
+ log.Printf("godocHTML.Execute: %s", err)
+ }
+}
+
+func serveText(w http.ResponseWriter, text []byte) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Write(text)
+}
+
+// ----------------------------------------------------------------------------
+// Files
+
+var (
+ titleRx = regexp.MustCompile(`<!-- title ([^\-]*)-->`)
+ subtitleRx = regexp.MustCompile(`<!-- subtitle ([^\-]*)-->`)
+ firstCommentRx = regexp.MustCompile(`<!--([^\-]*)-->`)
+)
+
+func extractString(src []byte, rx *regexp.Regexp) (s string) {
+ m := rx.FindSubmatch(src)
+ if m != nil {
+ s = strings.TrimSpace(string(m[1]))
+ }
+ return
+}
+
+func serveHTMLDoc(w http.ResponseWriter, r *http.Request, abspath, relpath string) {
+ // get HTML body contents
+ src, err := fs.ReadFile(abspath)
+ if err != nil {
+ log.Printf("ReadFile: %s", err)
+ serveError(w, r, relpath, err)
+ return
+ }
+
+ // if it begins with "<!DOCTYPE " assume it is standalone
+ // html that doesn't need the template wrapping.
+ if bytes.HasPrefix(src, []byte("<!DOCTYPE ")) {
+ w.Write(src)
+ return
+ }
+
+ // if it's the language spec, add tags to EBNF productions
+ if strings.HasSuffix(abspath, "go_spec.html") {
+ var buf bytes.Buffer
+ linkify(&buf, src)
+ src = buf.Bytes()
+ }
+
+ // get title and subtitle, if any
+ title := extractString(src, titleRx)
+ if title == "" {
+ // no title found; try first comment for backward-compatibility
+ title = extractString(src, firstCommentRx)
+ }
+ subtitle := extractString(src, subtitleRx)
+
+ servePage(w, title, subtitle, "", src)
+}
+
+func applyTemplate(t *template.Template, name string, data interface{}) []byte {
+ var buf bytes.Buffer
+ if err := t.Execute(&buf, data); err != nil {
+ log.Printf("%s.Execute: %s", name, err)
+ }
+ return buf.Bytes()
+}
+
+func redirect(w http.ResponseWriter, r *http.Request) (redirected bool) {
+ if canonical := path.Clean(r.URL.Path) + "/"; r.URL.Path != canonical {
+ http.Redirect(w, r, canonical, http.StatusMovedPermanently)
+ redirected = true
+ }
+ return
+}
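+
+// For example, a request for "/pkg" is redirected to the
+// canonical path "/pkg/"; a request for "/pkg/" is left alone.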
+
+func serveTextFile(w http.ResponseWriter, r *http.Request, abspath, relpath, title string) {
+ src, err := fs.ReadFile(abspath)
+ if err != nil {
+ log.Printf("ReadFile: %s", err)
+ serveError(w, r, relpath, err)
+ return
+ }
+
+ var buf bytes.Buffer
+ buf.WriteString("<pre>")
+ FormatText(&buf, src, 1, filepath.Ext(abspath) == ".go", r.FormValue("h"), rangeSelection(r.FormValue("s")))
+ buf.WriteString("</pre>")
+
+ servePage(w, title+" "+relpath, "", "", buf.Bytes())
+}
+
+func serveDirectory(w http.ResponseWriter, r *http.Request, abspath, relpath string) {
+ if redirect(w, r) {
+ return
+ }
+
+ list, err := fs.ReadDir(abspath)
+ if err != nil {
+ log.Printf("ReadDir: %s", err)
+ serveError(w, r, relpath, err)
+ return
+ }
+
+ contents := applyTemplate(dirlistHTML, "dirlistHTML", list)
+ servePage(w, "Directory "+relpath, "", "", contents)
+}
+
+func serveFile(w http.ResponseWriter, r *http.Request) {
+ relpath := r.URL.Path[1:] // serveFile URL paths start with '/'
+ abspath := absolutePath(relpath, *goroot)
+
+ // pick off special cases and hand the rest to the standard file server
+ switch r.URL.Path {
+ case "/":
+ serveHTMLDoc(w, r, filepath.Join(*goroot, "doc", "root.html"), "doc/root.html")
+ return
+
+ case "/doc/root.html":
+ // hide landing page from its real name
+ http.Redirect(w, r, "/", http.StatusMovedPermanently)
+ return
+ }
+
+ switch path.Ext(relpath) {
+ case ".html":
+ if strings.HasSuffix(relpath, "/index.html") {
+ // We'll show index.html for the directory.
+ // Use the dir/ version as canonical instead of dir/index.html.
+ http.Redirect(w, r, r.URL.Path[0:len(r.URL.Path)-len("index.html")], http.StatusMovedPermanently)
+ return
+ }
+ serveHTMLDoc(w, r, abspath, relpath)
+ return
+
+ case ".go":
+ serveTextFile(w, r, abspath, relpath, "Source file")
+ return
+ }
+
+ dir, err := fs.Lstat(abspath)
+ if err != nil {
+ log.Print(err)
+ serveError(w, r, relpath, err)
+ return
+ }
+
+ if dir != nil && dir.IsDirectory() {
+ if redirect(w, r) {
+ return
+ }
+ if index := filepath.Join(abspath, "index.html"); isTextFile(index) {
+ serveHTMLDoc(w, r, index, relativeURL(index))
+ return
+ }
+ serveDirectory(w, r, abspath, relpath)
+ return
+ }
+
+ if isTextFile(abspath) {
+ serveTextFile(w, r, abspath, relpath, "Text file")
+ return
+ }
+
+ fileServer.ServeHTTP(w, r)
+}
+
+// ----------------------------------------------------------------------------
+// Packages
+
+// Fake package file and name for commands. Contains the command documentation.
+const fakePkgFile = "doc.go"
+const fakePkgName = "documentation"
+
+// Fake relative package path for built-ins. Documentation for all globals
+// (not just exported ones) will be shown for packages in this directory.
+const builtinPkgPath = "builtin/"
+
+type PageInfoMode uint
+
+const (
+ exportsOnly PageInfoMode = 1 << iota // only keep exported stuff
+ genDoc // generate documentation
+)
+
+type PageInfo struct {
+ Dirname string // directory containing the package
+ PList []string // list of package names found
+ FSet *token.FileSet // corresponding file set
+ PAst *ast.File // nil if no single AST with package exports
+ PDoc *doc.PackageDoc // nil if no single package documentation
+ Dirs *DirList // nil if no directory information
+ DirTime int64 // directory time stamp in seconds since epoch
+ IsPkg bool // false if this is not documenting a real package
+ Err os.Error // directory read error or nil
+}
+
+func (info *PageInfo) IsEmpty() bool {
+ return info.Err != nil || info.PAst == nil && info.PDoc == nil && info.Dirs == nil
+}
+
+type httpHandler struct {
+ pattern string // url pattern; e.g. "/pkg/"
+ fsRoot string // file system root to which the pattern is mapped
+ isPkg bool // true if this handler serves real package documentation (as opposed to command documentation)
+}
+
+// getPageInfo returns the PageInfo for a package directory abspath. If the
+// mode includes exportsOnly, the package is first reduced to its exported
+// declarations. If the mode includes genDoc, package documentation
+// (PageInfo.PDoc) is extracted from the AST; otherwise a merged AST
+// (PageInfo.PAst) is computed. If there is no corresponding package in the
+// directory, PageInfo.PAst and PageInfo.PDoc are nil. If there are no sub-
+// directories, PageInfo.Dirs is nil. If a directory read error occurred,
+// PageInfo.Err is set to the respective error but the error is not logged.
+//
+func (h *httpHandler) getPageInfo(abspath, relpath, pkgname string, mode PageInfoMode) PageInfo {
+ // filter function to select the desired .go files
+ filter := func(d FileInfo) bool {
+ // If we are looking at cmd documentation, only accept
+ // the special fakePkgFile containing the documentation.
+ return isPkgFile(d) && (h.isPkg || d.Name() == fakePkgFile)
+ }
+
+ // get package ASTs
+ fset := token.NewFileSet()
+ pkgs, err := parseDir(fset, abspath, filter)
+ if err != nil && pkgs == nil {
+ // only report directory read errors, ignore parse errors
+ // (may be able to extract partial package information)
+ return PageInfo{Dirname: abspath, Err: err}
+ }
+
+ // select package
+ var pkg *ast.Package // selected package
+ var plist []string // list of other package (names), if any
+ if len(pkgs) == 1 {
+ // Exactly one package - select it.
+ for _, p := range pkgs {
+ pkg = p
+ }
+
+ } else if len(pkgs) > 1 {
+ // Multiple packages - select the best matching package: The
+ // 1st choice is the package with pkgname, the 2nd choice is
+ // the package with dirname, and the 3rd choice is a package
+ // that is not called "main" if there is exactly one such
+ // package. Otherwise, don't select a package.
+ dirpath, dirname := filepath.Split(abspath)
+
+ // If the dirname is "go" we might be in a sub-directory for
+ // .go files - use the outer directory name instead for better
+ // results.
+ if dirname == "go" {
+ _, dirname = filepath.Split(filepath.Clean(dirpath))
+ }
+
+ var choice3 *ast.Package
+ loop:
+ for _, p := range pkgs {
+ switch {
+ case p.Name == pkgname:
+ pkg = p
+ break loop // 1st choice; we are done
+ case p.Name == dirname:
+ pkg = p // 2nd choice
+ case p.Name != "main":
+ choice3 = p
+ }
+ }
+ if pkg == nil && len(pkgs) == 2 {
+ pkg = choice3
+ }
+
+ // Compute the list of other packages
+ // (excluding the selected package, if any).
+ plist = make([]string, len(pkgs))
+ i := 0
+ for name := range pkgs {
+ if pkg == nil || name != pkg.Name {
+ plist[i] = name
+ i++
+ }
+ }
+ plist = plist[0:i]
+ }
+
+ // compute package documentation
+ var past *ast.File
+ var pdoc *doc.PackageDoc
+ if pkg != nil {
+ if mode&exportsOnly != 0 {
+ ast.PackageExports(pkg)
+ }
+ if mode&genDoc != 0 {
+ pdoc = doc.NewPackageDoc(pkg, path.Clean(relpath)) // no trailing '/' in importpath
+ } else {
+ past = ast.MergePackageFiles(pkg, ast.FilterUnassociatedComments)
+ }
+ }
+
+ // get directory information
+ var dir *Directory
+ var timestamp int64
+ if tree, ts := fsTree.get(); tree != nil && tree.(*Directory) != nil {
+ // directory tree is present; look up the respective directory
+ // (may still fail if the file system was updated and the
+ // new directory tree has not yet been computed)
+ dir = tree.(*Directory).lookup(abspath)
+ timestamp = ts
+ }
+ if dir == nil {
+ // the path may refer to a user-specified file system mapped
+ // via fsMap; look up that mapping and the corresponding
+ // RWValue, if any
+ var v *RWValue
+ fsMap.Iterate(func(path string, value *RWValue) bool {
+ if isParentOf(path, abspath) {
+ // mapping found
+ v = value
+ return false
+ }
+ return true
+ })
+ if v != nil {
+ // found a RWValue associated with a user-specified file
+ // system; a non-nil RWValue stores a (possibly out-of-date)
+ // directory tree for that file system
+ if tree, ts := v.get(); tree != nil && tree.(*Directory) != nil {
+ dir = tree.(*Directory).lookup(abspath)
+ timestamp = ts
+ }
+ }
+ }
+ if dir == nil {
+ // no directory tree present (too early after startup or
+ // command-line mode); compute one level for this page
+ // note: cannot use path filter here because in general
+ // it doesn't contain the fsTree path
+ dir = newDirectory(abspath, nil, 1)
+ timestamp = time.Seconds()
+ }
+
+ return PageInfo{abspath, plist, fset, past, pdoc, dir.listing(true), timestamp, h.isPkg, nil}
+}
+
+func (h *httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if redirect(w, r) {
+ return
+ }
+
+ relpath := r.URL.Path[len(h.pattern):]
+ abspath := absolutePath(relpath, h.fsRoot)
+ var mode PageInfoMode
+ if relpath != builtinPkgPath {
+ mode = exportsOnly
+ }
+ if r.FormValue("m") != "src" {
+ mode |= genDoc
+ }
+ info := h.getPageInfo(abspath, relpath, r.FormValue("p"), mode)
+ if info.Err != nil {
+ log.Print(info.Err)
+ serveError(w, r, relpath, info.Err)
+ return
+ }
+
+ if r.FormValue("f") == "text" {
+ contents := applyTemplate(packageText, "packageText", info)
+ serveText(w, contents)
+ return
+ }
+
+ var title, subtitle string
+ switch {
+ case info.PAst != nil:
+ title = "Package " + info.PAst.Name.Name
+ case info.PDoc != nil:
+ switch {
+ case info.IsPkg:
+ title = "Package " + info.PDoc.PackageName
+ case info.PDoc.PackageName == fakePkgName:
+ // assume that the directory name is the command name
+ _, pkgname := path.Split(path.Clean(relpath))
+ title = "Command " + pkgname
+ default:
+ title = "Command " + info.PDoc.PackageName
+ }
+ default:
+ title = "Directory " + relativeURL(info.Dirname)
+ if *showTimestamps {
+ subtitle = "Last update: " + time.SecondsToLocalTime(info.DirTime).String()
+ }
+ }
+
+ contents := applyTemplate(packageHTML, "packageHTML", info)
+ servePage(w, title, subtitle, "", contents)
+}
+
+// ----------------------------------------------------------------------------
+// Search
+
+var searchIndex RWValue
+
+type SearchResult struct {
+ Query string
+ Alert string // error or warning message
+
+ // identifier matches
+ Hit *LookupResult // identifier matches of Query
+ Alt *AltWords // alternative identifiers to look for
+
+ // textual matches
+ Found int // number of textual occurrences found
+ Textual []FileLines // textual matches of Query
+ Complete bool // true if all textual occurrences of Query are reported
+}
+
+func lookup(query string) (result SearchResult) {
+ result.Query = query
+
+ index, timestamp := searchIndex.get()
+ if index != nil {
+ index := index.(*Index)
+
+ // identifier search
+ var err os.Error
+ result.Hit, result.Alt, err = index.Lookup(query)
+ if err != nil && *maxResults <= 0 {
+ // ignore the error if full text search is enabled
+ // since the query may be a valid regular expression
+ result.Alert = "Error in query string: " + err.String()
+ return
+ }
+
+ // full text search
+ if *maxResults > 0 && query != "" {
+ rx, err := regexp.Compile(query)
+ if err != nil {
+ result.Alert = "Error in query regular expression: " + err.String()
+ return
+ }
+ // If we get maxResults+1 results we know that there are more than
+ // maxResults results and thus the result may be incomplete (to be
+ // precise, we should remove one result from the result set, but
+ // nobody is going to count the results on the result page).
+ result.Found, result.Textual = index.LookupRegexp(rx, *maxResults+1)
+ result.Complete = result.Found <= *maxResults
+ if !result.Complete {
+ result.Found-- // since we looked for maxResults+1
+ }
+ }
+ }
+
+ // is the result accurate?
+ if *indexEnabled {
+ if _, ts := fsModified.get(); timestamp < ts {
+ // The index is older than the latest file system change
+ // under godoc's observation. Indexing may be in progress
+ // or start shortly (see indexer()).
+ result.Alert = "Indexing in progress: result may be inaccurate"
+ }
+ } else {
+ result.Alert = "Search index disabled: no results available"
+ }
+
+ return
+}
+
+func search(w http.ResponseWriter, r *http.Request) {
+ query := strings.TrimSpace(r.FormValue("q"))
+ result := lookup(query)
+
+ if r.FormValue("f") == "text" {
+ contents := applyTemplate(searchText, "searchText", result)
+ serveText(w, contents)
+ return
+ }
+
+ var title string
+ if result.Hit != nil || len(result.Textual) > 0 {
+ title = fmt.Sprintf(`Results for query %q`, query)
+ } else {
+ title = fmt.Sprintf(`No results found for query %q`, query)
+ }
+
+ contents := applyTemplate(searchHTML, "searchHTML", result)
+ servePage(w, title, "", query, contents)
+}
+
+// ----------------------------------------------------------------------------
+// Indexer
+
+// invalidateIndex should be called whenever any of the file systems
+// under godoc's observation changes so that the indexer is kicked off.
+//
+func invalidateIndex() {
+ fsModified.set(nil)
+}
+
+// indexUpToDate() returns true if the search index is not older
+// than any of the file systems under godoc's observation.
+//
+func indexUpToDate() bool {
+ _, fsTime := fsModified.get()
+ _, siTime := searchIndex.get()
+ return fsTime <= siTime
+}
+
+// feedDirnames feeds the directory names of all directories
+// under the file system given by root to channel c.
+//
+func feedDirnames(root *RWValue, c chan<- string) {
+ if dir, _ := root.get(); dir != nil {
+ for d := range dir.(*Directory).iter(false) {
+ c <- d.Path
+ }
+ }
+}
+
+// fsDirnames() returns a channel sending all directory names
+// of all the file systems under godoc's observation.
+//
+func fsDirnames() <-chan string {
+ c := make(chan string, 256) // asynchronous for fewer context switches
+ go func() {
+ feedDirnames(&fsTree, c)
+ fsMap.Iterate(func(_ string, root *RWValue) bool {
+ feedDirnames(root, c)
+ return true
+ })
+ close(c)
+ }()
+ return c
+}
+
+func indexer() {
+ for {
+ if !indexUpToDate() {
+ // index possibly out of date - make a new one
+ if *verbose {
+ log.Printf("updating index...")
+ }
+ start := time.Nanoseconds()
+ index := NewIndex(fsDirnames(), *maxResults > 0)
+ stop := time.Nanoseconds()
+ searchIndex.set(index)
+ if *verbose {
+ secs := float64((stop-start)/1e6) / 1e3
+ stats := index.Stats()
+ log.Printf("index updated (%gs, %d bytes of source, %d files, %d lines, %d unique words, %d spots)",
+ secs, stats.Bytes, stats.Files, stats.Lines, stats.Words, stats.Spots)
+ }
+ log.Printf("before GC: bytes = %d footprint = %d", runtime.MemStats.HeapAlloc, runtime.MemStats.Sys)
+ runtime.GC()
+ log.Printf("after GC: bytes = %d footprint = %d", runtime.MemStats.HeapAlloc, runtime.MemStats.Sys)
+ }
+ var delay int64 = 60 * 1e9 // by default, try every 60s
+ if *testDir != "" {
+ // in test mode, try once a second for fast startup
+ delay = 1 * 1e9
+ }
+ time.Sleep(delay)
+ }
+}
diff --git a/src/cmd/godoc/httpzip.go b/src/cmd/godoc/httpzip.go
new file mode 100644
index 000000000..cb8322ee4
--- /dev/null
+++ b/src/cmd/godoc/httpzip.go
@@ -0,0 +1,181 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides an implementation of the http.FileSystem
+// interface based on the contents of a .zip file.
+//
+// Assumptions:
+//
+// - The file paths stored in the zip file must use a slash ('/') as path
+// separator; and they must be relative (i.e., they must not start with
+// a '/' - this is usually the case if the file was created w/o special
+// options).
+// - The zip file system treats the file paths found in the zip internally
+// like absolute paths w/o a leading '/'; i.e., the paths are considered
+// relative to the root of the file system.
+// - All path arguments to file system methods are considered relative to
+// the root specified with NewHttpZipFS (even if the paths start with a '/').
+
+// TODO(gri) Should define a commonly used FileSystem API that is the same
+// for http and godoc. Then we only need one zip-file based file
+// system implementation.
+
+package main
+
+import (
+ "archive/zip"
+ "fmt"
+ "http"
+ "io"
+ "os"
+ "path"
+ "sort"
+ "strings"
+)
+
+// We cannot import syscall on app engine.
+// TODO(gri) Once we have a truly abstract FileInfo implementation
+// this won't be needed anymore.
+const (
+ S_IFDIR = 0x4000 // == syscall.S_IFDIR
+ S_IFREG = 0x8000 // == syscall.S_IFREG
+)
+
+// httpZipFile is the zip-file based implementation of http.File
+type httpZipFile struct {
+ path string // absolute path within zip FS without leading '/'
+ info os.FileInfo
+ io.ReadCloser // nil for directory
+ list zipList
+}
+
+func (f *httpZipFile) Close() os.Error {
+ if f.info.IsRegular() {
+ return f.ReadCloser.Close()
+ }
+ f.list = nil
+ return nil
+}
+
+func (f *httpZipFile) Stat() (*os.FileInfo, os.Error) {
+ return &f.info, nil
+}
+
+func (f *httpZipFile) Readdir(count int) ([]os.FileInfo, os.Error) {
+ var list []os.FileInfo
+ dirname := f.path + "/"
+ prevname := ""
+ for i, e := range f.list {
+ if count == 0 {
+ f.list = f.list[i:]
+ break
+ }
+ if !strings.HasPrefix(e.Name, dirname) {
+ f.list = nil
+ break // not in the same directory anymore
+ }
+ name := e.Name[len(dirname):] // local name
+ var mode uint32
+ var size, mtime_ns int64
+ if i := strings.IndexRune(name, '/'); i >= 0 {
+ // We infer directories from files in subdirectories.
+ // If we have x/y, return a directory entry for x.
+ name = name[0:i] // keep local directory name only
+ mode = S_IFDIR
+ // no size or mtime_ns for directories
+ } else {
+ mode = S_IFREG
+ size = int64(e.UncompressedSize)
+ mtime_ns = e.Mtime_ns()
+ }
+ // If we have x/y and x/z, don't return two directory entries for x.
+ // TODO(gri): It should be possible to do this more efficiently
+ // by determining the (fs.list) range of local directory entries
+ // (via two binary searches).
+ if name != prevname {
+ list = append(list, os.FileInfo{
+ Name: name,
+ Mode: mode,
+ Size: size,
+ Mtime_ns: mtime_ns,
+ })
+ prevname = name
+ count--
+ }
+ }
+
+ if count >= 0 && len(list) == 0 {
+ return nil, os.EOF
+ }
+
+ return list, nil
+}
+
+func (f *httpZipFile) Seek(offset int64, whence int) (int64, os.Error) {
+ return 0, fmt.Errorf("Seek not implemented for zip file entry: %s", f.info.Name)
+}
+
+// httpZipFS is the zip-file based implementation of http.FileSystem
+type httpZipFS struct {
+ *zip.ReadCloser
+ list zipList
+ root string
+}
+
+func (fs *httpZipFS) Open(name string) (http.File, os.Error) {
+ // fs.root does not start with '/'.
+ path := path.Join(fs.root, name) // path is clean
+ index, exact := fs.list.lookup(path)
+ if index < 0 || !strings.HasPrefix(path, fs.root) {
+ // file not found or not under root
+ return nil, fmt.Errorf("file not found: %s", name)
+ }
+
+ if exact {
+ // exact match found - must be a file
+ f := fs.list[index]
+ rc, err := f.Open()
+ if err != nil {
+ return nil, err
+ }
+ return &httpZipFile{
+ path,
+ os.FileInfo{
+ Name: name,
+ Mode: S_IFREG,
+ Size: int64(f.UncompressedSize),
+ Mtime_ns: f.Mtime_ns(),
+ },
+ rc,
+ nil,
+ }, nil
+ }
+
+ // not an exact match - must be a directory
+ return &httpZipFile{
+ path,
+ os.FileInfo{
+ Name: name,
+ Mode: S_IFDIR,
+ // no size or mtime_ns for directories
+ },
+ nil,
+ fs.list[index:],
+ }, nil
+}
+
+func (fs *httpZipFS) Close() os.Error {
+ fs.list = nil
+ return fs.ReadCloser.Close()
+}
+
+// NewHttpZipFS creates a new http.FileSystem based on the contents of
+// the zip file rc restricted to the directory tree specified by root;
+// root must be an absolute path.
+func NewHttpZipFS(rc *zip.ReadCloser, root string) http.FileSystem {
+ list := make(zipList, len(rc.File))
+ copy(list, rc.File) // sort a copy of rc.File
+ sort.Sort(list)
+ return &httpZipFS{rc, list, zipPath(root)}
+}
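+
+// Typical use (cf. main.go): after rc, err := zip.OpenReader(*zipfile),
+// the zip contents rooted at *goroot are served via
+// fsHttp = NewHttpZipFS(rc, *goroot).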
diff --git a/src/cmd/godoc/index.go b/src/cmd/godoc/index.go
new file mode 100644
index 000000000..9b4f31514
--- /dev/null
+++ b/src/cmd/godoc/index.go
@@ -0,0 +1,986 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the infrastructure to create an
+// identifier and full-text index for a set of Go files.
+//
+// Algorithm for identifier index:
+// - traverse all .go files of the file tree specified by root
+// - for each word (identifier) encountered, collect all occurrences (spots)
+// into a list; this produces a list of spots for each word
+// - reduce the lists: from a list of spots to a list of FileRuns,
+// and from a list of FileRuns into a list of PakRuns
+// - make a HitList from the PakRuns
+//
+// Details:
+// - keep two lists per word: one containing package-level declarations
+// that have snippets, and one containing all other spots
+// - keep the snippets in a separate table indexed by snippet index
+// and store the snippet index in place of the line number in a SpotInfo
+// (the line number for spots with snippets is stored in the snippet)
+// - at the end, create lists of alternative spellings for a given
+// word
+//
+// Algorithm for full text index:
+// - concatenate all source code in a byte buffer (in memory)
+// - add the files to a file set in lockstep as they are added to the byte
+// buffer such that a byte buffer offset corresponds to the Pos value for
+// that file location
+// - create a suffix array from the concatenated sources
+//
+// String lookup in full text index:
+// - use the suffix array to look up a string's offsets - the offsets
+// correspond to the Pos values relative to the file set
+// - translate the Pos values back into file and line information and
+// sort the result
+
+package main
+
+import (
+ "bytes"
+ "container/vector"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/scanner"
+ "index/suffixarray"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// ----------------------------------------------------------------------------
+// RunList
+
+// A RunList is a vector of entries that can be sorted according to some
+// criteria. A RunList may be compressed by grouping "runs" of entries
+// which are equal (according to the sort criteria) into a new RunList of
+// runs. For instance, a RunList containing pairs (x, y) may be compressed
+// into a RunList containing pair runs (x, {y}) where each run consists of
+// a list of y's with the same x.
+type RunList struct {
+ vector.Vector
+ less func(x, y interface{}) bool
+}
+
+func (h *RunList) Less(i, j int) bool { return h.less(h.At(i), h.At(j)) }
+
+func (h *RunList) sort(less func(x, y interface{}) bool) {
+ h.less = less
+ sort.Sort(h)
+}
+
+// Compress entries which are the same according to a sort criteria
+// (specified by less) into "runs".
+func (h *RunList) reduce(less func(x, y interface{}) bool, newRun func(h *RunList, i, j int) interface{}) *RunList {
+ // create runs of entries with equal values
+ h.sort(less)
+
+ // for each run, make a new run object and collect them in a new RunList
+ var hh RunList
+ i := 0
+ for j := 0; j < h.Len(); j++ {
+ if less(h.At(i), h.At(j)) {
+ hh.Push(newRun(h, i, j))
+ i = j // start a new run
+ }
+ }
+ // add final run, if any
+ if i < h.Len() {
+ hh.Push(newRun(h, i, h.Len()))
+ }
+
+ return &hh
+}
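+
+// Sketch (hypothetical input): reducing the pairs (1,a) (2,b) (1,c) with
+// a less function ordering by the first component first sorts them to
+// (1,a) (1,c) (2,b) and then produces the runs (1,{a,c}) and (2,{b}).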
+
+// ----------------------------------------------------------------------------
+// SpotInfo
+
+// A SpotInfo value describes a particular identifier spot in a given file.
+// It encodes three values: the SpotKind (declaration or use), a line or
+// snippet index "lori", and whether it's a line or index.
+//
+// The following encoding is used:
+//
+// bits 32 4 1 0
+// value [lori|kind|isIndex]
+//
+type SpotInfo uint32
+
+// SpotKind describes whether an identifier is declared (and what kind of
+// declaration) or used.
+type SpotKind uint32
+
+const (
+ PackageClause SpotKind = iota
+ ImportDecl
+ ConstDecl
+ TypeDecl
+ VarDecl
+ FuncDecl
+ MethodDecl
+ Use
+ nKinds
+)
+
+func init() {
+ // sanity check: if nKinds is too large, the SpotInfo
+ // accessor functions may need to be updated
+ if nKinds > 8 {
+ panic("nKinds > 8")
+ }
+}
+
+// makeSpotInfo makes a SpotInfo.
+func makeSpotInfo(kind SpotKind, lori int, isIndex bool) SpotInfo {
+ // encode lori: bits [4..32)
+ x := SpotInfo(lori) << 4
+ if int(x>>4) != lori {
+ // lori value doesn't fit - since snippet indices are
+ // almost certainly always smaller than 1<<28, this can
+ // only happen for line numbers; give it no line number (= 0)
+ x = 0
+ }
+ // encode kind: bits [1..4)
+ x |= SpotInfo(kind) << 1
+ // encode isIndex: bit 0
+ if isIndex {
+ x |= 1
+ }
+ return x
+}
+
+func (x SpotInfo) Kind() SpotKind { return SpotKind(x >> 1 & 7) }
+func (x SpotInfo) Lori() int { return int(x >> 4) }
+func (x SpotInfo) IsIndex() bool { return x&1 != 0 }
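+
+// Encoding example (hypothetical values): makeSpotInfo(FuncDecl, 42, false)
+// packs to 42<<4 | SpotInfo(FuncDecl)<<1, for which Kind() == FuncDecl,
+// Lori() == 42, and IsIndex() == false.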
+
+// ----------------------------------------------------------------------------
+// KindRun
+
+// Debugging support. Disable to see multiple entries per line.
+const removeDuplicates = true
+
+// A KindRun is a run of SpotInfos of the same kind in a given file.
+type KindRun struct {
+ Kind SpotKind
+ Infos []SpotInfo
+}
+
+// KindRuns are sorted by line number or index. Since the isIndex bit
+// is always the same for all infos in one list we can compare lori's.
+func (f *KindRun) Len() int { return len(f.Infos) }
+func (f *KindRun) Less(i, j int) bool { return f.Infos[i].Lori() < f.Infos[j].Lori() }
+func (f *KindRun) Swap(i, j int) { f.Infos[i], f.Infos[j] = f.Infos[j], f.Infos[i] }
+
+// FileRun contents are sorted by Kind for the reduction into KindRuns.
+func lessKind(x, y interface{}) bool { return x.(SpotInfo).Kind() < y.(SpotInfo).Kind() }
+
+// newKindRun allocates a new KindRun from the SpotInfo run [i, j) in h.
+func newKindRun(h *RunList, i, j int) interface{} {
+ kind := h.At(i).(SpotInfo).Kind()
+ infos := make([]SpotInfo, j-i)
+ k := 0
+ for ; i < j; i++ {
+ infos[k] = h.At(i).(SpotInfo)
+ k++
+ }
+ run := &KindRun{kind, infos}
+
+ // Spots were sorted by file and kind to create this run.
+ // Within this run, sort them by line number or index.
+ sort.Sort(run)
+
+ if removeDuplicates {
+ // Since both the lori and kind field must be
+ // same for duplicates, and since the isIndex
+ // bit is always the same for all infos in one
+ // list we can simply compare the entire info.
+ k := 0
+ var prev SpotInfo
+ for i, x := range infos {
+ if x != prev || i == 0 {
+ infos[k] = x
+ k++
+ prev = x
+ }
+ }
+ run.Infos = infos[0:k]
+ }
+
+ return run
+}
+
+// ----------------------------------------------------------------------------
+// FileRun
+
+// A Pak describes a Go package.
+type Pak struct {
+ Path string // path of directory containing the package
+ Name string // package name as declared by package clause
+}
+
+// Paks are sorted by name (primary key) and by import path (secondary key).
+func (p *Pak) less(q *Pak) bool {
+ return p.Name < q.Name || p.Name == q.Name && p.Path < q.Path
+}
+
+// A File describes a Go file.
+type File struct {
+ Path string // complete file name
+ Pak Pak // the package to which the file belongs
+}
+
+// A Spot describes a single occurrence of a word.
+type Spot struct {
+ File *File
+ Info SpotInfo
+}
+
+// A FileRun is a list of KindRuns belonging to the same file.
+type FileRun struct {
+ File *File
+ Groups []*KindRun
+}
+
+// Spots are sorted by path for the reduction into FileRuns.
+func lessSpot(x, y interface{}) bool { return x.(Spot).File.Path < y.(Spot).File.Path }
+
+// newFileRun allocates a new FileRun from the Spot run [i, j) in h.
+func newFileRun(h0 *RunList, i, j int) interface{} {
+ file := h0.At(i).(Spot).File
+
+ // reduce the list of Spots into a list of KindRuns
+ var h1 RunList
+ h1.Vector.Resize(j-i, 0)
+ k := 0
+ for ; i < j; i++ {
+ h1.Set(k, h0.At(i).(Spot).Info)
+ k++
+ }
+ h2 := h1.reduce(lessKind, newKindRun)
+
+ // create the FileRun
+ groups := make([]*KindRun, h2.Len())
+ for i := 0; i < h2.Len(); i++ {
+ groups[i] = h2.At(i).(*KindRun)
+ }
+ return &FileRun{file, groups}
+}
+
+// ----------------------------------------------------------------------------
+// PakRun
+
+// A PakRun describes a run of *FileRuns of a package.
+type PakRun struct {
+ Pak Pak
+ Files []*FileRun
+}
+
+// Sorting support for files within a PakRun.
+func (p *PakRun) Len() int { return len(p.Files) }
+func (p *PakRun) Less(i, j int) bool { return p.Files[i].File.Path < p.Files[j].File.Path }
+func (p *PakRun) Swap(i, j int) { p.Files[i], p.Files[j] = p.Files[j], p.Files[i] }
+
+// FileRuns are sorted by package for the reduction into PakRuns.
+func lessFileRun(x, y interface{}) bool {
+ return x.(*FileRun).File.Pak.less(&y.(*FileRun).File.Pak)
+}
+
+// newPakRun allocates a new PakRun from the *FileRun run [i, j) in h.
+func newPakRun(h *RunList, i, j int) interface{} {
+ pak := h.At(i).(*FileRun).File.Pak
+ files := make([]*FileRun, j-i)
+ k := 0
+ for ; i < j; i++ {
+ files[k] = h.At(i).(*FileRun)
+ k++
+ }
+ run := &PakRun{pak, files}
+ sort.Sort(run) // files were sorted by package; sort them by file now
+ return run
+}
+
+// ----------------------------------------------------------------------------
+// HitList
+
+// A HitList describes a list of PakRuns.
+type HitList []*PakRun
+
+// PakRuns are sorted by package.
+func lessPakRun(x, y interface{}) bool { return x.(*PakRun).Pak.less(&y.(*PakRun).Pak) }
+
+func reduce(h0 *RunList) HitList {
+ // reduce a list of Spots into a list of FileRuns
+ h1 := h0.reduce(lessSpot, newFileRun)
+ // reduce a list of FileRuns into a list of PakRuns
+ h2 := h1.reduce(lessFileRun, newPakRun)
+ // sort the list of PakRuns by package
+ h2.sort(lessPakRun)
+ // create a HitList
+ h := make(HitList, h2.Len())
+ for i := 0; i < h2.Len(); i++ {
+ h[i] = h2.At(i).(*PakRun)
+ }
+ return h
+}
+
+func (h HitList) filter(pakname string) HitList {
+ // determine number of matching packages (most of the time just one)
+ n := 0
+ for _, p := range h {
+ if p.Pak.Name == pakname {
+ n++
+ }
+ }
+ // create filtered HitList
+ hh := make(HitList, n)
+ i := 0
+ for _, p := range h {
+ if p.Pak.Name == pakname {
+ hh[i] = p
+ i++
+ }
+ }
+ return hh
+}
+
+// ----------------------------------------------------------------------------
+// AltWords
+
+type wordPair struct {
+ canon string // canonical word spelling (all lowercase)
+ alt string // alternative spelling
+}
+
+// An AltWords describes a list of alternative spellings for a
+// canonical (all lowercase) spelling of a word.
+type AltWords struct {
+ Canon string // canonical word spelling (all lowercase)
+ Alts []string // alternative spelling for the same word
+}
+
+// wordPairs are sorted by their canonical spelling.
+func lessWordPair(x, y interface{}) bool { return x.(*wordPair).canon < y.(*wordPair).canon }
+
+// newAltWords allocates a new AltWords from the *wordPair run [i, j) in h.
+func newAltWords(h *RunList, i, j int) interface{} {
+ canon := h.At(i).(*wordPair).canon
+ alts := make([]string, j-i)
+ k := 0
+ for ; i < j; i++ {
+ alts[k] = h.At(i).(*wordPair).alt
+ k++
+ }
+ return &AltWords{canon, alts}
+}
+
+func (a *AltWords) filter(s string) *AltWords {
+ if len(a.Alts) == 1 && a.Alts[0] == s {
+ // there are no different alternatives
+ return nil
+ }
+
+ // make a new AltWords with the current spelling removed
+ alts := make([]string, len(a.Alts))
+ i := 0
+ for _, w := range a.Alts {
+ if w != s {
+ alts[i] = w
+ i++
+ }
+ }
+ return &AltWords{a.Canon, alts[0:i]}
+}
+
+// ----------------------------------------------------------------------------
+// Indexer
+
+// Adjust these flags as seems best.
+const includeMainPackages = true
+const includeTestFiles = true
+
+type IndexResult struct {
+ Decls RunList // package-level declarations (with snippets)
+ Others RunList // all other occurrences
+}
+
+// Statistics provides statistics information for an index.
+type Statistics struct {
+ Bytes int // total size of indexed source files
+ Files int // number of indexed source files
+ Lines int // number of lines (all files)
+ Words int // number of different identifiers
+ Spots int // number of identifier occurrences
+}
+
+// An Indexer maintains the data structures and provides the machinery
+// for indexing .go files under a file tree. It implements the ast.Visitor
+// interface for walking Go ASTs; file trees are traversed via visitFile.
+type Indexer struct {
+ fset *token.FileSet // file set for all indexed files
+ sources bytes.Buffer // concatenated sources
+ words map[string]*IndexResult // RunLists of Spots
+ snippets vector.Vector // vector of *Snippets, indexed by snippet indices
+ current *token.File // last file added to file set
+ file *File // AST for current file
+ decl ast.Decl // AST for current decl
+ stats Statistics
+}
+
+func (x *Indexer) addSnippet(s *Snippet) int {
+ index := x.snippets.Len()
+ x.snippets.Push(s)
+ return index
+}
+
+func (x *Indexer) visitComment(c *ast.CommentGroup) {
+ if c != nil {
+ ast.Walk(x, c)
+ }
+}
+
+func (x *Indexer) visitIdent(kind SpotKind, id *ast.Ident) {
+ if id != nil {
+ lists, found := x.words[id.Name]
+ if !found {
+ lists = new(IndexResult)
+ x.words[id.Name] = lists
+ }
+
+ if kind == Use || x.decl == nil {
+ // not a declaration or no snippet required
+ info := makeSpotInfo(kind, x.current.Line(id.Pos()), false)
+ lists.Others.Push(Spot{x.file, info})
+ } else {
+ // a declaration with snippet
+ index := x.addSnippet(NewSnippet(x.fset, x.decl, id))
+ info := makeSpotInfo(kind, index, true)
+ lists.Decls.Push(Spot{x.file, info})
+ }
+
+ x.stats.Spots++
+ }
+}
+
+func (x *Indexer) visitSpec(spec ast.Spec, isVarDecl bool) {
+ switch n := spec.(type) {
+ case *ast.ImportSpec:
+ x.visitComment(n.Doc)
+ x.visitIdent(ImportDecl, n.Name)
+ ast.Walk(x, n.Path)
+ x.visitComment(n.Comment)
+
+ case *ast.ValueSpec:
+ x.visitComment(n.Doc)
+ kind := ConstDecl
+ if isVarDecl {
+ kind = VarDecl
+ }
+ for _, n := range n.Names {
+ x.visitIdent(kind, n)
+ }
+ ast.Walk(x, n.Type)
+ for _, v := range n.Values {
+ ast.Walk(x, v)
+ }
+ x.visitComment(n.Comment)
+
+ case *ast.TypeSpec:
+ x.visitComment(n.Doc)
+ x.visitIdent(TypeDecl, n.Name)
+ ast.Walk(x, n.Type)
+ x.visitComment(n.Comment)
+ }
+}
+
+func (x *Indexer) Visit(node ast.Node) ast.Visitor {
+ // TODO(gri): methods in interface types are categorized as VarDecl
+ switch n := node.(type) {
+ case nil:
+ return nil
+
+ case *ast.Ident:
+ x.visitIdent(Use, n)
+
+ case *ast.Field:
+ x.decl = nil // no snippets for fields
+ x.visitComment(n.Doc)
+ for _, m := range n.Names {
+ x.visitIdent(VarDecl, m)
+ }
+ ast.Walk(x, n.Type)
+ ast.Walk(x, n.Tag)
+ x.visitComment(n.Comment)
+
+ case *ast.DeclStmt:
+ if decl, ok := n.Decl.(*ast.GenDecl); ok {
+ // local declarations can only be *ast.GenDecls
+ x.decl = nil // no snippets for local declarations
+ x.visitComment(decl.Doc)
+ for _, s := range decl.Specs {
+ x.visitSpec(s, decl.Tok == token.VAR)
+ }
+ } else {
+ // handle error case gracefully
+ ast.Walk(x, n.Decl)
+ }
+
+ case *ast.GenDecl:
+ x.decl = n
+ x.visitComment(n.Doc)
+ for _, s := range n.Specs {
+ x.visitSpec(s, n.Tok == token.VAR)
+ }
+
+ case *ast.FuncDecl:
+ x.visitComment(n.Doc)
+ kind := FuncDecl
+ if n.Recv != nil {
+ kind = MethodDecl
+ ast.Walk(x, n.Recv)
+ }
+ x.decl = n
+ x.visitIdent(kind, n.Name)
+ ast.Walk(x, n.Type)
+ if n.Body != nil {
+ ast.Walk(x, n.Body)
+ }
+
+ case *ast.File:
+ x.visitComment(n.Doc)
+ x.decl = nil
+ x.visitIdent(PackageClause, n.Name)
+ for _, d := range n.Decls {
+ ast.Walk(x, d)
+ }
+ // don't visit package level comments for now
+ // to avoid duplicate visiting from individual
+ // nodes
+
+ default:
+ return x
+ }
+
+ return nil
+}
+
+func pkgName(filename string) string {
+ // use a new file set each time so as not to pollute the indexer's
+ // file set (which must stay in sync with the concatenated source code)
+ file, err := parser.ParseFile(token.NewFileSet(), filename, nil, parser.PackageClauseOnly)
+ if err != nil || file == nil {
+ return ""
+ }
+ return file.Name.Name
+}
+
+// addFile adds a file to the index if possible and returns the file set file
+// and the file's AST if it was successfully parsed as a Go file. If addFile
+// failed (that is, if the file was not added), it returns file == nil.
+func (x *Indexer) addFile(filename string, goFile bool) (file *token.File, ast *ast.File) {
+ // open file
+ f, err := fs.Open(filename)
+ if err != nil {
+ return
+ }
+ defer f.Close()
+
+ // The file set's base offset and x.sources size must be in lock-step;
+ // this permits the direct mapping of suffix array lookup results
+ // to corresponding Pos values.
+ //
+ // When a file is added to the file set, its offset base increases by
+ // the size of the file + 1; and the initial base offset is 1. Add an
+ // extra byte to the sources here.
+ x.sources.WriteByte(0)
+
+ // If the sources length doesn't match the file set base at this point
+ // the file set implementation changed or we have another error.
+ base := x.fset.Base()
+ if x.sources.Len() != base {
+ panic("internal error - file base incorrect")
+ }
+
+ // append file contents (src) to x.sources
+ if _, err := x.sources.ReadFrom(f); err == nil {
+ src := x.sources.Bytes()[base:]
+
+ if goFile {
+ // parse the file and in the process add it to the file set
+ if ast, err = parser.ParseFile(x.fset, filename, src, parser.ParseComments); err == nil {
+ file = x.fset.File(ast.Pos()) // ast.Pos() is inside the file
+ return
+ }
+ // file has parse errors, and the AST may be incorrect -
+ // set lines information explicitly and index as ordinary
+ // text file (cannot fall through to the text case below
+ // because the file has already been added to the file set
+ // by the parser)
+ file = x.fset.File(token.Pos(base)) // token.Pos(base) is inside the file
+ file.SetLinesForContent(src)
+ ast = nil
+ return
+ }
+
+ if isText(src) {
+ // only add the file to the file set (for the full text index)
+ file = x.fset.AddFile(filename, x.fset.Base(), len(src))
+ file.SetLinesForContent(src)
+ return
+ }
+ }
+
+ // discard possibly added data
+ x.sources.Truncate(base - 1) // -1 to remove added byte 0 since no file was added
+ return
+}
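+
+// Base arithmetic (assumed file size): with the initial file set base of 1,
+// the extra 0 byte makes x.sources.Len() == 1 before the first file is
+// added; adding a 100-byte file advances the base by size+1 to 102, which
+// matches x.sources.Len() again after the next WriteByte(0).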
+
+// Design note: Using an explicit white list of permitted files for indexing
+// makes sure that the important files are included and massively reduces the
+// number of files to index. The advantage over a blacklist is that unexpected
+// (non-blacklisted) files won't suddenly explode the index.
+//
+// TODO(gri): We may want to make this list customizable, perhaps via a flag.
+
+// Files are whitelisted if they have a file name or extension
+// present as key in whitelisted.
+var whitelisted = map[string]bool{
+ ".bash": true,
+ ".c": true,
+ ".css": true,
+ ".go": true,
+ ".goc": true,
+ ".h": true,
+ ".html": true,
+ ".js": true,
+ ".out": true,
+ ".py": true,
+ ".s": true,
+ ".sh": true,
+ ".txt": true,
+ ".xml": true,
+ "AUTHORS": true,
+ "CONTRIBUTORS": true,
+ "LICENSE": true,
+ "Makefile": true,
+ "PATENTS": true,
+ "README": true,
+}
+
+// isWhitelisted returns true if a file is on the list
+// of "permitted" files for indexing. The filename must
+// be the directory-local name of the file.
+func isWhitelisted(filename string) bool {
+ key := filepath.Ext(filename)
+ if key == "" {
+ // file has no extension - use entire filename
+ key = filename
+ }
+ return whitelisted[key]
+}
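+
+// For example, "Makefile" has no extension, so the whole name is the key;
+// "print.go" is looked up under its extension ".go".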
+
+func (x *Indexer) visitFile(dirname string, f FileInfo, fulltextIndex bool) {
+ if !f.IsRegular() {
+ return
+ }
+
+ filename := filepath.Join(dirname, f.Name())
+ goFile := false
+
+ switch {
+ case isGoFile(f):
+ if !includeTestFiles && (!isPkgFile(f) || strings.HasPrefix(filename, "test/")) {
+ return
+ }
+ if !includeMainPackages && pkgName(filename) == "main" {
+ return
+ }
+ goFile = true
+
+ case !fulltextIndex || !isWhitelisted(f.Name()):
+ return
+ }
+
+ file, fast := x.addFile(filename, goFile)
+ if file == nil {
+ return // addFile failed
+ }
+
+ if fast != nil {
+ // we've got a Go file to index
+ x.current = file
+ dir, _ := filepath.Split(filename)
+ pak := Pak{dir, fast.Name.Name}
+ x.file = &File{filename, pak}
+ ast.Walk(x, fast)
+ }
+
+ // update statistics
+ x.stats.Bytes += file.Size()
+ x.stats.Files++
+ x.stats.Lines += file.LineCount()
+}
+
+// ----------------------------------------------------------------------------
+// Index
+
+type LookupResult struct {
+ Decls HitList // package-level declarations (with snippets)
+ Others HitList // all other occurrences
+}
+
+type Index struct {
+ fset *token.FileSet // file set used during indexing; nil if no textindex
+ suffixes *suffixarray.Index // suffixes for concatenated sources; nil if no textindex
+ words map[string]*LookupResult // maps words to hit lists
+ alts map[string]*AltWords // maps canonical(words) to lists of alternative spellings
+ snippets []*Snippet // all snippets, indexed by snippet index
+ stats Statistics
+}
+
+func canonical(w string) string { return strings.ToLower(w) }
+
+// NewIndex creates a new index for the .go files
+// in the directories given by dirnames.
+//
+func NewIndex(dirnames <-chan string, fulltextIndex bool) *Index {
+ var x Indexer
+
+ // initialize Indexer
+ x.fset = token.NewFileSet()
+ x.words = make(map[string]*IndexResult)
+
+ // index all files in the directories given by dirnames
+ for dirname := range dirnames {
+ list, err := fs.ReadDir(dirname)
+ if err != nil {
+ continue // ignore this directory
+ }
+ for _, f := range list {
+ if !f.IsDirectory() {
+ x.visitFile(dirname, f, fulltextIndex)
+ }
+ }
+ }
+
+ if !fulltextIndex {
+ // the file set, the current file, and the sources are
+ // not needed after indexing if no text index is built -
+ // help GC and clear them
+ x.fset = nil
+ x.sources.Reset()
+ x.current = nil // contains reference to fset!
+ }
+
+ // for each word, reduce the RunLists into a LookupResult;
+ // also collect the word with its canonical spelling in a
+ // word list for later computation of alternative spellings
+ words := make(map[string]*LookupResult)
+ var wlist RunList
+ for w, h := range x.words {
+ decls := reduce(&h.Decls)
+ others := reduce(&h.Others)
+ words[w] = &LookupResult{
+ Decls: decls,
+ Others: others,
+ }
+ wlist.Push(&wordPair{canonical(w), w})
+ }
+ x.stats.Words = len(words)
+
+ // reduce the word list {canonical(w), w} into
+ // a list of AltWords runs {canonical(w), {w}}
+ alist := wlist.reduce(lessWordPair, newAltWords)
+
+ // convert alist into a map of alternative spellings
+ alts := make(map[string]*AltWords)
+ for i := 0; i < alist.Len(); i++ {
+ a := alist.At(i).(*AltWords)
+ alts[a.Canon] = a
+ }
+
+ // convert snippet vector into a list
+ snippets := make([]*Snippet, x.snippets.Len())
+ for i := 0; i < x.snippets.Len(); i++ {
+ snippets[i] = x.snippets.At(i).(*Snippet)
+ }
+
+ // create text index
+ var suffixes *suffixarray.Index
+ if fulltextIndex {
+ suffixes = suffixarray.New(x.sources.Bytes())
+ }
+
+ return &Index{x.fset, suffixes, words, alts, snippets, x.stats}
+}
+
+// Stats() returns index statistics.
+func (x *Index) Stats() Statistics {
+ return x.stats
+}
+
+func (x *Index) LookupWord(w string) (match *LookupResult, alt *AltWords) {
+ match = x.words[w]
+ alt = x.alts[canonical(w)]
+ // remove current spelling from alternatives
+ // (if there is no match, the alternatives do
+ // not contain the current spelling)
+ if match != nil && alt != nil {
+ alt = alt.filter(w)
+ }
+ return
+}
+
+func isIdentifier(s string) bool {
+ var S scanner.Scanner
+ fset := token.NewFileSet()
+ S.Init(fset.AddFile("", fset.Base(), len(s)), []byte(s), nil, 0)
+ if _, tok, _ := S.Scan(); tok == token.IDENT {
+ _, tok, _ := S.Scan()
+ return tok == token.EOF
+ }
+ return false
+}
+
+// For a given query, which is either a single identifier or a qualified
+// identifier, Lookup returns a LookupResult, and a list of alternative
+// spellings, if any. If the query syntax is wrong, an error is reported.
+func (x *Index) Lookup(query string) (match *LookupResult, alt *AltWords, err os.Error) {
+ ss := strings.Split(query, ".")
+
+ // check query syntax
+ for _, s := range ss {
+ if !isIdentifier(s) {
+ err = os.NewError("all query parts must be identifiers")
+ return
+ }
+ }
+
+ switch len(ss) {
+ case 1:
+ match, alt = x.LookupWord(ss[0])
+
+ case 2:
+ pakname := ss[0]
+ match, alt = x.LookupWord(ss[1])
+ if match != nil {
+ // found a match - filter by package name
+ decls := match.Decls.filter(pakname)
+ others := match.Others.filter(pakname)
+ match = &LookupResult{decls, others}
+ }
+
+ default:
+ err = os.NewError("query is not a (qualified) identifier")
+ }
+
+ return
+}
+
+func (x *Index) Snippet(i int) *Snippet {
+ // handle illegal snippet indices gracefully
+ if 0 <= i && i < len(x.snippets) {
+ return x.snippets[i]
+ }
+ return nil
+}
+
+type positionList []struct {
+ filename string
+ line int
+}
+
+func (list positionList) Len() int { return len(list) }
+func (list positionList) Less(i, j int) bool { return list[i].filename < list[j].filename }
+func (list positionList) Swap(i, j int) { list[i], list[j] = list[j], list[i] }
+
+// unique returns the list sorted and with duplicate entries removed
+func unique(list []int) []int {
+ sort.Ints(list)
+ var last int
+ i := 0
+ for _, x := range list {
+ if i == 0 || x != last {
+ last = x
+ list[i] = x
+ i++
+ }
+ }
+ return list[0:i]
+}
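+
+// For example, unique([]int{3, 1, 3, 2}) returns [1 2 3].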
+
+// A FileLines value specifies a file and line numbers within that file.
+type FileLines struct {
+ Filename string
+ Lines []int
+}
+
+// LookupRegexp returns the number of matches and the matches where a regular
+// expression r is found in the full text index. At most n matches are
+// returned (thus found <= n).
+//
+func (x *Index) LookupRegexp(r *regexp.Regexp, n int) (found int, result []FileLines) {
+ if x.suffixes == nil || n <= 0 {
+ return
+ }
+ // n > 0
+
+ var list positionList
+ // FindAllIndex may return matches that span file boundaries.
+ // Such matches are unlikely, but after eliminating them we may end up
+ // with fewer than n matches. If we don't have enough at the end, redo
+ // the search with an increased value n1, but only if FindAllIndex
+ // returned all the requested matches in the first place (if it
+ // returned fewer than that there cannot be more).
+ for n1 := n; found < n; n1 += n - found {
+ found = 0
+ matches := x.suffixes.FindAllIndex(r, n1)
+ // compute files, exclude matches that span file boundaries,
+ // and map offsets to file-local offsets
+ list = make(positionList, len(matches))
+ for _, m := range matches {
+ // by construction, an offset corresponds to the Pos value
+ // for the file set - use it to get the file and line
+ p := token.Pos(m[0])
+ if file := x.fset.File(p); file != nil {
+ if base := file.Base(); base <= m[1] && m[1] <= base+file.Size() {
+ // match [m[0], m[1]) is within the file boundaries
+ list[found].filename = file.Name()
+ list[found].line = file.Line(p)
+ found++
+ }
+ }
+ }
+ if found == n || len(matches) < n1 {
+ // found all matches or there's no chance to find more
+ break
+ }
+ }
+ list = list[0:found]
+ sort.Sort(list) // sort by filename
+
+ // collect matches belonging to the same file
+ var last string
+ var lines []int
+ addLines := func() {
+ if len(lines) > 0 {
+ // remove duplicate lines
+ result = append(result, FileLines{last, unique(lines)})
+ lines = nil
+ }
+ }
+ for _, m := range list {
+ if m.filename != last {
+ addLines()
+ last = m.filename
+ }
+ lines = append(lines, m.line)
+ }
+ addLines()
+
+ return
+}
diff --git a/src/cmd/godoc/main.go b/src/cmd/godoc/main.go
new file mode 100644
index 000000000..89b12b9ac
--- /dev/null
+++ b/src/cmd/godoc/main.go
@@ -0,0 +1,423 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// godoc: Go Documentation Server
+
+// Web server tree:
+//
+// http://godoc/ main landing page
+// http://godoc/doc/ serve from $GOROOT/doc - spec, mem, tutorial, etc.
+// http://godoc/src/ serve files from $GOROOT/src; .go gets pretty-printed
+// http://godoc/cmd/ serve documentation about commands
+// http://godoc/pkg/ serve documentation about packages
+// (idea is if you say import "compress/zlib", you go to
+// http://godoc/pkg/compress/zlib)
+//
+// Command-line interface:
+//
+// godoc packagepath [name ...]
+//
+// godoc compress/zlib
+// - prints doc for package compress/zlib
+// godoc crypto/block Cipher NewCMAC
+// - prints doc for Cipher and NewCMAC in package crypto/block
+
+package main
+
+import (
+ "archive/zip"
+ "bytes"
+ _ "expvar" // to serve /debug/vars
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "http"
+ _ "http/pprof" // to serve /debug/pprof/*
+ "io"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "time"
+ "url"
+)
+
+const defaultAddr = ":6060" // default webserver address
+
+var (
+ // file system to serve
+ // (with e.g.: zip -r go.zip $GOROOT -i \*.go -i \*.html -i \*.css -i \*.js -i \*.txt -i \*.c -i \*.h -i \*.s -i \*.png -i \*.jpg -i \*.sh -i favicon.ico)
+ zipfile = flag.String("zip", "", "zip file providing the file system to serve; disabled if empty")
+
+ // periodic sync
+ syncCmd = flag.String("sync", "", "sync command; disabled if empty")
+ syncMin = flag.Int("sync_minutes", 0, "sync interval in minutes; disabled if <= 0")
+ syncDelay delayTime // actual sync interval in minutes; usually syncDelay == syncMin, but syncDelay may back off exponentially
+
+ // network
+ httpAddr = flag.String("http", "", "HTTP service address (e.g., '"+defaultAddr+"')")
+ serverAddr = flag.String("server", "", "webserver address for command line searches")
+
+ // layout control
+ html = flag.Bool("html", false, "print HTML in command-line mode")
+ srcMode = flag.Bool("src", false, "print (exported) source in command-line mode")
+
+ // command-line searches
+ query = flag.Bool("q", false, "arguments are considered search queries")
+)
+
+func serveError(w http.ResponseWriter, r *http.Request, relpath string, err os.Error) {
+ contents := applyTemplate(errorHTML, "errorHTML", err) // err may contain an absolute path!
+ w.WriteHeader(http.StatusNotFound)
+ servePage(w, "File "+relpath, "", "", contents)
+}
+
+func exec(rw http.ResponseWriter, args []string) (status int) {
+ r, w, err := os.Pipe()
+ if err != nil {
+ log.Printf("os.Pipe(): %v", err)
+ return 2
+ }
+
+ bin := args[0]
+ fds := []*os.File{nil, w, w}
+ if *verbose {
+ log.Printf("executing %v", args)
+ }
+ p, err := os.StartProcess(bin, args, &os.ProcAttr{Files: fds, Dir: *goroot})
+ defer r.Close()
+ w.Close()
+ if err != nil {
+ log.Printf("os.StartProcess(%q): %v", bin, err)
+ return 2
+ }
+ defer p.Release()
+
+ var buf bytes.Buffer
+ io.Copy(&buf, r)
+ wait, err := p.Wait(0)
+ if err != nil {
+ os.Stderr.Write(buf.Bytes())
+ log.Printf("os.Wait(%d, 0): %v", p.Pid, err)
+ return 2
+ }
+ status = wait.ExitStatus()
+ if !wait.Exited() || status > 1 {
+ os.Stderr.Write(buf.Bytes())
+ log.Printf("executing %v failed (exit status = %d)", args, status)
+ return
+ }
+
+ if *verbose {
+ os.Stderr.Write(buf.Bytes())
+ }
+ if rw != nil {
+ rw.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ rw.Write(buf.Bytes())
+ }
+
+ return
+}
+
+func dosync(w http.ResponseWriter, r *http.Request) {
+ args := []string{"/bin/sh", "-c", *syncCmd}
+ switch exec(w, args) {
+ case 0:
+ // sync succeeded and some files have changed;
+ // update package tree.
+ // TODO(gri): The directory tree may be temporarily out-of-sync.
+ // Consider keeping separate time stamps so the web-
+ // page can indicate this discrepancy.
+ initFSTree()
+ fallthrough
+ case 1:
+ // sync failed because no files changed;
+ // don't change the package tree
+ syncDelay.set(*syncMin) // revert to regular sync schedule
+ default:
+ // sync failed because of an error - back off exponentially, but try at least once a day
+ syncDelay.backoff(24 * 60)
+ }
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr,
+ "usage: godoc package [name ...]\n"+
+ " godoc -http="+defaultAddr+"\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+func loggingHandler(h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ log.Printf("%s\t%s", req.RemoteAddr, req.URL)
+ h.ServeHTTP(w, req)
+ })
+}
+
+func remoteSearch(query string) (res *http.Response, err os.Error) {
+ search := "/search?f=text&q=" + url.QueryEscape(query)
+
+ // list of addresses to try
+ var addrs []string
+ if *serverAddr != "" {
+ // explicit server address - only try this one
+ addrs = []string{*serverAddr}
+ } else {
+ addrs = []string{
+ defaultAddr,
+ "golang.org",
+ }
+ }
+
+ // remote search
+ for _, addr := range addrs {
+ url := "http://" + addr + search
+ res, err = http.Get(url)
+ if err == nil && res.StatusCode == http.StatusOK {
+ break
+ }
+ }
+
+ if err == nil && res.StatusCode != http.StatusOK {
+ err = os.NewError(res.Status)
+ }
+
+ return
+}
+
+// Does s look like a regular expression?
+func isRegexp(s string) bool {
+ return strings.IndexAny(s, ".(|)*+?^$[]") >= 0
+}
+
+// Make a regular expression of the form
+// names[0]|names[1]|...|names[len(names)-1].
+// Returns nil if the expression is illegal or if names is empty.
+func makeRx(names []string) (rx *regexp.Regexp) {
+ if len(names) > 0 {
+ s := ""
+ for i, name := range names {
+ if i > 0 {
+ s += "|"
+ }
+ if isRegexp(name) {
+ s += name
+ } else {
+ s += "^" + name + "$" // must match exactly
+ }
+ }
+ rx, _ = regexp.Compile(s) // rx is nil if there's a compilation error
+ }
+ return
+}
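+
+// Example (illustrative): given the arguments "Read" and "Write.*",
+//
+//	rx := makeRx([]string{"Read", "Write.*"})
+//
+// compiles the expression `^Read$|Write.*`: the plain name is anchored
+// so it must match exactly, while the argument containing regexp
+// metacharacters is spliced in verbatim.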
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ // Check usage: either server and no args, or command line and args
+ if (*httpAddr != "") != (flag.NArg() == 0) {
+ usage()
+ }
+
+ if *tabwidth < 0 {
+ log.Fatalf("negative tabwidth %d", *tabwidth)
+ }
+
+ // Determine file system to use.
+ // TODO(gri) - fs and fsHttp should really be the same. Try to unify.
+ // - fsHttp doesn't need to be set up in command-line mode,
+ // same is true for the http handlers in initHandlers.
+ if *zipfile == "" {
+ // use file system of underlying OS
+ *goroot = filepath.Clean(*goroot) // normalize path separator
+ fs = OS
+ fsHttp = http.Dir(*goroot)
+ } else {
+ // use file system specified via .zip file (path separator must be '/')
+ rc, err := zip.OpenReader(*zipfile)
+ if err != nil {
+ log.Fatalf("%s: %s\n", *zipfile, err)
+ }
+ *goroot = path.Join("/", *goroot) // fsHttp paths are relative to '/'
+ fs = NewZipFS(rc)
+ fsHttp = NewHttpZipFS(rc, *goroot)
+ }
+
+ initHandlers()
+ readTemplates()
+
+ if *httpAddr != "" {
+ // HTTP server mode.
+ var handler http.Handler = http.DefaultServeMux
+ if *verbose {
+ log.Printf("Go Documentation Server")
+ log.Printf("version = %s", runtime.Version())
+ log.Printf("address = %s", *httpAddr)
+ log.Printf("goroot = %s", *goroot)
+ log.Printf("tabwidth = %d", *tabwidth)
+ switch {
+ case !*indexEnabled:
+ log.Print("search index disabled")
+ case *maxResults > 0:
+ log.Printf("full text index enabled (maxresults = %d)", *maxResults)
+ default:
+ log.Print("identifier search index enabled")
+ }
+ if !fsMap.IsEmpty() {
+ log.Print("user-defined mapping:")
+ fsMap.Fprint(os.Stderr)
+ }
+ handler = loggingHandler(handler)
+ }
+
+ registerPublicHandlers(http.DefaultServeMux)
+ if *syncCmd != "" {
+ http.Handle("/debug/sync", http.HandlerFunc(dosync))
+ }
+
+ // Initialize default directory tree with corresponding timestamp.
+ // (Do it in a goroutine so that launch is quick.)
+ go initFSTree()
+
+ // Initialize directory trees for user-defined file systems (-path flag).
+ initDirTrees()
+
+ // Start sync goroutine, if enabled.
+ if *syncCmd != "" && *syncMin > 0 {
+ syncDelay.set(*syncMin) // initial sync delay
+ go func() {
+ for {
+ dosync(nil, nil)
+ delay, _ := syncDelay.get()
+ if *verbose {
+ log.Printf("next sync in %dmin", delay.(int))
+ }
+ time.Sleep(int64(delay.(int)) * 60e9) // minutes -> nanoseconds
+ }
+ }()
+ }
+
+ // Start indexing goroutine.
+ if *indexEnabled {
+ go indexer()
+ }
+
+ // Start http server.
+ if err := http.ListenAndServe(*httpAddr, handler); err != nil {
+ log.Fatalf("ListenAndServe %s: %v", *httpAddr, err)
+ }
+
+ return
+ }
+
+ // Command line mode.
+ if *html {
+ packageText = packageHTML
+ searchText = packageHTML
+ }
+
+ if *query {
+ // Command-line queries.
+ for i := 0; i < flag.NArg(); i++ {
+ res, err := remoteSearch(flag.Arg(i))
+ if err != nil {
+ log.Fatalf("remoteSearch: %s", err)
+ }
+ io.Copy(os.Stdout, res.Body)
+ }
+ return
+ }
+
+ // determine paths
+ path := flag.Arg(0)
+ if len(path) > 0 && path[0] == '.' {
+ // assume cwd; don't assume -goroot
+ cwd, _ := os.Getwd() // ignore errors
+ path = filepath.Join(cwd, path)
+ }
+ relpath := path
+ abspath := path
+ if t, pkg, err := build.FindTree(path); err == nil {
+ relpath = pkg
+ abspath = filepath.Join(t.SrcDir(), pkg)
+ } else if !filepath.IsAbs(path) {
+ abspath = absolutePath(path, pkgHandler.fsRoot)
+ } else {
+ relpath = relativeURL(path)
+ }
+
+ var mode PageInfoMode
+ if *srcMode {
+ // only filter exports if we don't have explicit command-line filter arguments
+ if flag.NArg() == 1 {
+ mode |= exportsOnly
+ }
+ } else {
+ mode = exportsOnly | genDoc
+ }
+ // TODO(gri): Provide a mechanism (flag?) to select a package
+ // if there are multiple packages in a directory.
+ info := pkgHandler.getPageInfo(abspath, relpath, "", mode)
+
+ if info.IsEmpty() {
+ // try again, this time assume it's a command
+ if !filepath.IsAbs(path) {
+ abspath = absolutePath(path, cmdHandler.fsRoot)
+ }
+ cmdInfo := cmdHandler.getPageInfo(abspath, relpath, "", mode)
+ // only use the cmdInfo if it actually contains a result
+ // (don't hide errors reported from looking up a package)
+ if !cmdInfo.IsEmpty() {
+ info = cmdInfo
+ }
+ }
+ if info.Err != nil {
+ log.Fatalf("%v", info.Err)
+ }
+
+ // If we have more than one argument, use the remaining arguments for filtering
+ if flag.NArg() > 1 {
+ args := flag.Args()[1:]
+ rx := makeRx(args)
+ if rx == nil {
+ log.Fatalf("illegal regular expression from %v", args)
+ }
+
+ filter := func(s string) bool { return rx.MatchString(s) }
+ switch {
+ case info.PAst != nil:
+ ast.FilterFile(info.PAst, filter)
+ // Special case: Don't use templates for printing
+ // so we only get the filtered declarations without
+ // package clause or extra whitespace.
+ for i, d := range info.PAst.Decls {
+ if i > 0 {
+ fmt.Println()
+ }
+ if *html {
+ var buf bytes.Buffer
+ writeNode(&buf, info.FSet, d)
+ FormatText(os.Stdout, buf.Bytes(), -1, true, "", nil)
+ } else {
+ writeNode(os.Stdout, info.FSet, d)
+ }
+ fmt.Println()
+ }
+ return
+
+ case info.PDoc != nil:
+ info.PDoc.Filter(filter)
+ }
+ }
+
+ if err := packageText.Execute(os.Stdout, info); err != nil {
+ log.Printf("packageText.Execute: %s", err)
+ }
+}
diff --git a/src/cmd/godoc/mapping.go b/src/cmd/godoc/mapping.go
new file mode 100644
index 000000000..51f23ab98
--- /dev/null
+++ b/src/cmd/godoc/mapping.go
@@ -0,0 +1,200 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the Mapping data structure.
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// A Mapping object maps relative paths (e.g. from URLs)
+// to absolute paths (of the file system) and vice versa.
+//
+// A Mapping object consists of a list of individual mappings
+// of the form prefix -> path, which are interpreted as follows:
+// A relative path of the form prefix/tail is to be mapped to
+// the absolute path/tail, if that absolute path exists in the file
+// system. Given a Mapping object, a relative path is mapped to an
+// absolute path by trying each of the individual mappings in order,
+// until a valid mapping is found. For instance, for the mapping:
+//
+// user -> /home/user
+// public -> /home/user/public
+// public -> /home/build/public
+//
+// the relative paths below are mapped to absolute paths as follows:
+//
+// user/foo -> /home/user/foo
+// public/net/rpc/file1.go -> /home/user/public/net/rpc/file1.go
+//
+// If there is no /home/user/public/net/rpc/file2.go, the next public
+// mapping entry is used to map the relative path to:
+//
+// public/net/rpc/file2.go -> /home/build/public/net/rpc/file2.go
+//
+// (assuming that file exists).
+//
+// Each individual mapping also has an RWValue associated with it that
+// may be used to store mapping-specific information. See the Iterate
+// method.
+//
+type Mapping struct {
+ list []mapping
+ prefixes []string // lazily computed from list
+}
+
+type mapping struct {
+ prefix, path string
+ value *RWValue
+}
+
+// Init initializes the Mapping from a list of paths.
+// Empty paths are ignored; relative paths are assumed to be relative to
+// the current working directory and converted to absolute paths.
+// For each path of the form:
+//
+// dirname/localname
+//
+// a mapping
+//
+// localname -> path
+//
+// is added to the Mapping object, in the order of occurrence.
+// For instance, under Unix, the argument:
+//
+// /home/user:/home/build/public
+//
+// leads to the following mapping:
+//
+// user -> /home/user
+// public -> /home/build/public
+//
+func (m *Mapping) Init(paths []string) {
+ pathlist := canonicalizePaths(paths, nil)
+ list := make([]mapping, len(pathlist))
+
+ // create mapping list
+ for i, path := range pathlist {
+ _, prefix := filepath.Split(path)
+ list[i] = mapping{prefix, path, new(RWValue)}
+ }
+
+ m.list = list
+}
+
+// IsEmpty returns true if there are no mappings specified.
+func (m *Mapping) IsEmpty() bool { return len(m.list) == 0 }
+
+// PrefixList returns a list of all prefixes, with duplicates removed.
+// For instance, for the mapping:
+//
+// user -> /home/user
+// public -> /home/user/public
+// public -> /home/build/public
+//
+// the prefix list is:
+//
+// user, public
+//
+func (m *Mapping) PrefixList() []string {
+ // compute the list lazily
+ if m.prefixes == nil {
+ list := make([]string, len(m.list))
+
+ // populate list
+ for i, e := range m.list {
+ list[i] = e.prefix
+ }
+
+ // sort the list and remove duplicate entries
+ sort.Strings(list)
+ i := 0
+ prev := ""
+ for _, path := range list {
+ if path != prev {
+ list[i] = path
+ i++
+ prev = path
+ }
+ }
+
+ m.prefixes = list[0:i]
+ }
+
+ return m.prefixes
+}
+
+// Fprint prints the mapping.
+func (m *Mapping) Fprint(w io.Writer) {
+ for _, e := range m.list {
+ fmt.Fprintf(w, "\t%s -> %s\n", e.prefix, e.path)
+ }
+}
+
+func splitFirst(path string) (head, tail string) {
+ i := strings.Index(path, string(filepath.Separator))
+ if i > 0 {
+ // 0 < i < len(path)
+ return path[0:i], path[i+1:]
+ }
+ return "", path
+}
+
+// ToAbsolute maps a slash-separated relative path to an absolute filesystem
+// path using the Mapping specified by the receiver. If the path cannot
+// be mapped, the empty string is returned.
+//
+func (m *Mapping) ToAbsolute(spath string) string {
+ fpath := filepath.FromSlash(spath)
+ prefix, tail := splitFirst(fpath)
+ for _, e := range m.list {
+ switch {
+ case e.prefix == prefix:
+ // use tail
+ case e.prefix == "":
+ tail = fpath
+ default:
+ continue // no match
+ }
+ abspath := filepath.Join(e.path, tail)
+ if _, err := fs.Stat(abspath); err == nil {
+ return abspath
+ }
+ }
+
+ return "" // no match
+}
+
+// ToRelative maps an absolute filesystem path to a relative slash-separated
+// path using the Mapping specified by the receiver. If the path cannot
+// be mapped, the empty string is returned.
+//
+func (m *Mapping) ToRelative(fpath string) string {
+ for _, e := range m.list {
+ if strings.HasPrefix(fpath, e.path) {
+ spath := filepath.ToSlash(fpath)
+ // /absolute/prefix/foo -> prefix/foo
+ return path.Join(e.prefix, spath[len(e.path):]) // Join will remove a trailing '/'
+ }
+ }
+ return "" // no match
+}
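+
+// Example (illustrative): with the single mapping
+//
+//	public -> /home/user/public
+//
+// and assuming /home/user/public/net/rpc/file1.go exists in the file
+// system, the two directions are:
+//
+//	m.ToAbsolute("public/net/rpc/file1.go") // "/home/user/public/net/rpc/file1.go"
+//	m.ToRelative("/home/user/public/net/rpc/file1.go") // "public/net/rpc/file1.go"
+//
+// Since ToAbsolute verifies each candidate with fs.Stat, the two
+// methods are inverses only for paths that actually exist.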
+
+// Iterate calls f for each path and RWValue in the mapping (in unspecified order)
+// until f returns false.
+//
+func (m *Mapping) Iterate(f func(path string, value *RWValue) bool) {
+ for _, e := range m.list {
+ if !f(e.path, e.value) {
+ return
+ }
+ }
+}
diff --git a/src/cmd/godoc/parser.go b/src/cmd/godoc/parser.go
new file mode 100644
index 000000000..da4b3853c
--- /dev/null
+++ b/src/cmd/godoc/parser.go
@@ -0,0 +1,68 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains support functions for parsing .go files.
+// Similar functionality is found in package go/parser but the
+// functions here operate using godoc's file system fs instead
+// of calling the OS's file operations directly.
+
+package main
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "os"
+ "path/filepath"
+)
+
+func parseFiles(fset *token.FileSet, filenames []string) (pkgs map[string]*ast.Package, first os.Error) {
+ pkgs = make(map[string]*ast.Package)
+ for _, filename := range filenames {
+ src, err := fs.ReadFile(filename)
+ if err != nil {
+ if first == nil {
+ first = err
+ }
+ continue
+ }
+
+ file, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
+ if err != nil {
+ if first == nil {
+ first = err
+ }
+ continue
+ }
+
+ name := file.Name.Name
+ pkg, found := pkgs[name]
+ if !found {
+ // TODO(gri) Use NewPackage here; reconsider ParseFiles API.
+ pkg = &ast.Package{name, nil, nil, make(map[string]*ast.File)}
+ pkgs[name] = pkg
+ }
+ pkg.Files[filename] = file
+ }
+ return
+}
+
+func parseDir(fset *token.FileSet, path string, filter func(FileInfo) bool) (map[string]*ast.Package, os.Error) {
+ list, err := fs.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+
+ filenames := make([]string, len(list))
+ i := 0
+ for _, d := range list {
+ if filter == nil || filter(d) {
+ filenames[i] = filepath.Join(path, d.Name())
+ i++
+ }
+ }
+ filenames = filenames[0:i]
+
+ return parseFiles(fset, filenames)
+}
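+
+// Typical use (sketch; goFilesOnly is a hypothetical filter shown for
+// illustration): parse all .go files of a directory into packages,
+// sharing one token.FileSet so positions stay comparable.
+//
+//	fset := token.NewFileSet()
+//	goFilesOnly := func(d FileInfo) bool {
+//		return d.IsRegular() && strings.HasSuffix(d.Name(), ".go")
+//	}
+//	pkgs, err := parseDir(fset, "/src/pkg/sort", goFilesOnly)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for name := range pkgs {
+//		fmt.Println("package", name)
+//	}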
diff --git a/src/cmd/godoc/snippet.go b/src/cmd/godoc/snippet.go
new file mode 100755
index 000000000..68e27d9a0
--- /dev/null
+++ b/src/cmd/godoc/snippet.go
@@ -0,0 +1,100 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the infrastructure to create a code
+// snippet for search results.
+//
+// Note: At the moment, this only creates HTML snippets.
+
+package main
+
+import (
+ "bytes"
+ "go/ast"
+ "go/token"
+ "fmt"
+)
+
+type Snippet struct {
+ Line int
+ Text string // HTML-escaped
+}
+
+func newSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) *Snippet {
+ // TODO: use the original source instead of pretty-printing the node
+ var buf1 bytes.Buffer
+ writeNode(&buf1, fset, decl)
+ // wrap text with <pre> tag
+ var buf2 bytes.Buffer
+ buf2.WriteString("<pre>")
+ FormatText(&buf2, buf1.Bytes(), -1, true, id.Name, nil)
+ buf2.WriteString("</pre>")
+ return &Snippet{fset.Position(id.Pos()).Line, buf2.String()}
+}
+
+func findSpec(list []ast.Spec, id *ast.Ident) ast.Spec {
+ for _, spec := range list {
+ switch s := spec.(type) {
+ case *ast.ImportSpec:
+ if s.Name == id {
+ return s
+ }
+ case *ast.ValueSpec:
+ for _, n := range s.Names {
+ if n == id {
+ return s
+ }
+ }
+ case *ast.TypeSpec:
+ if s.Name == id {
+ return s
+ }
+ }
+ }
+ return nil
+}
+
+func genSnippet(fset *token.FileSet, d *ast.GenDecl, id *ast.Ident) *Snippet {
+ s := findSpec(d.Specs, id)
+ if s == nil {
+ return nil // declaration doesn't contain id - exit gracefully
+ }
+
+ // only use the spec containing the id for the snippet
+ dd := &ast.GenDecl{d.Doc, d.Pos(), d.Tok, d.Lparen, []ast.Spec{s}, d.Rparen}
+
+ return newSnippet(fset, dd, id)
+}
+
+func funcSnippet(fset *token.FileSet, d *ast.FuncDecl, id *ast.Ident) *Snippet {
+ if d.Name != id {
+ return nil // declaration doesn't contain id - exit gracefully
+ }
+
+ // only use the function signature for the snippet
+ dd := &ast.FuncDecl{d.Doc, d.Recv, d.Name, d.Type, nil}
+
+ return newSnippet(fset, dd, id)
+}
+
+// NewSnippet creates a text snippet from a declaration decl containing an
+// identifier id. Parts of the declaration not containing the identifier
+// may be removed for a more compact snippet.
+//
+func NewSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) (s *Snippet) {
+ switch d := decl.(type) {
+ case *ast.GenDecl:
+ s = genSnippet(fset, d, id)
+ case *ast.FuncDecl:
+ s = funcSnippet(fset, d, id)
+ }
+
+ // handle failure gracefully
+ if s == nil {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, `<span class="alert">could not generate a snippet for <span class="highlight">%s</span></span>`, id.Name)
+ s = &Snippet{fset.Position(id.Pos()).Line, buf.String()}
+ }
+ return
+}
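+
+// For example (illustrative): given the declaration
+//
+//	const (
+//		A = 1
+//		B = 2
+//	)
+//
+// and the identifier B, NewSnippet drops A's spec and renders a
+// snippet that contains only the spec declaring B, tagged with B's
+// line number and wrapped in <pre> tags with B highlighted.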
diff --git a/src/cmd/godoc/spec.go b/src/cmd/godoc/spec.go
new file mode 100644
index 000000000..3f69add86
--- /dev/null
+++ b/src/cmd/godoc/spec.go
@@ -0,0 +1,198 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the mechanism to "linkify" html source
+// text containing EBNF sections (as found in go_spec.html).
+// The result is the input source text with the EBNF sections
+// modified such that identifiers are linked to the respective
+// definitions.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/scanner"
+ "go/token"
+ "io"
+)
+
+type ebnfParser struct {
+ out io.Writer // parser output
+ src []byte // parser source
+ file *token.File // for position information
+ scanner scanner.Scanner
+ prev int // offset of previous token
+ pos token.Pos // token position
+ tok token.Token // one token look-ahead
+ lit string // token literal
+}
+
+func (p *ebnfParser) flush() {
+ offs := p.file.Offset(p.pos)
+ p.out.Write(p.src[p.prev:offs])
+ p.prev = offs
+}
+
+func (p *ebnfParser) next() {
+ if p.pos.IsValid() {
+ p.flush()
+ }
+ p.pos, p.tok, p.lit = p.scanner.Scan()
+ if p.tok.IsKeyword() {
+ // TODO Should keyword mapping always happen outside scanner?
+ // Or should there be a flag to scanner to enable keyword mapping?
+ p.tok = token.IDENT
+ }
+}
+
+func (p *ebnfParser) Error(pos token.Position, msg string) {
+ fmt.Fprintf(p.out, `<span class="alert">error: %s</span>`, msg)
+}
+
+func (p *ebnfParser) errorExpected(pos token.Pos, msg string) {
+ msg = "expected " + msg
+ if pos == p.pos {
+ // the error happened at the current position;
+ // make the error message more specific
+ msg += ", found '" + p.tok.String() + "'"
+ if p.tok.IsLiteral() {
+ msg += " " + p.lit
+ }
+ }
+ p.Error(p.file.Position(pos), msg)
+}
+
+func (p *ebnfParser) expect(tok token.Token) token.Pos {
+ pos := p.pos
+ if p.tok != tok {
+ p.errorExpected(pos, "'"+tok.String()+"'")
+ }
+ p.next() // make progress in any case
+ return pos
+}
+
+func (p *ebnfParser) parseIdentifier(def bool) {
+ name := p.lit
+ p.expect(token.IDENT)
+ if def {
+ fmt.Fprintf(p.out, `<a id="%s">%s</a>`, name, name)
+ } else {
+ fmt.Fprintf(p.out, `<a href="#%s" class="noline">%s</a>`, name, name)
+ }
+ p.prev += len(name) // skip identifier when calling flush
+}
+
+func (p *ebnfParser) parseTerm() bool {
+ switch p.tok {
+ case token.IDENT:
+ p.parseIdentifier(false)
+
+ case token.STRING:
+ p.next()
+ const ellipsis = "…" // U+2026, the horizontal ellipsis character
+ if p.tok == token.ILLEGAL && p.lit == ellipsis {
+ p.next()
+ p.expect(token.STRING)
+ }
+
+ case token.LPAREN:
+ p.next()
+ p.parseExpression()
+ p.expect(token.RPAREN)
+
+ case token.LBRACK:
+ p.next()
+ p.parseExpression()
+ p.expect(token.RBRACK)
+
+ case token.LBRACE:
+ p.next()
+ p.parseExpression()
+ p.expect(token.RBRACE)
+
+ default:
+ return false
+ }
+
+ return true
+}
+
+func (p *ebnfParser) parseSequence() {
+ if !p.parseTerm() {
+ p.errorExpected(p.pos, "term")
+ }
+ for p.parseTerm() {
+ }
+}
+
+func (p *ebnfParser) parseExpression() {
+ for {
+ p.parseSequence()
+ if p.tok != token.OR {
+ break
+ }
+ p.next()
+ }
+}
+
+func (p *ebnfParser) parseProduction() {
+ p.parseIdentifier(true)
+ p.expect(token.ASSIGN)
+ if p.tok != token.PERIOD {
+ p.parseExpression()
+ }
+ p.expect(token.PERIOD)
+}
+
+func (p *ebnfParser) parse(fset *token.FileSet, out io.Writer, src []byte) {
+ // initialize ebnfParser
+ p.out = out
+ p.src = src
+ p.file = fset.AddFile("", fset.Base(), len(src))
+ p.scanner.Init(p.file, src, p, scanner.AllowIllegalChars)
+ p.next() // initializes pos, tok, lit
+
+ // process source
+ for p.tok != token.EOF {
+ p.parseProduction()
+ }
+ p.flush()
+}
+
+// Markers around EBNF sections
+var (
+ openTag = []byte(`<pre class="ebnf">`)
+ closeTag = []byte(`</pre>`)
+)
+
+func linkify(out io.Writer, src []byte) {
+ fset := token.NewFileSet()
+ for len(src) > 0 {
+ n := len(src)
+
+ // i: beginning of EBNF text (or end of source)
+ i := bytes.Index(src, openTag)
+ if i < 0 {
+ i = n - len(openTag)
+ }
+ i += len(openTag)
+
+ // j: end of EBNF text (or end of source)
+ j := bytes.Index(src[i:n], closeTag) // close marker
+ if j < 0 {
+ j = n - i
+ }
+ j += i
+
+ // write text before EBNF
+ out.Write(src[0:i])
+ // parse and write EBNF
+ var p ebnfParser
+ p.parse(fset, out, src[i:j])
+
+ // advance
+ src = src[j:n]
+ }
+}
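+
+// For example (illustrative): given HTML containing the EBNF section
+//
+//	<pre class="ebnf">
+//	ArrayType = "[" ArrayLength "]" ElementType .
+//	</pre>
+//
+// linkify turns the defining occurrence of ArrayType into an anchor
+// target (<a id="ArrayType">) and the uses of ArrayLength and
+// ElementType into links (<a href="#ArrayLength" class="noline">)
+// pointing at the productions that define them.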
diff --git a/src/cmd/godoc/utils.go b/src/cmd/godoc/utils.go
new file mode 100644
index 000000000..11e46aee5
--- /dev/null
+++ b/src/cmd/godoc/utils.go
@@ -0,0 +1,168 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains support functionality for godoc.
+
+package main
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+ "utf8"
+)
+
+// An RWValue wraps a value, permits mutually exclusive
+// access to it, and records the time the value was last set.
+//
+type RWValue struct {
+ mutex sync.RWMutex
+ value interface{}
+ timestamp int64 // time of last set(), in seconds since epoch
+}
+
+func (v *RWValue) set(value interface{}) {
+ v.mutex.Lock()
+ v.value = value
+ v.timestamp = time.Seconds()
+ v.mutex.Unlock()
+}
+
+func (v *RWValue) get() (interface{}, int64) {
+ v.mutex.RLock()
+ defer v.mutex.RUnlock()
+ return v.value, v.timestamp
+}
+
+// TODO(gri) For now, using os.Getwd() is ok here since the functionality
+// based on this code is not invoked for the appengine version,
+// but this is fragile. Determine what the right thing to do is
+// here (possibly add a Getwd-equivalent to FileSystem).
+var cwd, _ = os.Getwd() // ignore errors
+
+// canonicalizePaths takes a list of (directory/file) paths and returns
+// the list of corresponding absolute paths in sorted (increasing) order.
+// Relative paths are assumed to be relative to the current directory,
+// empty and duplicate paths as well as paths for which filter(path) is
+// false are discarded. filter may be nil in which case it is not used.
+//
+func canonicalizePaths(list []string, filter func(path string) bool) []string {
+ i := 0
+ for _, path := range list {
+ path = strings.TrimSpace(path)
+ if len(path) == 0 {
+ continue // ignore empty paths (don't assume ".")
+ }
+ // len(path) > 0: normalize path
+ if filepath.IsAbs(path) {
+ path = filepath.Clean(path)
+ } else {
+ path = filepath.Join(cwd, path)
+ }
+ // we have a non-empty absolute path
+ if filter != nil && !filter(path) {
+ continue
+ }
+ // keep the path
+ list[i] = path
+ i++
+ }
+ list = list[0:i]
+
+ // sort the list and remove duplicate entries
+ sort.Strings(list)
+ i = 0
+ prev := ""
+ for _, path := range list {
+ if path != prev {
+ list[i] = path
+ i++
+ prev = path
+ }
+ }
+
+ return list[0:i]
+}
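+
+// For example (illustrative), with a current working directory of
+// /home/user:
+//
+//	canonicalizePaths([]string{"b", "", "/a", "b", "a/.."}, nil)
+//
+// returns ["/a", "/home/user", "/home/user/b"]: the empty entry and
+// the duplicate "b" are dropped, relative paths are joined with the
+// current directory, "a/.." is cleaned away, and the result is sorted.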
+
+// writeFileAtomically writes data to a temporary file and then
+// atomically renames that file to the file named by filename.
+//
+func writeFileAtomically(filename string, data []byte) os.Error {
+ // TODO(gri) this won't work on appengine
+ f, err := ioutil.TempFile(filepath.Split(filename))
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ f.Close()
+ if err != nil {
+ return err
+ }
+ if n < len(data) {
+ return io.ErrShortWrite
+ }
+ return os.Rename(f.Name(), filename)
+}
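+
+// Usage (illustrative):
+//
+//	if err := writeFileAtomically("index.dump", data); err != nil {
+//		log.Printf("writeFileAtomically: %v", err)
+//	}
+//
+// Because the data is staged in a temporary file in the same directory
+// and moved into place with a single rename, readers never observe a
+// partially written file: they see either the old contents or the
+// complete new contents.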
+
+// isText returns true if a significant prefix of s looks like correct UTF-8;
+// that is, if it is likely that s is human-readable text.
+//
+func isText(s []byte) bool {
+ const max = 1024 // at least utf8.UTFMax
+ if len(s) > max {
+ s = s[0:max]
+ }
+ for i, c := range string(s) {
+ if i+utf8.UTFMax > len(s) {
+ // last char may be incomplete - ignore
+ break
+ }
+ if c == 0xFFFD || c < ' ' && c != '\n' && c != '\t' && c != '\f' {
+ // decoding error or control character - not a text file
+ return false
+ }
+ }
+ return true
+}
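+
+// For example (illustrative):
+//
+//	isText([]byte("package main\n")) // true: valid UTF-8 text
+//	isText([]byte{0x00, 0x01, 0x02}) // false: control characters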
+
+// TODO(gri): Should have a mapping from extension to handler, eventually.
+
+// textExt[x] is true if the extension x indicates a text file, and false otherwise.
+var textExt = map[string]bool{
+ ".css": false, // must be served raw
+ ".js": false, // must be served raw
+}
+
+// isTextFile returns true if the file has a known extension indicating
+// a text file, or if a significant chunk of the specified file looks like
+// correct UTF-8; that is, if it is likely that the file contains human-
+// readable text.
+//
+func isTextFile(filename string) bool {
+ // if the extension is known, use it for decision making
+ if isText, found := textExt[filepath.Ext(filename)]; found {
+ return isText
+ }
+
+ // the extension is not known; read an initial chunk
+ // of the file and check if it looks like text
+ f, err := fs.Open(filename)
+ if err != nil {
+ return false
+ }
+ defer f.Close()
+
+ var buf [1024]byte
+ n, err := f.Read(buf[0:])
+ if err != nil {
+ return false
+ }
+
+ return isText(buf[0:n])
+}
diff --git a/src/cmd/godoc/zip.go b/src/cmd/godoc/zip.go
new file mode 100644
index 000000000..27dc142f5
--- /dev/null
+++ b/src/cmd/godoc/zip.go
@@ -0,0 +1,207 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file provides an implementation of the FileSystem
+// interface based on the contents of a .zip file.
+//
+// Assumptions:
+//
+// - The file paths stored in the zip file must use a slash ('/') as path
+// separator; and they must be relative (i.e., they must not start with
+// a '/' - this is usually the case if the file was created w/o special
+// options).
+// - The zip file system treats the file paths found in the zip internally
+// like absolute paths w/o a leading '/'; i.e., the paths are considered
+// relative to the root of the file system.
+// - All path arguments to file system methods must be absolute paths.
+
+package main
+
+import (
+ "archive/zip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "sort"
+ "strings"
+)
+
+// zipFI is the zip-file based implementation of FileInfo
+type zipFI struct {
+ name string // directory-local name
+ file *zip.File // nil for a directory
+}
+
+func (fi zipFI) Name() string {
+ return fi.name
+}
+
+func (fi zipFI) Size() int64 {
+ if f := fi.file; f != nil {
+ return int64(f.UncompressedSize)
+ }
+ return 0 // directory
+}
+
+func (fi zipFI) Mtime_ns() int64 {
+ if f := fi.file; f != nil {
+ return f.Mtime_ns()
+ }
+ return 0 // directory has no modified time entry
+}
+
+func (fi zipFI) IsDirectory() bool {
+ return fi.file == nil
+}
+
+func (fi zipFI) IsRegular() bool {
+ return fi.file != nil
+}
+
+// zipFS is the zip-file based implementation of FileSystem
+type zipFS struct {
+ *zip.ReadCloser
+ list zipList
+}
+
+func (fs *zipFS) Close() os.Error {
+ fs.list = nil
+ return fs.ReadCloser.Close()
+}
+
+func zipPath(name string) string {
+ name = path.Clean(name)
+ if !path.IsAbs(name) {
+ panic(fmt.Sprintf("stat: not an absolute path: %s", name))
+ }
+ return name[1:] // strip leading '/'
+}
+
+func (fs *zipFS) stat(abspath string) (int, zipFI, os.Error) {
+ i, exact := fs.list.lookup(abspath)
+ if i < 0 {
+ // abspath has leading '/' stripped - print it explicitly
+ return -1, zipFI{}, fmt.Errorf("file not found: /%s", abspath)
+ }
+ _, name := path.Split(abspath)
+ var file *zip.File
+ if exact {
+ file = fs.list[i] // exact match found - must be a file
+ }
+ return i, zipFI{name, file}, nil
+}
+
+func (fs *zipFS) Open(abspath string) (io.ReadCloser, os.Error) {
+ _, fi, err := fs.stat(zipPath(abspath))
+ if err != nil {
+ return nil, err
+ }
+ if fi.IsDirectory() {
+ return nil, fmt.Errorf("Open: %s is a directory", abspath)
+ }
+ return fi.file.Open()
+}
+
+func (fs *zipFS) Lstat(abspath string) (FileInfo, os.Error) {
+ _, fi, err := fs.stat(zipPath(abspath))
+ return fi, err
+}
+
+func (fs *zipFS) Stat(abspath string) (FileInfo, os.Error) {
+ _, fi, err := fs.stat(zipPath(abspath))
+ return fi, err
+}
+
+func (fs *zipFS) ReadDir(abspath string) ([]FileInfo, os.Error) {
+ path := zipPath(abspath)
+ i, fi, err := fs.stat(path)
+ if err != nil {
+ return nil, err
+ }
+ if !fi.IsDirectory() {
+ return nil, fmt.Errorf("ReadDir: %s is not a directory", abspath)
+ }
+
+ var list []FileInfo
+ dirname := path + "/"
+ prevname := ""
+ for _, e := range fs.list[i:] {
+ if !strings.HasPrefix(e.Name, dirname) {
+ break // not in the same directory anymore
+ }
+ name := e.Name[len(dirname):] // local name
+ file := e
+ if i := strings.IndexRune(name, '/'); i >= 0 {
+ // We infer directories from files in subdirectories.
+ // If we have x/y, return a directory entry for x.
+ name = name[0:i] // keep local directory name only
+ file = nil
+ }
+ // If we have x/y and x/z, don't return two directory entries for x.
+ // TODO(gri): It should be possible to do this more efficiently
+ // by determining the (fs.list) range of local directory entries
+ // (via two binary searches).
+ if name != prevname {
+ list = append(list, zipFI{name, file})
+ prevname = name
+ }
+ }
+
+ return list, nil
+}
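+
+// For example (illustrative): if the zip file contains the entries
+//
+//	go/src/a.go
+//	go/src/x/y.go
+//	go/src/x/z.go
+//
+// then ReadDir("/go/src") returns a file entry for a.go and a single
+// synthesized directory entry for x: the directory is inferred from
+// the two files below it and reported only once.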
+
+func (fs *zipFS) ReadFile(abspath string) ([]byte, os.Error) {
+ rc, err := fs.Open(abspath)
+ if err != nil {
+ return nil, err
+ }
+ return ioutil.ReadAll(rc)
+}
+
+func NewZipFS(rc *zip.ReadCloser) FileSystem {
+ list := make(zipList, len(rc.File))
+ copy(list, rc.File) // sort a copy of rc.File
+ sort.Sort(list)
+ return &zipFS{rc, list}
+}
+
+type zipList []*zip.File
+
+// zipList implements sort.Interface
+func (z zipList) Len() int { return len(z) }
+func (z zipList) Less(i, j int) bool { return z[i].Name < z[j].Name }
+func (z zipList) Swap(i, j int) { z[i], z[j] = z[j], z[i] }
+
+// lookup returns the smallest index of an entry with an exact match
+// for name, or an inexact match starting with name/. If there is no
+// such entry, the result is -1, false.
+func (z zipList) lookup(name string) (index int, exact bool) {
+ // look for exact match first (name comes before name/ in z)
+ i := sort.Search(len(z), func(i int) bool {
+ return name <= z[i].Name
+ })
+ if i < 0 {
+ return -1, false
+ }
+ if z[i].Name == name {
+ return i, true
+ }
+
+ // look for inexact match (must be in z[i:], if present)
+ z = z[i:]
+ name += "/"
+ j := sort.Search(len(z), func(i int) bool {
+ return name <= z[i].Name
+ })
+ if j < 0 {
+ return -1, false
+ }
+ if strings.HasPrefix(z[j].Name, name) {
+ return i + j, false
+ }
+
+ return -1, false
+}
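+
+// For example (illustrative), given the sorted entry names
+//
+//	a, a/b, a/c, d
+//
+// lookup("a") returns (0, true) - an exact match - lookup("a/b")
+// returns (1, true), and lookup("b") returns (-1, false). If the list
+// contained only a/b and a/c, lookup("a") would instead return the
+// inexact match (0, false) via the "a/" prefix.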