summaryrefslogtreecommitdiff
path: root/src/cmd/godoc
diff options
context:
space:
mode:
Diffstat (limited to 'src/cmd/godoc')
-rw-r--r--src/cmd/godoc/README.godoc-app80
-rw-r--r--src/cmd/godoc/appconfig.go30
-rw-r--r--src/cmd/godoc/appinit.go43
-rw-r--r--src/cmd/godoc/codewalk.go39
-rw-r--r--src/cmd/godoc/dirtrees.go14
-rw-r--r--src/cmd/godoc/doc.go17
-rw-r--r--src/cmd/godoc/filesystem.go65
-rw-r--r--src/cmd/godoc/format.go6
-rw-r--r--src/cmd/godoc/godoc.go504
-rw-r--r--src/cmd/godoc/httpzip.go84
-rw-r--r--src/cmd/godoc/index.go44
-rw-r--r--src/cmd/godoc/main.go41
-rw-r--r--src/cmd/godoc/mapping.go29
-rw-r--r--src/cmd/godoc/parser.go6
-rw-r--r--src/cmd/godoc/setup-godoc-app.bash121
-rw-r--r--src/cmd/godoc/snippet.go2
-rw-r--r--src/cmd/godoc/spec.go135
-rw-r--r--src/cmd/godoc/throttle.go36
-rw-r--r--src/cmd/godoc/utils.go10
-rw-r--r--src/cmd/godoc/zip.go37
20 files changed, 864 insertions, 479 deletions
diff --git a/src/cmd/godoc/README.godoc-app b/src/cmd/godoc/README.godoc-app
new file mode 100644
index 000000000..88cfee41e
--- /dev/null
+++ b/src/cmd/godoc/README.godoc-app
@@ -0,0 +1,80 @@
+Copyright 2011 The Go Authors. All rights reserved.
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file.
+
+godoc on appengine
+------------------
+
+Prerequisites
+-------------
+
+* Go appengine SDK 1.5.5 - 2011-10-11
+ http://code.google.com/appengine/downloads.html#Google_App_Engine_SDK_for_Go
+
+* Go sources at tip under $GOROOT
+
+
+Directory structure
+-------------------
+
+* Let $APPDIR be the directory containing the app engine files.
+ (e.g., $APPDIR=$HOME/godoc-app)
+
+* $APPDIR contains the following entries (this may change depending on
+ app-engine release and version of godoc):
+
+ alt/
+ encoding/binary/
+ go/*
+ index/suffixarray/
+ app.yaml
+ godoc.zip
+ godoc/
+ index.split.*
+
+* The app.yaml file is set up per app engine documentation.
+ For instance:
+
+ application: godoc-app
+ version: 1-5-5
+ runtime: go
+ api_version: 3
+
+ handlers:
+ - url: /.*
+ script: _go_app
+
+* The godoc/ directory contains a copy of the files under $GOROOT/src/cmd/godoc
+ with modifications:
+
+ - doc.go is excluded (it belongs to pseudo-package "documentation")
+ - main.go is excluded (appinit.go is taking its place)
+
+ Additional manual modifications are required to refer to the alt/ packages
+ where the app-engine library is not up-to-date with the godoc version.
+
+* The alt/ directory contains up-to-date copies of Go packages that a tip-based
+ godoc is dependent on but which do not yet exist in the current app-engine SDK.
+ At the time of this writing (10/14/2011) this is the entire go directory tree
+ (for the missing FileSet serialization code in go/token) as well as the
+ index/suffixarray package (for the missing suffix array serialization code).
+ The latest (alt/)index/suffixarray package internally requires the latest
+ version of encoding/binary, which is why it also needs to be present under
+ alt/.
+
+
+Configuring and running godoc
+-----------------------------
+
+To configure godoc, run
+
+ bash setup-godoc-app.bash
+
+to create the godoc.zip, index.split.*, and godoc/appconfig.go files
+based on $GOROOT and $APPDIR. See the script for details on usage.
+
+To run godoc locally, using the app-engine emulator, run
+
+ <path to google_appengine>/dev_appserver.py $APPDIR
+
+godoc should come up at http://localhost:8080 .
diff --git a/src/cmd/godoc/appconfig.go b/src/cmd/godoc/appconfig.go
deleted file mode 100644
index 052a9ebc8..000000000
--- a/src/cmd/godoc/appconfig.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains configuration information used by
-// godoc when running on app engine. Adjust as needed
-// (typically when the .zip file changes).
-
-package main
-
-const (
- // zipFilename is the name of the .zip file
- // containing the file system served by godoc.
- zipFilename = "godoc.zip"
-
- // zipGoroot is the path of the goroot directory
- // in the .zip file.
- zipGoroot = "/home/user/go"
-
- // If indexFilenames != "", the search index is
- // initialized with the index stored in these
- // files (otherwise it will be built at run-time,
- // eventually). indexFilenames is a glob pattern;
- // the specified files are concatenated in sorted
- // order (by filename).
- // app-engine limit: file sizes must be <= 10MB;
- // use "split -b8m indexfile index.split." to get
- // smaller files.
- indexFilenames = "index.split.*"
-)
diff --git a/src/cmd/godoc/appinit.go b/src/cmd/godoc/appinit.go
index 8c93425f3..3113498c8 100644
--- a/src/cmd/godoc/appinit.go
+++ b/src/cmd/godoc/appinit.go
@@ -2,52 +2,21 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// To run godoc under app engine, substitute main.go with
-// this file (appinit.go), provide a .zip file containing
-// the file system to serve, the index file (or files)
-// containing the pre-computed search index and adjust
-// the configuration parameters in appconfig.go accordingly.
-//
-// The current app engine SDK may be based on an older Go
-// release version. To correct for version skew, copy newer
-// packages into the alt directory (e.g. alt/strings) and
-// adjust the imports in the godoc source files (e.g. from
-// `import "strings"` to `import "alt/strings"`). Both old
-// and new packages may be used simultaneously as long as
-// there is no package global state that needs to be shared.
-//
-// The directory structure should look as follows:
-//
-// godoc-app // directory containing the app engine app
-// alt // alternative packages directory to
-// // correct for version skew
-// strings // never version of the strings package
-// ... //
-// app.yaml // app engine control file
-// godoc.zip // .zip file containing the file system to serve
-// godoc // contains godoc sources
-// appinit.go // this file instead of godoc/main.go
-// appconfig.go // godoc for app engine configuration
-// ... //
-// index.split.* // index file(s) containing the search index to serve
-//
-// To run app the engine emulator locally:
-//
-// dev_appserver.py -a 0 godoc-app
-//
-// The godoc home page is served at: <hostname>:8080 and localhost:8080.
+// +build ignore
package main
+// This file replaces main.go when running godoc under app-engine.
+// See README.godoc-app for details.
+
import (
"archive/zip"
- "http"
"log"
- "os"
+ "net/http"
"path"
)
-func serveError(w http.ResponseWriter, r *http.Request, relpath string, err os.Error) {
+func serveError(w http.ResponseWriter, r *http.Request, relpath string, err error) {
contents := applyTemplate(errorHTML, "errorHTML", err) // err may contain an absolute path!
w.WriteHeader(http.StatusNotFound)
servePage(w, "File "+relpath, "", "", contents)
diff --git a/src/cmd/godoc/codewalk.go b/src/cmd/godoc/codewalk.go
index fb5f27596..b3bc79abe 100644
--- a/src/cmd/godoc/codewalk.go
+++ b/src/cmd/godoc/codewalk.go
@@ -13,18 +13,19 @@
package main
import (
+ "encoding/xml"
+ "errors"
"fmt"
- "http"
"io"
"log"
+ "net/http"
"os"
- "exp/regexp"
+ "regexp"
"sort"
"strconv"
"strings"
- "template"
- "utf8"
- "xml"
+ "text/template"
+ "unicode/utf8"
)
// Handler for /doc/codewalk/ and below.
@@ -40,7 +41,7 @@ func codewalk(w http.ResponseWriter, r *http.Request) {
// If directory exists, serve list of code walks.
dir, err := fs.Lstat(abspath)
- if err == nil && dir.IsDirectory() {
+ if err == nil && dir.IsDir() {
codewalkDir(w, r, relpath, abspath)
return
}
@@ -84,7 +85,7 @@ type Codestep struct {
XML string `xml:"innerxml"`
// Derived from Src; not in XML.
- Err os.Error
+ Err error
File string
Lo int
LoByte int
@@ -107,16 +108,16 @@ func (st *Codestep) String() string {
}
// loadCodewalk reads a codewalk from the named XML file.
-func loadCodewalk(filename string) (*Codewalk, os.Error) {
+func loadCodewalk(filename string) (*Codewalk, error) {
f, err := fs.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
cw := new(Codewalk)
- p := xml.NewParser(f)
- p.Entity = xml.HTMLEntity
- err = p.Unmarshal(cw, nil)
+ d := xml.NewDecoder(f)
+ d.Entity = xml.HTMLEntity
+ err = d.Decode(cw)
if err != nil {
return nil, &os.PathError{"parsing", filename, err}
}
@@ -185,7 +186,7 @@ func codewalkDir(w http.ResponseWriter, r *http.Request, relpath, abspath string
var v []interface{}
for _, fi := range dir {
name := fi.Name()
- if fi.IsDirectory() {
+ if fi.IsDir() {
v = append(v, &elem{name + "/", ""})
} else if strings.HasSuffix(name, ".xml") {
cw, err := loadCodewalk(abspath + "/" + name)
@@ -252,7 +253,7 @@ func codewalkFileprint(w http.ResponseWriter, r *http.Request, f string) {
// It returns the lo and hi byte offset of the matched region within data.
// See http://plan9.bell-labs.com/sys/doc/sam/sam.html Table II
// for details on the syntax.
-func addrToByteRange(addr string, start int, data []byte) (lo, hi int, err os.Error) {
+func addrToByteRange(addr string, start int, data []byte) (lo, hi int, err error) {
var (
dir byte
prevc byte
@@ -264,7 +265,7 @@ func addrToByteRange(addr string, start int, data []byte) (lo, hi int, err os.Er
c := addr[0]
switch c {
default:
- err = os.NewError("invalid address syntax near " + string(c))
+ err = errors.New("invalid address syntax near " + string(c))
case ',':
if len(addr) == 1 {
hi = len(data)
@@ -348,7 +349,7 @@ func addrToByteRange(addr string, start int, data []byte) (lo, hi int, err os.Er
// (or characters) after hi. Applying -n (or -#n) means to back up n lines
// (or characters) before lo.
// The return value is the new lo, hi.
-func addrNumber(data []byte, lo, hi int, dir byte, n int, charOffset bool) (int, int, os.Error) {
+func addrNumber(data []byte, lo, hi int, dir byte, n int, charOffset bool) (int, int, error) {
switch dir {
case 0:
lo = 0
@@ -424,13 +425,13 @@ func addrNumber(data []byte, lo, hi int, dir byte, n int, charOffset bool) (int,
}
}
- return 0, 0, os.NewError("address out of range")
+ return 0, 0, errors.New("address out of range")
}
// addrRegexp searches for pattern in the given direction starting at lo, hi.
// The direction dir is '+' (search forward from hi) or '-' (search backward from lo).
// Backward searches are unimplemented.
-func addrRegexp(data []byte, lo, hi int, dir byte, pattern string) (int, int, os.Error) {
+func addrRegexp(data []byte, lo, hi int, dir byte, pattern string) (int, int, error) {
re, err := regexp.Compile(pattern)
if err != nil {
return 0, 0, err
@@ -438,7 +439,7 @@ func addrRegexp(data []byte, lo, hi int, dir byte, pattern string) (int, int, os
if dir == '-' {
// Could implement reverse search using binary search
// through file, but that seems like overkill.
- return 0, 0, os.NewError("reverse search not implemented")
+ return 0, 0, errors.New("reverse search not implemented")
}
m := re.FindIndex(data[hi:])
if len(m) > 0 {
@@ -449,7 +450,7 @@ func addrRegexp(data []byte, lo, hi int, dir byte, pattern string) (int, int, os
m = re.FindIndex(data)
}
if len(m) == 0 {
- return 0, 0, os.NewError("no match for " + pattern)
+ return 0, 0, errors.New("no match for " + pattern)
}
return m[0], m[1], nil
}
diff --git a/src/cmd/godoc/dirtrees.go b/src/cmd/godoc/dirtrees.go
index 7595ef96f..c61f791dc 100644
--- a/src/cmd/godoc/dirtrees.go
+++ b/src/cmd/godoc/dirtrees.go
@@ -8,10 +8,10 @@ package main
import (
"bytes"
- "go/doc"
"go/parser"
"go/token"
"log"
+ "os"
"path/filepath"
"strings"
"unicode"
@@ -25,21 +25,21 @@ type Directory struct {
Dirs []*Directory // subdirectories
}
-func isGoFile(fi FileInfo) bool {
+func isGoFile(fi os.FileInfo) bool {
name := fi.Name()
- return fi.IsRegular() &&
+ return !fi.IsDir() &&
len(name) > 0 && name[0] != '.' && // ignore .files
filepath.Ext(name) == ".go"
}
-func isPkgFile(fi FileInfo) bool {
+func isPkgFile(fi os.FileInfo) bool {
return isGoFile(fi) &&
!strings.HasSuffix(fi.Name(), "_test.go") // ignore test files
}
-func isPkgDir(fi FileInfo) bool {
+func isPkgDir(fi os.FileInfo) bool {
name := fi.Name()
- return fi.IsDirectory() && len(name) > 0 &&
+ return fi.IsDir() && len(name) > 0 &&
name[0] != '_' && name[0] != '.' // ignore _files and .files
}
@@ -135,7 +135,7 @@ func (b *treeBuilder) newDirTree(fset *token.FileSet, path, name string, depth i
i = 3 // none of the above
}
if 0 <= i && i < len(synopses) && synopses[i] == "" {
- synopses[i] = firstSentence(doc.CommentText(file.Doc))
+ synopses[i] = firstSentence(file.Doc.Text())
}
}
}
diff --git a/src/cmd/godoc/doc.go b/src/cmd/godoc/doc.go
index 3f0b8e458..acea2b5d0 100644
--- a/src/cmd/godoc/doc.go
+++ b/src/cmd/godoc/doc.go
@@ -80,6 +80,10 @@ The flags are:
repository holding the source files.
-sync_minutes=0
sync interval in minutes; sync is disabled if <= 0
+ -templates=""
+ directory containing alternate template files; if set,
+ the directory may provide alternative template files
+ for the files in $GOROOT/lib/godoc
-filter=""
filter file containing permitted package directory paths
-filter_minutes=0
@@ -124,6 +128,18 @@ via regular expressions). The maximum number of full text search results shown
can be set with the -maxresults flag; if set to 0, no full text results are
shown, and only an identifier index but no full text search index is created.
+The presentation mode of web pages served by godoc can be controlled with the
+"m" URL parameter; it accepts a comma-separated list of flag names as value:
+
+ all show documentation for all (not just exported) declarations
+ src show the original source code rather than the extracted documentation
+ text present the page in textual (command-line) form rather than HTML
+ flat present flat (not indented) directory listings using full paths
+
+For instance, http://golang.org/pkg/math/big/?m=all,text shows the documentation
+for all (not just the exported) declarations of package big, in textual form (as
+it would appear when using godoc from the command line: "godoc -src math/big .*").
+
By default, godoc serves files from the file system of the underlying OS.
Instead, a .zip file may be provided via the -zip flag, which contains
the file system to serve. The file paths stored in the .zip file must use
@@ -137,7 +153,6 @@ one may run godoc as follows:
godoc -http=:6060 -zip=go.zip -goroot=$HOME/go
-
See "Godoc: documenting Go code" for how to write good comments for godoc:
http://blog.golang.org/2011/03/godoc-documenting-go-code.html
*/
diff --git a/src/cmd/godoc/filesystem.go b/src/cmd/godoc/filesystem.go
index 011977af9..4e48c9e68 100644
--- a/src/cmd/godoc/filesystem.go
+++ b/src/cmd/godoc/filesystem.go
@@ -15,26 +15,17 @@ import (
"os"
)
-// The FileInfo interface provides access to file information.
-type FileInfo interface {
- Name() string
- Size() int64
- Mtime_ns() int64
- IsRegular() bool
- IsDirectory() bool
-}
-
// The FileSystem interface specifies the methods godoc is using
// to access the file system for which it serves documentation.
type FileSystem interface {
- Open(path string) (io.ReadCloser, os.Error)
- Lstat(path string) (FileInfo, os.Error)
- Stat(path string) (FileInfo, os.Error)
- ReadDir(path string) ([]FileInfo, os.Error)
+ Open(path string) (io.ReadCloser, error)
+ Lstat(path string) (os.FileInfo, error)
+ Stat(path string) (os.FileInfo, error)
+ ReadDir(path string) ([]os.FileInfo, error)
}
// ReadFile reads the file named by path from fs and returns the contents.
-func ReadFile(fs FileSystem, path string) ([]byte, os.Error) {
+func ReadFile(fs FileSystem, path string) ([]byte, error) {
rc, err := fs.Open(path)
if err != nil {
return nil, err
@@ -48,30 +39,10 @@ func ReadFile(fs FileSystem, path string) ([]byte, os.Error) {
var OS FileSystem = osFS{}
-// osFI is the OS-specific implementation of FileInfo.
-type osFI struct {
- *os.FileInfo
-}
-
-func (fi osFI) Name() string {
- return fi.FileInfo.Name
-}
-
-func (fi osFI) Size() int64 {
- if fi.IsDirectory() {
- return 0
- }
- return fi.FileInfo.Size
-}
-
-func (fi osFI) Mtime_ns() int64 {
- return fi.FileInfo.Mtime_ns
-}
-
// osFS is the OS-specific implementation of FileSystem
type osFS struct{}
-func (osFS) Open(path string) (io.ReadCloser, os.Error) {
+func (osFS) Open(path string) (io.ReadCloser, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
@@ -80,30 +51,20 @@ func (osFS) Open(path string) (io.ReadCloser, os.Error) {
if err != nil {
return nil, err
}
- if fi.IsDirectory() {
+ if fi.IsDir() {
return nil, fmt.Errorf("Open: %s is a directory", path)
}
return f, nil
}
-func (osFS) Lstat(path string) (FileInfo, os.Error) {
- fi, err := os.Lstat(path)
- return osFI{fi}, err
+func (osFS) Lstat(path string) (os.FileInfo, error) {
+ return os.Lstat(path)
}
-func (osFS) Stat(path string) (FileInfo, os.Error) {
- fi, err := os.Stat(path)
- return osFI{fi}, err
+func (osFS) Stat(path string) (os.FileInfo, error) {
+ return os.Stat(path)
}
-func (osFS) ReadDir(path string) ([]FileInfo, os.Error) {
- l0, err := ioutil.ReadDir(path) // l0 is sorted
- if err != nil {
- return nil, err
- }
- l1 := make([]FileInfo, len(l0))
- for i, e := range l0 {
- l1[i] = osFI{e}
- }
- return l1, nil
+func (osFS) ReadDir(path string) ([]os.FileInfo, error) {
+ return ioutil.ReadDir(path) // is sorted
}
diff --git a/src/cmd/godoc/format.go b/src/cmd/godoc/format.go
index 91b746034..3b1b9a822 100644
--- a/src/cmd/godoc/format.go
+++ b/src/cmd/godoc/format.go
@@ -15,9 +15,9 @@ import (
"go/scanner"
"go/token"
"io"
- "exp/regexp"
+ "regexp"
"strconv"
- "template"
+ "text/template"
)
// ----------------------------------------------------------------------------
@@ -231,7 +231,7 @@ func commentSelection(src []byte) Selection {
var s scanner.Scanner
fset := token.NewFileSet()
file := fset.AddFile("", fset.Base(), len(src))
- s.Init(file, src, nil, scanner.ScanComments+scanner.InsertSemis)
+ s.Init(file, src, nil, scanner.ScanComments)
return func() (seg []int) {
for {
pos, tok, lit := s.Scan()
diff --git a/src/cmd/godoc/godoc.go b/src/cmd/godoc/godoc.go
index 3bf721bcc..86983fbe1 100644
--- a/src/cmd/godoc/godoc.go
+++ b/src/cmd/godoc/godoc.go
@@ -6,6 +6,7 @@ package main
import (
"bytes"
+ "encoding/json"
"flag"
"fmt"
"go/ast"
@@ -13,18 +14,21 @@ import (
"go/doc"
"go/printer"
"go/token"
- "http"
"io"
"log"
+ "net/http"
+ "net/url"
"os"
"path"
"path/filepath"
- "exp/regexp"
+ "regexp"
"runtime"
"sort"
"strings"
- "template"
+ "text/template"
"time"
+ "unicode"
+ "unicode/utf8"
)
// ----------------------------------------------------------------------------
@@ -34,9 +38,9 @@ type delayTime struct {
RWValue
}
-func (dt *delayTime) backoff(max int) {
+func (dt *delayTime) backoff(max time.Duration) {
dt.mutex.Lock()
- v := dt.value.(int) * 2
+ v := dt.value.(time.Duration) * 2
if v > max {
v = max
}
@@ -70,12 +74,13 @@ var (
indexThrottle = flag.Float64("index_throttle", 0.75, "index throttle value; 0.0 = no time allocated, 1.0 = full throttle")
// file system mapping
- fs FileSystem // the underlying file system for godoc
- fsHttp http.FileSystem // the underlying file system for http
- fsMap Mapping // user-defined mapping
- fsTree RWValue // *Directory tree of packages, updated with each sync
- pathFilter RWValue // filter used when building fsMap directory trees
- fsModified RWValue // timestamp of last call to invalidateIndex
+ fs FileSystem // the underlying file system for godoc
+ fsHttp http.FileSystem // the underlying file system for http
+ fsMap Mapping // user-defined mapping
+ fsTree RWValue // *Directory tree of packages, updated with each sync
+ pathFilter RWValue // filter used when building fsMap directory trees
+ fsModified RWValue // timestamp of last call to invalidateIndex
+ docMetadata RWValue // mapping from paths to *Metadata
// http handlers
fileServer http.Handler // default file server
@@ -104,6 +109,7 @@ func registerPublicHandlers(mux *http.ServeMux) {
mux.HandleFunc("/doc/codewalk/", codewalk)
mux.HandleFunc("/search", search)
mux.Handle("/robots.txt", fileServer)
+ mux.HandleFunc("/opensearch.xml", serveSearchDesc)
mux.HandleFunc("/", serveFile)
}
@@ -148,7 +154,7 @@ func getPathFilter() func(string) bool {
// readDirList reads a file containing a newline-separated list
// of directory paths and returns the list of paths.
-func readDirList(filename string) ([]string, os.Error) {
+func readDirList(filename string) ([]string, error) {
contents, err := ReadFile(fs, filename)
if err != nil {
return nil, err
@@ -206,7 +212,7 @@ func updateFilterFile() {
// update filter file
if err := writeFileAtomically(*filter, buf.Bytes()); err != nil {
log.Printf("writeFileAtomically(%s): %s", *filter, err)
- filterDelay.backoff(24 * 60) // back off exponentially, but try at least once a day
+ filterDelay.backoff(24 * time.Hour) // back off exponentially, but try at least once a day
} else {
filterDelay.set(*filterMin) // revert to regular filter update schedule
}
@@ -229,7 +235,7 @@ func initDirTrees() {
// start filter update goroutine, if enabled.
if *filter != "" && *filterMin > 0 {
- filterDelay.set(*filterMin) // initial filter update delay
+ filterDelay.set(time.Duration(*filterMin) * time.Minute) // initial filter update delay
go func() {
for {
if *verbose {
@@ -237,10 +243,11 @@ func initDirTrees() {
}
updateFilterFile()
delay, _ := filterDelay.get()
+ dt := delay.(time.Duration)
if *verbose {
- log.Printf("next filter update in %dmin", delay.(int))
+ log.Printf("next filter update in %s", dt)
}
- time.Sleep(int64(delay.(int)) * 60e9)
+ time.Sleep(dt)
}
}()
}
@@ -299,7 +306,7 @@ type tconv struct {
indent int // valid if state == indenting
}
-func (p *tconv) writeIndent() (err os.Error) {
+func (p *tconv) writeIndent() (err error) {
i := p.indent
for i >= len(spaces) {
i -= len(spaces)
@@ -314,7 +321,7 @@ func (p *tconv) writeIndent() (err os.Error) {
return
}
-func (p *tconv) Write(data []byte) (n int, err os.Error) {
+func (p *tconv) Write(data []byte) (n int, err error) {
if len(data) == 0 {
return
}
@@ -371,7 +378,10 @@ func writeNode(w io.Writer, fset *token.FileSet, x interface{}) {
// with an another printer mode (which is more efficiently
// implemented in the printer than here with another layer)
mode := printer.TabIndent | printer.UseSpaces
- (&printer.Config{mode, *tabwidth}).Fprint(&tconv{output: w}, fset, x)
+ err := (&printer.Config{mode, *tabwidth}).Fprint(&tconv{output: w}, fset, x)
+ if err != nil {
+ log.Print(err)
+ }
}
func filenameFunc(path string) string {
@@ -379,17 +389,17 @@ func filenameFunc(path string) string {
return localname
}
-func fileInfoNameFunc(fi FileInfo) string {
+func fileInfoNameFunc(fi os.FileInfo) string {
name := fi.Name()
- if fi.IsDirectory() {
+ if fi.IsDir() {
name += "/"
}
return name
}
-func fileInfoTimeFunc(fi FileInfo) string {
- if t := fi.Mtime_ns(); t != 0 {
- return time.SecondsToLocalTime(t / 1e9).String()
+func fileInfoTimeFunc(fi os.FileInfo) string {
+ if t := fi.ModTime(); t.Unix() != 0 {
+ return t.Local().String()
}
return "" // don't return epoch if time is obviously not set
}
@@ -454,7 +464,65 @@ func comment_htmlFunc(comment string) string {
var buf bytes.Buffer
// TODO(gri) Provide list of words (e.g. function parameters)
// to be emphasized by ToHTML.
- doc.ToHTML(&buf, []byte(comment), nil) // does html-escaping
+ doc.ToHTML(&buf, comment, nil) // does html-escaping
+ return buf.String()
+}
+
+// punchCardWidth is the number of columns of fixed-width
+// characters to assume when wrapping text. Very few people
+// use terminals or cards smaller than 80 characters, so 80 it is.
+// We do not try to sniff the environment or the tty to adapt to
+// the situation; instead, by using a constant we make sure that
+// godoc always produces the same output regardless of context,
+// a consistency that is lost otherwise. For example, if we sniffed
+// the environment or tty, then http://golang.org/pkg/math/?m=text
+// would depend on the width of the terminal where godoc started,
+// which is clearly bogus. More generally, the Unix tools that behave
+// differently when writing to a tty than when writing to a file have
+// a history of causing confusion (compare `ls` and `ls | cat`), and we
+// want to avoid that mistake here.
+const punchCardWidth = 80
+
+func comment_textFunc(comment, indent, preIndent string) string {
+ var buf bytes.Buffer
+ doc.ToText(&buf, comment, indent, preIndent, punchCardWidth-2*len(indent))
+ return buf.String()
+}
+
+func startsWithUppercase(s string) bool {
+ r, _ := utf8.DecodeRuneInString(s)
+ return unicode.IsUpper(r)
+}
+
+func example_htmlFunc(funcName string, examples []*doc.Example, fset *token.FileSet) string {
+ var buf bytes.Buffer
+ for _, eg := range examples {
+ name := eg.Name
+
+ // strip lowercase braz in Foo_braz or Foo_Bar_braz from name
+ // while keeping uppercase Braz in Foo_Braz
+ if i := strings.LastIndex(name, "_"); i != -1 {
+ if i < len(name)-1 && !startsWithUppercase(name[i+1:]) {
+ name = name[:i]
+ }
+ }
+
+ if name != funcName {
+ continue
+ }
+
+ // print code, unindent and remove surrounding braces
+ code := node_htmlFunc(eg.Body, fset)
+ code = strings.Replace(code, "\n ", "\n", -1)
+ code = code[2 : len(code)-2]
+
+ err := exampleHTML.Execute(&buf, struct {
+ Code, Output string
+ }{code, eg.Output})
+ if err != nil {
+ log.Print(err)
+ }
+ }
return buf.String()
}
@@ -526,11 +594,15 @@ var fmap = template.FuncMap{
"node": nodeFunc,
"node_html": node_htmlFunc,
"comment_html": comment_htmlFunc,
+ "comment_text": comment_textFunc,
// support for URL attributes
"pkgLink": pkgLinkFunc,
"srcLink": relativeURL,
"posLink_url": posLink_urlFunc,
+
+ // formatting of Examples
+ "example_html": example_htmlFunc,
}
func readTemplate(name string) *template.Template {
@@ -563,11 +635,13 @@ var (
codewalkdirHTML,
dirlistHTML,
errorHTML,
+ exampleHTML,
godocHTML,
packageHTML,
packageText,
searchHTML,
- searchText *template.Template
+ searchText,
+ searchDescXML *template.Template
)
func readTemplates() {
@@ -576,11 +650,13 @@ func readTemplates() {
codewalkdirHTML = readTemplate("codewalkdir.html")
dirlistHTML = readTemplate("dirlist.html")
errorHTML = readTemplate("error.html")
+ exampleHTML = readTemplate("example.html")
godocHTML = readTemplate("godoc.html")
packageHTML = readTemplate("package.html")
packageText = readTemplate("package.txt")
searchHTML = readTemplate("search.html")
searchText = readTemplate("search.txt")
+ searchDescXML = readTemplate("opensearch.xml")
}
// ----------------------------------------------------------------------------
@@ -621,19 +697,11 @@ func serveText(w http.ResponseWriter, text []byte) {
// Files
var (
- titleRx = regexp.MustCompile(`<!-- title ([^\-]*)-->`)
- subtitleRx = regexp.MustCompile(`<!-- subtitle ([^\-]*)-->`)
- firstCommentRx = regexp.MustCompile(`<!--([^\-]*)-->`)
+ doctype = []byte("<!DOCTYPE ")
+ jsonStart = []byte("<!--{")
+ jsonEnd = []byte("}-->")
)
-func extractString(src []byte, rx *regexp.Regexp) (s string) {
- m := rx.FindSubmatch(src)
- if m != nil {
- s = strings.TrimSpace(string(m[1]))
- }
- return
-}
-
func serveHTMLDoc(w http.ResponseWriter, r *http.Request, abspath, relpath string) {
// get HTML body contents
src, err := ReadFile(fs, abspath)
@@ -645,27 +713,25 @@ func serveHTMLDoc(w http.ResponseWriter, r *http.Request, abspath, relpath strin
// if it begins with "<!DOCTYPE " assume it is standalone
// html that doesn't need the template wrapping.
- if bytes.HasPrefix(src, []byte("<!DOCTYPE ")) {
+ if bytes.HasPrefix(src, doctype) {
w.Write(src)
return
}
+ // if it begins with a JSON blob, read in the metadata.
+ meta, src, err := extractMetadata(src)
+ if err != nil {
+ log.Printf("decoding metadata %s: %v", relpath, err)
+ }
+
// if it's the language spec, add tags to EBNF productions
if strings.HasSuffix(abspath, "go_spec.html") {
var buf bytes.Buffer
- linkify(&buf, src)
+ Linkify(&buf, src)
src = buf.Bytes()
}
- // get title and subtitle, if any
- title := extractString(src, titleRx)
- if title == "" {
- // no title found; try first comment for backward-compatibility
- title = extractString(src, firstCommentRx)
- }
- subtitle := extractString(src, subtitleRx)
-
- servePage(w, title, subtitle, "", src)
+ servePage(w, meta.Title, meta.Subtitle, "", src)
}
func applyTemplate(t *template.Template, name string, data interface{}) []byte {
@@ -717,21 +783,22 @@ func serveDirectory(w http.ResponseWriter, r *http.Request, abspath, relpath str
}
func serveFile(w http.ResponseWriter, r *http.Request) {
- relpath := r.URL.Path[1:] // serveFile URL paths start with '/'
- abspath := absolutePath(relpath, *goroot)
+ relpath := r.URL.Path
- // pick off special cases and hand the rest to the standard file server
- switch r.URL.Path {
- case "/":
- serveHTMLDoc(w, r, filepath.Join(*goroot, "doc", "root.html"), "doc/root.html")
- return
-
- case "/doc/root.html":
- // hide landing page from its real name
- http.Redirect(w, r, "/", http.StatusMovedPermanently)
- return
+ // Check to see if we need to redirect or serve another file.
+ if m := metadataFor(relpath); m != nil {
+ if m.Path != relpath {
+ // Redirect to canonical path.
+ http.Redirect(w, r, m.Path, http.StatusMovedPermanently)
+ return
+ }
+ // Serve from the actual filesystem path.
+ relpath = m.filePath
}
+ relpath = relpath[1:] // strip leading slash
+ abspath := absolutePath(relpath, *goroot)
+
switch path.Ext(relpath) {
case ".html":
if strings.HasSuffix(relpath, "/index.html") {
@@ -755,7 +822,7 @@ func serveFile(w http.ResponseWriter, r *http.Request) {
return
}
- if dir != nil && dir.IsDirectory() {
+ if dir != nil && dir.IsDir() {
if redirect(w, r) {
return
}
@@ -775,6 +842,16 @@ func serveFile(w http.ResponseWriter, r *http.Request) {
fileServer.ServeHTTP(w, r)
}
+func serveSearchDesc(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/opensearchdescription+xml")
+ data := map[string]interface{}{
+ "BaseURL": fmt.Sprintf("http://%s", r.Host),
+ }
+ if err := searchDescXML.Execute(w, &data); err != nil {
+ log.Printf("searchDescXML.Execute: %s", err)
+ }
+}
+
// ----------------------------------------------------------------------------
// Packages
@@ -784,25 +861,69 @@ const fakePkgName = "documentation"
// Fake relative package path for built-ins. Documentation for all globals
// (not just exported ones) will be shown for packages in this directory.
-const builtinPkgPath = "builtin/"
+const builtinPkgPath = "builtin"
type PageInfoMode uint
const (
- exportsOnly PageInfoMode = 1 << iota // only keep exported stuff
- genDoc // generate documentation
+ noFiltering PageInfoMode = 1 << iota // do not filter exports
+ showSource // show source code, do not extract documentation
+ noHtml // show result in textual form, do not generate HTML
+ flatDir // show directory in a flat (non-indented) manner
)
+// modeNames defines names for each PageInfoMode flag.
+var modeNames = map[string]PageInfoMode{
+ "all": noFiltering,
+ "src": showSource,
+ "text": noHtml,
+ "flat": flatDir,
+}
+
+// getPageInfoMode computes the PageInfoMode flags by analyzing the request
+// URL form value "m". It is value is a comma-separated list of mode names
+// as defined by modeNames (e.g.: m=src,text).
+func getPageInfoMode(r *http.Request) PageInfoMode {
+ var mode PageInfoMode
+ for _, k := range strings.Split(r.FormValue("m"), ",") {
+ if m, found := modeNames[strings.TrimSpace(k)]; found {
+ mode |= m
+ }
+ }
+ return adjustPageInfoMode(r, mode)
+}
+
+// Specialized versions of godoc may adjust the PageInfoMode by overriding
+// this variable.
+var adjustPageInfoMode = func(_ *http.Request, mode PageInfoMode) PageInfoMode {
+ return mode
+}
+
+// remoteSearchURL returns the search URL for a given query as needed by
+// remoteSearch. If html is set, an html result is requested; otherwise
+// the result is in textual form.
+// Adjust this function as necessary if modeNames or FormValue parameters
+// change.
+func remoteSearchURL(query string, html bool) string {
+ s := "/search?m=text&q="
+ if html {
+ s = "/search?q="
+ }
+ return s + url.QueryEscape(query)
+}
+
type PageInfo struct {
- Dirname string // directory containing the package
- PList []string // list of package names found
- FSet *token.FileSet // corresponding file set
- PAst *ast.File // nil if no single AST with package exports
- PDoc *doc.PackageDoc // nil if no single package documentation
- Dirs *DirList // nil if no directory information
- DirTime int64 // directory time stamp in seconds since epoch
- IsPkg bool // false if this is not documenting a real package
- Err os.Error // directory read error or nil
+ Dirname string // directory containing the package
+ PList []string // list of package names found
+ FSet *token.FileSet // corresponding file set
+ PAst *ast.File // nil if no single AST with package exports
+ PDoc *doc.Package // nil if no single package documentation
+ Examples []*doc.Example // nil if no example code
+ Dirs *DirList // nil if no directory information
+ DirTime time.Time // directory time stamp
+ DirFlat bool // if set, show directory in a flat (non-indented) manner
+ IsPkg bool // false if this is not documenting a real package
+ Err error // I/O error or nil
}
func (info *PageInfo) IsEmpty() bool {
@@ -816,26 +937,12 @@ type httpHandler struct {
}
// fsReadDir implements ReadDir for the go/build package.
-func fsReadDir(dir string) ([]*os.FileInfo, os.Error) {
- fi, err := fs.ReadDir(dir)
- if err != nil {
- return nil, err
- }
-
- // Convert []FileInfo to []*os.FileInfo.
- osfi := make([]*os.FileInfo, len(fi))
- for i, f := range fi {
- mode := uint32(S_IFREG)
- if f.IsDirectory() {
- mode = S_IFDIR
- }
- osfi[i] = &os.FileInfo{Name: f.Name(), Size: f.Size(), Mtime_ns: f.Mtime_ns(), Mode: mode}
- }
- return osfi, nil
+func fsReadDir(dir string) ([]os.FileInfo, error) {
+ return fs.ReadDir(dir)
}
// fsReadFile implements ReadFile for the go/build package.
-func fsReadFile(dir, name string) (path string, data []byte, err os.Error) {
+func fsReadFile(dir, name string) (path string, data []byte, err error) {
path = filepath.Join(dir, name)
data, err = ReadFile(fs, path)
return
@@ -881,7 +988,7 @@ func (h *httpHandler) getPageInfo(abspath, relpath, pkgname string, mode PageInf
}
// filter function to select the desired .go files
- filter := func(d FileInfo) bool {
+ filter := func(d os.FileInfo) bool {
// Only Go files.
if !isPkgFile(d) {
return false
@@ -958,23 +1065,44 @@ func (h *httpHandler) getPageInfo(abspath, relpath, pkgname string, mode PageInf
plist = plist[0:i]
}
+ // get examples from *_test.go files
+ var examples []*doc.Example
+ filter = func(d os.FileInfo) bool {
+ return isGoFile(d) && strings.HasSuffix(d.Name(), "_test.go")
+ }
+ if testpkgs, err := parseDir(fset, abspath, filter); err != nil {
+ log.Println("parsing test files:", err)
+ } else {
+ for _, testpkg := range testpkgs {
+ examples = append(examples, doc.Examples(testpkg)...)
+ }
+ }
+
// compute package documentation
var past *ast.File
- var pdoc *doc.PackageDoc
+ var pdoc *doc.Package
if pkg != nil {
- if mode&exportsOnly != 0 {
- ast.PackageExports(pkg)
- }
- if mode&genDoc != 0 {
- pdoc = doc.NewPackageDoc(pkg, path.Clean(relpath)) // no trailing '/' in importpath
+ if mode&showSource == 0 {
+ // show extracted documentation
+ var m doc.Mode
+ if mode&noFiltering != 0 {
+ m = doc.AllDecls
+ }
+ pdoc = doc.New(pkg, path.Clean(relpath), m) // no trailing '/' in importpath
} else {
+ // show source code
+ // TODO(gri) Consider eliminating export filtering in this mode,
+ // or perhaps eliminating the mode altogether.
+ if mode&noFiltering == 0 {
+ ast.PackageExports(pkg)
+ }
past = ast.MergePackageFiles(pkg, ast.FilterUnassociatedComments)
}
}
// get directory information
var dir *Directory
- var timestamp int64
+ var timestamp time.Time
if tree, ts := fsTree.get(); tree != nil && tree.(*Directory) != nil {
// directory tree is present; lookup respective directory
// (may still fail if the file system was updated and the
@@ -1011,10 +1139,22 @@ func (h *httpHandler) getPageInfo(abspath, relpath, pkgname string, mode PageInf
// note: cannot use path filter here because in general
// it doesn't contain the fsTree path
dir = newDirectory(abspath, nil, 1)
- timestamp = time.Seconds()
+ timestamp = time.Now()
+ }
+
+ return PageInfo{
+ Dirname: abspath,
+ PList: plist,
+ FSet: fset,
+ PAst: past,
+ PDoc: pdoc,
+ Examples: examples,
+ Dirs: dir.listing(true),
+ DirTime: timestamp,
+ DirFlat: mode&flatDir != 0,
+ IsPkg: h.isPkg,
+ Err: nil,
}
-
- return PageInfo{abspath, plist, fset, past, pdoc, dir.listing(true), timestamp, h.isPkg, nil}
}
func (h *httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@@ -1022,14 +1162,11 @@ func (h *httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
- relpath := r.URL.Path[len(h.pattern):]
+ relpath := path.Clean(r.URL.Path[len(h.pattern):])
abspath := absolutePath(relpath, h.fsRoot)
- var mode PageInfoMode
- if relpath != builtinPkgPath {
- mode = exportsOnly
- }
- if r.FormValue("m") != "src" {
- mode |= genDoc
+ mode := getPageInfoMode(r)
+ if relpath == builtinPkgPath {
+ mode = noFiltering
}
info := h.getPageInfo(abspath, relpath, r.FormValue("p"), mode)
if info.Err != nil {
@@ -1038,7 +1175,7 @@ func (h *httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
- if r.FormValue("f") == "text" {
+ if mode&noHtml != 0 {
contents := applyTemplate(packageText, "packageText", info)
serveText(w, contents)
return
@@ -1051,18 +1188,18 @@ func (h *httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
case info.PDoc != nil:
switch {
case info.IsPkg:
- title = "Package " + info.PDoc.PackageName
- case info.PDoc.PackageName == fakePkgName:
+ title = "Package " + info.PDoc.Name
+ case info.PDoc.Name == fakePkgName:
// assume that the directory name is the command name
- _, pkgname := path.Split(path.Clean(relpath))
+ _, pkgname := path.Split(relpath)
title = "Command " + pkgname
default:
- title = "Command " + info.PDoc.PackageName
+ title = "Command " + info.PDoc.Name
}
default:
title = "Directory " + relativeURL(info.Dirname)
if *showTimestamps {
- subtitle = "Last update: " + time.SecondsToLocalTime(info.DirTime).String()
+ subtitle = "Last update: " + info.DirTime.String()
}
}
@@ -1098,12 +1235,12 @@ func lookup(query string) (result SearchResult) {
index := index.(*Index)
// identifier search
- var err os.Error
+ var err error
result.Pak, result.Hit, result.Alt, err = index.Lookup(query)
if err != nil && *maxResults <= 0 {
// ignore the error if full text search is enabled
// since the query may be a valid regular expression
- result.Alert = "Error in query string: " + err.String()
+ result.Alert = "Error in query string: " + err.Error()
return
}
@@ -1111,7 +1248,7 @@ func lookup(query string) (result SearchResult) {
if *maxResults > 0 && query != "" {
rx, err := regexp.Compile(query)
if err != nil {
- result.Alert = "Error in query regular expression: " + err.String()
+ result.Alert = "Error in query regular expression: " + err.Error()
return
}
// If we get maxResults+1 results we know that there are more than
@@ -1128,7 +1265,7 @@ func lookup(query string) (result SearchResult) {
// is the result accurate?
if *indexEnabled {
- if _, ts := fsModified.get(); timestamp < ts {
+ if _, ts := fsModified.get(); timestamp.Before(ts) {
// The index is older than the latest file system change under godoc's observation.
result.Alert = "Indexing in progress: result may be inaccurate"
}
@@ -1143,7 +1280,7 @@ func search(w http.ResponseWriter, r *http.Request) {
query := strings.TrimSpace(r.FormValue("q"))
result := lookup(query)
- if r.FormValue("f") == "text" {
+ if getPageInfoMode(r)&noHtml != 0 {
contents := applyTemplate(searchText, "searchText", result)
serveText(w, contents)
return
@@ -1161,6 +1298,120 @@ func search(w http.ResponseWriter, r *http.Request) {
}
// ----------------------------------------------------------------------------
+// Documentation Metadata
+
+type Metadata struct {
+ Title string
+ Subtitle string
+ Path string // canonical path for this page
+ filePath string // filesystem path relative to goroot
+}
+
+// extractMetadata extracts the Metadata from a byte slice.
+// It returns the Metadata value and the remaining data.
+// If no metadata is present the original byte slice is returned.
+//
+func extractMetadata(b []byte) (meta Metadata, tail []byte, err error) {
+ tail = b
+ if !bytes.HasPrefix(b, jsonStart) {
+ return
+ }
+ end := bytes.Index(b, jsonEnd)
+ if end < 0 {
+ return
+ }
+ b = b[len(jsonStart)-1 : end+1] // drop leading <!-- and include trailing }
+ if err = json.Unmarshal(b, &meta); err != nil {
+ return
+ }
+ tail = tail[end+len(jsonEnd):]
+ return
+}
+
+// updateMetadata scans $GOROOT/doc for HTML files, reads their metadata,
+// and updates the docMetadata map.
+//
+func updateMetadata() {
+ metadata := make(map[string]*Metadata)
+ var scan func(string) // scan is recursive
+ scan = func(dir string) {
+ fis, err := fs.ReadDir(dir)
+ if err != nil {
+ log.Println("updateMetadata:", err)
+ return
+ }
+ for _, fi := range fis {
+ name := filepath.Join(dir, fi.Name())
+ if fi.IsDir() {
+ scan(name) // recurse
+ continue
+ }
+ if !strings.HasSuffix(name, ".html") {
+ continue
+ }
+ // Extract metadata from the file.
+ b, err := ReadFile(fs, name)
+ if err != nil {
+ log.Printf("updateMetadata %s: %v", name, err)
+ continue
+ }
+ meta, _, err := extractMetadata(b)
+ if err != nil {
+ log.Printf("updateMetadata: %s: %v", name, err)
+ continue
+ }
+ // Store relative filesystem path in Metadata.
+ meta.filePath = filepath.Join("/", name[len(*goroot):])
+ if meta.Path == "" {
+ // If no Path, canonical path is actual path.
+ meta.Path = meta.filePath
+ }
+ // Store under both paths.
+ metadata[meta.Path] = &meta
+ metadata[meta.filePath] = &meta
+ }
+ }
+ scan(filepath.Join(*goroot, "doc"))
+ docMetadata.set(metadata)
+}
+
+// Send a value on this channel to trigger a metadata refresh.
+// It is buffered so that a signal is not lost if it is sent during a refresh.
+//
+var refreshMetadataSignal = make(chan bool, 1)
+
+// refreshMetadata sends a signal to update docMetadata. If a refresh is in
+// progress the metadata will be refreshed again afterward.
+//
+func refreshMetadata() {
+ select {
+ case refreshMetadataSignal <- true:
+ default:
+ }
+}
+
+// refreshMetadataLoop runs forever, updating docMetadata when the underlying
+// file system changes. It should be launched in a goroutine by main.
+//
+func refreshMetadataLoop() {
+ for {
+ <-refreshMetadataSignal
+ updateMetadata()
+ time.Sleep(10 * time.Second) // at most once every 10 seconds
+ }
+}
+
+// metadataFor returns the *Metadata for a given relative path or nil if none
+// exists.
+//
+func metadataFor(relpath string) *Metadata {
+ if m, _ := docMetadata.get(); m != nil {
+ return m.(map[string]*Metadata)[relpath]
+ }
+ return nil
+}
+
+// ----------------------------------------------------------------------------
// Indexer
// invalidateIndex should be called whenever any of the file systems
@@ -1168,6 +1419,7 @@ func search(w http.ResponseWriter, r *http.Request) {
//
func invalidateIndex() {
fsModified.set(nil)
+ refreshMetadata()
}
// indexUpToDate() returns true if the search index is not older
@@ -1176,7 +1428,7 @@ func invalidateIndex() {
func indexUpToDate() bool {
_, fsTime := fsModified.get()
_, siTime := searchIndex.get()
- return fsTime <= siTime
+ return !fsTime.After(siTime)
}
// feedDirnames feeds the directory names of all directories
@@ -1206,7 +1458,7 @@ func fsDirnames() <-chan string {
return c
}
-func readIndex(filenames string) os.Error {
+func readIndex(filenames string) error {
matches, err := filepath.Glob(filenames)
if err != nil {
return err
@@ -1233,12 +1485,12 @@ func updateIndex() {
if *verbose {
log.Printf("updating index...")
}
- start := time.Nanoseconds()
+ start := time.Now()
index := NewIndex(fsDirnames(), *maxResults > 0, *indexThrottle)
- stop := time.Nanoseconds()
+ stop := time.Now()
searchIndex.set(index)
if *verbose {
- secs := float64((stop-start)/1e6) / 1e3
+ secs := stop.Sub(start).Seconds()
stats := index.Stats()
log.Printf("index updated (%gs, %d bytes of source, %d files, %d lines, %d unique words, %d spots)",
secs, stats.Bytes, stats.Files, stats.Lines, stats.Words, stats.Spots)
@@ -1262,10 +1514,10 @@ func indexer() {
// index possibly out of date - make a new one
updateIndex()
}
- var delay int64 = 60 * 1e9 // by default, try every 60s
+ delay := 60 * time.Second // by default, try every 60s
if *testDir != "" {
// in test mode, try once a second for fast startup
- delay = 1 * 1e9
+ delay = 1 * time.Second
}
time.Sleep(delay)
}
diff --git a/src/cmd/godoc/httpzip.go b/src/cmd/godoc/httpzip.go
index cb8322ee4..9f3da0874 100644
--- a/src/cmd/godoc/httpzip.go
+++ b/src/cmd/godoc/httpzip.go
@@ -26,21 +26,27 @@ package main
import (
"archive/zip"
"fmt"
- "http"
"io"
+ "net/http"
"os"
"path"
"sort"
"strings"
+ "time"
)
-// We cannot import syscall on app engine.
-// TODO(gri) Once we have a truly abstract FileInfo implementation
-// this won't be needed anymore.
-const (
- S_IFDIR = 0x4000 // == syscall.S_IFDIR
- S_IFREG = 0x8000 // == syscall.S_IFREG
-)
+type fileInfo struct {
+ name string
+ mode os.FileMode
+ size int64
+ mtime time.Time
+}
+
+func (fi *fileInfo) Name() string { return fi.name }
+func (fi *fileInfo) Mode() os.FileMode { return fi.mode }
+func (fi *fileInfo) Size() int64 { return fi.size }
+func (fi *fileInfo) ModTime() time.Time { return fi.mtime }
+func (fi *fileInfo) IsDir() bool { return fi.mode.IsDir() }
// httpZipFile is the zip-file based implementation of http.File
type httpZipFile struct {
@@ -50,19 +56,19 @@ type httpZipFile struct {
list zipList
}
-func (f *httpZipFile) Close() os.Error {
- if f.info.IsRegular() {
+func (f *httpZipFile) Close() error {
+ if !f.info.IsDir() {
return f.ReadCloser.Close()
}
f.list = nil
return nil
}
-func (f *httpZipFile) Stat() (*os.FileInfo, os.Error) {
- return &f.info, nil
+func (f *httpZipFile) Stat() (os.FileInfo, error) {
+ return f.info, nil
}
-func (f *httpZipFile) Readdir(count int) ([]os.FileInfo, os.Error) {
+func (f *httpZipFile) Readdir(count int) ([]os.FileInfo, error) {
var list []os.FileInfo
dirname := f.path + "/"
prevname := ""
@@ -76,29 +82,30 @@ func (f *httpZipFile) Readdir(count int) ([]os.FileInfo, os.Error) {
break // not in the same directory anymore
}
name := e.Name[len(dirname):] // local name
- var mode uint32
- var size, mtime_ns int64
+ var mode os.FileMode
+ var size int64
+ var mtime time.Time
if i := strings.IndexRune(name, '/'); i >= 0 {
// We infer directories from files in subdirectories.
// If we have x/y, return a directory entry for x.
name = name[0:i] // keep local directory name only
- mode = S_IFDIR
- // no size or mtime_ns for directories
+ mode = os.ModeDir
+ // no size or mtime for directories
} else {
- mode = S_IFREG
+ mode = 0
size = int64(e.UncompressedSize)
- mtime_ns = e.Mtime_ns()
+ mtime = e.ModTime()
}
// If we have x/y and x/z, don't return two directory entries for x.
// TODO(gri): It should be possible to do this more efficiently
// by determining the (fs.list) range of local directory entries
// (via two binary searches).
if name != prevname {
- list = append(list, os.FileInfo{
- Name: name,
- Mode: mode,
- Size: size,
- Mtime_ns: mtime_ns,
+ list = append(list, &fileInfo{
+ name,
+ mode,
+ size,
+ mtime,
})
prevname = name
count--
@@ -106,14 +113,14 @@ func (f *httpZipFile) Readdir(count int) ([]os.FileInfo, os.Error) {
}
if count >= 0 && len(list) == 0 {
- return nil, os.EOF
+ return nil, io.EOF
}
return list, nil
}
-func (f *httpZipFile) Seek(offset int64, whence int) (int64, os.Error) {
- return 0, fmt.Errorf("Seek not implemented for zip file entry: %s", f.info.Name)
+func (f *httpZipFile) Seek(offset int64, whence int) (int64, error) {
+ return 0, fmt.Errorf("Seek not implemented for zip file entry: %s", f.info.Name())
}
// httpZipFS is the zip-file based implementation of http.FileSystem
@@ -123,7 +130,7 @@ type httpZipFS struct {
root string
}
-func (fs *httpZipFS) Open(name string) (http.File, os.Error) {
+func (fs *httpZipFS) Open(name string) (http.File, error) {
// fs.root does not start with '/'.
path := path.Join(fs.root, name) // path is clean
index, exact := fs.list.lookup(path)
@@ -141,11 +148,11 @@ func (fs *httpZipFS) Open(name string) (http.File, os.Error) {
}
return &httpZipFile{
path,
- os.FileInfo{
- Name: name,
- Mode: S_IFREG,
- Size: int64(f.UncompressedSize),
- Mtime_ns: f.Mtime_ns(),
+ &fileInfo{
+ name,
+ 0,
+ int64(f.UncompressedSize),
+ f.ModTime(),
},
rc,
nil,
@@ -155,17 +162,18 @@ func (fs *httpZipFS) Open(name string) (http.File, os.Error) {
// not an exact match - must be a directory
return &httpZipFile{
path,
- os.FileInfo{
- Name: name,
- Mode: S_IFDIR,
- // no size or mtime_ns for directories
+ &fileInfo{
+ name,
+ os.ModeDir,
+ 0, // no size for directory
+ time.Time{}, // no mtime for directory
},
nil,
fs.list[index:],
}, nil
}
-func (fs *httpZipFS) Close() os.Error {
+func (fs *httpZipFS) Close() error {
fs.list = nil
return fs.ReadCloser.Close()
}
diff --git a/src/cmd/godoc/index.go b/src/cmd/godoc/index.go
index 2543f9216..3d2c3ff96 100644
--- a/src/cmd/godoc/index.go
+++ b/src/cmd/godoc/index.go
@@ -38,19 +38,22 @@
package main
import (
+ "bufio"
"bytes"
+ "encoding/gob"
+ "errors"
"go/ast"
"go/parser"
- "go/token"
"go/scanner"
- "gob"
+ "go/token"
"index/suffixarray"
"io"
"os"
"path/filepath"
- "exp/regexp"
+ "regexp"
"sort"
"strings"
+ "time"
)
// ----------------------------------------------------------------------------
@@ -700,8 +703,8 @@ func isWhitelisted(filename string) bool {
return whitelisted[key]
}
-func (x *Indexer) visitFile(dirname string, f FileInfo, fulltextIndex bool) {
- if !f.IsRegular() {
+func (x *Indexer) visitFile(dirname string, f os.FileInfo, fulltextIndex bool) {
+ if f.IsDir() {
return
}
@@ -765,7 +768,7 @@ func canonical(w string) string { return strings.ToLower(w) }
//
func NewIndex(dirnames <-chan string, fulltextIndex bool, throttle float64) *Index {
var x Indexer
- th := NewThrottle(throttle, 0.1e9) // run at least 0.1s at a time
+ th := NewThrottle(throttle, 100*time.Millisecond) // run at least 0.1s at a time
// initialize Indexer
// (use some reasonably sized maps to start)
@@ -780,7 +783,7 @@ func NewIndex(dirnames <-chan string, fulltextIndex bool, throttle float64) *Ind
continue // ignore this directory
}
for _, f := range list {
- if !f.IsDirectory() {
+ if !f.IsDir() {
x.visitFile(dirname, f, fulltextIndex)
}
th.Throttle()
@@ -840,8 +843,16 @@ type fileIndex struct {
Fulltext bool
}
+func (x *fileIndex) Write(w io.Writer) error {
+ return gob.NewEncoder(w).Encode(x)
+}
+
+func (x *fileIndex) Read(r io.Reader) error {
+ return gob.NewDecoder(r).Decode(x)
+}
+
// Write writes the index x to w.
-func (x *Index) Write(w io.Writer) os.Error {
+func (x *Index) Write(w io.Writer) error {
fulltext := false
if x.suffixes != nil {
fulltext = true
@@ -852,7 +863,7 @@ func (x *Index) Write(w io.Writer) os.Error {
x.snippets,
fulltext,
}
- if err := gob.NewEncoder(w).Encode(fx); err != nil {
+ if err := fx.Write(w); err != nil {
return err
}
if fulltext {
@@ -867,9 +878,14 @@ func (x *Index) Write(w io.Writer) os.Error {
}
// Read reads the index from r into x; x must not be nil.
-func (x *Index) Read(r io.Reader) os.Error {
+// If r does not also implement io.ByteReader, it will be wrapped in a bufio.Reader.
+func (x *Index) Read(r io.Reader) error {
+ // We use the ability to read bytes as a plausible surrogate for buffering.
+ if _, ok := r.(io.ByteReader); !ok {
+ r = bufio.NewReader(r)
+ }
var fx fileIndex
- if err := gob.NewDecoder(r).Decode(&fx); err != nil {
+ if err := fx.Read(r); err != nil {
return err
}
x.words = fx.Words
@@ -920,13 +936,13 @@ func isIdentifier(s string) bool {
// identifier, Lookup returns a list of packages, a LookupResult, and a
// list of alternative spellings, if any. Any and all results may be nil.
// If the query syntax is wrong, an error is reported.
-func (x *Index) Lookup(query string) (paks HitList, match *LookupResult, alt *AltWords, err os.Error) {
+func (x *Index) Lookup(query string) (paks HitList, match *LookupResult, alt *AltWords, err error) {
ss := strings.Split(query, ".")
// check query syntax
for _, s := range ss {
if !isIdentifier(s) {
- err = os.NewError("all query parts must be identifiers")
+ err = errors.New("all query parts must be identifiers")
return
}
}
@@ -954,7 +970,7 @@ func (x *Index) Lookup(query string) (paks HitList, match *LookupResult, alt *Al
}
default:
- err = os.NewError("query is not a (qualified) identifier")
+ err = errors.New("query is not a (qualified) identifier")
}
return
diff --git a/src/cmd/godoc/main.go b/src/cmd/godoc/main.go
index 15d70c49b..f74b6f404 100644
--- a/src/cmd/godoc/main.go
+++ b/src/cmd/godoc/main.go
@@ -28,23 +28,23 @@ package main
import (
"archive/zip"
"bytes"
+ "errors"
_ "expvar" // to serve /debug/vars
"flag"
"fmt"
"go/ast"
"go/build"
- "http"
- _ "http/pprof" // to serve /debug/pprof/*
"io"
"log"
+ "net/http"
+ _ "net/http/pprof" // to serve /debug/pprof/*
"os"
"path"
"path/filepath"
- "exp/regexp"
+ "regexp"
"runtime"
"strings"
"time"
- "url"
)
const defaultAddr = ":6060" // default webserver address
@@ -74,7 +74,7 @@ var (
query = flag.Bool("q", false, "arguments are considered search queries")
)
-func serveError(w http.ResponseWriter, r *http.Request, relpath string, err os.Error) {
+func serveError(w http.ResponseWriter, r *http.Request, relpath string, err error) {
contents := applyTemplate(errorHTML, "errorHTML", err) // err may contain an absolute path!
w.WriteHeader(http.StatusNotFound)
servePage(w, "File "+relpath, "", "", contents)
@@ -141,10 +141,10 @@ func dosync(w http.ResponseWriter, r *http.Request) {
case 1:
// sync failed because no files changed;
// don't change the package tree
- syncDelay.set(*syncMin) // revert to regular sync schedule
+ syncDelay.set(time.Duration(*syncMin) * time.Minute) // revert to regular sync schedule
default:
// sync failed because of an error - back off exponentially, but try at least once a day
- syncDelay.backoff(24 * 60)
+ syncDelay.backoff(24 * time.Hour)
}
}
@@ -163,9 +163,7 @@ func loggingHandler(h http.Handler) http.Handler {
})
}
-func remoteSearch(query string) (res *http.Response, err os.Error) {
- search := "/search?f=text&q=" + url.QueryEscape(query)
-
+func remoteSearch(query string) (res *http.Response, err error) {
// list of addresses to try
var addrs []string
if *serverAddr != "" {
@@ -179,6 +177,7 @@ func remoteSearch(query string) (res *http.Response, err os.Error) {
}
// remote search
+ search := remoteSearchURL(query, *html)
for _, addr := range addrs {
url := "http://" + addr + search
res, err = http.Get(url)
@@ -188,7 +187,7 @@ func remoteSearch(query string) (res *http.Response, err os.Error) {
}
if err == nil && res.StatusCode != http.StatusOK {
- err = os.NewError(res.Status)
+ err = errors.New(res.Status)
}
return
@@ -329,14 +328,20 @@ func main() {
for {
dosync(nil, nil)
delay, _ := syncDelay.get()
+ dt := delay.(time.Duration)
if *verbose {
- log.Printf("next sync in %dmin", delay.(int))
+ log.Printf("next sync in %s", dt)
}
- time.Sleep(int64(delay.(int)) * 60e9)
+ time.Sleep(dt)
}
}()
}
+ // Immediately update metadata.
+ updateMetadata()
+ // Periodically refresh metadata.
+ go refreshMetadataLoop()
+
// Initialize search index.
if *indexEnabled {
go indexer()
@@ -387,13 +392,15 @@ func main() {
}
var mode PageInfoMode
+ if relpath == builtinPkgPath {
+ mode = noFiltering
+ }
if *srcMode {
// only filter exports if we don't have explicit command-line filter arguments
- if flag.NArg() == 1 {
- mode |= exportsOnly
+ if flag.NArg() > 1 {
+ mode |= noFiltering
}
- } else {
- mode = exportsOnly | genDoc
+ mode |= showSource
}
// TODO(gri): Provide a mechanism (flag?) to select a package
// if there are multiple packages in a directory.
diff --git a/src/cmd/godoc/mapping.go b/src/cmd/godoc/mapping.go
index 51f23ab98..89e531e2f 100644
--- a/src/cmd/godoc/mapping.go
+++ b/src/cmd/godoc/mapping.go
@@ -139,13 +139,18 @@ func (m *Mapping) Fprint(w io.Writer) {
}
}
+// splitFirst splits a path at the first path separator and returns
+// the path's head (the top-most directory specified by the path) and
+// its tail (the rest of the path). If there is no path separator,
+// splitFirst returns path as head, and the empty string as tail.
+// Specifically, splitFirst("foo") == splitFirst("foo/").
+//
func splitFirst(path string) (head, tail string) {
- i := strings.Index(path, string(filepath.Separator))
- if i > 0 {
+ if i := strings.Index(path, string(filepath.Separator)); i > 0 {
// 0 < i < len(path)
return path[0:i], path[i+1:]
}
- return "", path
+ return path, ""
}
// ToAbsolute maps a slash-separated relative path to an absolute filesystem
@@ -156,20 +161,14 @@ func (m *Mapping) ToAbsolute(spath string) string {
fpath := filepath.FromSlash(spath)
prefix, tail := splitFirst(fpath)
for _, e := range m.list {
- switch {
- case e.prefix == prefix:
- // use tail
- case e.prefix == "":
- tail = fpath
- default:
- continue // no match
- }
- abspath := filepath.Join(e.path, tail)
- if _, err := fs.Stat(abspath); err == nil {
- return abspath
+ if e.prefix == prefix {
+ // found potential mapping
+ abspath := filepath.Join(e.path, tail)
+ if _, err := fs.Stat(abspath); err == nil {
+ return abspath
+ }
}
}
-
return "" // no match
}
diff --git a/src/cmd/godoc/parser.go b/src/cmd/godoc/parser.go
index a2920539f..da38c5265 100644
--- a/src/cmd/godoc/parser.go
+++ b/src/cmd/godoc/parser.go
@@ -17,7 +17,7 @@ import (
"path/filepath"
)
-func parseFile(fset *token.FileSet, filename string, mode uint) (*ast.File, os.Error) {
+func parseFile(fset *token.FileSet, filename string, mode parser.Mode) (*ast.File, error) {
src, err := ReadFile(fs, filename)
if err != nil {
return nil, err
@@ -25,7 +25,7 @@ func parseFile(fset *token.FileSet, filename string, mode uint) (*ast.File, os.E
return parser.ParseFile(fset, filename, src, mode)
}
-func parseFiles(fset *token.FileSet, filenames []string) (pkgs map[string]*ast.Package, first os.Error) {
+func parseFiles(fset *token.FileSet, filenames []string) (pkgs map[string]*ast.Package, first error) {
pkgs = make(map[string]*ast.Package)
for _, filename := range filenames {
file, err := parseFile(fset, filename, parser.ParseComments)
@@ -48,7 +48,7 @@ func parseFiles(fset *token.FileSet, filenames []string) (pkgs map[string]*ast.P
return
}
-func parseDir(fset *token.FileSet, path string, filter func(FileInfo) bool) (map[string]*ast.Package, os.Error) {
+func parseDir(fset *token.FileSet, path string, filter func(os.FileInfo) bool) (map[string]*ast.Package, error) {
list, err := fs.ReadDir(path)
if err != nil {
return nil, err
diff --git a/src/cmd/godoc/setup-godoc-app.bash b/src/cmd/godoc/setup-godoc-app.bash
new file mode 100644
index 000000000..755d965d5
--- /dev/null
+++ b/src/cmd/godoc/setup-godoc-app.bash
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This script creates the .zip, index, and configuration files for running
+# godoc on app-engine.
+#
+# If an argument is provided it is assumed to be the app-engine godoc directory.
+# Without an argument, $APPDIR is used instead. If GOROOT is not set, the
+# current working directory is assumed to be $GOROOT. Various sanity checks
+# prevent accidents.
+#
+# The script creates a .zip file representing the $GOROOT file system
+# and computes the corresponding search index files. These files are then
+# copied to $APPDIR. A corresponding godoc configuration file is created
+# in $APPDIR/appconfig.go.
+
+ZIPFILE=godoc.zip
+INDEXFILE=godoc.index
+SPLITFILES=index.split.
+CONFIGFILE=godoc/appconfig.go
+
+error() {
+ echo "error: $1"
+ exit 2
+}
+
+getArgs() {
+ if [ -z $GOROOT ]; then
+ GOROOT=$(pwd)
+ echo "GOROOT not set, using cwd instead"
+ fi
+ if [ -z $APPDIR ]; then
+ if [ $# == 0 ]; then
+ error "APPDIR not set, and no argument provided"
+ fi
+ APPDIR=$1
+ echo "APPDIR not set, using argument instead"
+ fi
+
+ # safety checks
+ if [ ! -d $GOROOT ]; then
+ error "$GOROOT is not a directory"
+ fi
+ if [ ! -x $GOROOT/src/cmd/godoc/godoc ]; then
+ error "$GOROOT/src/cmd/godoc/godoc does not exist or is not executable"
+ fi
+ if [ ! -d $APPDIR ]; then
+ error "$APPDIR is not a directory"
+ fi
+ if [ ! -e $APPDIR/app.yaml ]; then
+ error "$APPDIR is not an app-engine directory; missing file app.yaml"
+ fi
+ if [ ! -d $APPDIR/godoc ]; then
+ error "$APPDIR is missing directory godoc"
+ fi
+
+ # reporting
+ echo "GOROOT = $GOROOT"
+ echo "APPDIR = $APPDIR"
+}
+
+cleanup() {
+ echo "*** cleanup $APPDIR"
+ rm $APPDIR/$ZIPFILE
+ rm $APPDIR/$INDEXFILE
+ rm $APPDIR/$SPLITFILES*
+ rm $APPDIR/$CONFIGFILE
+}
+
+makeZipfile() {
+ echo "*** make $APPDIR/$ZIPFILE"
+ zip -q -r $APPDIR/$ZIPFILE $GOROOT -i \*.go -i \*.html -i \*.css -i \*.js -i \*.txt -i \*.c -i \*.h -i \*.s -i \*.png -i \*.jpg -i \*.sh -i \*.ico
+}
+
+makeIndexfile() {
+ echo "*** make $APPDIR/$INDEXFILE"
+ OUT=/tmp/godoc.out
+ $GOROOT/src/cmd/godoc/godoc -write_index -index_files=$APPDIR/$INDEXFILE -zip=$APPDIR/$ZIPFILE 2> $OUT
+ if [ $? != 0 ]; then
+ error "$GOROOT/src/cmd/godoc/godoc failed - see $OUT for details"
+ fi
+}
+
+splitIndexfile() {
+ echo "*** split $APPDIR/$INDEXFILE"
+ split -b8m $APPDIR/$INDEXFILE $APPDIR/$SPLITFILES
+}
+
+makeConfigfile() {
+ echo "*** make $APPDIR/$CONFIGFILE"
+ cat > $APPDIR/$CONFIGFILE <<EOF
+package main
+
+// GENERATED FILE - DO NOT MODIFY BY HAND.
+// (generated by $GOROOT/src/cmd/godoc/setup-godoc-app.bash)
+
+const (
+ // .zip filename
+ zipFilename = "$ZIPFILE"
+
+ // goroot directory in .zip file
+ zipGoroot = "$GOROOT"
+
+ // glob pattern describing search index files
+ // (if empty, the index is built at run-time)
+ indexFilenames = "$SPLITFILES*"
+)
+EOF
+}
+
+getArgs "$@"
+cleanup
+makeZipfile
+makeIndexfile
+splitIndexfile
+makeConfigfile
+
+echo "*** setup complete"
diff --git a/src/cmd/godoc/snippet.go b/src/cmd/godoc/snippet.go
index 68e27d9a0..c2b74ee52 100644
--- a/src/cmd/godoc/snippet.go
+++ b/src/cmd/godoc/snippet.go
@@ -11,9 +11,9 @@ package main
import (
"bytes"
+ "fmt"
"go/ast"
"go/token"
- "fmt"
)
type Snippet struct {
diff --git a/src/cmd/godoc/spec.go b/src/cmd/godoc/spec.go
index 3f69add86..c11f25d20 100644
--- a/src/cmd/godoc/spec.go
+++ b/src/cmd/godoc/spec.go
@@ -2,118 +2,103 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+package main
+
// This file contains the mechanism to "linkify" html source
// text containing EBNF sections (as found in go_spec.html).
// The result is the input source text with the EBNF sections
// modified such that identifiers are linked to the respective
// definitions.
-package main
-
import (
"bytes"
"fmt"
- "go/scanner"
- "go/token"
"io"
+ "text/scanner"
)
type ebnfParser struct {
- out io.Writer // parser output
- src []byte // parser source
- file *token.File // for position information
+ out io.Writer // parser output
+ src []byte // parser input
scanner scanner.Scanner
- prev int // offset of previous token
- pos token.Pos // token position
- tok token.Token // one token look-ahead
- lit string // token literal
+ prev int // offset of previous token
+ pos int // offset of current token
+ tok rune // one token look-ahead
+ lit string // token literal
}
func (p *ebnfParser) flush() {
- offs := p.file.Offset(p.pos)
- p.out.Write(p.src[p.prev:offs])
- p.prev = offs
+ p.out.Write(p.src[p.prev:p.pos])
+ p.prev = p.pos
}
func (p *ebnfParser) next() {
- if p.pos.IsValid() {
- p.flush()
- }
- p.pos, p.tok, p.lit = p.scanner.Scan()
- if p.tok.IsKeyword() {
- // TODO Should keyword mapping always happen outside scanner?
- // Or should there be a flag to scanner to enable keyword mapping?
- p.tok = token.IDENT
- }
+ p.tok = p.scanner.Scan()
+ p.pos = p.scanner.Position.Offset
+ p.lit = p.scanner.TokenText()
}
-func (p *ebnfParser) Error(pos token.Position, msg string) {
- fmt.Fprintf(p.out, `<span class="alert">error: %s</span>`, msg)
+func (p *ebnfParser) printf(format string, args ...interface{}) {
+ p.flush()
+ fmt.Fprintf(p.out, format, args...)
}
-func (p *ebnfParser) errorExpected(pos token.Pos, msg string) {
- msg = "expected " + msg
- if pos == p.pos {
- // the error happened at the current position;
- // make the error message more specific
- msg += ", found '" + p.tok.String() + "'"
- if p.tok.IsLiteral() {
- msg += " " + p.lit
- }
- }
- p.Error(p.file.Position(pos), msg)
+func (p *ebnfParser) errorExpected(msg string) {
+ p.printf(`<span class="highlight">error: expected %s, found %s</span>`, msg, scanner.TokenString(p.tok))
}
-func (p *ebnfParser) expect(tok token.Token) token.Pos {
- pos := p.pos
+func (p *ebnfParser) expect(tok rune) {
if p.tok != tok {
- p.errorExpected(pos, "'"+tok.String()+"'")
+ p.errorExpected(scanner.TokenString(tok))
}
p.next() // make progress in any case
- return pos
}
func (p *ebnfParser) parseIdentifier(def bool) {
- name := p.lit
- p.expect(token.IDENT)
- if def {
- fmt.Fprintf(p.out, `<a id="%s">%s</a>`, name, name)
+ if p.tok == scanner.Ident {
+ name := p.lit
+ if def {
+ p.printf(`<a id="%s">%s</a>`, name, name)
+ } else {
+ p.printf(`<a href="#%s" class="noline">%s</a>`, name, name)
+ }
+ p.prev += len(name) // skip identifier when printing next time
+ p.next()
} else {
- fmt.Fprintf(p.out, `<a href="#%s" class="noline">%s</a>`, name, name)
+ p.expect(scanner.Ident)
}
- p.prev += len(name) // skip identifier when calling flush
}
func (p *ebnfParser) parseTerm() bool {
switch p.tok {
- case token.IDENT:
+ case scanner.Ident:
p.parseIdentifier(false)
- case token.STRING:
+ case scanner.String:
p.next()
- const ellipsis = "…" // U+2026, the horizontal ellipsis character
- if p.tok == token.ILLEGAL && p.lit == ellipsis {
+ const ellipsis = '…' // U+2026, the horizontal ellipsis character
+ if p.tok == ellipsis {
p.next()
- p.expect(token.STRING)
+ p.expect(scanner.String)
}
- case token.LPAREN:
+ case '(':
p.next()
p.parseExpression()
- p.expect(token.RPAREN)
+ p.expect(')')
- case token.LBRACK:
+ case '[':
p.next()
p.parseExpression()
- p.expect(token.RBRACK)
+ p.expect(']')
- case token.LBRACE:
+ case '{':
p.next()
p.parseExpression()
- p.expect(token.RBRACE)
+ p.expect('}')
default:
- return false
+ return false // no term found
}
return true
@@ -121,7 +106,7 @@ func (p *ebnfParser) parseTerm() bool {
func (p *ebnfParser) parseSequence() {
if !p.parseTerm() {
- p.errorExpected(p.pos, "term")
+ p.errorExpected("term")
}
for p.parseTerm() {
}
@@ -130,7 +115,7 @@ func (p *ebnfParser) parseSequence() {
func (p *ebnfParser) parseExpression() {
for {
p.parseSequence()
- if p.tok != token.OR {
+ if p.tok != '|' {
break
}
p.next()
@@ -139,23 +124,22 @@ func (p *ebnfParser) parseExpression() {
func (p *ebnfParser) parseProduction() {
p.parseIdentifier(true)
- p.expect(token.ASSIGN)
- if p.tok != token.PERIOD {
+ p.expect('=')
+ if p.tok != '.' {
p.parseExpression()
}
- p.expect(token.PERIOD)
+ p.expect('.')
}
-func (p *ebnfParser) parse(fset *token.FileSet, out io.Writer, src []byte) {
+func (p *ebnfParser) parse(out io.Writer, src []byte) {
// initialize ebnfParser
p.out = out
p.src = src
- p.file = fset.AddFile("", fset.Base(), len(src))
- p.scanner.Init(p.file, src, p, scanner.AllowIllegalChars)
+ p.scanner.Init(bytes.NewBuffer(src))
p.next() // initializes pos, tok, lit
// process source
- for p.tok != token.EOF {
+ for p.tok != scanner.EOF {
p.parseProduction()
}
p.flush()
@@ -167,32 +151,29 @@ var (
closeTag = []byte(`</pre>`)
)
-func linkify(out io.Writer, src []byte) {
- fset := token.NewFileSet()
+func Linkify(out io.Writer, src []byte) {
for len(src) > 0 {
- n := len(src)
-
// i: beginning of EBNF text (or end of source)
i := bytes.Index(src, openTag)
if i < 0 {
- i = n - len(openTag)
+ i = len(src) - len(openTag)
}
i += len(openTag)
// j: end of EBNF text (or end of source)
- j := bytes.Index(src[i:n], closeTag) // close marker
+ j := bytes.Index(src[i:], closeTag) // close marker
if j < 0 {
- j = n - i
+ j = len(src) - i
}
j += i
// write text before EBNF
out.Write(src[0:i])
- // parse and write EBNF
+ // process EBNF
var p ebnfParser
- p.parse(fset, out, src[i:j])
+ p.parse(out, src[i:j])
// advance
- src = src[j:n]
+ src = src[j:]
}
}
diff --git a/src/cmd/godoc/throttle.go b/src/cmd/godoc/throttle.go
index 193492802..ac18b44e0 100644
--- a/src/cmd/godoc/throttle.go
+++ b/src/cmd/godoc/throttle.go
@@ -10,15 +10,15 @@ import "time"
// calling the Throttle method repeatedly.
//
type Throttle struct {
- f float64 // f = (1-r)/r for 0 < r < 1
- tm int64 // minimum run time slice; >= 0
- tr int64 // accumulated time running
- ts int64 // accumulated time stopped
- tt int64 // earliest throttle time (= time Throttle returned + tm)
+ f float64 // f = (1-r)/r for 0 < r < 1
+ dt time.Duration // minimum run time slice; >= 0
+ tr time.Duration // accumulated time running
+ ts time.Duration // accumulated time stopped
+ tt time.Time // earliest throttle time (= time Throttle returned + dt)
}
// NewThrottle creates a new Throttle with a throttle value r and
-// a minimum allocated run time slice of tm nanoseconds:
+// a minimum allocated run time slice of dt:
//
// r == 0: "empty" throttle; the goroutine is always sleeping
// r == 1: full throttle; the goroutine is never sleeping
@@ -26,9 +26,9 @@ type Throttle struct {
// A value of r == 0.6 throttles a goroutine such that it runs
// approx. 60% of the time, and sleeps approx. 40% of the time.
// Values of r < 0 or r > 1 are clamped down to values between 0 and 1.
-// Values of tm < 0 are set to 0.
+// Values of dt < 0 are set to 0.
//
-func NewThrottle(r float64, tm int64) *Throttle {
+func NewThrottle(r float64, dt time.Duration) *Throttle {
var f float64
switch {
case r <= 0:
@@ -39,10 +39,10 @@ func NewThrottle(r float64, tm int64) *Throttle {
// 0 < r < 1
f = (1 - r) / r
}
- if tm < 0 {
- tm = 0
+ if dt < 0 {
+ dt = 0
}
- return &Throttle{f: f, tm: tm, tt: time.Nanoseconds() + tm}
+ return &Throttle{f: f, dt: dt, tt: time.Now().Add(dt)}
}
// Throttle calls time.Sleep such that over time the ratio tr/ts between
@@ -55,13 +55,13 @@ func (p *Throttle) Throttle() {
select {} // always sleep
}
- t0 := time.Nanoseconds()
- if t0 < p.tt {
+ t0 := time.Now()
+ if t0.Before(p.tt) {
return // keep running (minimum time slice not exhausted yet)
}
// accumulate running time
- p.tr += t0 - (p.tt - p.tm)
+ p.tr += t0.Sub(p.tt) + p.dt
// compute sleep time
// Over time we want:
@@ -75,14 +75,14 @@ func (p *Throttle) Throttle() {
// After some incremental run time δr added to the total run time
// tr, the incremental sleep-time δs to get to the same ratio again
// after waking up from time.Sleep is:
- if δs := int64(float64(p.tr)*p.f) - p.ts; δs > 0 {
+ if δs := time.Duration(float64(p.tr)*p.f) - p.ts; δs > 0 {
time.Sleep(δs)
}
// accumulate (actual) sleep time
- t1 := time.Nanoseconds()
- p.ts += t1 - t0
+ t1 := time.Now()
+ p.ts += t1.Sub(t0)
// set earliest next throttle time
- p.tt = t1 + p.tm
+ p.tt = t1.Add(p.dt)
}
diff --git a/src/cmd/godoc/utils.go b/src/cmd/godoc/utils.go
index 11e46aee5..be0bdc306 100644
--- a/src/cmd/godoc/utils.go
+++ b/src/cmd/godoc/utils.go
@@ -15,7 +15,7 @@ import (
"strings"
"sync"
"time"
- "utf8"
+ "unicode/utf8"
)
// An RWValue wraps a value and permits mutually exclusive
@@ -24,17 +24,17 @@ import (
type RWValue struct {
mutex sync.RWMutex
value interface{}
- timestamp int64 // time of last set(), in seconds since epoch
+ timestamp time.Time // time of last set()
}
func (v *RWValue) set(value interface{}) {
v.mutex.Lock()
v.value = value
- v.timestamp = time.Seconds()
+ v.timestamp = time.Now()
v.mutex.Unlock()
}
-func (v *RWValue) get() (interface{}, int64) {
+func (v *RWValue) get() (interface{}, time.Time) {
v.mutex.RLock()
defer v.mutex.RUnlock()
return v.value, v.timestamp
@@ -93,7 +93,7 @@ func canonicalizePaths(list []string, filter func(path string) bool) []string {
// writeFileAtomically writes data to a temporary file and then
// atomically renames that file to the file named by filename.
//
-func writeFileAtomically(filename string, data []byte) os.Error {
+func writeFileAtomically(filename string, data []byte) error {
// TODO(gri) this won't work on appengine
f, err := ioutil.TempFile(filepath.Split(filename))
if err != nil {
diff --git a/src/cmd/godoc/zip.go b/src/cmd/godoc/zip.go
index 86cd79b17..cd38ed92b 100644
--- a/src/cmd/godoc/zip.go
+++ b/src/cmd/godoc/zip.go
@@ -26,6 +26,7 @@ import (
"path"
"sort"
"strings"
+ "time"
)
// zipFI is the zip-file based implementation of FileInfo
@@ -45,19 +46,23 @@ func (fi zipFI) Size() int64 {
return 0 // directory
}
-func (fi zipFI) Mtime_ns() int64 {
+func (fi zipFI) ModTime() time.Time {
if f := fi.file; f != nil {
- return f.Mtime_ns()
+ return f.ModTime()
}
- return 0 // directory has no modified time entry
+ return time.Time{} // directory has no modified time entry
}
-func (fi zipFI) IsDirectory() bool {
- return fi.file == nil
+func (fi zipFI) Mode() os.FileMode {
+ if fi.file == nil {
+ // Unix directories typically are executable, hence 555.
+ return os.ModeDir | 0555
+ }
+ return 0444
}
-func (fi zipFI) IsRegular() bool {
- return fi.file != nil
+func (fi zipFI) IsDir() bool {
+ return fi.file == nil
}
// zipFS is the zip-file based implementation of FileSystem
@@ -66,7 +71,7 @@ type zipFS struct {
list zipList
}
-func (fs *zipFS) Close() os.Error {
+func (fs *zipFS) Close() error {
fs.list = nil
return fs.ReadCloser.Close()
}
@@ -79,7 +84,7 @@ func zipPath(name string) string {
return name[1:] // strip leading '/'
}
-func (fs *zipFS) stat(abspath string) (int, zipFI, os.Error) {
+func (fs *zipFS) stat(abspath string) (int, zipFI, error) {
i, exact := fs.list.lookup(abspath)
if i < 0 {
// abspath has leading '/' stripped - print it explicitly
@@ -93,38 +98,38 @@ func (fs *zipFS) stat(abspath string) (int, zipFI, os.Error) {
return i, zipFI{name, file}, nil
}
-func (fs *zipFS) Open(abspath string) (io.ReadCloser, os.Error) {
+func (fs *zipFS) Open(abspath string) (io.ReadCloser, error) {
_, fi, err := fs.stat(zipPath(abspath))
if err != nil {
return nil, err
}
- if fi.IsDirectory() {
+ if fi.IsDir() {
return nil, fmt.Errorf("Open: %s is a directory", abspath)
}
return fi.file.Open()
}
-func (fs *zipFS) Lstat(abspath string) (FileInfo, os.Error) {
+func (fs *zipFS) Lstat(abspath string) (os.FileInfo, error) {
_, fi, err := fs.stat(zipPath(abspath))
return fi, err
}
-func (fs *zipFS) Stat(abspath string) (FileInfo, os.Error) {
+func (fs *zipFS) Stat(abspath string) (os.FileInfo, error) {
_, fi, err := fs.stat(zipPath(abspath))
return fi, err
}
-func (fs *zipFS) ReadDir(abspath string) ([]FileInfo, os.Error) {
+func (fs *zipFS) ReadDir(abspath string) ([]os.FileInfo, error) {
path := zipPath(abspath)
i, fi, err := fs.stat(path)
if err != nil {
return nil, err
}
- if !fi.IsDirectory() {
+ if !fi.IsDir() {
return nil, fmt.Errorf("ReadDir: %s is not a directory", abspath)
}
- var list []FileInfo
+ var list []os.FileInfo
dirname := path + "/"
prevname := ""
for _, e := range fs.list[i:] {