Diffstat (limited to 'src/cmd/godoc/spec.go')
-rw-r--r-- | src/cmd/godoc/spec.go | 122
1 files changed, 61 insertions, 61 deletions
diff --git a/src/cmd/godoc/spec.go b/src/cmd/godoc/spec.go
index 15ce2fb9e..15f3cba20 100644
--- a/src/cmd/godoc/spec.go
+++ b/src/cmd/godoc/spec.go
@@ -11,35 +11,35 @@
 package main
 
 import (
-	"bytes";
-	"fmt";
-	"go/scanner";
-	"go/token";
-	"io";
-	"strings";
+	"bytes"
+	"fmt"
+	"go/scanner"
+	"go/token"
+	"io"
+	"strings"
 )
 
 
 type ebnfParser struct {
-	out	io.Writer;	// parser output
-	src	[]byte;		// parser source
-	scanner	scanner.Scanner;
-	prev	int;		// offset of previous token
-	pos	token.Position;
-	tok	token.Token;	// one token look-ahead
-	lit	[]byte;		// token literal
+	out	io.Writer	// parser output
+	src	[]byte		// parser source
+	scanner	scanner.Scanner
+	prev	int		// offset of previous token
+	pos	token.Position	// token position
+	tok	token.Token	// one token look-ahead
+	lit	[]byte		// token literal
 }
 
 
 func (p *ebnfParser) flush() {
-	p.out.Write(p.src[p.prev:p.pos.Offset]);
-	p.prev = p.pos.Offset;
+	p.out.Write(p.src[p.prev:p.pos.Offset])
+	p.prev = p.pos.Offset
 }
 
 
 func (p *ebnfParser) next() {
-	p.flush();
-	p.pos, p.tok, p.lit = p.scanner.Scan();
+	p.flush()
+	p.pos, p.tok, p.lit = p.scanner.Scan()
 	if p.tok.IsKeyword() {
 		// TODO Should keyword mapping always happen outside scanner?
 		// Or should there be a flag to scanner to enable keyword mapping?
@@ -54,38 +54,38 @@ func (p *ebnfParser) Error(pos token.Position, msg string) {
 
 
 func (p *ebnfParser) errorExpected(pos token.Position, msg string) {
-	msg = "expected " + msg;
+	msg = "expected " + msg
 	if pos.Offset == p.pos.Offset {
 		// the error happened at the current position;
 		// make the error message more specific
-		msg += ", found '" + p.tok.String() + "'";
+		msg += ", found '" + p.tok.String() + "'"
 		if p.tok.IsLiteral() {
 			msg += " " + string(p.lit)
 		}
 	}
-	p.Error(pos, msg);
+	p.Error(pos, msg)
 }
 
 
 func (p *ebnfParser) expect(tok token.Token) token.Position {
-	pos := p.pos;
+	pos := p.pos
 	if p.tok != tok {
 		p.errorExpected(pos, "'"+tok.String()+"'")
 	}
-	p.next();	// make progress in any case
-	return pos;
+	p.next()	// make progress in any case
+	return pos
 }
 
 
 func (p *ebnfParser) parseIdentifier(def bool) {
-	name := string(p.lit);
-	p.expect(token.IDENT);
+	name := string(p.lit)
+	p.expect(token.IDENT)
 	if def {
 		fmt.Fprintf(p.out, `<a id="%s">%s</a>`, name, name)
 	} else {
 		fmt.Fprintf(p.out, `<a href="#%s" class="noline">%s</a>`, name, name)
 	}
-	p.prev += len(name);	// skip identifier when calling flush
+	p.prev += len(name)	// skip identifier when calling flush
 }
 
 
@@ -95,32 +95,32 @@ func (p *ebnfParser) parseTerm() bool {
 		p.parseIdentifier(false)
 
 	case token.STRING:
-		p.next();
+		p.next()
 		if p.tok == token.ELLIPSIS {
-			p.next();
-			p.expect(token.STRING);
+			p.next()
+			p.expect(token.STRING)
 		}
 
 	case token.LPAREN:
-		p.next();
-		p.parseExpression();
-		p.expect(token.RPAREN);
+		p.next()
+		p.parseExpression()
+		p.expect(token.RPAREN)
 
 	case token.LBRACK:
-		p.next();
-		p.parseExpression();
-		p.expect(token.RBRACK);
+		p.next()
+		p.parseExpression()
+		p.expect(token.RBRACK)
 
 	case token.LBRACE:
-		p.next();
-		p.parseExpression();
-		p.expect(token.RBRACE);
+		p.next()
+		p.parseExpression()
+		p.expect(token.RBRACE)
 
 	default:
 		return false
 	}
 
-	return true;
+	return true
 }
 
 
@@ -132,70 +132,70 @@ func (p *ebnfParser) parseSequence() {
 
 func (p *ebnfParser) parseExpression() {
 	for {
-		p.parseSequence();
+		p.parseSequence()
 		if p.tok != token.OR {
 			break
 		}
-		p.next();
+		p.next()
 	}
 }
 
 
 func (p *ebnfParser) parseProduction() {
-	p.parseIdentifier(true);
-	p.expect(token.ASSIGN);
-	p.parseExpression();
-	p.expect(token.PERIOD);
+	p.parseIdentifier(true)
+	p.expect(token.ASSIGN)
+	p.parseExpression()
+	p.expect(token.PERIOD)
}
 
 
 func (p *ebnfParser) parse(out io.Writer, src []byte) {
 	// initialize ebnfParser
-	p.out = out;
-	p.src = src;
-	p.scanner.Init("", src, p, 0);
-	p.next();	// initializes pos, tok, lit
+	p.out = out
+	p.src = src
+	p.scanner.Init("", src, p, 0)
+	p.next()	// initializes pos, tok, lit
 
 	// process source
 	for p.tok != token.EOF {
 		p.parseProduction()
 	}
-	p.flush();
+	p.flush()
 }
 
 
 // Markers around EBNF sections
 var (
-	openTag	= strings.Bytes(`<pre class="ebnf">`);
-	closeTag	= strings.Bytes(`</pre>`);
+	openTag	= strings.Bytes(`<pre class="ebnf">`)
+	closeTag	= strings.Bytes(`</pre>`)
 )
 
 
 func linkify(out io.Writer, src []byte) {
 	for len(src) > 0 {
-		n := len(src);
+		n := len(src)
 
 		// i: beginning of EBNF text (or end of source)
-		i := bytes.Index(src, openTag);
+		i := bytes.Index(src, openTag)
 		if i < 0 {
 			i = n - len(openTag)
 		}
-		i += len(openTag);
+		i += len(openTag)
 
 		// j: end of EBNF text (or end of source)
-		j := bytes.Index(src[i:n], closeTag);	// close marker
+		j := bytes.Index(src[i:n], closeTag)	// close marker
 		if j < 0 {
 			j = n - i
 		}
-		j += i;
+		j += i
 
 		// write text before EBNF
-		out.Write(src[0:i]);
+		out.Write(src[0:i])
 		// parse and write EBNF
-		var p ebnfParser;
-		p.parse(out, src[i:j]);
+		var p ebnfParser
+		p.parse(out, src[i:j])
 
 		// advance
-		src = src[j:n];
+		src = src[j:n]
 	}
 }
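Note: the loop in linkify above can be read on its own. It repeatedly finds the next <pre class="ebnf"> ... </pre> pair, copies everything up to and including the open marker verbatim, hands the enclosed text to the EBNF parser, and continues after it. The sketch below reproduces just that marker-scanning loop as a standalone program in current Go (the file in the diff predates Go 1; strings.Bytes, for example, no longer exists). The scan and process names are illustrative stand-ins and are not part of godoc.

package main

import (
	"bytes"
	"fmt"
)

// Markers around EBNF sections, as in spec.go.
var (
	openTag  = []byte(`<pre class="ebnf">`)
	closeTag = []byte(`</pre>`)
)

// process stands in for the EBNF parsing pass; here it just echoes the text.
func process(out *bytes.Buffer, ebnf []byte) {
	out.Write(ebnf)
}

// scan walks src the same way linkify does: text outside the markers is
// copied verbatim, text between openTag and closeTag goes through process.
func scan(out *bytes.Buffer, src []byte) {
	for len(src) > 0 {
		n := len(src)

		// i: beginning of EBNF text (or end of source)
		i := bytes.Index(src, openTag)
		if i < 0 {
			i = n - len(openTag)
		}
		i += len(openTag)

		// j: end of EBNF text (or end of source)
		j := bytes.Index(src[i:n], closeTag)
		if j < 0 {
			j = n - i
		}
		j += i

		out.Write(src[0:i])     // text before and including the open marker
		process(out, src[i:j])  // the EBNF section itself
		src = src[j:n]          // advance; closeTag is copied next iteration
	}
}

func main() {
	doc := []byte(`<p>intro</p><pre class="ebnf">Production = name "=" Expression "." .</pre><p>rest</p>`)
	var buf bytes.Buffer
	scan(&buf, doc)
	fmt.Println(buf.String())
}

If either marker is missing, i and j fall back to the end of the input, so the remaining text is written out unchanged and the loop terminates, exactly as in linkify.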