Diffstat (limited to 'src/text/template/parse')
-rw-r--r--  src/text/template/parse/lex.go         551
-rw-r--r--  src/text/template/parse/lex_test.go    465
-rw-r--r--  src/text/template/parse/node.go        834
-rw-r--r--  src/text/template/parse/parse.go       677
-rw-r--r--  src/text/template/parse/parse_test.go  423
5 files changed, 2950 insertions, 0 deletions
diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go
new file mode 100644
index 000000000..1674aaf9c
--- /dev/null
+++ b/src/text/template/parse/lex.go
@@ -0,0 +1,551 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+ typ itemType // The type of this item.
+ pos Pos // The starting position, in bytes, of this item in the input string.
+ val string // The value of this item.
+}
+
+func (i item) String() string {
+ switch {
+ case i.typ == itemEOF:
+ return "EOF"
+ case i.typ == itemError:
+ return i.val
+ case i.typ > itemKeyword:
+ return fmt.Sprintf("<%s>", i.val)
+ case len(i.val) > 10:
+ return fmt.Sprintf("%.10q...", i.val)
+ }
+ return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+ itemError itemType = iota // error occurred; value is text of error
+ itemBool // boolean constant
+ itemChar // printable ASCII character; grab bag for comma etc.
+ itemCharConstant // character constant
+ itemComplex // complex constant (1+2i); imaginary is just a number
+ itemColonEquals // colon-equals (':=') introducing a declaration
+ itemEOF
+ itemField // alphanumeric identifier starting with '.'
+ itemIdentifier // alphanumeric identifier not starting with '.'
+ itemLeftDelim // left action delimiter
+ itemLeftParen // '(' inside action
+ itemNumber // simple number, including imaginary
+ itemPipe // pipe symbol
+ itemRawString // raw quoted string (includes quotes)
+ itemRightDelim // right action delimiter
+ itemRightParen // ')' inside action
+ itemSpace // run of spaces separating arguments
+ itemString // quoted string (includes quotes)
+ itemText // plain text
+ itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
+ // Keywords appear after all the rest.
+ itemKeyword // used only to delimit the keywords
+ itemDot // the cursor, spelled '.'
+ itemDefine // define keyword
+ itemElse // else keyword
+ itemEnd // end keyword
+ itemIf // if keyword
+ itemNil // the untyped nil constant, easiest to treat as a keyword
+ itemRange // range keyword
+ itemTemplate // template keyword
+ itemWith // with keyword
+)
+
+var key = map[string]itemType{
+ ".": itemDot,
+ "define": itemDefine,
+ "else": itemElse,
+ "end": itemEnd,
+ "if": itemIf,
+ "range": itemRange,
+ "nil": itemNil,
+ "template": itemTemplate,
+ "with": itemWith,
+}
+
+const eof = -1
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+ name string // the name of the input; used only for error reports
+ input string // the string being scanned
+ leftDelim string // start of action
+ rightDelim string // end of action
+ state stateFn // the next lexing function to enter
+ pos Pos // current position in the input
+ start Pos // start position of this item
+ width Pos // width of last rune read from input
+ lastPos Pos // position of most recent item returned by nextItem
+ items chan item // channel of scanned items
+ parenDepth int // nesting depth of ( ) exprs
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() rune {
+ if int(l.pos) >= len(l.input) {
+ l.width = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.width = Pos(w)
+ l.pos += l.width
+ return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+ l.pos -= l.width
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+ l.items <- item{t, l.start, l.input[l.start:l.pos]}
+ l.start = l.pos
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+ l.start = l.pos
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+ if strings.IndexRune(valid, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+ for strings.IndexRune(valid, l.next()) >= 0 {
+ }
+ l.backup()
+}
+
+// lineNumber reports which line we're on, based on the position of
+// the previous item returned by nextItem. Doing it this way
+// means we don't have to worry about peek double counting.
+func (l *lexer) lineNumber() int {
+ return 1 + strings.Count(l.input[:l.lastPos], "\n")
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+ l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
+ return nil
+}
+
+// nextItem returns the next item from the input.
+func (l *lexer) nextItem() item {
+ item := <-l.items
+ l.lastPos = item.pos
+ return item
+}
+
+// lex creates a new scanner for the input string.
+func lex(name, input, left, right string) *lexer {
+ if left == "" {
+ left = leftDelim
+ }
+ if right == "" {
+ right = rightDelim
+ }
+ l := &lexer{
+ name: name,
+ input: input,
+ leftDelim: left,
+ rightDelim: right,
+ items: make(chan item),
+ }
+ go l.run()
+ return l
+}
+
+// run runs the state machine for the lexer.
+func (l *lexer) run() {
+ for l.state = lexText; l.state != nil; {
+ l.state = l.state(l)
+ }
+}
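// Design note: lex starts run in its own goroutine, so lexing proceeds
// concurrently with parsing; nextItem blocks on the items channel until
// the next token has been emitted.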
+
+// state functions
+
+const (
+ leftDelim = "{{"
+ rightDelim = "}}"
+ leftComment = "/*"
+ rightComment = "*/"
+)
+
+// lexText scans until an opening action delimiter, "{{".
+func lexText(l *lexer) stateFn {
+ for {
+ if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
+ if l.pos > l.start {
+ l.emit(itemText)
+ }
+ return lexLeftDelim
+ }
+ if l.next() == eof {
+ break
+ }
+ }
+ // Correctly reached EOF.
+ if l.pos > l.start {
+ l.emit(itemText)
+ }
+ l.emit(itemEOF)
+ return nil
+}
+
+// lexLeftDelim scans the left delimiter, which is known to be present.
+func lexLeftDelim(l *lexer) stateFn {
+ l.pos += Pos(len(l.leftDelim))
+ if strings.HasPrefix(l.input[l.pos:], leftComment) {
+ return lexComment
+ }
+ l.emit(itemLeftDelim)
+ l.parenDepth = 0
+ return lexInsideAction
+}
+
+// lexComment scans a comment. The left comment marker is known to be present.
+func lexComment(l *lexer) stateFn {
+ l.pos += Pos(len(leftComment))
+ i := strings.Index(l.input[l.pos:], rightComment)
+ if i < 0 {
+ return l.errorf("unclosed comment")
+ }
+ l.pos += Pos(i + len(rightComment))
+ if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
+ return l.errorf("comment ends before closing delimiter")
+
+ }
+ l.pos += Pos(len(l.rightDelim))
+ l.ignore()
+ return lexText
+}
+
+// lexRightDelim scans the right delimiter, which is known to be present.
+func lexRightDelim(l *lexer) stateFn {
+ l.pos += Pos(len(l.rightDelim))
+ l.emit(itemRightDelim)
+ return lexText
+}
+
+// lexInsideAction scans the elements inside action delimiters.
+func lexInsideAction(l *lexer) stateFn {
+ // Either number, quoted string, or identifier.
+ // Spaces separate arguments; runs of spaces turn into itemSpace.
+ // Pipe symbols separate and are emitted.
+ if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
+ if l.parenDepth == 0 {
+ return lexRightDelim
+ }
+ return l.errorf("unclosed left paren")
+ }
+ switch r := l.next(); {
+ case r == eof || isEndOfLine(r):
+ return l.errorf("unclosed action")
+ case isSpace(r):
+ return lexSpace
+ case r == ':':
+ if l.next() != '=' {
+ return l.errorf("expected :=")
+ }
+ l.emit(itemColonEquals)
+ case r == '|':
+ l.emit(itemPipe)
+ case r == '"':
+ return lexQuote
+ case r == '`':
+ return lexRawQuote
+ case r == '$':
+ return lexVariable
+ case r == '\'':
+ return lexChar
+ case r == '.':
+ // special look-ahead for ".field" so we don't break l.backup().
+ if l.pos < Pos(len(l.input)) {
+ r := l.input[l.pos]
+ if r < '0' || '9' < r {
+ return lexField
+ }
+ }
+ fallthrough // '.' can start a number.
+ case r == '+' || r == '-' || ('0' <= r && r <= '9'):
+ l.backup()
+ return lexNumber
+ case isAlphaNumeric(r):
+ l.backup()
+ return lexIdentifier
+ case r == '(':
+ l.emit(itemLeftParen)
+ l.parenDepth++
+ return lexInsideAction
+ case r == ')':
+ l.emit(itemRightParen)
+ l.parenDepth--
+ if l.parenDepth < 0 {
+ return l.errorf("unexpected right paren %#U", r)
+ }
+ return lexInsideAction
+ case r <= unicode.MaxASCII && unicode.IsPrint(r):
+ l.emit(itemChar)
+ return lexInsideAction
+ default:
+ return l.errorf("unrecognized character in action: %#U", r)
+ }
+ return lexInsideAction
+}
+
+// lexSpace scans a run of space characters.
+// One space has already been seen.
+func lexSpace(l *lexer) stateFn {
+ for isSpace(l.peek()) {
+ l.next()
+ }
+ l.emit(itemSpace)
+ return lexInsideAction
+}
+
+// lexIdentifier scans an alphanumeric.
+func lexIdentifier(l *lexer) stateFn {
+Loop:
+ for {
+ switch r := l.next(); {
+ case isAlphaNumeric(r):
+ // absorb.
+ default:
+ l.backup()
+ word := l.input[l.start:l.pos]
+ if !l.atTerminator() {
+ return l.errorf("bad character %#U", r)
+ }
+ switch {
+ case key[word] > itemKeyword:
+ l.emit(key[word])
+ case word[0] == '.':
+ l.emit(itemField)
+ case word == "true", word == "false":
+ l.emit(itemBool)
+ default:
+ l.emit(itemIdentifier)
+ }
+ break Loop
+ }
+ }
+ return lexInsideAction
+}
+
+// lexField scans a field: .Alphanumeric.
+// The . has been scanned.
+func lexField(l *lexer) stateFn {
+ return lexFieldOrVariable(l, itemField)
+}
+
+// lexVariable scans a Variable: $Alphanumeric.
+// The $ has been scanned.
+func lexVariable(l *lexer) stateFn {
+ if l.atTerminator() { // Nothing interesting follows -> "$".
+ l.emit(itemVariable)
+ return lexInsideAction
+ }
+ return lexFieldOrVariable(l, itemVariable)
+}
+
+// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
+// The . or $ has been scanned.
+func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
+ if l.atTerminator() { // Nothing interesting follows -> "." or "$".
+ if typ == itemVariable {
+ l.emit(itemVariable)
+ } else {
+ l.emit(itemDot)
+ }
+ return lexInsideAction
+ }
+ var r rune
+ for {
+ r = l.next()
+ if !isAlphaNumeric(r) {
+ l.backup()
+ break
+ }
+ }
+ if !l.atTerminator() {
+ return l.errorf("bad character %#U", r)
+ }
+ l.emit(typ)
+ return lexInsideAction
+}
+
+// atTerminator reports whether the input is at a valid termination character to
+// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
+// like "$x+2" not being acceptable without a space, in case we decide one
+// day to implement arithmetic.
+func (l *lexer) atTerminator() bool {
+ r := l.peek()
+ if isSpace(r) || isEndOfLine(r) {
+ return true
+ }
+ switch r {
+ case eof, '.', ',', '|', ':', ')', '(':
+ return true
+ }
+ // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
+ // succeed but should fail) but only in extremely rare cases caused by willfully
+ // bad choice of delimiter.
+ if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
+ return true
+ }
+ return false
+}
+
+// lexChar scans a character constant. The initial quote is already
+// scanned. Syntax checking is done by the parser.
+func lexChar(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != eof && r != '\n' {
+ break
+ }
+ fallthrough
+ case eof, '\n':
+ return l.errorf("unterminated character constant")
+ case '\'':
+ break Loop
+ }
+ }
+ l.emit(itemCharConstant)
+ return lexInsideAction
+}
+
+// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
+// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
+// and "089" - but when it's wrong the input is invalid and the parser (via
+// strconv) will notice.
+func lexNumber(l *lexer) stateFn {
+ if !l.scanNumber() {
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ if sign := l.peek(); sign == '+' || sign == '-' {
+ // Complex: 1+2i. No spaces, must end in 'i'.
+ if !l.scanNumber() || l.input[l.pos-1] != 'i' {
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ l.emit(itemComplex)
+ } else {
+ l.emit(itemNumber)
+ }
+ return lexInsideAction
+}
+
+func (l *lexer) scanNumber() bool {
+ // Optional leading sign.
+ l.accept("+-")
+ // Is it hex?
+ digits := "0123456789"
+ if l.accept("0") && l.accept("xX") {
+ digits = "0123456789abcdefABCDEF"
+ }
+ l.acceptRun(digits)
+ if l.accept(".") {
+ l.acceptRun(digits)
+ }
+ if l.accept("eE") {
+ l.accept("+-")
+ l.acceptRun("0123456789")
+ }
+ // Is it imaginary?
+ l.accept("i")
+ // Next thing mustn't be alphanumeric.
+ if isAlphaNumeric(l.peek()) {
+ l.next()
+ return false
+ }
+ return true
+}
+
+// lexQuote scans a quoted string.
+func lexQuote(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != eof && r != '\n' {
+ break
+ }
+ fallthrough
+ case eof, '\n':
+ return l.errorf("unterminated quoted string")
+ case '"':
+ break Loop
+ }
+ }
+ l.emit(itemString)
+ return lexInsideAction
+}
+
+// lexRawQuote scans a raw quoted string.
+func lexRawQuote(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case eof, '\n':
+ return l.errorf("unterminated raw quoted string")
+ case '`':
+ break Loop
+ }
+ }
+ l.emit(itemRawString)
+ return lexInsideAction
+}
+
+// isSpace reports whether r is a space character.
+func isSpace(r rune) bool {
+ return r == ' ' || r == '\t'
+}
+
+// isEndOfLine reports whether r is an end-of-line character.
+func isEndOfLine(r rune) bool {
+ return r == '\r' || r == '\n'
+}
+
+// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+func isAlphaNumeric(r rune) bool {
+ return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
+}
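A minimal sketch of how a caller drives this lexer, mirroring the collect helper in lex_test.go below; the scanAll name is illustrative and not part of the file:

func scanAll(name, input string) []item {
	// "" selects the default "{{" and "}}" delimiters in lex.
	l := lex(name, input, "", "")
	var items []item
	for {
		it := l.nextItem()
		items = append(items, it)
		// The lexer always ends a scan with itemEOF or itemError.
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
	}
	return items
}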
diff --git a/src/text/template/parse/lex_test.go b/src/text/template/parse/lex_test.go
new file mode 100644
index 000000000..d251ccffb
--- /dev/null
+++ b/src/text/template/parse/lex_test.go
@@ -0,0 +1,465 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "fmt"
+ "testing"
+)
+
+// Make the types prettyprint.
+var itemName = map[itemType]string{
+ itemError: "error",
+ itemBool: "bool",
+ itemChar: "char",
+ itemCharConstant: "charconst",
+ itemComplex: "complex",
+ itemColonEquals: ":=",
+ itemEOF: "EOF",
+ itemField: "field",
+ itemIdentifier: "identifier",
+ itemLeftDelim: "left delim",
+ itemLeftParen: "(",
+ itemNumber: "number",
+ itemPipe: "pipe",
+ itemRawString: "raw string",
+ itemRightDelim: "right delim",
+ itemRightParen: ")",
+ itemSpace: "space",
+ itemString: "string",
+ itemVariable: "variable",
+
+ // keywords
+ itemDot: ".",
+ itemDefine: "define",
+ itemElse: "else",
+ itemIf: "if",
+ itemEnd: "end",
+ itemNil: "nil",
+ itemRange: "range",
+ itemTemplate: "template",
+ itemWith: "with",
+}
+
+func (i itemType) String() string {
+ s := itemName[i]
+ if s == "" {
+ return fmt.Sprintf("item%d", int(i))
+ }
+ return s
+}
+
+type lexTest struct {
+ name string
+ input string
+ items []item
+}
+
+var (
+ tEOF = item{itemEOF, 0, ""}
+ tFor = item{itemIdentifier, 0, "for"}
+ tLeft = item{itemLeftDelim, 0, "{{"}
+ tLpar = item{itemLeftParen, 0, "("}
+ tPipe = item{itemPipe, 0, "|"}
+ tQuote = item{itemString, 0, `"abc \n\t\" "`}
+ tRange = item{itemRange, 0, "range"}
+ tRight = item{itemRightDelim, 0, "}}"}
+ tRpar = item{itemRightParen, 0, ")"}
+ tSpace = item{itemSpace, 0, " "}
+ raw = "`" + `abc\n\t\" ` + "`"
+ tRawQuote = item{itemRawString, 0, raw}
+)
+
+var lexTests = []lexTest{
+ {"empty", "", []item{tEOF}},
+ {"spaces", " \t\n", []item{{itemText, 0, " \t\n"}, tEOF}},
+ {"text", `now is the time`, []item{{itemText, 0, "now is the time"}, tEOF}},
+ {"text with comment", "hello-{{/* this is a comment */}}-world", []item{
+ {itemText, 0, "hello-"},
+ {itemText, 0, "-world"},
+ tEOF,
+ }},
+ {"punctuation", "{{,@% }}", []item{
+ tLeft,
+ {itemChar, 0, ","},
+ {itemChar, 0, "@"},
+ {itemChar, 0, "%"},
+ tSpace,
+ tRight,
+ tEOF,
+ }},
+ {"parens", "{{((3))}}", []item{
+ tLeft,
+ tLpar,
+ tLpar,
+ {itemNumber, 0, "3"},
+ tRpar,
+ tRpar,
+ tRight,
+ tEOF,
+ }},
+ {"empty action", `{{}}`, []item{tLeft, tRight, tEOF}},
+ {"for", `{{for}}`, []item{tLeft, tFor, tRight, tEOF}},
+ {"quote", `{{"abc \n\t\" "}}`, []item{tLeft, tQuote, tRight, tEOF}},
+ {"raw quote", "{{" + raw + "}}", []item{tLeft, tRawQuote, tRight, tEOF}},
+ {"numbers", "{{1 02 0x14 -7.2i 1e3 +1.2e-4 4.2i 1+2i}}", []item{
+ tLeft,
+ {itemNumber, 0, "1"},
+ tSpace,
+ {itemNumber, 0, "02"},
+ tSpace,
+ {itemNumber, 0, "0x14"},
+ tSpace,
+ {itemNumber, 0, "-7.2i"},
+ tSpace,
+ {itemNumber, 0, "1e3"},
+ tSpace,
+ {itemNumber, 0, "+1.2e-4"},
+ tSpace,
+ {itemNumber, 0, "4.2i"},
+ tSpace,
+ {itemComplex, 0, "1+2i"},
+ tRight,
+ tEOF,
+ }},
+ {"characters", `{{'a' '\n' '\'' '\\' '\u00FF' '\xFF' '本'}}`, []item{
+ tLeft,
+ {itemCharConstant, 0, `'a'`},
+ tSpace,
+ {itemCharConstant, 0, `'\n'`},
+ tSpace,
+ {itemCharConstant, 0, `'\''`},
+ tSpace,
+ {itemCharConstant, 0, `'\\'`},
+ tSpace,
+ {itemCharConstant, 0, `'\u00FF'`},
+ tSpace,
+ {itemCharConstant, 0, `'\xFF'`},
+ tSpace,
+ {itemCharConstant, 0, `'本'`},
+ tRight,
+ tEOF,
+ }},
+ {"bools", "{{true false}}", []item{
+ tLeft,
+ {itemBool, 0, "true"},
+ tSpace,
+ {itemBool, 0, "false"},
+ tRight,
+ tEOF,
+ }},
+ {"dot", "{{.}}", []item{
+ tLeft,
+ {itemDot, 0, "."},
+ tRight,
+ tEOF,
+ }},
+ {"nil", "{{nil}}", []item{
+ tLeft,
+ {itemNil, 0, "nil"},
+ tRight,
+ tEOF,
+ }},
+ {"dots", "{{.x . .2 .x.y.z}}", []item{
+ tLeft,
+ {itemField, 0, ".x"},
+ tSpace,
+ {itemDot, 0, "."},
+ tSpace,
+ {itemNumber, 0, ".2"},
+ tSpace,
+ {itemField, 0, ".x"},
+ {itemField, 0, ".y"},
+ {itemField, 0, ".z"},
+ tRight,
+ tEOF,
+ }},
+ {"keywords", "{{range if else end with}}", []item{
+ tLeft,
+ {itemRange, 0, "range"},
+ tSpace,
+ {itemIf, 0, "if"},
+ tSpace,
+ {itemElse, 0, "else"},
+ tSpace,
+ {itemEnd, 0, "end"},
+ tSpace,
+ {itemWith, 0, "with"},
+ tRight,
+ tEOF,
+ }},
+ {"variables", "{{$c := printf $ $hello $23 $ $var.Field .Method}}", []item{
+ tLeft,
+ {itemVariable, 0, "$c"},
+ tSpace,
+ {itemColonEquals, 0, ":="},
+ tSpace,
+ {itemIdentifier, 0, "printf"},
+ tSpace,
+ {itemVariable, 0, "$"},
+ tSpace,
+ {itemVariable, 0, "$hello"},
+ tSpace,
+ {itemVariable, 0, "$23"},
+ tSpace,
+ {itemVariable, 0, "$"},
+ tSpace,
+ {itemVariable, 0, "$var"},
+ {itemField, 0, ".Field"},
+ tSpace,
+ {itemField, 0, ".Method"},
+ tRight,
+ tEOF,
+ }},
+ {"variable invocation", "{{$x 23}}", []item{
+ tLeft,
+ {itemVariable, 0, "$x"},
+ tSpace,
+ {itemNumber, 0, "23"},
+ tRight,
+ tEOF,
+ }},
+ {"pipeline", `intro {{echo hi 1.2 |noargs|args 1 "hi"}} outro`, []item{
+ {itemText, 0, "intro "},
+ tLeft,
+ {itemIdentifier, 0, "echo"},
+ tSpace,
+ {itemIdentifier, 0, "hi"},
+ tSpace,
+ {itemNumber, 0, "1.2"},
+ tSpace,
+ tPipe,
+ {itemIdentifier, 0, "noargs"},
+ tPipe,
+ {itemIdentifier, 0, "args"},
+ tSpace,
+ {itemNumber, 0, "1"},
+ tSpace,
+ {itemString, 0, `"hi"`},
+ tRight,
+ {itemText, 0, " outro"},
+ tEOF,
+ }},
+ {"declaration", "{{$v := 3}}", []item{
+ tLeft,
+ {itemVariable, 0, "$v"},
+ tSpace,
+ {itemColonEquals, 0, ":="},
+ tSpace,
+ {itemNumber, 0, "3"},
+ tRight,
+ tEOF,
+ }},
+ {"2 declarations", "{{$v , $w := 3}}", []item{
+ tLeft,
+ {itemVariable, 0, "$v"},
+ tSpace,
+ {itemChar, 0, ","},
+ tSpace,
+ {itemVariable, 0, "$w"},
+ tSpace,
+ {itemColonEquals, 0, ":="},
+ tSpace,
+ {itemNumber, 0, "3"},
+ tRight,
+ tEOF,
+ }},
+ {"field of parenthesized expression", "{{(.X).Y}}", []item{
+ tLeft,
+ tLpar,
+ {itemField, 0, ".X"},
+ tRpar,
+ {itemField, 0, ".Y"},
+ tRight,
+ tEOF,
+ }},
+ // errors
+ {"badchar", "#{{\x01}}", []item{
+ {itemText, 0, "#"},
+ tLeft,
+ {itemError, 0, "unrecognized character in action: U+0001"},
+ }},
+ {"unclosed action", "{{\n}}", []item{
+ tLeft,
+ {itemError, 0, "unclosed action"},
+ }},
+ {"EOF in action", "{{range", []item{
+ tLeft,
+ tRange,
+ {itemError, 0, "unclosed action"},
+ }},
+ {"unclosed quote", "{{\"\n\"}}", []item{
+ tLeft,
+ {itemError, 0, "unterminated quoted string"},
+ }},
+ {"unclosed raw quote", "{{`xx\n`}}", []item{
+ tLeft,
+ {itemError, 0, "unterminated raw quoted string"},
+ }},
+ {"unclosed char constant", "{{'\n}}", []item{
+ tLeft,
+ {itemError, 0, "unterminated character constant"},
+ }},
+ {"bad number", "{{3k}}", []item{
+ tLeft,
+ {itemError, 0, `bad number syntax: "3k"`},
+ }},
+ {"unclosed paren", "{{(3}}", []item{
+ tLeft,
+ tLpar,
+ {itemNumber, 0, "3"},
+ {itemError, 0, `unclosed left paren`},
+ }},
+ {"extra right paren", "{{3)}}", []item{
+ tLeft,
+ {itemNumber, 0, "3"},
+ tRpar,
+ {itemError, 0, `unexpected right paren U+0029 ')'`},
+ }},
+
+ // Fixed bugs
+ // Many elements in an action blew the lookahead until
+ // we made lexInsideAction not loop.
+ {"long pipeline deadlock", "{{|||||}}", []item{
+ tLeft,
+ tPipe,
+ tPipe,
+ tPipe,
+ tPipe,
+ tPipe,
+ tRight,
+ tEOF,
+ }},
+ {"text with bad comment", "hello-{{/*/}}-world", []item{
+ {itemText, 0, "hello-"},
+ {itemError, 0, `unclosed comment`},
+ }},
+ {"text with comment close separated from delim", "hello-{{/* */ }}-world", []item{
+ {itemText, 0, "hello-"},
+ {itemError, 0, `comment ends before closing delimiter`},
+ }},
+ // This one is an error that we can't catch because it breaks templates with
+ // minimized JavaScript. Should have fixed it before Go 1.1.
+ {"unmatched right delimiter", "hello-{.}}-world", []item{
+ {itemText, 0, "hello-{.}}-world"},
+ tEOF,
+ }},
+}
+
+// collect gathers the emitted items into a slice.
+func collect(t *lexTest, left, right string) (items []item) {
+ l := lex(t.name, t.input, left, right)
+ for {
+ item := l.nextItem()
+ items = append(items, item)
+ if item.typ == itemEOF || item.typ == itemError {
+ break
+ }
+ }
+ return
+}
+
+func equal(i1, i2 []item, checkPos bool) bool {
+ if len(i1) != len(i2) {
+ return false
+ }
+ for k := range i1 {
+ if i1[k].typ != i2[k].typ {
+ return false
+ }
+ if i1[k].val != i2[k].val {
+ return false
+ }
+ if checkPos && i1[k].pos != i2[k].pos {
+ return false
+ }
+ }
+ return true
+}
+
+func TestLex(t *testing.T) {
+ for _, test := range lexTests {
+ items := collect(&test, "", "")
+ if !equal(items, test.items, false) {
+ t.Errorf("%s: got\n\t%+v\nexpected\n\t%v", test.name, items, test.items)
+ }
+ }
+}
+
+// Some easy cases from above, but with delimiters $$ and @@
+var lexDelimTests = []lexTest{
+ {"punctuation", "$$,@%{{}}@@", []item{
+ tLeftDelim,
+ {itemChar, 0, ","},
+ {itemChar, 0, "@"},
+ {itemChar, 0, "%"},
+ {itemChar, 0, "{"},
+ {itemChar, 0, "{"},
+ {itemChar, 0, "}"},
+ {itemChar, 0, "}"},
+ tRightDelim,
+ tEOF,
+ }},
+ {"empty action", `$$@@`, []item{tLeftDelim, tRightDelim, tEOF}},
+ {"for", `$$for@@`, []item{tLeftDelim, tFor, tRightDelim, tEOF}},
+ {"quote", `$$"abc \n\t\" "@@`, []item{tLeftDelim, tQuote, tRightDelim, tEOF}},
+ {"raw quote", "$$" + raw + "@@", []item{tLeftDelim, tRawQuote, tRightDelim, tEOF}},
+}
+
+var (
+ tLeftDelim = item{itemLeftDelim, 0, "$$"}
+ tRightDelim = item{itemRightDelim, 0, "@@"}
+)
+
+func TestDelims(t *testing.T) {
+ for _, test := range lexDelimTests {
+ items := collect(&test, "$$", "@@")
+ if !equal(items, test.items, false) {
+ t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
+ }
+ }
+}
+
+var lexPosTests = []lexTest{
+ {"empty", "", []item{tEOF}},
+ {"punctuation", "{{,@%#}}", []item{
+ {itemLeftDelim, 0, "{{"},
+ {itemChar, 2, ","},
+ {itemChar, 3, "@"},
+ {itemChar, 4, "%"},
+ {itemChar, 5, "#"},
+ {itemRightDelim, 6, "}}"},
+ {itemEOF, 8, ""},
+ }},
+ {"sample", "0123{{hello}}xyz", []item{
+ {itemText, 0, "0123"},
+ {itemLeftDelim, 4, "{{"},
+ {itemIdentifier, 6, "hello"},
+ {itemRightDelim, 11, "}}"},
+ {itemText, 13, "xyz"},
+ {itemEOF, 16, ""},
+ }},
+}
+
+// The other tests don't check position, to make the test cases easier to construct.
+// This one does.
+func TestPos(t *testing.T) {
+ for _, test := range lexPosTests {
+ items := collect(&test, "", "")
+ if !equal(items, test.items, true) {
+ t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
+ if len(items) == len(test.items) {
+ // Detailed print; avoid item.String() to expose the position value.
+ for i := range items {
+ if !equal(items[i:i+1], test.items[i:i+1], true) {
+ i1 := items[i]
+ i2 := test.items[i]
+ t.Errorf("\t#%d: got {%v %d %q} expected {%v %d %q}", i, i1.typ, i1.pos, i1.val, i2.typ, i2.pos, i2.val)
+ }
+ }
+ }
+ }
+ }
+}
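A hypothetical extra entry in the lexTests style, showing that ':=' is recognized even without surrounding spaces, since ':' is an accepted terminator after a variable; it would be exercised with the same collect and equal helpers:

var extraLexTests = []lexTest{
	{"colon-equals without spaces", "{{$v:=3}}", []item{
		tLeft,
		{itemVariable, 0, "$v"}, // ':' terminates the variable name
		{itemColonEquals, 0, ":="},
		{itemNumber, 0, "3"},
		tRight,
		tEOF,
	}},
}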
diff --git a/src/text/template/parse/node.go b/src/text/template/parse/node.go
new file mode 100644
index 000000000..55c37f6db
--- /dev/null
+++ b/src/text/template/parse/node.go
@@ -0,0 +1,834 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parse nodes.
+
+package parse
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var textFormat = "%s" // Changed to "%q" in tests for better error messages.
+
+// A Node is an element in the parse tree. The interface is trivial.
+// The interface contains an unexported method so that only
+// types local to this package can satisfy it.
+type Node interface {
+ Type() NodeType
+ String() string
+ // Copy does a deep copy of the Node and all its components.
+ // To avoid type assertions, some XxxNodes also have specialized
+ // CopyXxx methods that return *XxxNode.
+ Copy() Node
+ Position() Pos // byte position of start of node in full original input string
+ // tree returns the containing *Tree.
+ // It is unexported so all implementations of Node are in this package.
+ tree() *Tree
+}
+
+// NodeType identifies the type of a parse tree node.
+type NodeType int
+
+// Pos represents a byte position in the original input text from which
+// this template was parsed.
+type Pos int
+
+func (p Pos) Position() Pos {
+ return p
+}
+
+// Type returns itself and provides an easy default implementation
+// for embedding in a Node. Embedded in all non-trivial Nodes.
+func (t NodeType) Type() NodeType {
+ return t
+}
+
+const (
+ NodeText NodeType = iota // Plain text.
+ NodeAction // A non-control action such as a field evaluation.
+ NodeBool // A boolean constant.
+ NodeChain // A sequence of field accesses.
+ NodeCommand // An element of a pipeline.
+ NodeDot // The cursor, dot.
+ nodeElse // An else action. Not added to tree.
+ nodeEnd // An end action. Not added to tree.
+ NodeField // A field or method name.
+ NodeIdentifier // An identifier; always a function name.
+ NodeIf // An if action.
+ NodeList // A list of Nodes.
+ NodeNil // An untyped nil constant.
+ NodeNumber // A numerical constant.
+ NodePipe // A pipeline of commands.
+ NodeRange // A range action.
+ NodeString // A string constant.
+ NodeTemplate // A template invocation action.
+ NodeVariable // A $ variable.
+ NodeWith // A with action.
+)
+
+// Nodes.
+
+// ListNode holds a sequence of nodes.
+type ListNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Nodes []Node // The element nodes in lexical order.
+}
+
+func (t *Tree) newList(pos Pos) *ListNode {
+ return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
+}
+
+func (l *ListNode) append(n Node) {
+ l.Nodes = append(l.Nodes, n)
+}
+
+func (l *ListNode) tree() *Tree {
+ return l.tr
+}
+
+func (l *ListNode) String() string {
+ b := new(bytes.Buffer)
+ for _, n := range l.Nodes {
+ fmt.Fprint(b, n)
+ }
+ return b.String()
+}
+
+func (l *ListNode) CopyList() *ListNode {
+ if l == nil {
+ return l
+ }
+ n := l.tr.newList(l.Pos)
+ for _, elem := range l.Nodes {
+ n.append(elem.Copy())
+ }
+ return n
+}
+
+func (l *ListNode) Copy() Node {
+ return l.CopyList()
+}
+
+// TextNode holds plain text.
+type TextNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Text []byte // The text; may span newlines.
+}
+
+func (t *Tree) newText(pos Pos, text string) *TextNode {
+ return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
+}
+
+func (t *TextNode) String() string {
+ return fmt.Sprintf(textFormat, t.Text)
+}
+
+func (t *TextNode) tree() *Tree {
+ return t.tr
+}
+
+func (t *TextNode) Copy() Node {
+ return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
+}
+
+// PipeNode holds a pipeline with optional declaration
+type PipeNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Line int // The line number in the input (deprecated; kept for compatibility)
+ Decl []*VariableNode // Variable declarations in lexical order.
+ Cmds []*CommandNode // The commands in lexical order.
+}
+
+func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
+ return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
+}
+
+func (p *PipeNode) append(command *CommandNode) {
+ p.Cmds = append(p.Cmds, command)
+}
+
+func (p *PipeNode) String() string {
+ s := ""
+ if len(p.Decl) > 0 {
+ for i, v := range p.Decl {
+ if i > 0 {
+ s += ", "
+ }
+ s += v.String()
+ }
+ s += " := "
+ }
+ for i, c := range p.Cmds {
+ if i > 0 {
+ s += " | "
+ }
+ s += c.String()
+ }
+ return s
+}
+
+func (p *PipeNode) tree() *Tree {
+ return p.tr
+}
+
+func (p *PipeNode) CopyPipe() *PipeNode {
+ if p == nil {
+ return p
+ }
+ var decl []*VariableNode
+ for _, d := range p.Decl {
+ decl = append(decl, d.Copy().(*VariableNode))
+ }
+ n := p.tr.newPipeline(p.Pos, p.Line, decl)
+ for _, c := range p.Cmds {
+ n.append(c.Copy().(*CommandNode))
+ }
+ return n
+}
+
+func (p *PipeNode) Copy() Node {
+ return p.CopyPipe()
+}
+
+// ActionNode holds an action (something bounded by delimiters).
+// Control actions have their own nodes; ActionNode represents simple
+// ones such as field evaluations and parenthesized pipelines.
+type ActionNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Line int // The line number in the input (deprecated; kept for compatibility)
+ Pipe *PipeNode // The pipeline in the action.
+}
+
+func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
+ return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
+}
+
+func (a *ActionNode) String() string {
+ return fmt.Sprintf("{{%s}}", a.Pipe)
+
+}
+
+func (a *ActionNode) tree() *Tree {
+ return a.tr
+}
+
+func (a *ActionNode) Copy() Node {
+ return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
+
+}
+
+// CommandNode holds a command (a pipeline inside an evaluating action).
+type CommandNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Args []Node // Arguments in lexical order: Identifier, field, or constant.
+}
+
+func (t *Tree) newCommand(pos Pos) *CommandNode {
+ return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
+}
+
+func (c *CommandNode) append(arg Node) {
+ c.Args = append(c.Args, arg)
+}
+
+func (c *CommandNode) String() string {
+ s := ""
+ for i, arg := range c.Args {
+ if i > 0 {
+ s += " "
+ }
+ if arg, ok := arg.(*PipeNode); ok {
+ s += "(" + arg.String() + ")"
+ continue
+ }
+ s += arg.String()
+ }
+ return s
+}
+
+func (c *CommandNode) tree() *Tree {
+ return c.tr
+}
+
+func (c *CommandNode) Copy() Node {
+ if c == nil {
+ return c
+ }
+ n := c.tr.newCommand(c.Pos)
+ for _, c := range c.Args {
+ n.append(c.Copy())
+ }
+ return n
+}
+
+// IdentifierNode holds an identifier.
+type IdentifierNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Ident string // The identifier's name.
+}
+
+// NewIdentifier returns a new IdentifierNode with the given identifier name.
+func NewIdentifier(ident string) *IdentifierNode {
+ return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
+}
+
+// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
+// Chained for convenience.
+// TODO: fix one day?
+func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
+ i.Pos = pos
+ return i
+}
+
+// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
+// Chained for convenience.
+// TODO: fix one day?
+func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
+ i.tr = t
+ return i
+}
+
+func (i *IdentifierNode) String() string {
+ return i.Ident
+}
+
+func (i *IdentifierNode) tree() *Tree {
+ return i.tr
+}
+
+func (i *IdentifierNode) Copy() Node {
+ return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
+}
+
+// VariableNode holds a list of variable names, possibly with chained field
+// accesses. The dollar sign is part of the (first) name.
+type VariableNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Ident []string // Variable name and fields in lexical order.
+}
+
+func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
+ return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
+}
+
+func (v *VariableNode) String() string {
+ s := ""
+ for i, id := range v.Ident {
+ if i > 0 {
+ s += "."
+ }
+ s += id
+ }
+ return s
+}
+
+func (v *VariableNode) tree() *Tree {
+ return v.tr
+}
+
+func (v *VariableNode) Copy() Node {
+ return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
+}
+
+// DotNode holds the special identifier '.'.
+type DotNode struct {
+ NodeType
+ Pos
+ tr *Tree
+}
+
+func (t *Tree) newDot(pos Pos) *DotNode {
+ return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
+}
+
+func (d *DotNode) Type() NodeType {
+ // Override method on embedded NodeType for API compatibility.
+ // TODO: Not really a problem; could change API without effect but
+ // api tool complains.
+ return NodeDot
+}
+
+func (d *DotNode) String() string {
+ return "."
+}
+
+func (d *DotNode) tree() *Tree {
+ return d.tr
+}
+
+func (d *DotNode) Copy() Node {
+ return d.tr.newDot(d.Pos)
+}
+
+// NilNode holds the special identifier 'nil' representing an untyped nil constant.
+type NilNode struct {
+ NodeType
+ Pos
+ tr *Tree
+}
+
+func (t *Tree) newNil(pos Pos) *NilNode {
+ return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
+}
+
+func (n *NilNode) Type() NodeType {
+ // Override method on embedded NodeType for API compatibility.
+ // TODO: Not really a problem; could change API without effect but
+ // api tool complains.
+ return NodeNil
+}
+
+func (n *NilNode) String() string {
+ return "nil"
+}
+
+func (n *NilNode) tree() *Tree {
+ return n.tr
+}
+
+func (n *NilNode) Copy() Node {
+ return n.tr.newNil(n.Pos)
+}
+
+// FieldNode holds a field (identifier starting with '.').
+// The names may be chained ('.x.y').
+// The period is dropped from each ident.
+type FieldNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Ident []string // The identifiers in lexical order.
+}
+
+func (t *Tree) newField(pos Pos, ident string) *FieldNode {
+ return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
+}
+
+func (f *FieldNode) String() string {
+ s := ""
+ for _, id := range f.Ident {
+ s += "." + id
+ }
+ return s
+}
+
+func (f *FieldNode) tree() *Tree {
+ return f.tr
+}
+
+func (f *FieldNode) Copy() Node {
+ return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
+}
+
+// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
+// The names may be chained ('.x.y').
+// The periods are dropped from each ident.
+type ChainNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Node Node
+ Field []string // The identifiers in lexical order.
+}
+
+func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
+ return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
+}
+
+// Add adds the named field (which should start with a period) to the end of the chain.
+func (c *ChainNode) Add(field string) {
+ if len(field) == 0 || field[0] != '.' {
+ panic("no dot in field")
+ }
+ field = field[1:] // Remove leading dot.
+ if field == "" {
+ panic("empty field")
+ }
+ c.Field = append(c.Field, field)
+}
+
+func (c *ChainNode) String() string {
+ s := c.Node.String()
+ if _, ok := c.Node.(*PipeNode); ok {
+ s = "(" + s + ")"
+ }
+ for _, field := range c.Field {
+ s += "." + field
+ }
+ return s
+}
+
+func (c *ChainNode) tree() *Tree {
+ return c.tr
+}
+
+func (c *ChainNode) Copy() Node {
+ return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
+}
+
+// BoolNode holds a boolean constant.
+type BoolNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ True bool // The value of the boolean constant.
+}
+
+func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
+ return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
+}
+
+func (b *BoolNode) String() string {
+ if b.True {
+ return "true"
+ }
+ return "false"
+}
+
+func (b *BoolNode) tree() *Tree {
+ return b.tr
+}
+
+func (b *BoolNode) Copy() Node {
+ return b.tr.newBool(b.Pos, b.True)
+}
+
+// NumberNode holds a number: signed or unsigned integer, float, or complex.
+// The value is parsed and stored under all the types that can represent the value.
+// This simulates in a small amount of code the behavior of Go's ideal constants.
+type NumberNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ IsInt bool // Number has an integral value.
+ IsUint bool // Number has an unsigned integral value.
+ IsFloat bool // Number has a floating-point value.
+ IsComplex bool // Number is complex.
+ Int64 int64 // The signed integer value.
+ Uint64 uint64 // The unsigned integer value.
+ Float64 float64 // The floating-point value.
+ Complex128 complex128 // The complex value.
+ Text string // The original textual representation from the input.
+}
+
+func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
+ n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
+ switch typ {
+ case itemCharConstant:
+ rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
+ if err != nil {
+ return nil, err
+ }
+ if tail != "'" {
+ return nil, fmt.Errorf("malformed character constant: %s", text)
+ }
+ n.Int64 = int64(rune)
+ n.IsInt = true
+ n.Uint64 = uint64(rune)
+ n.IsUint = true
+ n.Float64 = float64(rune) // odd but those are the rules.
+ n.IsFloat = true
+ return n, nil
+ case itemComplex:
+ // fmt.Sscan can parse the pair, so let it do the work.
+ if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
+ return nil, err
+ }
+ n.IsComplex = true
+ n.simplifyComplex()
+ return n, nil
+ }
+ // Imaginary constants can only be complex unless they are zero.
+ if len(text) > 0 && text[len(text)-1] == 'i' {
+ f, err := strconv.ParseFloat(text[:len(text)-1], 64)
+ if err == nil {
+ n.IsComplex = true
+ n.Complex128 = complex(0, f)
+ n.simplifyComplex()
+ return n, nil
+ }
+ }
+ // Do integer test first so we get 0x123 etc.
+ u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
+ if err == nil {
+ n.IsUint = true
+ n.Uint64 = u
+ }
+ i, err := strconv.ParseInt(text, 0, 64)
+ if err == nil {
+ n.IsInt = true
+ n.Int64 = i
+ if i == 0 {
+ n.IsUint = true // in case of -0.
+ n.Uint64 = u
+ }
+ }
+ // If an integer extraction succeeded, promote the float.
+ if n.IsInt {
+ n.IsFloat = true
+ n.Float64 = float64(n.Int64)
+ } else if n.IsUint {
+ n.IsFloat = true
+ n.Float64 = float64(n.Uint64)
+ } else {
+ f, err := strconv.ParseFloat(text, 64)
+ if err == nil {
+ n.IsFloat = true
+ n.Float64 = f
+ // If a floating-point extraction succeeded, extract the int if needed.
+ if !n.IsInt && float64(int64(f)) == f {
+ n.IsInt = true
+ n.Int64 = int64(f)
+ }
+ if !n.IsUint && float64(uint64(f)) == f {
+ n.IsUint = true
+ n.Uint64 = uint64(f)
+ }
+ }
+ }
+ if !n.IsInt && !n.IsUint && !n.IsFloat {
+ return nil, fmt.Errorf("illegal number syntax: %q", text)
+ }
+ return n, nil
+}
+
+// simplifyComplex pulls out any other types that are represented by the complex number.
+// These all require that the imaginary part be zero.
+func (n *NumberNode) simplifyComplex() {
+ n.IsFloat = imag(n.Complex128) == 0
+ if n.IsFloat {
+ n.Float64 = real(n.Complex128)
+ n.IsInt = float64(int64(n.Float64)) == n.Float64
+ if n.IsInt {
+ n.Int64 = int64(n.Float64)
+ }
+ n.IsUint = float64(uint64(n.Float64)) == n.Float64
+ if n.IsUint {
+ n.Uint64 = uint64(n.Float64)
+ }
+ }
+}
+
+func (n *NumberNode) String() string {
+ return n.Text
+}
+
+func (n *NumberNode) tree() *Tree {
+ return n.tr
+}
+
+func (n *NumberNode) Copy() Node {
+ nn := new(NumberNode)
+ *nn = *n // Easy, fast, correct.
+ return nn
+}
+
+// StringNode holds a string constant. The value has been "unquoted".
+type StringNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Quoted string // The original text of the string, with quotes.
+ Text string // The string, after quote processing.
+}
+
+func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
+ return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
+}
+
+func (s *StringNode) String() string {
+ return s.Quoted
+}
+
+func (s *StringNode) tree() *Tree {
+ return s.tr
+}
+
+func (s *StringNode) Copy() Node {
+ return s.tr.newString(s.Pos, s.Quoted, s.Text)
+}
+
+// endNode represents an {{end}} action.
+// It does not appear in the final parse tree.
+type endNode struct {
+ NodeType
+ Pos
+ tr *Tree
+}
+
+func (t *Tree) newEnd(pos Pos) *endNode {
+ return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
+}
+
+func (e *endNode) String() string {
+ return "{{end}}"
+}
+
+func (e *endNode) tree() *Tree {
+ return e.tr
+}
+
+func (e *endNode) Copy() Node {
+ return e.tr.newEnd(e.Pos)
+}
+
+// elseNode represents an {{else}} action. Does not appear in the final tree.
+type elseNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Line int // The line number in the input (deprecated; kept for compatibility)
+}
+
+func (t *Tree) newElse(pos Pos, line int) *elseNode {
+ return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
+}
+
+func (e *elseNode) Type() NodeType {
+ return nodeElse
+}
+
+func (e *elseNode) String() string {
+ return "{{else}}"
+}
+
+func (e *elseNode) tree() *Tree {
+ return e.tr
+}
+
+func (e *elseNode) Copy() Node {
+ return e.tr.newElse(e.Pos, e.Line)
+}
+
+// BranchNode is the common representation of if, range, and with.
+type BranchNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Line int // The line number in the input (deprecated; kept for compatibility)
+ Pipe *PipeNode // The pipeline to be evaluated.
+ List *ListNode // What to execute if the value is non-empty.
+ ElseList *ListNode // What to execute if the value is empty (nil if absent).
+}
+
+func (b *BranchNode) String() string {
+ name := ""
+ switch b.NodeType {
+ case NodeIf:
+ name = "if"
+ case NodeRange:
+ name = "range"
+ case NodeWith:
+ name = "with"
+ default:
+ panic("unknown branch type")
+ }
+ if b.ElseList != nil {
+ return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
+ }
+ return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
+}
+
+func (b *BranchNode) tree() *Tree {
+ return b.tr
+}
+
+func (b *BranchNode) Copy() Node {
+ switch b.NodeType {
+ case NodeIf:
+ return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
+ case NodeRange:
+ return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
+ case NodeWith:
+ return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
+ default:
+ panic("unknown branch type")
+ }
+}
+
+// IfNode represents an {{if}} action and its commands.
+type IfNode struct {
+ BranchNode
+}
+
+func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
+ return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+func (i *IfNode) Copy() Node {
+ return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
+}
+
+// RangeNode represents a {{range}} action and its commands.
+type RangeNode struct {
+ BranchNode
+}
+
+func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
+ return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+func (r *RangeNode) Copy() Node {
+ return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
+}
+
+// WithNode represents a {{with}} action and its commands.
+type WithNode struct {
+ BranchNode
+}
+
+func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
+ return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+func (w *WithNode) Copy() Node {
+ return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
+}
+
+// TemplateNode represents a {{template}} action.
+type TemplateNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Line int // The line number in the input (deprecated; kept for compatibility)
+ Name string // The name of the template (unquoted).
+ Pipe *PipeNode // The command to evaluate as dot for the template.
+}
+
+func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
+ return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
+}
+
+func (t *TemplateNode) String() string {
+ if t.Pipe == nil {
+ return fmt.Sprintf("{{template %q}}", t.Name)
+ }
+ return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
+}
+
+func (t *TemplateNode) tree() *Tree {
+ return t.tr
+}
+
+func (t *TemplateNode) Copy() Node {
+ return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
+}
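For orientation, a small sketch of how newNumber records one literal under several types; the demoNumber name is made up and fmt is assumed to be imported:

func demoNumber() {
	t := New("demo")
	n, err := t.newNumber(0, "-7", itemNumber)
	if err != nil {
		panic(err) // not reachable for this input
	}
	// "-7" is stored as both a signed integer and a float, but not as a
	// uint, because strconv.ParseUint rejects the minus sign.
	fmt.Println(n.IsInt, n.Int64, n.IsFloat, n.Float64, n.IsUint)
	// Prints: true -7 true -7 false
}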
diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go
new file mode 100644
index 000000000..af33880c1
--- /dev/null
+++ b/src/text/template/parse/parse.go
@@ -0,0 +1,677 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parse builds parse trees for templates as defined by text/template
+// and html/template. Clients should use those packages to construct templates
+// rather than this one, which provides shared internal data structures not
+// intended for general use.
+package parse
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Tree is the representation of a single parsed template.
+type Tree struct {
+ Name string // name of the template represented by the tree.
+ ParseName string // name of the top-level template during parsing, for error messages.
+ Root *ListNode // top-level root of the tree.
+ text string // text parsed to create the template (or its parent)
+ // Parsing only; cleared after parse.
+ funcs []map[string]interface{}
+ lex *lexer
+ token [3]item // three-token lookahead for parser.
+ peekCount int
+ vars []string // variables defined at the moment.
+}
+
+// Copy returns a copy of the Tree. Any parsing state is discarded.
+func (t *Tree) Copy() *Tree {
+ if t == nil {
+ return nil
+ }
+ return &Tree{
+ Name: t.Name,
+ ParseName: t.ParseName,
+ Root: t.Root.CopyList(),
+ text: t.text,
+ }
+}
+
+// Parse returns a map from template name to parse.Tree, created by parsing the
+// templates described in the argument string. The top-level template will be
+// given the specified name. If an error is encountered, parsing stops and an
+// empty map is returned with the error.
+func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
+ treeSet = make(map[string]*Tree)
+ t := New(name)
+ t.text = text
+ _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
+ return
+}
+
+// next returns the next token.
+func (t *Tree) next() item {
+ if t.peekCount > 0 {
+ t.peekCount--
+ } else {
+ t.token[0] = t.lex.nextItem()
+ }
+ return t.token[t.peekCount]
+}
+
+// backup backs the input stream up one token.
+func (t *Tree) backup() {
+ t.peekCount++
+}
+
+// backup2 backs the input stream up two tokens.
+// The zeroth token is already there.
+func (t *Tree) backup2(t1 item) {
+ t.token[1] = t1
+ t.peekCount = 2
+}
+
+// backup3 backs the input stream up three tokens.
+// The zeroth token is already there.
+func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
+ t.token[1] = t1
+ t.token[2] = t2
+ t.peekCount = 3
+}
+
+// peek returns but does not consume the next token.
+func (t *Tree) peek() item {
+ if t.peekCount > 0 {
+ return t.token[t.peekCount-1]
+ }
+ t.peekCount = 1
+ t.token[0] = t.lex.nextItem()
+ return t.token[0]
+}
+
+// nextNonSpace returns the next non-space token.
+func (t *Tree) nextNonSpace() (token item) {
+ for {
+ token = t.next()
+ if token.typ != itemSpace {
+ break
+ }
+ }
+ return token
+}
+
+// peekNonSpace returns but does not consume the next non-space token.
+func (t *Tree) peekNonSpace() (token item) {
+ for {
+ token = t.next()
+ if token.typ != itemSpace {
+ break
+ }
+ }
+ t.backup()
+ return token
+}
+
+// Parsing.
+
+// New allocates a new parse tree with the given name.
+func New(name string, funcs ...map[string]interface{}) *Tree {
+ return &Tree{
+ Name: name,
+ funcs: funcs,
+ }
+}
+
+// ErrorContext returns a textual representation of the location of the node in the input text.
+// The receiver is only used when the node does not have a pointer to the tree inside,
+// which can occur in old code.
+func (t *Tree) ErrorContext(n Node) (location, context string) {
+ pos := int(n.Position())
+ tree := n.tree()
+ if tree == nil {
+ tree = t
+ }
+ text := tree.text[:pos]
+ byteNum := strings.LastIndex(text, "\n")
+ if byteNum == -1 {
+ byteNum = pos // On first line.
+ } else {
+ byteNum++ // After the newline.
+ byteNum = pos - byteNum
+ }
+ lineNum := 1 + strings.Count(text, "\n")
+ context = n.String()
+ if len(context) > 20 {
+ context = fmt.Sprintf("%.20s...", context)
+ }
+ return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
+}
+
+// errorf formats the error and terminates processing.
+func (t *Tree) errorf(format string, args ...interface{}) {
+ t.Root = nil
+ format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
+ panic(fmt.Errorf(format, args...))
+}
+
+// error terminates processing.
+func (t *Tree) error(err error) {
+ t.errorf("%s", err)
+}
+
+// expect consumes the next token and guarantees it has the required type.
+func (t *Tree) expect(expected itemType, context string) item {
+ token := t.nextNonSpace()
+ if token.typ != expected {
+ t.unexpected(token, context)
+ }
+ return token
+}
+
+// expectOneOf consumes the next token and guarantees it has one of the required types.
+func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
+ token := t.nextNonSpace()
+ if token.typ != expected1 && token.typ != expected2 {
+ t.unexpected(token, context)
+ }
+ return token
+}
+
+// unexpected complains about the token and terminates processing.
+func (t *Tree) unexpected(token item, context string) {
+ t.errorf("unexpected %s in %s", token, context)
+}
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (t *Tree) recover(errp *error) {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(runtime.Error); ok {
+ panic(e)
+ }
+ if t != nil {
+ t.stopParse()
+ }
+ *errp = e.(error)
+ }
+ return
+}
+
+// startParse initializes the parser, using the lexer.
+func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
+ t.Root = nil
+ t.lex = lex
+ t.vars = []string{"$"}
+ t.funcs = funcs
+}
+
+// stopParse terminates parsing.
+func (t *Tree) stopParse() {
+ t.lex = nil
+ t.vars = nil
+ t.funcs = nil
+}
+
+// Parse parses the template definition string to construct a representation of
+// the template for execution. If either action delimiter string is empty, the
+// default ("{{" or "}}") is used. Embedded template definitions are added to
+// the treeSet map.
+func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
+ defer t.recover(&err)
+ t.ParseName = t.Name
+ t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
+ t.text = text
+ t.parse(treeSet)
+ t.add(treeSet)
+ t.stopParse()
+ return t, nil
+}
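// Illustrative sketch of the exported entry point; as the package comment
// notes, clients normally use text/template or html/template rather than
// calling this directly. The template text and the demoParse name are made
// up, and fmt and log are assumed to be imported.
func demoParse() {
	treeSet, err := Parse("page", `{{define "T"}}Hello, {{.Name}}!{{end}}`, "", "")
	if err != nil {
		log.Fatal(err)
	}
	// treeSet maps template names to parse trees: the (empty) top-level
	// "page" tree plus the defined "T" tree.
	for name, tree := range treeSet {
		fmt.Printf("%s: %s\n", name, tree.Root)
	}
}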
+
+// add adds tree to the treeSet.
+func (t *Tree) add(treeSet map[string]*Tree) {
+ tree := treeSet[t.Name]
+ if tree == nil || IsEmptyTree(tree.Root) {
+ treeSet[t.Name] = t
+ return
+ }
+ if !IsEmptyTree(t.Root) {
+ t.errorf("template: multiple definition of template %q", t.Name)
+ }
+}
+
+// IsEmptyTree reports whether this tree (node) is empty of everything but space.
+func IsEmptyTree(n Node) bool {
+ switch n := n.(type) {
+ case nil:
+ return true
+ case *ActionNode:
+ case *IfNode:
+ case *ListNode:
+ for _, node := range n.Nodes {
+ if !IsEmptyTree(node) {
+ return false
+ }
+ }
+ return true
+ case *RangeNode:
+ case *TemplateNode:
+ case *TextNode:
+ return len(bytes.TrimSpace(n.Text)) == 0
+ case *WithNode:
+ default:
+ panic("unknown node: " + n.String())
+ }
+ return false
+}
+
+// parse is the top-level parser for a template, essentially the same
+// as itemList except it also parses {{define}} actions.
+// It runs to EOF.
+func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
+ t.Root = t.newList(t.peek().pos)
+ for t.peek().typ != itemEOF {
+ if t.peek().typ == itemLeftDelim {
+ delim := t.next()
+ if t.nextNonSpace().typ == itemDefine {
+ newT := New("definition") // name will be updated once we know it.
+ newT.text = t.text
+ newT.ParseName = t.ParseName
+ newT.startParse(t.funcs, t.lex)
+ newT.parseDefinition(treeSet)
+ continue
+ }
+ t.backup2(delim)
+ }
+ n := t.textOrAction()
+ if n.Type() == nodeEnd {
+ t.errorf("unexpected %s", n)
+ }
+ t.Root.append(n)
+ }
+ return nil
+}
+
+// parseDefinition parses a {{define}} ... {{end}} template definition and
+// installs the definition in the treeSet map. The "define" keyword has already
+// been scanned.
+func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
+ const context = "define clause"
+ name := t.expectOneOf(itemString, itemRawString, context)
+ var err error
+ t.Name, err = strconv.Unquote(name.val)
+ if err != nil {
+ t.error(err)
+ }
+ t.expect(itemRightDelim, context)
+ var end Node
+ t.Root, end = t.itemList()
+ if end.Type() != nodeEnd {
+ t.errorf("unexpected %s in %s", end, context)
+ }
+ t.add(treeSet)
+ t.stopParse()
+}
+
+// itemList:
+// textOrAction*
+// Terminates at {{end}} or {{else}}, returned separately.
+func (t *Tree) itemList() (list *ListNode, next Node) {
+ list = t.newList(t.peekNonSpace().pos)
+ for t.peekNonSpace().typ != itemEOF {
+ n := t.textOrAction()
+ switch n.Type() {
+ case nodeEnd, nodeElse:
+ return list, n
+ }
+ list.append(n)
+ }
+ t.errorf("unexpected EOF")
+ return
+}
+
+// textOrAction:
+// text | action
+func (t *Tree) textOrAction() Node {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemText:
+ return t.newText(token.pos, token.val)
+ case itemLeftDelim:
+ return t.action()
+ default:
+ t.unexpected(token, "input")
+ }
+ return nil
+}
+
+// Action:
+// control
+// command ("|" command)*
+// Left delim is past. Now get actions.
+// First word could be a keyword such as range.
+func (t *Tree) action() (n Node) {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemElse:
+ return t.elseControl()
+ case itemEnd:
+ return t.endControl()
+ case itemIf:
+ return t.ifControl()
+ case itemRange:
+ return t.rangeControl()
+ case itemTemplate:
+ return t.templateControl()
+ case itemWith:
+ return t.withControl()
+ }
+ t.backup()
+ // Do not pop variables; they persist until "end".
+ return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
+}
+
+// Pipeline:
+// declarations? command ('|' command)*
+func (t *Tree) pipeline(context string) (pipe *PipeNode) {
+ var decl []*VariableNode
+ pos := t.peekNonSpace().pos
+ // Are there declarations?
+ for {
+ if v := t.peekNonSpace(); v.typ == itemVariable {
+ t.next()
+ // Since space is a token, we need 3-token look-ahead here in the worst case:
+ // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
+ // argument variable rather than a declaration. So remember the token
+ // adjacent to the variable so we can push it back if necessary.
+ tokenAfterVariable := t.peek()
+ if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
+ t.nextNonSpace()
+ variable := t.newVariable(v.pos, v.val)
+ decl = append(decl, variable)
+ t.vars = append(t.vars, v.val)
+ if next.typ == itemChar && next.val == "," {
+ if context == "range" && len(decl) < 2 {
+ continue
+ }
+ t.errorf("too many declarations in %s", context)
+ }
+ } else if tokenAfterVariable.typ == itemSpace {
+ t.backup3(v, tokenAfterVariable)
+ } else {
+ t.backup2(v)
+ }
+ }
+ break
+ }
+ pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
+ for {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemRightDelim, itemRightParen:
+ if len(pipe.Cmds) == 0 {
+ t.errorf("missing value for %s", context)
+ }
+ if token.typ == itemRightParen {
+ t.backup()
+ }
+ return
+ case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
+ itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
+ t.backup()
+ pipe.append(t.command())
+ default:
+ t.unexpected(token, context)
+ }
+ }
+}
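+
+// Illustration of the declaration look-ahead above (examples assumed; $x must
+// already be declared for the second form to parse):
+//	{{$x := .Value}}         declares $x
+//	{{$x .Value}}            uses $x as the first operand of a command
+//	{{range $i, $v := .S}}   two declarations, accepted only for range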
+
+func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
+ defer t.popVars(len(t.vars))
+ line = t.lex.lineNumber()
+ pipe = t.pipeline(context)
+ var next Node
+ list, next = t.itemList()
+ switch next.Type() {
+ case nodeEnd: // done
+ case nodeElse:
+ if allowElseIf {
+ // Special case for "else if". If the "else" is followed immediately by an "if",
+ // the elseControl will have left the "if" token pending. Treat
+ // {{if a}}_{{else if b}}_{{end}}
+ // as
+ // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
+ // To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
+ // is assumed. This technique works even for long if-else-if chains.
+ // TODO: Should we allow else-if in with and range?
+ if t.peek().typ == itemIf {
+ t.next() // Consume the "if" token.
+ elseList = t.newList(next.Position())
+ elseList.append(t.ifControl())
+ // Do not consume the next item - only one {{end}} required.
+ break
+ }
+ }
+ elseList, next = t.itemList()
+ if next.Type() != nodeEnd {
+ t.errorf("expected end; found %s", next)
+ }
+ }
+ return pipe.Position(), line, pipe, list, elseList
+}
+
+// If:
+// {{if pipeline}} itemList {{end}}
+// {{if pipeline}} itemList {{else}} itemList {{end}}
+// If keyword is past.
+func (t *Tree) ifControl() Node {
+ return t.newIf(t.parseControl(true, "if"))
+}
+
+// Range:
+// {{range pipeline}} itemList {{end}}
+// {{range pipeline}} itemList {{else}} itemList {{end}}
+// Range keyword is past.
+func (t *Tree) rangeControl() Node {
+ return t.newRange(t.parseControl(false, "range"))
+}
+
+// With:
+// {{with pipeline}} itemList {{end}}
+// {{with pipeline}} itemList {{else}} itemList {{end}}
+// With keyword is past.
+func (t *Tree) withControl() Node {
+ return t.newWith(t.parseControl(false, "with"))
+}
+
+// End:
+// {{end}}
+// End keyword is past.
+func (t *Tree) endControl() Node {
+ return t.newEnd(t.expect(itemRightDelim, "end").pos)
+}
+
+// Else:
+// {{else}}
+// Else keyword is past.
+func (t *Tree) elseControl() Node {
+ // Special case for "else if".
+ peek := t.peekNonSpace()
+ if peek.typ == itemIf {
+ // We see "{{else if ...", but in effect rewrite it to "{{else}}{{if ...".
+ return t.newElse(peek.pos, t.lex.lineNumber())
+ }
+ return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
+}
+
+// Template:
+// {{template stringValue pipeline}}
+// Template keyword is past. The name must be something that can evaluate
+// to a string.
+func (t *Tree) templateControl() Node {
+ var name string
+ token := t.nextNonSpace()
+ switch token.typ {
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+ t.error(err)
+ }
+ name = s
+ default:
+ t.unexpected(token, "template invocation")
+ }
+ var pipe *PipeNode
+ if t.nextNonSpace().typ != itemRightDelim {
+ t.backup()
+ // Do not pop variables; they persist until "end".
+ pipe = t.pipeline("template")
+ }
+ return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
+}
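+
+// Illustrative invocations (template name and field assumed):
+//	{{template "header"}}         no pipeline
+//	{{template "header" .Page}}   the pipeline supplies the invocation's data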
+
+// command:
+// operand (space operand)*
+// Space-separated arguments up to a pipeline character or right delimiter.
+// We consume the pipe character but leave the right delim to terminate the action.
+func (t *Tree) command() *CommandNode {
+ cmd := t.newCommand(t.peekNonSpace().pos)
+ for {
+ t.peekNonSpace() // skip leading spaces.
+ operand := t.operand()
+ if operand != nil {
+ cmd.append(operand)
+ }
+ switch token := t.next(); token.typ {
+ case itemSpace:
+ continue
+ case itemError:
+ t.errorf("%s", token.val)
+ case itemRightDelim, itemRightParen:
+ t.backup()
+ case itemPipe:
+ default:
+ t.errorf("unexpected %s in operand; missing space?", token)
+ }
+ break
+ }
+ if len(cmd.Args) == 0 {
+ t.errorf("empty command")
+ }
+ return cmd
+}
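+
+// Illustration (assumes printf and html are among the defined functions):
+//	{{printf "%d" 23 | html}}
+// parses into two CommandNodes, printf "%d" 23 and html, joined by the pipe.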
+
+// operand:
+// term .Field*
+// An operand is a space-separated component of a command,
+// a term possibly followed by field accesses.
+// A nil return means the next item is not an operand.
+func (t *Tree) operand() Node {
+ node := t.term()
+ if node == nil {
+ return nil
+ }
+ if t.peek().typ == itemField {
+ chain := t.newChain(t.peek().pos, node)
+ for t.peek().typ == itemField {
+ chain.Add(t.next().val)
+ }
+ // Compatibility with original API: If the term is of type NodeField
+ // or NodeVariable, just put more fields on the original.
+ // Otherwise, keep the Chain node.
+ // TODO: Switch to Chains always when we can.
+ switch node.Type() {
+ case NodeField:
+ node = t.newField(chain.Position(), chain.String())
+ case NodeVariable:
+ node = t.newVariable(chain.Position(), chain.String())
+ default:
+ node = chain
+ }
+ }
+ return node
+}
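+
+// Illustration of the compatibility rule above (field and variable names assumed):
+//	(.Y .Z).Field   stays a ChainNode
+//	$x.Field        collapses into a single VariableNode "$x.Field"
+//	.X.Field        collapses into a single FieldNode ".X.Field"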
+
+// term:
+// literal (number, string, nil, boolean)
+// function (identifier)
+// .
+// .Field
+// $
+// '(' pipeline ')'
+// A term is a simple "expression".
+// A nil return means the next item is not a term.
+func (t *Tree) term() Node {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemError:
+ t.errorf("%s", token.val)
+ case itemIdentifier:
+ if !t.hasFunction(token.val) {
+ t.errorf("function %q not defined", token.val)
+ }
+ return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
+ case itemDot:
+ return t.newDot(token.pos)
+ case itemNil:
+ return t.newNil(token.pos)
+ case itemVariable:
+ return t.useVar(token.pos, token.val)
+ case itemField:
+ return t.newField(token.pos, token.val)
+ case itemBool:
+ return t.newBool(token.pos, token.val == "true")
+ case itemCharConstant, itemComplex, itemNumber:
+ number, err := t.newNumber(token.pos, token.val, token.typ)
+ if err != nil {
+ t.error(err)
+ }
+ return number
+ case itemLeftParen:
+ pipe := t.pipeline("parenthesized pipeline")
+ if token := t.next(); token.typ != itemRightParen {
+ t.errorf("unclosed right paren: unexpected %s", token)
+ }
+ return pipe
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+ t.error(err)
+ }
+ return t.newString(token.pos, token.val, s)
+ }
+ t.backup()
+ return nil
+}
+
+// hasFunction reports whether a function name exists in the Tree's maps.
+func (t *Tree) hasFunction(name string) bool {
+ for _, funcMap := range t.funcs {
+ if funcMap == nil {
+ continue
+ }
+ if funcMap[name] != nil {
+ return true
+ }
+ }
+ return false
+}
+
+// popVars trims the variable list to the specified length.
+func (t *Tree) popVars(n int) {
+ t.vars = t.vars[:n]
+}
+
+// useVar returns a node for a variable reference. It errors if the
+// variable is not defined.
+func (t *Tree) useVar(pos Pos, name string) Node {
+ v := t.newVariable(pos, name)
+ for _, varName := range t.vars {
+ if varName == v.Ident[0] {
+ return v
+ }
+ }
+ t.errorf("undefined variable %q", v.Ident[0])
+ return nil
+}
diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go
new file mode 100644
index 000000000..4a504fa7c
--- /dev/null
+++ b/src/text/template/parse/parse_test.go
@@ -0,0 +1,423 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+var debug = flag.Bool("debug", false, "show the errors produced by the main tests")
+
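+// numberTest embeds the numeric result types so each case in the table below
+// can list its expected int64, uint64, float64 and complex128 values positionally.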
+type numberTest struct {
+ text string
+ isInt bool
+ isUint bool
+ isFloat bool
+ isComplex bool
+ int64
+ uint64
+ float64
+ complex128
+}
+
+var numberTests = []numberTest{
+ // basics
+ {"0", true, true, true, false, 0, 0, 0, 0},
+ {"-0", true, true, true, false, 0, 0, 0, 0}, // check that -0 is a uint.
+ {"73", true, true, true, false, 73, 73, 73, 0},
+ {"073", true, true, true, false, 073, 073, 073, 0},
+ {"0x73", true, true, true, false, 0x73, 0x73, 0x73, 0},
+ {"-73", true, false, true, false, -73, 0, -73, 0},
+ {"+73", true, false, true, false, 73, 0, 73, 0},
+ {"100", true, true, true, false, 100, 100, 100, 0},
+ {"1e9", true, true, true, false, 1e9, 1e9, 1e9, 0},
+ {"-1e9", true, false, true, false, -1e9, 0, -1e9, 0},
+ {"-1.2", false, false, true, false, 0, 0, -1.2, 0},
+ {"1e19", false, true, true, false, 0, 1e19, 1e19, 0},
+ {"-1e19", false, false, true, false, 0, 0, -1e19, 0},
+ {"4i", false, false, false, true, 0, 0, 0, 4i},
+ {"-1.2+4.2i", false, false, false, true, 0, 0, 0, -1.2 + 4.2i},
+ {"073i", false, false, false, true, 0, 0, 0, 73i}, // not octal!
+ // complex numbers with a 0 imaginary part are float (and maybe integer)
+ {"0i", true, true, true, true, 0, 0, 0, 0},
+ {"-1.2+0i", false, false, true, true, 0, 0, -1.2, -1.2},
+ {"-12+0i", true, false, true, true, -12, 0, -12, -12},
+ {"13+0i", true, true, true, true, 13, 13, 13, 13},
+ // funny bases
+ {"0123", true, true, true, false, 0123, 0123, 0123, 0},
+ {"-0x0", true, true, true, false, 0, 0, 0, 0},
+ {"0xdeadbeef", true, true, true, false, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0},
+ // character constants
+ {`'a'`, true, true, true, false, 'a', 'a', 'a', 0},
+ {`'\n'`, true, true, true, false, '\n', '\n', '\n', 0},
+ {`'\\'`, true, true, true, false, '\\', '\\', '\\', 0},
+ {`'\''`, true, true, true, false, '\'', '\'', '\'', 0},
+ {`'\xFF'`, true, true, true, false, 0xFF, 0xFF, 0xFF, 0},
+ {`'パ'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ {`'\u30d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ {`'\U000030d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ // some broken syntax
+ {text: "+-2"},
+ {text: "0x123."},
+ {text: "1e."},
+ {text: "0xi."},
+ {text: "1+2."},
+ {text: "'x"},
+ {text: "'xx'"},
+ // Issue 8622 - 0xe parsed as floating point. Very embarrassing.
+ {"0xef", true, true, true, false, 0xef, 0xef, 0xef, 0},
+}
+
+func TestNumberParse(t *testing.T) {
+ for _, test := range numberTests {
+ // If fmt.Sscan thinks it's complex, it's complex. We can't trust the output
+ // because imaginary comes out as a number.
+ var c complex128
+ typ := itemNumber
+ var tree *Tree
+ if test.text[0] == '\'' {
+ typ = itemCharConstant
+ } else {
+ _, err := fmt.Sscan(test.text, &c)
+ if err == nil {
+ typ = itemComplex
+ }
+ }
+ n, err := tree.newNumber(0, test.text, typ)
+ ok := test.isInt || test.isUint || test.isFloat || test.isComplex
+ if ok && err != nil {
+ t.Errorf("unexpected error for %q: %s", test.text, err)
+ continue
+ }
+ if !ok && err == nil {
+ t.Errorf("expected error for %q", test.text)
+ continue
+ }
+ if !ok {
+ if *debug {
+ fmt.Printf("%s\n\t%s\n", test.text, err)
+ }
+ continue
+ }
+ if n.IsComplex != test.isComplex {
+ t.Errorf("complex incorrect for %q; should be %t", test.text, test.isComplex)
+ }
+ if test.isInt {
+ if !n.IsInt {
+ t.Errorf("expected integer for %q", test.text)
+ }
+ if n.Int64 != test.int64 {
+ t.Errorf("int64 for %q should be %d Is %d", test.text, test.int64, n.Int64)
+ }
+ } else if n.IsInt {
+ t.Errorf("did not expect integer for %q", test.text)
+ }
+ if test.isUint {
+ if !n.IsUint {
+ t.Errorf("expected unsigned integer for %q", test.text)
+ }
+ if n.Uint64 != test.uint64 {
+ t.Errorf("uint64 for %q should be %d Is %d", test.text, test.uint64, n.Uint64)
+ }
+ } else if n.IsUint {
+ t.Errorf("did not expect unsigned integer for %q", test.text)
+ }
+ if test.isFloat {
+ if !n.IsFloat {
+ t.Errorf("expected float for %q", test.text)
+ }
+ if n.Float64 != test.float64 {
+ t.Errorf("float64 for %q should be %g Is %g", test.text, test.float64, n.Float64)
+ }
+ } else if n.IsFloat {
+ t.Errorf("did not expect float for %q", test.text)
+ }
+ if test.isComplex {
+ if !n.IsComplex {
+ t.Errorf("expected complex for %q", test.text)
+ }
+ if n.Complex128 != test.complex128 {
+ t.Errorf("complex128 for %q should be %g Is %g", test.text, test.complex128, n.Complex128)
+ }
+ } else if n.IsComplex {
+ t.Errorf("did not expect complex for %q", test.text)
+ }
+ }
+}
+
+type parseTest struct {
+ name string
+ input string
+ ok bool
+ result string // what the user would see in an error message.
+}
+
+const (
+ noError = true
+ hasError = false
+)
+
+var parseTests = []parseTest{
+ {"empty", "", noError,
+ ``},
+ {"comment", "{{/*\n\n\n*/}}", noError,
+ ``},
+ {"spaces", " \t\n", noError,
+ `" \t\n"`},
+ {"text", "some text", noError,
+ `"some text"`},
+ {"emptyAction", "{{}}", hasError,
+ `{{}}`},
+ {"field", "{{.X}}", noError,
+ `{{.X}}`},
+ {"simple command", "{{printf}}", noError,
+ `{{printf}}`},
+ {"$ invocation", "{{$}}", noError,
+ "{{$}}"},
+ {"variable invocation", "{{with $x := 3}}{{$x 23}}{{end}}", noError,
+ "{{with $x := 3}}{{$x 23}}{{end}}"},
+ {"variable with fields", "{{$.I}}", noError,
+ "{{$.I}}"},
+ {"multi-word command", "{{printf `%d` 23}}", noError,
+ "{{printf `%d` 23}}"},
+ {"pipeline", "{{.X|.Y}}", noError,
+ `{{.X | .Y}}`},
+ {"pipeline with decl", "{{$x := .X|.Y}}", noError,
+ `{{$x := .X | .Y}}`},
+ {"nested pipeline", "{{.X (.Y .Z) (.A | .B .C) (.E)}}", noError,
+ `{{.X (.Y .Z) (.A | .B .C) (.E)}}`},
+ {"field applied to parentheses", "{{(.Y .Z).Field}}", noError,
+ `{{(.Y .Z).Field}}`},
+ {"simple if", "{{if .X}}hello{{end}}", noError,
+ `{{if .X}}"hello"{{end}}`},
+ {"if with else", "{{if .X}}true{{else}}false{{end}}", noError,
+ `{{if .X}}"true"{{else}}"false"{{end}}`},
+ {"if with else if", "{{if .X}}true{{else if .Y}}false{{end}}", noError,
+ `{{if .X}}"true"{{else}}{{if .Y}}"false"{{end}}{{end}}`},
+ {"if else chain", "+{{if .X}}X{{else if .Y}}Y{{else if .Z}}Z{{end}}+", noError,
+ `"+"{{if .X}}"X"{{else}}{{if .Y}}"Y"{{else}}{{if .Z}}"Z"{{end}}{{end}}{{end}}"+"`},
+ {"simple range", "{{range .X}}hello{{end}}", noError,
+ `{{range .X}}"hello"{{end}}`},
+ {"chained field range", "{{range .X.Y.Z}}hello{{end}}", noError,
+ `{{range .X.Y.Z}}"hello"{{end}}`},
+ {"nested range", "{{range .X}}hello{{range .Y}}goodbye{{end}}{{end}}", noError,
+ `{{range .X}}"hello"{{range .Y}}"goodbye"{{end}}{{end}}`},
+ {"range with else", "{{range .X}}true{{else}}false{{end}}", noError,
+ `{{range .X}}"true"{{else}}"false"{{end}}`},
+ {"range over pipeline", "{{range .X|.M}}true{{else}}false{{end}}", noError,
+ `{{range .X | .M}}"true"{{else}}"false"{{end}}`},
+ {"range []int", "{{range .SI}}{{.}}{{end}}", noError,
+ `{{range .SI}}{{.}}{{end}}`},
+ {"range 1 var", "{{range $x := .SI}}{{.}}{{end}}", noError,
+ `{{range $x := .SI}}{{.}}{{end}}`},
+ {"range 2 vars", "{{range $x, $y := .SI}}{{.}}{{end}}", noError,
+ `{{range $x, $y := .SI}}{{.}}{{end}}`},
+ {"constants", "{{range .SI 1 -3.2i true false 'a' nil}}{{end}}", noError,
+ `{{range .SI 1 -3.2i true false 'a' nil}}{{end}}`},
+ {"template", "{{template `x`}}", noError,
+ `{{template "x"}}`},
+ {"template with arg", "{{template `x` .Y}}", noError,
+ `{{template "x" .Y}}`},
+ {"with", "{{with .X}}hello{{end}}", noError,
+ `{{with .X}}"hello"{{end}}`},
+ {"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError,
+ `{{with .X}}"hello"{{else}}"goodbye"{{end}}`},
+ // Errors.
+ {"unclosed action", "hello{{range", hasError, ""},
+ {"unmatched end", "{{end}}", hasError, ""},
+ {"missing end", "hello{{range .x}}", hasError, ""},
+ {"missing end after else", "hello{{range .x}}{{else}}", hasError, ""},
+ {"undefined function", "hello{{undefined}}", hasError, ""},
+ {"undefined variable", "{{$x}}", hasError, ""},
+ {"variable undefined after end", "{{with $x := 4}}{{end}}{{$x}}", hasError, ""},
+ {"variable undefined in template", "{{template $v}}", hasError, ""},
+ {"declare with field", "{{with $x.Y := 4}}{{end}}", hasError, ""},
+ {"template with field ref", "{{template .X}}", hasError, ""},
+ {"template with var", "{{template $v}}", hasError, ""},
+ {"invalid punctuation", "{{printf 3, 4}}", hasError, ""},
+ {"multidecl outside range", "{{with $v, $u := 3}}{{end}}", hasError, ""},
+ {"too many decls in range", "{{range $u, $v, $w := 3}}{{end}}", hasError, ""},
+ {"dot applied to parentheses", "{{printf (printf .).}}", hasError, ""},
+ {"adjacent args", "{{printf 3`x`}}", hasError, ""},
+ {"adjacent args with .", "{{printf `x`.}}", hasError, ""},
+ {"extra end after if", "{{if .X}}a{{else if .Y}}b{{end}}{{end}}", hasError, ""},
+ // Equals (and other chars) do not assignments make (yet).
+ {"bug0a", "{{$x := 0}}{{$x}}", noError, "{{$x := 0}}{{$x}}"},
+ {"bug0b", "{{$x = 1}}{{$x}}", hasError, ""},
+ {"bug0c", "{{$x ! 2}}{{$x}}", hasError, ""},
+ {"bug0d", "{{$x % 3}}{{$x}}", hasError, ""},
+ // Check the parse fails for := rather than comma.
+ {"bug0e", "{{range $x := $y := 3}}{{end}}", hasError, ""},
+ // Another bug: variable read must ignore following punctuation.
+ {"bug1a", "{{$x:=.}}{{$x!2}}", hasError, ""}, // ! is just illegal here.
+ {"bug1b", "{{$x:=.}}{{$x+2}}", hasError, ""}, // $x+2 should not parse as ($x) (+2).
+ {"bug1c", "{{$x:=.}}{{$x +2}}", noError, "{{$x := .}}{{$x +2}}"}, // It's OK with a space.
+}
+
+var builtins = map[string]interface{}{
+ "printf": fmt.Sprintf,
+}
+
+func testParse(doCopy bool, t *testing.T) {
+ textFormat = "%q"
+ defer func() { textFormat = "%s" }()
+ for _, test := range parseTests {
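+ // The empty string arguments select the default "{{" and "}}" delimiters.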
+ tmpl, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree), builtins)
+ switch {
+ case err == nil && !test.ok:
+ t.Errorf("%q: expected error; got none", test.name)
+ continue
+ case err != nil && test.ok:
+ t.Errorf("%q: unexpected error: %v", test.name, err)
+ continue
+ case err != nil && !test.ok:
+ // expected error, got one
+ if *debug {
+ fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+ }
+ continue
+ }
+ var result string
+ if doCopy {
+ result = tmpl.Root.Copy().String()
+ } else {
+ result = tmpl.Root.String()
+ }
+ if result != test.result {
+ t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
+ }
+ }
+}
+
+func TestParse(t *testing.T) {
+ testParse(false, t)
+}
+
+// TestParseCopy is the same as TestParse, but the root node is copied before printing.
+func TestParseCopy(t *testing.T) {
+ testParse(true, t)
+}
+
+type isEmptyTest struct {
+ name string
+ input string
+ empty bool
+}
+
+var isEmptyTests = []isEmptyTest{
+ {"empty", ``, true},
+ {"nonempty", `hello`, false},
+ {"spaces only", " \t\n \t\n", true},
+ {"definition", `{{define "x"}}something{{end}}`, true},
+ {"definitions and space", "{{define `x`}}something{{end}}\n\n{{define `y`}}something{{end}}\n\n", true},
+ {"definitions and text", "{{define `x`}}something{{end}}\nx\n{{define `y`}}something{{end}}\ny\n", false},
+ {"definition and action", "{{define `x`}}something{{end}}{{if 3}}foo{{end}}", false},
+}
+
+func TestIsEmpty(t *testing.T) {
+ if !IsEmptyTree(nil) {
+ t.Errorf("nil tree is not empty")
+ }
+ for _, test := range isEmptyTests {
+ tree, err := New("root").Parse(test.input, "", "", make(map[string]*Tree), nil)
+ if err != nil {
+ t.Errorf("%q: unexpected error: %v", test.name, err)
+ continue
+ }
+ if empty := IsEmptyTree(tree.Root); empty != test.empty {
+ t.Errorf("%q: expected %t got %t", test.name, test.empty, empty)
+ }
+ }
+}
+
+func TestErrorContextWithTreeCopy(t *testing.T) {
+ tree, err := New("root").Parse("{{if true}}{{end}}", "", "", make(map[string]*Tree), nil)
+ if err != nil {
+ t.Fatalf("unexpected tree parse failure: %v", err)
+ }
+ treeCopy := tree.Copy()
+ wantLocation, wantContext := tree.ErrorContext(tree.Root.Nodes[0])
+ gotLocation, gotContext := treeCopy.ErrorContext(treeCopy.Root.Nodes[0])
+ if wantLocation != gotLocation {
+ t.Errorf("wrong error location want %q got %q", wantLocation, gotLocation)
+ }
+ if wantContext != gotContext {
+ t.Errorf("wrong error location want %q got %q", wantContext, gotContext)
+ }
+}
+
+// All of these fail; the result is a string that must appear in the error message.
+var errorTests = []parseTest{
+ // Check line numbers are accurate.
+ {"unclosed1",
+ "line1\n{{",
+ hasError, `unclosed1:2: unexpected unclosed action in command`},
+ {"unclosed2",
+ "line1\n{{define `x`}}line2\n{{",
+ hasError, `unclosed2:3: unexpected unclosed action in command`},
+ // Specific errors.
+ {"function",
+ "{{foo}}",
+ hasError, `function "foo" not defined`},
+ {"comment",
+ "{{/*}}",
+ hasError, `unclosed comment`},
+ {"lparen",
+ "{{.X (1 2 3}}",
+ hasError, `unclosed left paren`},
+ {"rparen",
+ "{{.X 1 2 3)}}",
+ hasError, `unexpected ")"`},
+ {"space",
+ "{{`x`3}}",
+ hasError, `missing space?`},
+ {"idchar",
+ "{{a#}}",
+ hasError, `'#'`},
+ {"charconst",
+ "{{'a}}",
+ hasError, `unterminated character constant`},
+ {"stringconst",
+ `{{"a}}`,
+ hasError, `unterminated quoted string`},
+ {"rawstringconst",
+ "{{`a}}",
+ hasError, `unterminated raw quoted string`},
+ {"number",
+ "{{0xi}}",
+ hasError, `number syntax`},
+ {"multidefine",
+ "{{define `a`}}a{{end}}{{define `a`}}b{{end}}",
+ hasError, `multiple definition of template`},
+ {"eof",
+ "{{range .X}}",
+ hasError, `unexpected EOF`},
+ {"variable",
+ // Declare $x so it's defined, to avoid that error, and then check we don't parse a declaration.
+ "{{$x := 23}}{{with $x.y := 3}}{{$x 23}}{{end}}",
+ hasError, `unexpected ":="`},
+ {"multidecl",
+ "{{$a,$b,$c := 23}}",
+ hasError, `too many declarations`},
+ {"undefvar",
+ "{{$a}}",
+ hasError, `undefined variable`},
+}
+
+func TestErrors(t *testing.T) {
+ for _, test := range errorTests {
+ _, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree))
+ if err == nil {
+ t.Errorf("%q: expected error", test.name)
+ continue
+ }
+ if !strings.Contains(err.Error(), test.result) {
+ t.Errorf("%q: error %q does not contain %q", test.name, err, test.result)
+ }
+ }
+}