author     Russ Cox <rsc@golang.org>   2014-09-08 00:08:51 -0400
committer  Russ Cox <rsc@golang.org>   2014-09-08 00:08:51 -0400
commit     8528da672cc093d4dd06732819abc1f7b6b5a46e (patch)
tree       334be80d4a4c85b77db6f6fdb67cbf0528cba5f5 /src/text
parent     73bcb69f272cbf34ddcc9daa56427a8683b5a95d (diff)
download   go-8528da672cc093d4dd06732819abc1f7b6b5a46e.tar.gz
build: move package sources from src/pkg to src
Preparation was in CL 134570043. This CL contains only the effect of 'hg mv src/pkg/* src'. For more about the move, see golang.org/s/go14nopkg.
Diffstat (limited to 'src/text')
-rw-r--r--   src/text/scanner/scanner.go               693
-rw-r--r--   src/text/scanner/scanner_test.go          618
-rw-r--r--   src/text/tabwriter/example_test.go         38
-rw-r--r--   src/text/tabwriter/tabwriter.go           558
-rw-r--r--   src/text/tabwriter/tabwriter_test.go      652
-rw-r--r--   src/text/template/doc.go                  405
-rw-r--r--   src/text/template/example_test.go          71
-rw-r--r--   src/text/template/examplefiles_test.go    182
-rw-r--r--   src/text/template/examplefunc_test.go      54
-rw-r--r--   src/text/template/exec.go                 842
-rw-r--r--   src/text/template/exec_test.go            994
-rw-r--r--   src/text/template/funcs.go                580
-rw-r--r--   src/text/template/helper.go               108
-rw-r--r--   src/text/template/multi_test.go           292
-rw-r--r--   src/text/template/parse/lex.go            551
-rw-r--r--   src/text/template/parse/lex_test.go       465
-rw-r--r--   src/text/template/parse/node.go           834
-rw-r--r--   src/text/template/parse/parse.go          677
-rw-r--r--   src/text/template/parse/parse_test.go     423
-rw-r--r--   src/text/template/template.go             217
-rw-r--r--   src/text/template/testdata/file1.tmpl       2
-rw-r--r--   src/text/template/testdata/file2.tmpl       2
-rw-r--r--   src/text/template/testdata/tmpl1.tmpl       3
-rw-r--r--   src/text/template/testdata/tmpl2.tmpl       3
24 files changed, 9264 insertions, 0 deletions
diff --git a/src/text/scanner/scanner.go b/src/text/scanner/scanner.go
new file mode 100644
index 000000000..5199ee4fc
--- /dev/null
+++ b/src/text/scanner/scanner.go
@@ -0,0 +1,693 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scanner provides a scanner and tokenizer for UTF-8-encoded text.
+// It takes an io.Reader providing the source, which then can be tokenized
+// through repeated calls to the Scan function. For compatibility with
+// existing tools, the NUL character is not allowed. If the first character
+// in the source is a UTF-8 encoded byte order mark (BOM), it is discarded.
+//
+// By default, a Scanner skips white space and Go comments and recognizes all
+// literals as defined by the Go language specification. It may be
+// customized to recognize only a subset of those literals and to recognize
+// different identifier and white space characters.
+//
+// Basic usage pattern:
+//
+// var s scanner.Scanner
+// s.Init(src)
+// tok := s.Scan()
+// for tok != scanner.EOF {
+// // do something with tok
+// tok = s.Scan()
+// }
+//
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A source position is represented by a Position value.
+// A position is valid if Line > 0.
+type Position struct {
+ Filename string // filename, if any
+ Offset int // byte offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count per line)
+}
+
+// IsValid returns true if the position is valid.
+func (pos *Position) IsValid() bool { return pos.Line > 0 }
+
+func (pos Position) String() string {
+ s := pos.Filename
+ if pos.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
+ }
+ if s == "" {
+ s = "???"
+ }
+ return s
+}
+
+// Predefined mode bits to control recognition of tokens. For instance,
+// to configure a Scanner such that it only recognizes (Go) identifiers,
+// integers, and skips comments, set the Scanner's Mode field to:
+//
+// ScanIdents | ScanInts | SkipComments
+//
+// With the exception of comments, which are skipped if SkipComments is
+// set, unrecognized tokens are not ignored. Instead, the scanner simply
+// returns the respective individual characters (or possibly sub-tokens).
+// For instance, if the mode is ScanIdents (not ScanStrings), the string
+// "foo" is scanned as the token sequence '"' Ident '"'.
+//
+const (
+ ScanIdents = 1 << -Ident
+ ScanInts = 1 << -Int
+ ScanFloats = 1 << -Float // includes Ints
+ ScanChars = 1 << -Char
+ ScanStrings = 1 << -String
+ ScanRawStrings = 1 << -RawString
+ ScanComments = 1 << -Comment
+ SkipComments = 1 << -skipComment // if set with ScanComments, comments become white space
+ GoTokens = ScanIdents | ScanFloats | ScanChars | ScanStrings | ScanRawStrings | ScanComments | SkipComments
+)
+
+// The result of Scan is one of the following tokens or a Unicode character.
+const (
+ EOF = -(iota + 1)
+ Ident
+ Int
+ Float
+ Char
+ String
+ RawString
+ Comment
+ skipComment
+)
+
+var tokenString = map[rune]string{
+ EOF: "EOF",
+ Ident: "Ident",
+ Int: "Int",
+ Float: "Float",
+ Char: "Char",
+ String: "String",
+ RawString: "RawString",
+ Comment: "Comment",
+}
+
+// TokenString returns a printable string for a token or Unicode character.
+func TokenString(tok rune) string {
+ if s, found := tokenString[tok]; found {
+ return s
+ }
+ return fmt.Sprintf("%q", string(tok))
+}
+
+// GoWhitespace is the default value for the Scanner's Whitespace field.
+// Its value selects Go's white space characters.
+const GoWhitespace = 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' '
+
+const bufLen = 1024 // at least utf8.UTFMax
+
+// A Scanner implements reading of Unicode characters and tokens from an io.Reader.
+type Scanner struct {
+ // Input
+ src io.Reader
+
+ // Source buffer
+ srcBuf [bufLen + 1]byte // +1 for sentinel for common case of s.next()
+ srcPos int // reading position (srcBuf index)
+ srcEnd int // source end (srcBuf index)
+
+ // Source position
+ srcBufOffset int // byte offset of srcBuf[0] in source
+ line int // line count
+ column int // character count
+ lastLineLen int // length of last line in characters (for correct column reporting)
+ lastCharLen int // length of last character in bytes
+
+ // Token text buffer
+ // Typically, token text is stored completely in srcBuf, but in general
+ // the token text's head may be buffered in tokBuf while the token text's
+ // tail is stored in srcBuf.
+ tokBuf bytes.Buffer // token text head that is not in srcBuf anymore
+ tokPos int // token text tail position (srcBuf index); valid if >= 0
+ tokEnd int // token text tail end (srcBuf index)
+
+ // One character look-ahead
+ ch rune // character before current srcPos
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(s *Scanner, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // The Mode field controls which tokens are recognized. For instance,
+ // to recognize Ints, set the ScanInts bit in Mode. The field may be
+ // changed at any time.
+ Mode uint
+
+ // The Whitespace field controls which characters are recognized
+ // as white space. To recognize a character ch <= ' ' as white space,
+ // set the ch'th bit in Whitespace (the Scanner's behavior is undefined
+ // for values ch > ' '). The field may be changed at any time.
+ Whitespace uint64
+
+ // IsIdentRune is a predicate controlling the characters accepted
+ // as the ith rune in an identifier. The set of valid characters
+ // must not intersect with the set of white space characters.
+ // If no IsIdentRune function is set, regular Go identifiers are
+ // accepted instead. The field may be changed at any time.
+ IsIdentRune func(ch rune, i int) bool
+
+ // Start position of most recently scanned token; set by Scan.
+ // Calling Init or Next invalidates the position (Line == 0).
+ // The Filename field is always left untouched by the Scanner.
+ // If an error is reported (via Error) and Position is invalid,
+ // the scanner is not inside a token. Call Pos to obtain an error
+ // position in that case.
+ Position
+}
+
+// Init initializes a Scanner with a new source and returns s.
+// Error is set to nil, ErrorCount is set to 0, Mode is set to GoTokens,
+// and Whitespace is set to GoWhitespace.
+func (s *Scanner) Init(src io.Reader) *Scanner {
+ s.src = src
+
+ // initialize source buffer
+ // (the first call to next() will fill it by calling src.Read)
+ s.srcBuf[0] = utf8.RuneSelf // sentinel
+ s.srcPos = 0
+ s.srcEnd = 0
+
+ // initialize source position
+ s.srcBufOffset = 0
+ s.line = 1
+ s.column = 0
+ s.lastLineLen = 0
+ s.lastCharLen = 0
+
+ // initialize token text buffer
+ // (required for first call to next()).
+ s.tokPos = -1
+
+ // initialize one character look-ahead
+ s.ch = -1 // no char read yet
+
+ // initialize public fields
+ s.Error = nil
+ s.ErrorCount = 0
+ s.Mode = GoTokens
+ s.Whitespace = GoWhitespace
+ s.Line = 0 // invalidate token position
+
+ return s
+}
+
+// next reads and returns the next Unicode character. It is designed such
+// that only a minimal amount of work needs to be done in the common ASCII
+// case (one test to check for both ASCII and end-of-buffer, and one test
+// to check for newlines).
+func (s *Scanner) next() rune {
+ ch, width := rune(s.srcBuf[s.srcPos]), 1
+
+ if ch >= utf8.RuneSelf {
+ // uncommon case: not ASCII or not enough bytes
+ for s.srcPos+utf8.UTFMax > s.srcEnd && !utf8.FullRune(s.srcBuf[s.srcPos:s.srcEnd]) {
+ // not enough bytes: read some more, but first
+ // save away token text if any
+ if s.tokPos >= 0 {
+ s.tokBuf.Write(s.srcBuf[s.tokPos:s.srcPos])
+ s.tokPos = 0
+ // s.tokEnd is set by Scan()
+ }
+ // move unread bytes to beginning of buffer
+ copy(s.srcBuf[0:], s.srcBuf[s.srcPos:s.srcEnd])
+ s.srcBufOffset += s.srcPos
+ // read more bytes
+ // (an io.Reader must return io.EOF when it reaches
+ // the end of what it is reading - simply returning
+ // n == 0 will make this loop retry forever; but the
+ // error is in the reader implementation in that case)
+ i := s.srcEnd - s.srcPos
+ n, err := s.src.Read(s.srcBuf[i:bufLen])
+ s.srcPos = 0
+ s.srcEnd = i + n
+ s.srcBuf[s.srcEnd] = utf8.RuneSelf // sentinel
+ if err != nil {
+ if err != io.EOF {
+ s.error(err.Error())
+ }
+ if s.srcEnd == 0 {
+ if s.lastCharLen > 0 {
+ // previous character was not EOF
+ s.column++
+ }
+ s.lastCharLen = 0
+ return EOF
+ }
+ // If err == EOF, we won't be getting more
+ // bytes; break to avoid infinite loop. If
+ // err is something else, we don't know if
+ // we can get more bytes; thus also break.
+ break
+ }
+ }
+ // at least one byte
+ ch = rune(s.srcBuf[s.srcPos])
+ if ch >= utf8.RuneSelf {
+ // uncommon case: not ASCII
+ ch, width = utf8.DecodeRune(s.srcBuf[s.srcPos:s.srcEnd])
+ if ch == utf8.RuneError && width == 1 {
+ // advance for correct error position
+ s.srcPos += width
+ s.lastCharLen = width
+ s.column++
+ s.error("illegal UTF-8 encoding")
+ return ch
+ }
+ }
+ }
+
+ // advance
+ s.srcPos += width
+ s.lastCharLen = width
+ s.column++
+
+ // special situations
+ switch ch {
+ case 0:
+ // for compatibility with other tools
+ s.error("illegal character NUL")
+ case '\n':
+ s.line++
+ s.lastLineLen = s.column
+ s.column = 0
+ }
+
+ return ch
+}
+
+// Next reads and returns the next Unicode character.
+// It returns EOF at the end of the source. It reports
+// a read error by calling s.Error, if not nil; otherwise
+// it prints an error message to os.Stderr. Next does not
+// update the Scanner's Position field; use Pos() to
+// get the current position.
+func (s *Scanner) Next() rune {
+ s.tokPos = -1 // don't collect token text
+ s.Line = 0 // invalidate token position
+ ch := s.Peek()
+ s.ch = s.next()
+ return ch
+}
+
+// Peek returns the next Unicode character in the source without advancing
+// the scanner. It returns EOF if the scanner's position is at the last
+// character of the source.
+func (s *Scanner) Peek() rune {
+ if s.ch < 0 {
+ // this code is only run for the very first character
+ s.ch = s.next()
+ if s.ch == '\uFEFF' {
+ s.ch = s.next() // ignore BOM
+ }
+ }
+ return s.ch
+}
+
+func (s *Scanner) error(msg string) {
+ s.ErrorCount++
+ if s.Error != nil {
+ s.Error(s, msg)
+ return
+ }
+ pos := s.Position
+ if !pos.IsValid() {
+ pos = s.Pos()
+ }
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+func (s *Scanner) isIdentRune(ch rune, i int) bool {
+ if s.IsIdentRune != nil {
+ return s.IsIdentRune(ch, i)
+ }
+ return ch == '_' || unicode.IsLetter(ch) || unicode.IsDigit(ch) && i > 0
+}
+
+func (s *Scanner) scanIdentifier() rune {
+ // we know the zero'th rune is OK; start scanning at the next one
+ ch := s.next()
+ for i := 1; s.isIdentRune(ch, i); i++ {
+ ch = s.next()
+ }
+ return ch
+}
+
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
+
+func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
+
+func (s *Scanner) scanMantissa(ch rune) rune {
+ for isDecimal(ch) {
+ ch = s.next()
+ }
+ return ch
+}
+
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.scanMantissa(s.next())
+ }
+ return ch
+}
+
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+func (s *Scanner) scanNumber(ch rune) (rune, rune) {
+ // isDecimal(ch)
+ if ch == '0' {
+ // int or float
+ ch = s.next()
+ if ch == 'x' || ch == 'X' {
+ // hexadecimal int
+ ch = s.next()
+ hasMantissa := false
+ for digitVal(ch) < 16 {
+ ch = s.next()
+ hasMantissa = true
+ }
+ if !hasMantissa {
+ s.error("illegal hexadecimal number")
+ }
+ } else {
+ // octal int or float
+ has8or9 := false
+ for isDecimal(ch) {
+ if ch > '7' {
+ has8or9 = true
+ }
+ ch = s.next()
+ }
+ if s.Mode&ScanFloats != 0 && (ch == '.' || ch == 'e' || ch == 'E') {
+ // float
+ ch = s.scanFraction(ch)
+ ch = s.scanExponent(ch)
+ return Float, ch
+ }
+ // octal int
+ if has8or9 {
+ s.error("illegal octal number")
+ }
+ }
+ return Int, ch
+ }
+ // decimal int or float
+ ch = s.scanMantissa(ch)
+ if s.Mode&ScanFloats != 0 && (ch == '.' || ch == 'e' || ch == 'E') {
+ // float
+ ch = s.scanFraction(ch)
+ ch = s.scanExponent(ch)
+ return Float, ch
+ }
+ return Int, ch
+}
+
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.error("illegal char escape")
+ }
+ return ch
+}
+
+func (s *Scanner) scanEscape(quote rune) rune {
+ ch := s.next() // read character after '\\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
+ // nothing to do
+ ch = s.next()
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.error("illegal char escape")
+ }
+ return ch
+}
+
+func (s *Scanner) scanString(quote rune) (n int) {
+ ch := s.next() // read character after quote
+ for ch != quote {
+ if ch == '\n' || ch < 0 {
+ s.error("literal not terminated")
+ return
+ }
+ if ch == '\\' {
+ ch = s.scanEscape(quote)
+ } else {
+ ch = s.next()
+ }
+ n++
+ }
+ return
+}
+
+func (s *Scanner) scanRawString() {
+ ch := s.next() // read character after '`'
+ for ch != '`' {
+ if ch < 0 {
+ s.error("literal not terminated")
+ return
+ }
+ ch = s.next()
+ }
+}
+
+func (s *Scanner) scanChar() {
+ if s.scanString('\'') != 1 {
+ s.error("illegal char literal")
+ }
+}
+
+func (s *Scanner) scanComment(ch rune) rune {
+ // ch == '/' || ch == '*'
+ if ch == '/' {
+ // line comment
+ ch = s.next() // read character after "//"
+ for ch != '\n' && ch >= 0 {
+ ch = s.next()
+ }
+ return ch
+ }
+
+ // general comment
+ ch = s.next() // read character after "/*"
+ for {
+ if ch < 0 {
+ s.error("comment not terminated")
+ break
+ }
+ ch0 := ch
+ ch = s.next()
+ if ch0 == '*' && ch == '/' {
+ ch = s.next()
+ break
+ }
+ }
+ return ch
+}
+
+// Scan reads the next token or Unicode character from source and returns it.
+// It only recognizes tokens t for which the respective Mode bit (1<<-t) is set.
+// It returns EOF at the end of the source. It reports scanner errors (read and
+// token errors) by calling s.Error, if not nil; otherwise it prints an error
+// message to os.Stderr.
+func (s *Scanner) Scan() rune {
+ ch := s.Peek()
+
+ // reset token text position
+ s.tokPos = -1
+ s.Line = 0
+
+redo:
+ // skip white space
+ for s.Whitespace&(1<<uint(ch)) != 0 {
+ ch = s.next()
+ }
+
+ // start collecting token text
+ s.tokBuf.Reset()
+ s.tokPos = s.srcPos - s.lastCharLen
+
+ // set token position
+ // (this is a slightly optimized version of the code in Pos())
+ s.Offset = s.srcBufOffset + s.tokPos
+ if s.column > 0 {
+ // common case: last character was not a '\n'
+ s.Line = s.line
+ s.Column = s.column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.Line = s.line - 1
+ s.Column = s.lastLineLen
+ }
+
+ // determine token value
+ tok := ch
+ switch {
+ case s.isIdentRune(ch, 0):
+ if s.Mode&ScanIdents != 0 {
+ tok = Ident
+ ch = s.scanIdentifier()
+ } else {
+ ch = s.next()
+ }
+ case isDecimal(ch):
+ if s.Mode&(ScanInts|ScanFloats) != 0 {
+ tok, ch = s.scanNumber(ch)
+ } else {
+ ch = s.next()
+ }
+ default:
+ switch ch {
+ case '"':
+ if s.Mode&ScanStrings != 0 {
+ s.scanString('"')
+ tok = String
+ }
+ ch = s.next()
+ case '\'':
+ if s.Mode&ScanChars != 0 {
+ s.scanChar()
+ tok = Char
+ }
+ ch = s.next()
+ case '.':
+ ch = s.next()
+ if isDecimal(ch) && s.Mode&ScanFloats != 0 {
+ tok = Float
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '/':
+ ch = s.next()
+ if (ch == '/' || ch == '*') && s.Mode&ScanComments != 0 {
+ if s.Mode&SkipComments != 0 {
+ s.tokPos = -1 // don't collect token text
+ ch = s.scanComment(ch)
+ goto redo
+ }
+ ch = s.scanComment(ch)
+ tok = Comment
+ }
+ case '`':
+ if s.Mode&ScanRawStrings != 0 {
+ s.scanRawString()
+ tok = String
+ }
+ ch = s.next()
+ default:
+ ch = s.next()
+ }
+ }
+
+ // end of token text
+ s.tokEnd = s.srcPos - s.lastCharLen
+
+ s.ch = ch
+ return tok
+}
+
+// Pos returns the position of the character immediately after
+// the character or token returned by the last call to Next or Scan.
+func (s *Scanner) Pos() (pos Position) {
+ pos.Filename = s.Filename
+ pos.Offset = s.srcBufOffset + s.srcPos - s.lastCharLen
+ switch {
+ case s.column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.line
+ pos.Column = s.column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ pos.Line = s.line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// TokenText returns the string corresponding to the most recently scanned token.
+// Valid after calling Scan().
+func (s *Scanner) TokenText() string {
+ if s.tokPos < 0 {
+ // no token text
+ return ""
+ }
+
+ if s.tokEnd < 0 {
+ // if EOF was reached, s.tokEnd is set to -1 (s.srcPos == 0)
+ s.tokEnd = s.tokPos
+ }
+
+ if s.tokBuf.Len() == 0 {
+ // common case: the entire token text is still in srcBuf
+ return string(s.srcBuf[s.tokPos:s.tokEnd])
+ }
+
+ // part of the token text was saved in tokBuf: save the rest in
+ // tokBuf as well and return its content
+ s.tokBuf.Write(s.srcBuf[s.tokPos:s.tokEnd])
+ s.tokPos = s.tokEnd // ensure idempotency of TokenText() call
+ return s.tokBuf.String()
+}
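
The tests in the next file exercise this API end to end; as a quick orientation, here is a minimal, hedged sketch (not part of this CL) of the basic Scan loop wired up through the exported fields documented above. The file name "example.go", the input literal, and the output format are illustrative assumptions only.

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

func main() {
	const src = `x := 3.14 + foo("bar") // trailing comment`

	var s scanner.Scanner
	s.Init(strings.NewReader(src))
	s.Filename = "example.go"                         // illustrative; reported as part of each Position
	s.Mode = scanner.GoTokens &^ scanner.SkipComments // keep comments as Comment tokens
	s.Error = func(s *scanner.Scanner, msg string) {  // collect errors instead of writing to os.Stderr
		fmt.Printf("%s: %s\n", s.Pos(), msg)
	}

	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%s\t%s\t%q\n", s.Position, scanner.TokenString(tok), s.TokenText())
	}
}
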
diff --git a/src/text/scanner/scanner_test.go b/src/text/scanner/scanner_test.go
new file mode 100644
index 000000000..702fac2b1
--- /dev/null
+++ b/src/text/scanner/scanner_test.go
@@ -0,0 +1,618 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+ "unicode/utf8"
+)
+
+// A StringReader delivers its data one string segment at a time via Read.
+type StringReader struct {
+ data []string
+ step int
+}
+
+func (r *StringReader) Read(p []byte) (n int, err error) {
+ if r.step < len(r.data) {
+ s := r.data[r.step]
+ n = copy(p, s)
+ r.step++
+ } else {
+ err = io.EOF
+ }
+ return
+}
+
+func readRuneSegments(t *testing.T, segments []string) {
+ got := ""
+ want := strings.Join(segments, "")
+ s := new(Scanner).Init(&StringReader{data: segments})
+ for {
+ ch := s.Next()
+ if ch == EOF {
+ break
+ }
+ got += string(ch)
+ }
+ if got != want {
+ t.Errorf("segments=%v got=%s want=%s", segments, got, want)
+ }
+}
+
+var segmentList = [][]string{
+ {},
+ {""},
+ {"日", "本語"},
+ {"\u65e5", "\u672c", "\u8a9e"},
+ {"\U000065e5", " ", "\U0000672c", "\U00008a9e"},
+ {"\xe6", "\x97\xa5\xe6", "\x9c\xac\xe8\xaa\x9e"},
+ {"Hello", ", ", "World", "!"},
+ {"Hello", ", ", "", "World", "!"},
+}
+
+func TestNext(t *testing.T) {
+ for _, s := range segmentList {
+ readRuneSegments(t, s)
+ }
+}
+
+type token struct {
+ tok rune
+ text string
+}
+
+var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+
+var tokenList = []token{
+ {Comment, "// line comments"},
+ {Comment, "//"},
+ {Comment, "////"},
+ {Comment, "// comment"},
+ {Comment, "// /* comment */"},
+ {Comment, "// // comment //"},
+ {Comment, "//" + f100},
+
+ {Comment, "// general comments"},
+ {Comment, "/**/"},
+ {Comment, "/***/"},
+ {Comment, "/* comment */"},
+ {Comment, "/* // comment */"},
+ {Comment, "/* /* comment */"},
+ {Comment, "/*\n comment\n*/"},
+ {Comment, "/*" + f100 + "*/"},
+
+ {Comment, "// identifiers"},
+ {Ident, "a"},
+ {Ident, "a0"},
+ {Ident, "foobar"},
+ {Ident, "abc123"},
+ {Ident, "LGTM"},
+ {Ident, "_"},
+ {Ident, "_abc123"},
+ {Ident, "abc123_"},
+ {Ident, "_abc_123_"},
+ {Ident, "_äöü"},
+ {Ident, "_本"},
+ {Ident, "äöü"},
+ {Ident, "本"},
+ {Ident, "a۰۱۸"},
+ {Ident, "foo६४"},
+ {Ident, "bar9876"},
+ {Ident, f100},
+
+ {Comment, "// decimal ints"},
+ {Int, "0"},
+ {Int, "1"},
+ {Int, "9"},
+ {Int, "42"},
+ {Int, "1234567890"},
+
+ {Comment, "// octal ints"},
+ {Int, "00"},
+ {Int, "01"},
+ {Int, "07"},
+ {Int, "042"},
+ {Int, "01234567"},
+
+ {Comment, "// hexadecimal ints"},
+ {Int, "0x0"},
+ {Int, "0x1"},
+ {Int, "0xf"},
+ {Int, "0x42"},
+ {Int, "0x123456789abcDEF"},
+ {Int, "0x" + f100},
+ {Int, "0X0"},
+ {Int, "0X1"},
+ {Int, "0XF"},
+ {Int, "0X42"},
+ {Int, "0X123456789abcDEF"},
+ {Int, "0X" + f100},
+
+ {Comment, "// floats"},
+ {Float, "0."},
+ {Float, "1."},
+ {Float, "42."},
+ {Float, "01234567890."},
+ {Float, ".0"},
+ {Float, ".1"},
+ {Float, ".42"},
+ {Float, ".0123456789"},
+ {Float, "0.0"},
+ {Float, "1.0"},
+ {Float, "42.0"},
+ {Float, "01234567890.0"},
+ {Float, "0e0"},
+ {Float, "1e0"},
+ {Float, "42e0"},
+ {Float, "01234567890e0"},
+ {Float, "0E0"},
+ {Float, "1E0"},
+ {Float, "42E0"},
+ {Float, "01234567890E0"},
+ {Float, "0e+10"},
+ {Float, "1e-10"},
+ {Float, "42e+10"},
+ {Float, "01234567890e-10"},
+ {Float, "0E+10"},
+ {Float, "1E-10"},
+ {Float, "42E+10"},
+ {Float, "01234567890E-10"},
+
+ {Comment, "// chars"},
+ {Char, `' '`},
+ {Char, `'a'`},
+ {Char, `'本'`},
+ {Char, `'\a'`},
+ {Char, `'\b'`},
+ {Char, `'\f'`},
+ {Char, `'\n'`},
+ {Char, `'\r'`},
+ {Char, `'\t'`},
+ {Char, `'\v'`},
+ {Char, `'\''`},
+ {Char, `'\000'`},
+ {Char, `'\777'`},
+ {Char, `'\x00'`},
+ {Char, `'\xff'`},
+ {Char, `'\u0000'`},
+ {Char, `'\ufA16'`},
+ {Char, `'\U00000000'`},
+ {Char, `'\U0000ffAB'`},
+
+ {Comment, "// strings"},
+ {String, `" "`},
+ {String, `"a"`},
+ {String, `"本"`},
+ {String, `"\a"`},
+ {String, `"\b"`},
+ {String, `"\f"`},
+ {String, `"\n"`},
+ {String, `"\r"`},
+ {String, `"\t"`},
+ {String, `"\v"`},
+ {String, `"\""`},
+ {String, `"\000"`},
+ {String, `"\777"`},
+ {String, `"\x00"`},
+ {String, `"\xff"`},
+ {String, `"\u0000"`},
+ {String, `"\ufA16"`},
+ {String, `"\U00000000"`},
+ {String, `"\U0000ffAB"`},
+ {String, `"` + f100 + `"`},
+
+ {Comment, "// raw strings"},
+ {String, "``"},
+ {String, "`\\`"},
+ {String, "`" + "\n\n/* foobar */\n\n" + "`"},
+ {String, "`" + f100 + "`"},
+
+ {Comment, "// individual characters"},
+ // NUL character is not allowed
+ {'\x01', "\x01"},
+ {' ' - 1, string(' ' - 1)},
+ {'+', "+"},
+ {'/', "/"},
+ {'.', "."},
+ {'~', "~"},
+ {'(', "("},
+}
+
+func makeSource(pattern string) *bytes.Buffer {
+ var buf bytes.Buffer
+ for _, k := range tokenList {
+ fmt.Fprintf(&buf, pattern, k.text)
+ }
+ return &buf
+}
+
+func checkTok(t *testing.T, s *Scanner, line int, got, want rune, text string) {
+ if got != want {
+ t.Fatalf("tok = %s, want %s for %q", TokenString(got), TokenString(want), text)
+ }
+ if s.Line != line {
+ t.Errorf("line = %d, want %d for %q", s.Line, line, text)
+ }
+ stext := s.TokenText()
+ if stext != text {
+ t.Errorf("text = %q, want %q", stext, text)
+ } else {
+ // check idempotency of TokenText() call
+ stext = s.TokenText()
+ if stext != text {
+ t.Errorf("text = %q, want %q (idempotency check)", stext, text)
+ }
+ }
+}
+
+func countNewlines(s string) int {
+ n := 0
+ for _, ch := range s {
+ if ch == '\n' {
+ n++
+ }
+ }
+ return n
+}
+
+func testScan(t *testing.T, mode uint) {
+ s := new(Scanner).Init(makeSource(" \t%s\n"))
+ s.Mode = mode
+ tok := s.Scan()
+ line := 1
+ for _, k := range tokenList {
+ if mode&SkipComments == 0 || k.tok != Comment {
+ checkTok(t, s, line, tok, k.tok, k.text)
+ tok = s.Scan()
+ }
+ line += countNewlines(k.text) + 1 // each token is on a new line
+ }
+ checkTok(t, s, line, tok, EOF, "")
+}
+
+func TestScan(t *testing.T) {
+ testScan(t, GoTokens)
+ testScan(t, GoTokens&^SkipComments)
+}
+
+func TestPosition(t *testing.T) {
+ src := makeSource("\t\t\t\t%s\n")
+ s := new(Scanner).Init(src)
+ s.Mode = GoTokens &^ SkipComments
+ s.Scan()
+ pos := Position{"", 4, 1, 5}
+ for _, k := range tokenList {
+ if s.Offset != pos.Offset {
+ t.Errorf("offset = %d, want %d for %q", s.Offset, pos.Offset, k.text)
+ }
+ if s.Line != pos.Line {
+ t.Errorf("line = %d, want %d for %q", s.Line, pos.Line, k.text)
+ }
+ if s.Column != pos.Column {
+ t.Errorf("column = %d, want %d for %q", s.Column, pos.Column, k.text)
+ }
+ pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline
+ pos.Line += countNewlines(k.text) + 1 // each token is on a new line
+ s.Scan()
+ }
+ // make sure there were no token-internal errors reported by scanner
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func TestScanZeroMode(t *testing.T) {
+ src := makeSource("%s\n")
+ str := src.String()
+ s := new(Scanner).Init(src)
+ s.Mode = 0 // don't recognize any token classes
+ s.Whitespace = 0 // don't skip any whitespace
+ tok := s.Scan()
+ for i, ch := range str {
+ if tok != ch {
+ t.Fatalf("%d. tok = %s, want %s", i, TokenString(tok), TokenString(ch))
+ }
+ tok = s.Scan()
+ }
+ if tok != EOF {
+ t.Fatalf("tok = %s, want EOF", TokenString(tok))
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func testScanSelectedMode(t *testing.T, mode uint, class rune) {
+ src := makeSource("%s\n")
+ s := new(Scanner).Init(src)
+ s.Mode = mode
+ tok := s.Scan()
+ for tok != EOF {
+ if tok < 0 && tok != class {
+ t.Fatalf("tok = %s, want %s", TokenString(tok), TokenString(class))
+ }
+ tok = s.Scan()
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func TestScanSelectedMask(t *testing.T) {
+ testScanSelectedMode(t, 0, 0)
+ testScanSelectedMode(t, ScanIdents, Ident)
+ // Don't test ScanInts and ScanFloats since some parts of
+ // the floats in the source look like (illegal) octal ints
+ // and ScanFloats may return either Int or Float.
+ testScanSelectedMode(t, ScanChars, Char)
+ testScanSelectedMode(t, ScanStrings, String)
+ testScanSelectedMode(t, SkipComments, 0)
+ testScanSelectedMode(t, ScanComments, Comment)
+}
+
+func TestScanCustomIdent(t *testing.T) {
+ const src = "faab12345 a12b123 a12 3b"
+ s := new(Scanner).Init(strings.NewReader(src))
+ // ident = ( 'a' | 'b' ) { digit } .
+ // digit = '0' .. '3' .
+ // with a maximum length of 4
+ s.IsIdentRune = func(ch rune, i int) bool {
+ return i == 0 && (ch == 'a' || ch == 'b') || 0 < i && i < 4 && '0' <= ch && ch <= '3'
+ }
+ checkTok(t, s, 1, s.Scan(), 'f', "f")
+ checkTok(t, s, 1, s.Scan(), Ident, "a")
+ checkTok(t, s, 1, s.Scan(), Ident, "a")
+ checkTok(t, s, 1, s.Scan(), Ident, "b123")
+ checkTok(t, s, 1, s.Scan(), Int, "45")
+ checkTok(t, s, 1, s.Scan(), Ident, "a12")
+ checkTok(t, s, 1, s.Scan(), Ident, "b123")
+ checkTok(t, s, 1, s.Scan(), Ident, "a12")
+ checkTok(t, s, 1, s.Scan(), Int, "3")
+ checkTok(t, s, 1, s.Scan(), Ident, "b")
+ checkTok(t, s, 1, s.Scan(), EOF, "")
+}
+
+func TestScanNext(t *testing.T) {
+ const BOM = '\uFEFF'
+ BOMs := string(BOM)
+ s := new(Scanner).Init(strings.NewReader(BOMs + "if a == bcd /* com" + BOMs + "ment */ {\n\ta += c\n}" + BOMs + "// line comment ending in eof"))
+ checkTok(t, s, 1, s.Scan(), Ident, "if") // the first BOM is ignored
+ checkTok(t, s, 1, s.Scan(), Ident, "a")
+ checkTok(t, s, 1, s.Scan(), '=', "=")
+ checkTok(t, s, 0, s.Next(), '=', "")
+ checkTok(t, s, 0, s.Next(), ' ', "")
+ checkTok(t, s, 0, s.Next(), 'b', "")
+ checkTok(t, s, 1, s.Scan(), Ident, "cd")
+ checkTok(t, s, 1, s.Scan(), '{', "{")
+ checkTok(t, s, 2, s.Scan(), Ident, "a")
+ checkTok(t, s, 2, s.Scan(), '+', "+")
+ checkTok(t, s, 0, s.Next(), '=', "")
+ checkTok(t, s, 2, s.Scan(), Ident, "c")
+ checkTok(t, s, 3, s.Scan(), '}', "}")
+ checkTok(t, s, 3, s.Scan(), BOM, BOMs)
+ checkTok(t, s, 3, s.Scan(), -1, "")
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func TestScanWhitespace(t *testing.T) {
+ var buf bytes.Buffer
+ var ws uint64
+ // start at 1, NUL character is not allowed
+ for ch := byte(1); ch < ' '; ch++ {
+ buf.WriteByte(ch)
+ ws |= 1 << ch
+ }
+ const orig = 'x'
+ buf.WriteByte(orig)
+
+ s := new(Scanner).Init(&buf)
+ s.Mode = 0
+ s.Whitespace = ws
+ tok := s.Scan()
+ if tok != orig {
+ t.Errorf("tok = %s, want %s", TokenString(tok), TokenString(orig))
+ }
+}
+
+func testError(t *testing.T, src, pos, msg string, tok rune) {
+ s := new(Scanner).Init(strings.NewReader(src))
+ errorCalled := false
+ s.Error = func(s *Scanner, m string) {
+ if !errorCalled {
+ // only look at first error
+ if p := s.Pos().String(); p != pos {
+ t.Errorf("pos = %q, want %q for %q", p, pos, src)
+ }
+ if m != msg {
+ t.Errorf("msg = %q, want %q for %q", m, msg, src)
+ }
+ errorCalled = true
+ }
+ }
+ tk := s.Scan()
+ if tk != tok {
+ t.Errorf("tok = %s, want %s for %q", TokenString(tk), TokenString(tok), src)
+ }
+ if !errorCalled {
+ t.Errorf("error handler not called for %q", src)
+ }
+ if s.ErrorCount == 0 {
+ t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
+ }
+}
+
+func TestError(t *testing.T) {
+ testError(t, "\x00", "1:1", "illegal character NUL", 0)
+ testError(t, "\x80", "1:1", "illegal UTF-8 encoding", utf8.RuneError)
+ testError(t, "\xff", "1:1", "illegal UTF-8 encoding", utf8.RuneError)
+
+ testError(t, "a\x00", "1:2", "illegal character NUL", Ident)
+ testError(t, "ab\x80", "1:3", "illegal UTF-8 encoding", Ident)
+ testError(t, "abc\xff", "1:4", "illegal UTF-8 encoding", Ident)
+
+ testError(t, `"a`+"\x00", "1:3", "illegal character NUL", String)
+ testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", String)
+ testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", String)
+
+ testError(t, "`a"+"\x00", "1:3", "illegal character NUL", String)
+ testError(t, "`ab"+"\x80", "1:4", "illegal UTF-8 encoding", String)
+ testError(t, "`abc"+"\xff", "1:5", "illegal UTF-8 encoding", String)
+
+ testError(t, `'\"'`, "1:3", "illegal char escape", Char)
+ testError(t, `"\'"`, "1:3", "illegal char escape", String)
+
+ testError(t, `01238`, "1:6", "illegal octal number", Int)
+ testError(t, `01238123`, "1:9", "illegal octal number", Int)
+ testError(t, `0x`, "1:3", "illegal hexadecimal number", Int)
+ testError(t, `0xg`, "1:3", "illegal hexadecimal number", Int)
+ testError(t, `'aa'`, "1:4", "illegal char literal", Char)
+
+ testError(t, `'`, "1:2", "literal not terminated", Char)
+ testError(t, `'`+"\n", "1:2", "literal not terminated", Char)
+ testError(t, `"abc`, "1:5", "literal not terminated", String)
+ testError(t, `"abc`+"\n", "1:5", "literal not terminated", String)
+ testError(t, "`abc\n", "2:1", "literal not terminated", String)
+ testError(t, `/*/`, "1:4", "comment not terminated", EOF)
+}
+
+// An errReader returns (0, err) where err is not io.EOF.
+type errReader struct{}
+
+func (errReader) Read(b []byte) (int, error) {
+ return 0, io.ErrNoProgress // some error that is not io.EOF
+}
+
+func TestIOError(t *testing.T) {
+ s := new(Scanner).Init(errReader{})
+ errorCalled := false
+ s.Error = func(s *Scanner, msg string) {
+ if !errorCalled {
+ if want := io.ErrNoProgress.Error(); msg != want {
+ t.Errorf("msg = %q, want %q", msg, want)
+ }
+ errorCalled = true
+ }
+ }
+ tok := s.Scan()
+ if tok != EOF {
+ t.Errorf("tok = %s, want EOF", TokenString(tok))
+ }
+ if !errorCalled {
+ t.Errorf("error handler not called")
+ }
+}
+
+func checkPos(t *testing.T, got, want Position) {
+ if got.Offset != want.Offset || got.Line != want.Line || got.Column != want.Column {
+ t.Errorf("got offset, line, column = %d, %d, %d; want %d, %d, %d",
+ got.Offset, got.Line, got.Column, want.Offset, want.Line, want.Column)
+ }
+}
+
+func checkNextPos(t *testing.T, s *Scanner, offset, line, column int, char rune) {
+ if ch := s.Next(); ch != char {
+ t.Errorf("ch = %s, want %s", TokenString(ch), TokenString(char))
+ }
+ want := Position{Offset: offset, Line: line, Column: column}
+ checkPos(t, s.Pos(), want)
+}
+
+func checkScanPos(t *testing.T, s *Scanner, offset, line, column int, char rune) {
+ want := Position{Offset: offset, Line: line, Column: column}
+ checkPos(t, s.Pos(), want)
+ if ch := s.Scan(); ch != char {
+ t.Errorf("ch = %s, want %s", TokenString(ch), TokenString(char))
+ if string(ch) != s.TokenText() {
+ t.Errorf("tok = %q, want %q", s.TokenText(), string(ch))
+ }
+ }
+ checkPos(t, s.Position, want)
+}
+
+func TestPos(t *testing.T) {
+ // corner case: empty source
+ s := new(Scanner).Init(strings.NewReader(""))
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+ s.Peek() // peek doesn't affect the position
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+
+ // corner case: source with only a newline
+ s = new(Scanner).Init(strings.NewReader("\n"))
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+ checkNextPos(t, s, 1, 2, 1, '\n')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 1, 2, 1, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+
+ // corner case: source with only a single character
+ s = new(Scanner).Init(strings.NewReader("本"))
+ checkPos(t, s.Pos(), Position{Offset: 0, Line: 1, Column: 1})
+ checkNextPos(t, s, 3, 1, 2, '本')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 3, 1, 2, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+
+ // positions after calling Next
+ s = new(Scanner).Init(strings.NewReader(" foo६४ \n\n本語\n"))
+ checkNextPos(t, s, 1, 1, 2, ' ')
+ s.Peek() // peek doesn't affect the position
+ checkNextPos(t, s, 2, 1, 3, ' ')
+ checkNextPos(t, s, 3, 1, 4, 'f')
+ checkNextPos(t, s, 4, 1, 5, 'o')
+ checkNextPos(t, s, 5, 1, 6, 'o')
+ checkNextPos(t, s, 8, 1, 7, '६')
+ checkNextPos(t, s, 11, 1, 8, '४')
+ checkNextPos(t, s, 12, 1, 9, ' ')
+ checkNextPos(t, s, 13, 1, 10, ' ')
+ checkNextPos(t, s, 14, 2, 1, '\n')
+ checkNextPos(t, s, 15, 3, 1, '\n')
+ checkNextPos(t, s, 18, 3, 2, '本')
+ checkNextPos(t, s, 21, 3, 3, '語')
+ checkNextPos(t, s, 22, 4, 1, '\n')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 22, 4, 1, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+
+ // positions after calling Scan
+ s = new(Scanner).Init(strings.NewReader("abc\n本語\n\nx"))
+ s.Mode = 0
+ s.Whitespace = 0
+ checkScanPos(t, s, 0, 1, 1, 'a')
+ s.Peek() // peek doesn't affect the position
+ checkScanPos(t, s, 1, 1, 2, 'b')
+ checkScanPos(t, s, 2, 1, 3, 'c')
+ checkScanPos(t, s, 3, 1, 4, '\n')
+ checkScanPos(t, s, 4, 2, 1, '本')
+ checkScanPos(t, s, 7, 2, 2, '語')
+ checkScanPos(t, s, 10, 2, 3, '\n')
+ checkScanPos(t, s, 11, 3, 1, '\n')
+ checkScanPos(t, s, 12, 4, 1, 'x')
+ // after EOF position doesn't change
+ for i := 10; i > 0; i-- {
+ checkScanPos(t, s, 13, 4, 2, EOF)
+ }
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
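
TestScanWhitespace above assembles the Whitespace bitmask by hand. A hedged sketch of the same idea in application code, assuming newlines should be reported as tokens while spaces, tabs, and carriage returns are still skipped (the input string is an assumption):

package main

import (
	"fmt"
	"strings"
	"text/scanner"
)

func main() {
	var s scanner.Scanner
	s.Init(strings.NewReader("a b\nc"))
	// Clear the '\n' bit of GoWhitespace so Scan returns newlines
	// as individual characters instead of skipping them.
	s.Whitespace = scanner.GoWhitespace &^ (1 << '\n')

	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%s %q\n", scanner.TokenString(tok), s.TokenText())
	}
}
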
diff --git a/src/text/tabwriter/example_test.go b/src/text/tabwriter/example_test.go
new file mode 100644
index 000000000..20443cb1f
--- /dev/null
+++ b/src/text/tabwriter/example_test.go
@@ -0,0 +1,38 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tabwriter_test
+
+import (
+ "fmt"
+ "os"
+ "text/tabwriter"
+)
+
+func ExampleWriter_Init() {
+ w := new(tabwriter.Writer)
+
+ // Format in tab-separated columns with a tab stop of 8.
+ w.Init(os.Stdout, 0, 8, 0, '\t', 0)
+ fmt.Fprintln(w, "a\tb\tc\td\t.")
+ fmt.Fprintln(w, "123\t12345\t1234567\t123456789\t.")
+ fmt.Fprintln(w)
+ w.Flush()
+
+ // Format right-aligned in space-separated columns of minimal width 5
+ // and at least one blank of padding (so wider column entries do not
+ // touch each other).
+ w.Init(os.Stdout, 5, 0, 1, ' ', tabwriter.AlignRight)
+ fmt.Fprintln(w, "a\tb\tc\td\t.")
+ fmt.Fprintln(w, "123\t12345\t1234567\t123456789\t.")
+ fmt.Fprintln(w)
+ w.Flush()
+
+ // output:
+ // a b c d .
+ // 123 12345 1234567 123456789 .
+ //
+ // a b c d.
+ // 123 12345 1234567 123456789.
+}
diff --git a/src/text/tabwriter/tabwriter.go b/src/text/tabwriter/tabwriter.go
new file mode 100644
index 000000000..c0c32d5de
--- /dev/null
+++ b/src/text/tabwriter/tabwriter.go
@@ -0,0 +1,558 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tabwriter implements a write filter (tabwriter.Writer) that
+// translates tabbed columns in input into properly aligned text.
+//
+// The package uses the Elastic Tabstops algorithm described at
+// http://nickgravgaard.com/elastictabstops/index.html.
+//
+package tabwriter
+
+import (
+ "bytes"
+ "io"
+ "unicode/utf8"
+)
+
+// ----------------------------------------------------------------------------
+// Filter implementation
+
+// A cell represents a segment of text terminated by tabs or line breaks.
+// The text itself is stored in a separate buffer; cell only describes the
+// segment's size in bytes, its width in runes, and whether it's an htab
+// ('\t') terminated cell.
+//
+type cell struct {
+ size int // cell size in bytes
+ width int // cell width in runes
+ htab bool // true if the cell is terminated by an htab ('\t')
+}
+
+// A Writer is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// The Writer treats incoming bytes as UTF-8 encoded text consisting
+// of cells terminated by (horizontal or vertical) tabs or line
+// breaks (newline or formfeed characters). Cells in adjacent lines
+// constitute a column. The Writer inserts padding as needed to
+// make all cells in a column have the same width, effectively
+// aligning the columns. It assumes that all characters have the
+// same width except for tabs for which a tabwidth must be specified.
+// Note that cells are tab-terminated, not tab-separated: trailing
+// non-tab text at the end of a line does not form a column cell.
+//
+// The Writer assumes that all Unicode code points have the same width;
+// this may not be true in some fonts.
+//
+// If DiscardEmptyColumns is set, empty columns that are terminated
+// entirely by vertical (or "soft") tabs are discarded. Columns
+// terminated by horizontal (or "hard") tabs are not affected by
+// this flag.
+//
+// If a Writer is configured to filter HTML, HTML tags and entities
+// are passed through. The widths of tags and entities are
+// assumed to be zero (tags) and one (entities) for formatting purposes.
+//
+// A segment of text may be escaped by bracketing it with Escape
+// characters. The tabwriter passes escaped text segments through
+// unchanged. In particular, it does not interpret any tabs or line
+// breaks within the segment. If the StripEscape flag is set, the
+// Escape characters are stripped from the output; otherwise they
+// are passed through as well. For the purpose of formatting, the
+// width of the escaped text is always computed excluding the Escape
+// characters.
+//
+// The formfeed character ('\f') acts like a newline but it also
+// terminates all columns in the current line (effectively calling
+// Flush). Cells in the next line start new columns. Unless found
+// inside an HTML tag or inside an escaped text segment, formfeed
+// characters appear as newlines in the output.
+//
+// The Writer must buffer input internally, because proper spacing
+// of one line may depend on the cells in future lines. Clients must
+// call Flush when done calling Write.
+//
+type Writer struct {
+ // configuration
+ output io.Writer
+ minwidth int
+ tabwidth int
+ padding int
+ padbytes [8]byte
+ flags uint
+
+ // current state
+ buf bytes.Buffer // collected text excluding tabs or line breaks
+ pos int // buffer position up to which cell.width of incomplete cell has been computed
+ cell cell // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
+ endChar byte // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
+ lines [][]cell // list of lines; each line is a list of cells
+ widths []int // list of column widths in runes - re-used during formatting
+}
+
+func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }
+
+// Reset the current state.
+func (b *Writer) reset() {
+ b.buf.Reset()
+ b.pos = 0
+ b.cell = cell{}
+ b.endChar = 0
+ b.lines = b.lines[0:0]
+ b.widths = b.widths[0:0]
+ b.addLine()
+}
+
+// Internal representation (current state):
+//
+// - all text written is appended to buf; tabs and line breaks are stripped away
+// - at any given time there is a (possibly empty) incomplete cell at the end
+// (the cell starts after a tab or line break)
+// - cell.size is the number of bytes belonging to the cell so far
+// - cell.width is text width in runes of that cell from the start of the cell to
+// position pos; html tags and entities are excluded from this width if html
+// filtering is enabled
+// - the sizes and widths of processed text are kept in the lines list
+// which contains a list of cells for each line
+// - the widths list is a temporary list with current widths used during
+// formatting; it is kept in Writer because it's re-used
+//
+// |<---------- size ---------->|
+// | |
+// |<- width ->|<- ignored ->| |
+// | | | |
+// [---processed---tab------------<tag>...</tag>...]
+// ^ ^ ^
+// | | |
+// buf start of incomplete cell pos
+
+// Formatting can be controlled with these flags.
+const (
+ // Ignore html tags and treat entities (starting with '&'
+ // and ending in ';') as single characters (width = 1).
+ FilterHTML uint = 1 << iota
+
+ // Strip Escape characters bracketing escaped text segments
+ // instead of passing them through unchanged with the text.
+ StripEscape
+
+ // Force right-alignment of cell content.
+ // Default is left-alignment.
+ AlignRight
+
+ // Handle empty columns as if they were not present in
+ // the input in the first place.
+ DiscardEmptyColumns
+
+ // Always use tabs for indentation columns (i.e., padding of
+ // leading empty cells on the left) independent of padchar.
+ TabIndent
+
+ // Print a vertical bar ('|') between columns (after formatting).
+ // Discarded columns appear as zero-width columns ("||").
+ Debug
+)
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+// minwidth minimal cell width including any padding
+// tabwidth width of tab characters (equivalent number of spaces)
+// padding padding added to a cell before computing its width
+// padchar ASCII char used for padding
+// if padchar == '\t', the Writer will assume that the
+// width of a '\t' in the formatted output is tabwidth,
+// and cells are left-aligned independent of the AlignRight flag
+// (for correct-looking results, tabwidth must correspond
+// to the tab width in the viewer displaying the result)
+// flags formatting control
+//
+func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+ if minwidth < 0 || tabwidth < 0 || padding < 0 {
+ panic("negative minwidth, tabwidth, or padding")
+ }
+ b.output = output
+ b.minwidth = minwidth
+ b.tabwidth = tabwidth
+ b.padding = padding
+ for i := range b.padbytes {
+ b.padbytes[i] = padchar
+ }
+ if padchar == '\t' {
+ // tab padding enforces left-alignment
+ flags &^= AlignRight
+ }
+ b.flags = flags
+
+ b.reset()
+
+ return b
+}
+
+// debugging support (keep code around)
+func (b *Writer) dump() {
+ pos := 0
+ for i, line := range b.lines {
+ print("(", i, ") ")
+ for _, c := range line {
+ print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]")
+ pos += c.size
+ }
+ print("\n")
+ }
+ print("\n")
+}
+
+// local error wrapper so we can distinguish errors we want to return
+// as errors from genuine panics (which we don't want to return as errors)
+type osError struct {
+ err error
+}
+
+func (b *Writer) write0(buf []byte) {
+ n, err := b.output.Write(buf)
+ if n != len(buf) && err == nil {
+ err = io.ErrShortWrite
+ }
+ if err != nil {
+ panic(osError{err})
+ }
+}
+
+func (b *Writer) writeN(src []byte, n int) {
+ for n > len(src) {
+ b.write0(src)
+ n -= len(src)
+ }
+ b.write0(src[0:n])
+}
+
+var (
+ newline = []byte{'\n'}
+ tabs = []byte("\t\t\t\t\t\t\t\t")
+)
+
+func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
+ if b.padbytes[0] == '\t' || useTabs {
+ // padding is done with tabs
+ if b.tabwidth == 0 {
+ return // tabs have no width - can't do any padding
+ }
+ // make cellw the smallest multiple of b.tabwidth
+ cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
+ n := cellw - textw // amount of padding
+ if n < 0 {
+ panic("internal error")
+ }
+ b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
+ return
+ }
+
+ // padding is done with non-tab characters
+ b.writeN(b.padbytes[0:], cellw-textw)
+}
+
+var vbar = []byte{'|'}
+
+func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
+ pos = pos0
+ for i := line0; i < line1; i++ {
+ line := b.lines[i]
+
+ // if TabIndent is set, use tabs to pad leading empty cells
+ useTabs := b.flags&TabIndent != 0
+
+ for j, c := range line {
+ if j > 0 && b.flags&Debug != 0 {
+ // indicate column break
+ b.write0(vbar)
+ }
+
+ if c.size == 0 {
+ // empty cell
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], useTabs)
+ }
+ } else {
+ // non-empty cell
+ useTabs = false
+ if b.flags&AlignRight == 0 { // align left
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ pos += c.size
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], false)
+ }
+ } else { // align right
+ if j < len(b.widths) {
+ b.writePadding(c.width, b.widths[j], false)
+ }
+ b.write0(b.buf.Bytes()[pos : pos+c.size])
+ pos += c.size
+ }
+ }
+ }
+
+ if i+1 == len(b.lines) {
+ // last buffered line - we don't have a newline, so just write
+ // any outstanding buffered data
+ b.write0(b.buf.Bytes()[pos : pos+b.cell.size])
+ pos += b.cell.size
+ } else {
+ // not the last line - write newline
+ b.write0(newline)
+ }
+ }
+ return
+}
+
+// Format the text between line0 and line1 (excluding line1); pos
+// is the buffer position corresponding to the beginning of line0.
+// Returns the buffer position corresponding to the beginning of
+// line1.
+//
+func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
+ pos = pos0
+ column := len(b.widths)
+ for this := line0; this < line1; this++ {
+ line := b.lines[this]
+
+ if column < len(line)-1 {
+ // cell exists in this column => this line
+ // has more cells than the previous line
+ // (the last cell per line is ignored because cells are
+ // tab-terminated; the last cell per line describes the
+ // text before the newline/formfeed and does not belong
+ // to a column)
+
+ // print unprinted lines until beginning of block
+ pos = b.writeLines(pos, line0, this)
+ line0 = this
+
+ // column block begin
+ width := b.minwidth // minimal column width
+ discardable := true // true if all cells in this column are empty and "soft"
+ for ; this < line1; this++ {
+ line = b.lines[this]
+ if column < len(line)-1 {
+ // cell exists in this column
+ c := line[column]
+ // update width
+ if w := c.width + b.padding; w > width {
+ width = w
+ }
+ // update discardable
+ if c.width > 0 || c.htab {
+ discardable = false
+ }
+ } else {
+ break
+ }
+ }
+ // column block end
+
+ // discard empty columns if necessary
+ if discardable && b.flags&DiscardEmptyColumns != 0 {
+ width = 0
+ }
+
+ // format and print all columns to the right of this column
+ // (we know the widths of this column and all columns to the left)
+ b.widths = append(b.widths, width) // push width
+ pos = b.format(pos, line0, this)
+ b.widths = b.widths[0 : len(b.widths)-1] // pop width
+ line0 = this
+ }
+ }
+
+ // print unprinted lines until end
+ return b.writeLines(pos, line0, line1)
+}
+
+// Append text to current cell.
+func (b *Writer) append(text []byte) {
+ b.buf.Write(text)
+ b.cell.size += len(text)
+}
+
+// Update the cell width.
+func (b *Writer) updateWidth() {
+ b.cell.width += utf8.RuneCount(b.buf.Bytes()[b.pos:b.buf.Len()])
+ b.pos = b.buf.Len()
+}
+
+// To escape a text segment, bracket it with Escape characters.
+// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
+// does not terminate a cell and constitutes a single character of
+// width one for formatting purposes.
+//
+// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
+//
+const Escape = '\xff'
+
+// Start escaped mode.
+func (b *Writer) startEscape(ch byte) {
+ switch ch {
+ case Escape:
+ b.endChar = Escape
+ case '<':
+ b.endChar = '>'
+ case '&':
+ b.endChar = ';'
+ }
+}
+
+// Terminate escaped mode. If the escaped text was an HTML tag, its width
+// is assumed to be zero for formatting purposes; if it was an HTML entity,
+// its width is assumed to be one. In all other cases, the width is the
+// unicode width of the text.
+//
+func (b *Writer) endEscape() {
+ switch b.endChar {
+ case Escape:
+ b.updateWidth()
+ if b.flags&StripEscape == 0 {
+ b.cell.width -= 2 // don't count the Escape chars
+ }
+ case '>': // tag of zero width
+ case ';':
+ b.cell.width++ // entity, count as one rune
+ }
+ b.pos = b.buf.Len()
+ b.endChar = 0
+}
+
+// Terminate the current cell by adding it to the list of cells of the
+// current line. Returns the number of cells in that line.
+//
+func (b *Writer) terminateCell(htab bool) int {
+ b.cell.htab = htab
+ line := &b.lines[len(b.lines)-1]
+ *line = append(*line, b.cell)
+ b.cell = cell{}
+ return len(*line)
+}
+
+func handlePanic(err *error, op string) {
+ if e := recover(); e != nil {
+ if nerr, ok := e.(osError); ok {
+ *err = nerr.err
+ return
+ }
+ panic("tabwriter: panic during " + op)
+ }
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (b *Writer) Flush() (err error) {
+ defer b.reset() // even in the presence of errors
+ defer handlePanic(&err, "Flush")
+
+ // add current cell if not empty
+ if b.cell.size > 0 {
+ if b.endChar != 0 {
+ // inside escape - terminate it even if incomplete
+ b.endEscape()
+ }
+ b.terminateCell(false)
+ }
+
+ // format contents of buffer
+ b.format(0, 0, len(b.lines))
+
+ return
+}
+
+var hbar = []byte("---\n")
+
+// Write writes buf to the writer b.
+// The only errors returned are ones encountered
+// while writing to the underlying output stream.
+//
+func (b *Writer) Write(buf []byte) (n int, err error) {
+ defer handlePanic(&err, "Write")
+
+ // split text into cells
+ n = 0
+ for i, ch := range buf {
+ if b.endChar == 0 {
+ // outside escape
+ switch ch {
+ case '\t', '\v', '\n', '\f':
+ // end of cell
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i + 1 // ch consumed
+ ncells := b.terminateCell(ch == '\t')
+ if ch == '\n' || ch == '\f' {
+ // terminate line
+ b.addLine()
+ if ch == '\f' || ncells == 1 {
+ // A '\f' always forces a flush. Otherwise, the previous
+ // line has only one cell, which has no impact on the
+ // formatting of the following lines (the last cell per
+ // line is ignored by format()), so the Writer contents
+ // can be flushed.
+ if err = b.Flush(); err != nil {
+ return
+ }
+ if ch == '\f' && b.flags&Debug != 0 {
+ // indicate section break
+ b.write0(hbar)
+ }
+ }
+ }
+
+ case Escape:
+ // start of escaped sequence
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i
+ if b.flags&StripEscape != 0 {
+ n++ // strip Escape
+ }
+ b.startEscape(Escape)
+
+ case '<', '&':
+ // possibly an html tag/entity
+ if b.flags&FilterHTML != 0 {
+ // begin of tag/entity
+ b.append(buf[n:i])
+ b.updateWidth()
+ n = i
+ b.startEscape(ch)
+ }
+ }
+
+ } else {
+ // inside escape
+ if ch == b.endChar {
+ // end of tag/entity
+ j := i + 1
+ if ch == Escape && b.flags&StripEscape != 0 {
+ j = i // strip Escape
+ }
+ b.append(buf[n:j])
+ n = i + 1 // ch consumed
+ b.endEscape()
+ }
+ }
+ }
+
+ // append leftover text
+ b.append(buf[n:])
+ n = len(buf)
+ return
+}
+
+// NewWriter allocates and initializes a new tabwriter.Writer.
+// The parameters are the same as for the Init function.
+//
+func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+ return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
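
As a companion to the Init documentation above, a small hedged sketch (not part of this CL) showing the NewWriter shorthand together with the Debug flag; the column contents are illustrative only.

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// NewWriter is equivalent to new(tabwriter.Writer).Init(...):
	// minwidth=5, tabwidth=0, padding=1, padchar=' '; Debug draws '|' between columns.
	w := tabwriter.NewWriter(os.Stdout, 5, 0, 1, ' ', tabwriter.Debug)

	// Cells are tab-terminated, so each data column ends with '\t'.
	fmt.Fprintln(w, "file\tlines\t")
	fmt.Fprintln(w, "scanner.go\t693\t")
	fmt.Fprintln(w, "tabwriter.go\t558\t")
	w.Flush() // nothing is written to os.Stdout until Flush
}
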
diff --git a/src/text/tabwriter/tabwriter_test.go b/src/text/tabwriter/tabwriter_test.go
new file mode 100644
index 000000000..9d3111e2c
--- /dev/null
+++ b/src/text/tabwriter/tabwriter_test.go
@@ -0,0 +1,652 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tabwriter_test
+
+import (
+ "io"
+ "testing"
+ . "text/tabwriter"
+)
+
+type buffer struct {
+ a []byte
+}
+
+func (b *buffer) init(n int) { b.a = make([]byte, 0, n) }
+
+func (b *buffer) clear() { b.a = b.a[0:0] }
+
+func (b *buffer) Write(buf []byte) (written int, err error) {
+ n := len(b.a)
+ m := len(buf)
+ if n+m <= cap(b.a) {
+ b.a = b.a[0 : n+m]
+ for i := 0; i < m; i++ {
+ b.a[n+i] = buf[i]
+ }
+ } else {
+ panic("buffer.Write: buffer too small")
+ }
+ return len(buf), nil
+}
+
+func (b *buffer) String() string { return string(b.a) }
+
+func write(t *testing.T, testname string, w *Writer, src string) {
+ written, err := io.WriteString(w, src)
+ if err != nil {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- write error: %v\n", testname, src, err)
+ }
+ if written != len(src) {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- written = %d, len(src) = %d\n", testname, src, written, len(src))
+ }
+}
+
+func verify(t *testing.T, testname string, w *Writer, b *buffer, src, expected string) {
+ err := w.Flush()
+ if err != nil {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- flush error: %v\n", testname, src, err)
+ }
+
+ res := b.String()
+ if res != expected {
+ t.Errorf("--- test: %s\n--- src:\n%q\n--- found:\n%q\n--- expected:\n%q\n", testname, src, res, expected)
+ }
+}
+
+func check(t *testing.T, testname string, minwidth, tabwidth, padding int, padchar byte, flags uint, src, expected string) {
+ var b buffer
+ b.init(1000)
+
+ var w Writer
+ w.Init(&b, minwidth, tabwidth, padding, padchar, flags)
+
+ // write all at once
+ title := testname + " (written all at once)"
+ b.clear()
+ write(t, title, &w, src)
+ verify(t, title, &w, &b, src, expected)
+
+ // write byte-by-byte
+ title = testname + " (written byte-by-byte)"
+ b.clear()
+ for i := 0; i < len(src); i++ {
+ write(t, title, &w, src[i:i+1])
+ }
+ verify(t, title, &w, &b, src, expected)
+
+ // write using Fibonacci slice sizes
+ title = testname + " (written in fibonacci slices)"
+ b.clear()
+ for i, d := 0, 0; i < len(src); {
+ write(t, title, &w, src[i:i+d])
+ i, d = i+d, d+1
+ if i+d > len(src) {
+ d = len(src) - i
+ }
+ }
+ verify(t, title, &w, &b, src, expected)
+}
+
+var tests = []struct {
+ testname string
+ minwidth, tabwidth, padding int
+ padchar byte
+ flags uint
+ src, expected string
+}{
+ {
+ "1a",
+ 8, 0, 1, '.', 0,
+ "",
+ "",
+ },
+
+ {
+ "1a debug",
+ 8, 0, 1, '.', Debug,
+ "",
+ "",
+ },
+
+ {
+ "1b esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "\xff\xff",
+ "",
+ },
+
+ {
+ "1b esc",
+ 8, 0, 1, '.', 0,
+ "\xff\xff",
+ "\xff\xff",
+ },
+
+ {
+ "1c esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "\xff\t\xff",
+ "\t",
+ },
+
+ {
+ "1c esc",
+ 8, 0, 1, '.', 0,
+ "\xff\t\xff",
+ "\xff\t\xff",
+ },
+
+ {
+ "1d esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "\xff\"foo\t\n\tbar\"\xff",
+ "\"foo\t\n\tbar\"",
+ },
+
+ {
+ "1d esc",
+ 8, 0, 1, '.', 0,
+ "\xff\"foo\t\n\tbar\"\xff",
+ "\xff\"foo\t\n\tbar\"\xff",
+ },
+
+ {
+ "1e esc stripped",
+ 8, 0, 1, '.', StripEscape,
+ "abc\xff\tdef", // unterminated escape
+ "abc\tdef",
+ },
+
+ {
+ "1e esc",
+ 8, 0, 1, '.', 0,
+ "abc\xff\tdef", // unterminated escape
+ "abc\xff\tdef",
+ },
+
+ {
+ "2",
+ 8, 0, 1, '.', 0,
+ "\n\n\n",
+ "\n\n\n",
+ },
+
+ {
+ "3",
+ 8, 0, 1, '.', 0,
+ "a\nb\nc",
+ "a\nb\nc",
+ },
+
+ {
+ "4a",
+ 8, 0, 1, '.', 0,
+ "\t", // '\t' terminates an empty cell on last line - nothing to print
+ "",
+ },
+
+ {
+ "4b",
+ 8, 0, 1, '.', AlignRight,
+ "\t", // '\t' terminates an empty cell on last line - nothing to print
+ "",
+ },
+
+ {
+ "5",
+ 8, 0, 1, '.', 0,
+ "*\t*",
+ "*.......*",
+ },
+
+ {
+ "5b",
+ 8, 0, 1, '.', 0,
+ "*\t*\n",
+ "*.......*\n",
+ },
+
+ {
+ "5c",
+ 8, 0, 1, '.', 0,
+ "*\t*\t",
+ "*.......*",
+ },
+
+ {
+ "5c debug",
+ 8, 0, 1, '.', Debug,
+ "*\t*\t",
+ "*.......|*",
+ },
+
+ {
+ "5d",
+ 8, 0, 1, '.', AlignRight,
+ "*\t*\t",
+ ".......**",
+ },
+
+ {
+ "6",
+ 8, 0, 1, '.', 0,
+ "\t\n",
+ "........\n",
+ },
+
+ {
+ "7a",
+ 8, 0, 1, '.', 0,
+ "a) foo",
+ "a) foo",
+ },
+
+ {
+ "7b",
+ 8, 0, 1, ' ', 0,
+ "b) foo\tbar",
+ "b) foo bar",
+ },
+
+ {
+ "7c",
+ 8, 0, 1, '.', 0,
+ "c) foo\tbar\t",
+ "c) foo..bar",
+ },
+
+ {
+ "7d",
+ 8, 0, 1, '.', 0,
+ "d) foo\tbar\n",
+ "d) foo..bar\n",
+ },
+
+ {
+ "7e",
+ 8, 0, 1, '.', 0,
+ "e) foo\tbar\t\n",
+ "e) foo..bar.....\n",
+ },
+
+ {
+ "7f",
+ 8, 0, 1, '.', FilterHTML,
+ "f) f&lt;o\t<b>bar</b>\t\n",
+ "f) f&lt;o..<b>bar</b>.....\n",
+ },
+
+ {
+ "7g",
+ 8, 0, 1, '.', FilterHTML,
+ "g) f&lt;o\t<b>bar</b>\t non-terminated entity &amp",
+ "g) f&lt;o..<b>bar</b>..... non-terminated entity &amp",
+ },
+
+ {
+ "7g debug",
+ 8, 0, 1, '.', FilterHTML | Debug,
+ "g) f&lt;o\t<b>bar</b>\t non-terminated entity &amp",
+ "g) f&lt;o..|<b>bar</b>.....| non-terminated entity &amp",
+ },
+
+ {
+ "8",
+ 8, 0, 1, '*', 0,
+ "Hello, world!\n",
+ "Hello, world!\n",
+ },
+
+ {
+ "9a",
+ 1, 0, 0, '.', 0,
+ "1\t2\t3\t4\n" +
+ "11\t222\t3333\t44444\n",
+
+ "1.2..3...4\n" +
+ "11222333344444\n",
+ },
+
+ {
+ "9b",
+ 1, 0, 0, '.', FilterHTML,
+ "1\t2<!---\f--->\t3\t4\n" + // \f inside HTML is ignored
+ "11\t222\t3333\t44444\n",
+
+ "1.2<!---\f--->..3...4\n" +
+ "11222333344444\n",
+ },
+
+ {
+ "9c",
+ 1, 0, 0, '.', 0,
+ "1\t2\t3\t4\f" + // \f causes a newline and flush
+ "11\t222\t3333\t44444\n",
+
+ "1234\n" +
+ "11222333344444\n",
+ },
+
+ {
+ "9c debug",
+ 1, 0, 0, '.', Debug,
+ "1\t2\t3\t4\f" + // \f causes a newline and flush
+ "11\t222\t3333\t44444\n",
+
+ "1|2|3|4\n" +
+ "---\n" +
+ "11|222|3333|44444\n",
+ },
+
+ {
+ "10a",
+ 5, 0, 0, '.', 0,
+ "1\t2\t3\t4\n",
+ "1....2....3....4\n",
+ },
+
+ {
+ "10b",
+ 5, 0, 0, '.', 0,
+ "1\t2\t3\t4\t\n",
+ "1....2....3....4....\n",
+ },
+
+ {
+ "11",
+ 8, 0, 1, '.', 0,
+ "本\tb\tc\n" +
+ "aa\t\u672c\u672c\u672c\tcccc\tddddd\n" +
+ "aaa\tbbbb\n",
+
+ "本.......b.......c\n" +
+ "aa......本本本.....cccc....ddddd\n" +
+ "aaa.....bbbb\n",
+ },
+
+ {
+ "12a",
+ 8, 0, 1, ' ', AlignRight,
+ "a\tè\tc\t\n" +
+ "aa\tèèè\tcccc\tddddd\t\n" +
+ "aaa\tèèèè\t\n",
+
+ " a è c\n" +
+ " aa èèè cccc ddddd\n" +
+ " aaa èèèè\n",
+ },
+
+ {
+ "12b",
+ 2, 0, 0, ' ', 0,
+ "a\tb\tc\n" +
+ "aa\tbbb\tcccc\n" +
+ "aaa\tbbbb\n",
+
+ "a b c\n" +
+ "aa bbbcccc\n" +
+ "aaabbbb\n",
+ },
+
+ {
+ "12c",
+ 8, 0, 1, '_', 0,
+ "a\tb\tc\n" +
+ "aa\tbbb\tcccc\n" +
+ "aaa\tbbbb\n",
+
+ "a_______b_______c\n" +
+ "aa______bbb_____cccc\n" +
+ "aaa_____bbbb\n",
+ },
+
+ {
+ "13a",
+ 4, 0, 1, '-', 0,
+ "4444\t日本語\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t22\n" +
+ "\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t4444\n" +
+ "1\t1\t999999999\t0000000000\n",
+
+ "4444------日本語-22--1---333\n" +
+ "999999999-22\n" +
+ "7---------22\n" +
+ "------------------88888888\n" +
+ "\n" +
+ "666666-666666-666666----4444\n" +
+ "1------1------999999999-0000000000\n",
+ },
+
+ {
+ "13b",
+ 4, 0, 3, '.', 0,
+ "4444\t333\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t22\n" +
+ "\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t4444\n" +
+ "1\t1\t999999999\t0000000000\n",
+
+ "4444........333...22...1...333\n" +
+ "999999999...22\n" +
+ "7...........22\n" +
+ "....................88888888\n" +
+ "\n" +
+ "666666...666666...666666......4444\n" +
+ "1........1........999999999...0000000000\n",
+ },
+
+ {
+ "13c",
+ 8, 8, 1, '\t', FilterHTML,
+ "4444\t333\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t22\n" +
+ "\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t4444\n" +
+ "1\t1\t<font color=red attr=日本語>999999999</font>\t0000000000\n",
+
+ "4444\t\t333\t22\t1\t333\n" +
+ "999999999\t22\n" +
+ "7\t\t22\n" +
+ "\t\t\t\t88888888\n" +
+ "\n" +
+ "666666\t666666\t666666\t\t4444\n" +
+ "1\t1\t<font color=red attr=日本語>999999999</font>\t0000000000\n",
+ },
+
+ {
+ "14",
+ 1, 0, 2, ' ', AlignRight,
+ ".0\t.3\t2.4\t-5.1\t\n" +
+ "23.0\t12345678.9\t2.4\t-989.4\t\n" +
+ "5.1\t12.0\t2.4\t-7.0\t\n" +
+ ".0\t0.0\t332.0\t8908.0\t\n" +
+ ".0\t-.3\t456.4\t22.1\t\n" +
+ ".0\t1.2\t44.4\t-13.3\t\t",
+
+ " .0 .3 2.4 -5.1\n" +
+ " 23.0 12345678.9 2.4 -989.4\n" +
+ " 5.1 12.0 2.4 -7.0\n" +
+ " .0 0.0 332.0 8908.0\n" +
+ " .0 -.3 456.4 22.1\n" +
+ " .0 1.2 44.4 -13.3",
+ },
+
+ {
+ "14 debug",
+ 1, 0, 2, ' ', AlignRight | Debug,
+ ".0\t.3\t2.4\t-5.1\t\n" +
+ "23.0\t12345678.9\t2.4\t-989.4\t\n" +
+ "5.1\t12.0\t2.4\t-7.0\t\n" +
+ ".0\t0.0\t332.0\t8908.0\t\n" +
+ ".0\t-.3\t456.4\t22.1\t\n" +
+ ".0\t1.2\t44.4\t-13.3\t\t",
+
+ " .0| .3| 2.4| -5.1|\n" +
+ " 23.0| 12345678.9| 2.4| -989.4|\n" +
+ " 5.1| 12.0| 2.4| -7.0|\n" +
+ " .0| 0.0| 332.0| 8908.0|\n" +
+ " .0| -.3| 456.4| 22.1|\n" +
+ " .0| 1.2| 44.4| -13.3|",
+ },
+
+ {
+ "15a",
+ 4, 0, 0, '.', 0,
+ "a\t\tb",
+ "a.......b",
+ },
+
+ {
+ "15b",
+ 4, 0, 0, '.', DiscardEmptyColumns,
+		"a\t\tb", // hard tabs - do not discard column
+ "a.......b",
+ },
+
+ {
+ "15c",
+ 4, 0, 0, '.', DiscardEmptyColumns,
+ "a\v\vb",
+ "a...b",
+ },
+
+ {
+ "15d",
+ 4, 0, 0, '.', AlignRight | DiscardEmptyColumns,
+ "a\v\vb",
+ "...ab",
+ },
+
+ {
+ "16a",
+ 100, 100, 0, '\t', 0,
+ "a\tb\t\td\n" +
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+
+ "a\tb\t\td\n" +
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+ },
+
+ {
+ "16b",
+ 100, 100, 0, '\t', DiscardEmptyColumns,
+ "a\vb\v\vd\n" +
+ "a\vb\v\vd\ve\n" +
+ "a\n" +
+ "a\vb\vc\vd\n" +
+ "a\vb\vc\vd\ve\n",
+
+ "a\tb\td\n" +
+ "a\tb\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+ },
+
+ {
+ "16b debug",
+ 100, 100, 0, '\t', DiscardEmptyColumns | Debug,
+ "a\vb\v\vd\n" +
+ "a\vb\v\vd\ve\n" +
+ "a\n" +
+ "a\vb\vc\vd\n" +
+ "a\vb\vc\vd\ve\n",
+
+ "a\t|b\t||d\n" +
+ "a\t|b\t||d\t|e\n" +
+ "a\n" +
+ "a\t|b\t|c\t|d\n" +
+ "a\t|b\t|c\t|d\t|e\n",
+ },
+
+ {
+ "16c",
+ 100, 100, 0, '\t', DiscardEmptyColumns,
+ "a\tb\t\td\n" + // hard tabs - do not discard column
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+
+ "a\tb\t\td\n" +
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+ },
+
+ {
+ "16c debug",
+ 100, 100, 0, '\t', DiscardEmptyColumns | Debug,
+ "a\tb\t\td\n" + // hard tabs - do not discard column
+ "a\tb\t\td\te\n" +
+ "a\n" +
+ "a\tb\tc\td\n" +
+ "a\tb\tc\td\te\n",
+
+ "a\t|b\t|\t|d\n" +
+ "a\t|b\t|\t|d\t|e\n" +
+ "a\n" +
+ "a\t|b\t|c\t|d\n" +
+ "a\t|b\t|c\t|d\t|e\n",
+ },
+}
+
+func Test(t *testing.T) {
+ for _, e := range tests {
+ check(t, e.testname, e.minwidth, e.tabwidth, e.padding, e.padchar, e.flags, e.src, e.expected)
+ }
+}
+
+type panicWriter struct{}
+
+func (panicWriter) Write([]byte) (int, error) {
+ panic("cannot write")
+}
+
+func wantPanicString(t *testing.T, want string) {
+ if e := recover(); e != nil {
+ got, ok := e.(string)
+ switch {
+ case !ok:
+ t.Errorf("got %v (%T), want panic string", e, e)
+ case got != want:
+ t.Errorf("wrong panic message: got %q, want %q", got, want)
+ }
+ }
+}
+
+func TestPanicDuringFlush(t *testing.T) {
+ defer wantPanicString(t, "tabwriter: panic during Flush")
+ var p panicWriter
+ w := new(Writer)
+ w.Init(p, 0, 0, 5, ' ', 0)
+ io.WriteString(w, "a")
+ w.Flush()
+ t.Errorf("failed to panic during Flush")
+}
+
+func TestPanicDuringWrite(t *testing.T) {
+ defer wantPanicString(t, "tabwriter: panic during Write")
+ var p panicWriter
+ w := new(Writer)
+ w.Init(p, 0, 0, 5, ' ', 0)
+ io.WriteString(w, "a\n\n") // the second \n triggers a call to w.Write and thus a panic
+ t.Errorf("failed to panic during Write")
+}
diff --git a/src/text/template/doc.go b/src/text/template/doc.go
new file mode 100644
index 000000000..7c6efd59c
--- /dev/null
+++ b/src/text/template/doc.go
@@ -0,0 +1,405 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package template implements data-driven templates for generating textual output.
+
+To generate HTML output, see package html/template, which has the same interface
+as this package but automatically secures HTML output against certain attacks.
+
+Templates are executed by applying them to a data structure. Annotations in the
+template refer to elements of the data structure (typically a field of a struct
+or a key in a map) to control execution and derive values to be displayed.
+Execution of the template walks the structure and sets the cursor, represented
+by a period '.' and called "dot", to the value at the current location in the
+structure as execution proceeds.
+
+The input text for a template is UTF-8-encoded text in any format.
+"Actions"--data evaluations or control structures--are delimited by
+"{{" and "}}"; all text outside actions is copied to the output unchanged.
+Actions may not span newlines, although comments can.
+
+Once parsed, a template may be executed safely in parallel.
+
+Here is a trivial example that prints "17 items are made of wool".
+
+ type Inventory struct {
+ Material string
+ Count uint
+ }
+ sweaters := Inventory{"wool", 17}
+ tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
+ if err != nil { panic(err) }
+ err = tmpl.Execute(os.Stdout, sweaters)
+ if err != nil { panic(err) }
+
+More intricate examples appear below.
+
+Actions
+
+Here is the list of actions. "Arguments" and "pipelines" are evaluations of
+data, defined in detail below.
+
+*/
+// {{/* a comment */}}
+// A comment; discarded. May contain newlines.
+// Comments do not nest and must start and end at the
+// delimiters, as shown here.
+/*
+
+ {{pipeline}}
+ The default textual representation of the value of the pipeline
+ is copied to the output.
+
+ {{if pipeline}} T1 {{end}}
+ If the value of the pipeline is empty, no output is generated;
+ otherwise, T1 is executed. The empty values are false, 0, any
+ nil pointer or interface value, and any array, slice, map, or
+ string of length zero.
+ Dot is unaffected.
+
+ {{if pipeline}} T1 {{else}} T0 {{end}}
+ If the value of the pipeline is empty, T0 is executed;
+ otherwise, T1 is executed. Dot is unaffected.
+
+ {{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
+ To simplify the appearance of if-else chains, the else action
+ of an if may include another if directly; the effect is exactly
+ the same as writing
+ {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
+
+ {{range pipeline}} T1 {{end}}
+ The value of the pipeline must be an array, slice, map, or channel.
+ If the value of the pipeline has length zero, nothing is output;
+ otherwise, dot is set to the successive elements of the array,
+ slice, or map and T1 is executed. If the value is a map and the
+ keys are of basic type with a defined order ("comparable"), the
+ elements will be visited in sorted key order.
+
+ {{range pipeline}} T1 {{else}} T0 {{end}}
+ The value of the pipeline must be an array, slice, map, or channel.
+ If the value of the pipeline has length zero, dot is unaffected and
+ T0 is executed; otherwise, dot is set to the successive elements
+ of the array, slice, or map and T1 is executed.
+
+ {{template "name"}}
+ The template with the specified name is executed with nil data.
+
+ {{template "name" pipeline}}
+ The template with the specified name is executed with dot set
+ to the value of the pipeline.
+
+ {{with pipeline}} T1 {{end}}
+ If the value of the pipeline is empty, no output is generated;
+ otherwise, dot is set to the value of the pipeline and T1 is
+ executed.
+
+ {{with pipeline}} T1 {{else}} T0 {{end}}
+ If the value of the pipeline is empty, dot is unaffected and T0
+ is executed; otherwise, dot is set to the value of the pipeline
+ and T1 is executed.
+
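Editorial aside (not part of the patch): a minimal sketch of the {{with}}/{{else}} behavior described in the list above; the "User" key is invented for illustration.

	package main

	import (
		"os"
		"text/template"
	)

	func main() {
		const src = "{{with .User}}Hello, {{.}}!\n{{else}}Hello, guest!\n{{end}}"
		t := template.Must(template.New("greet").Parse(src))
		// Non-empty value: dot is set to "Gopher" inside the with block.
		if err := t.Execute(os.Stdout, map[string]string{"User": "Gopher"}); err != nil {
			panic(err)
		}
		// Empty (missing) value: the else branch runs and dot is left unchanged.
		if err := t.Execute(os.Stdout, map[string]string{}); err != nil {
			panic(err)
		}
		// Output:
		// Hello, Gopher!
		// Hello, guest!
	}
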
+Arguments
+
+An argument is a simple value, denoted by one of the following.
+
+ - A boolean, string, character, integer, floating-point, imaginary
+ or complex constant in Go syntax. These behave like Go's untyped
+ constants, although raw strings may not span newlines.
+ - The keyword nil, representing an untyped Go nil.
+ - The character '.' (period):
+ .
+ The result is the value of dot.
+ - A variable name, which is a (possibly empty) alphanumeric string
+ preceded by a dollar sign, such as
+ $piOver2
+ or
+ $
+ The result is the value of the variable.
+ Variables are described below.
+ - The name of a field of the data, which must be a struct, preceded
+ by a period, such as
+ .Field
+ The result is the value of the field. Field invocations may be
+ chained:
+ .Field1.Field2
+ Fields can also be evaluated on variables, including chaining:
+ $x.Field1.Field2
+ - The name of a key of the data, which must be a map, preceded
+ by a period, such as
+ .Key
+ The result is the map element value indexed by the key.
+ Key invocations may be chained and combined with fields to any
+ depth:
+ .Field1.Key1.Field2.Key2
+ Although the key must be an alphanumeric identifier, unlike with
+	  field names it does not need to start with an upper case letter.
+ Keys can also be evaluated on variables, including chaining:
+ $x.key1.key2
+ - The name of a niladic method of the data, preceded by a period,
+ such as
+ .Method
+ The result is the value of invoking the method with dot as the
+ receiver, dot.Method(). Such a method must have one return value (of
+ any type) or two return values, the second of which is an error.
+ If it has two and the returned error is non-nil, execution terminates
+ and an error is returned to the caller as the value of Execute.
+ Method invocations may be chained and combined with fields and keys
+ to any depth:
+ .Field1.Key1.Method1.Field2.Key2.Method2
+ Methods can also be evaluated on variables, including chaining:
+ $x.Method1.Field
+ - The name of a niladic function, such as
+ fun
+ The result is the value of invoking the function, fun(). The return
+ types and values behave as in methods. Functions and function
+ names are described below.
+	- A parenthesized instance of one of the above, for grouping. The result
+ may be accessed by a field or map key invocation.
+ print (.F1 arg1) (.F2 arg2)
+ (.StructValuedMethod "arg").Field
+
+Arguments may evaluate to any type; if they are pointers the implementation
+automatically indirects to the base type when required.
+If an evaluation yields a function value, such as a function-valued
+field of a struct, the function is not invoked automatically, but it
+can be used as a truth value for an if action and the like. To invoke
+it, use the call function, defined below.
+
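Editorial aside (not part of the patch): the paragraph above notes that a function-valued field is not invoked automatically and that the call function does so. A small sketch of that usage; the Shout field is invented for illustration.

	package main

	import (
		"os"
		"text/template"
	)

	func main() {
		data := struct{ Shout func(string) string }{
			Shout: func(s string) string { return s + "!" },
		}
		// .Shout by itself is only a truth value; "call" actually invokes it.
		const src = `{{call .Shout "hello"}}` + "\n"
		t := template.Must(template.New("call").Parse(src))
		if err := t.Execute(os.Stdout, data); err != nil {
			panic(err)
		}
		// Output: hello!
	}
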
+A pipeline is a possibly chained sequence of "commands". A command is a simple
+value (argument) or a function or method call, possibly with multiple arguments:
+
+ Argument
+ The result is the value of evaluating the argument.
+ .Method [Argument...]
+ The method can be alone or the last element of a chain but,
+ unlike methods in the middle of a chain, it can take arguments.
+ The result is the value of calling the method with the
+ arguments:
+ dot.Method(Argument1, etc.)
+ functionName [Argument...]
+ The result is the value of calling the function associated
+ with the name:
+ function(Argument1, etc.)
+ Functions and function names are described below.
+
+Pipelines
+
+A pipeline may be "chained" by separating a sequence of commands with pipeline
+characters '|'. In a chained pipeline, the result of each command is
+passed as the last argument of the following command. The output of the final
+command in the pipeline is the value of the pipeline.
+
+The output of a command will be either one value or two values, the second of
+which has type error. If that second value is present and evaluates to
+non-nil, execution terminates and the error is returned to the caller of
+Execute.
+
+Variables
+
+A pipeline inside an action may initialize a variable to capture the result.
+The initialization has syntax
+
+ $variable := pipeline
+
+where $variable is the name of the variable. An action that declares a
+variable produces no output.
+
+If a "range" action initializes a variable, the variable is set to the
+successive elements of the iteration. Also, a "range" may declare two
+variables, separated by a comma:
+
+ range $index, $element := pipeline
+
+in which case $index and $element are set to the successive values of the
+array/slice index or map key and element, respectively. Note that if there is
+only one variable, it is assigned the element; this is opposite to the
+convention in Go range clauses.
+
+A variable's scope extends to the "end" action of the control structure ("if",
+"with", or "range") in which it is declared, or to the end of the template if
+there is no such control structure. A template invocation does not inherit
+variables from the point of its invocation.
+
+When execution begins, $ is set to the data argument passed to Execute, that is,
+to the starting value of dot.
+
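Editorial aside (not part of the patch): a short sketch of the one- and two-variable range forms described above; the Items field is invented for illustration.

	package main

	import (
		"os"
		"text/template"
	)

	func main() {
		// With two variables, $i receives the index and $v the element.
		const src = "{{range $i, $v := .Items}}{{$i}}: {{$v}}\n{{end}}"
		t := template.Must(template.New("list").Parse(src))
		data := struct{ Items []string }{Items: []string{"a", "b", "c"}}
		if err := t.Execute(os.Stdout, data); err != nil {
			panic(err)
		}
		// Output:
		// 0: a
		// 1: b
		// 2: c
	}
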
+Examples
+
+Here are some example one-line templates demonstrating pipelines and variables.
+All produce the quoted word "output":
+
+ {{"\"output\""}}
+ A string constant.
+ {{`"output"`}}
+ A raw string constant.
+ {{printf "%q" "output"}}
+ A function call.
+ {{"output" | printf "%q"}}
+ A function call whose final argument comes from the previous
+ command.
+ {{printf "%q" (print "out" "put")}}
+ A parenthesized argument.
+ {{"put" | printf "%s%s" "out" | printf "%q"}}
+ A more elaborate call.
+ {{"output" | printf "%s" | printf "%q"}}
+ A longer chain.
+ {{with "output"}}{{printf "%q" .}}{{end}}
+ A with action using dot.
+ {{with $x := "output" | printf "%q"}}{{$x}}{{end}}
+ A with action that creates and uses a variable.
+ {{with $x := "output"}}{{printf "%q" $x}}{{end}}
+ A with action that uses the variable in another action.
+ {{with $x := "output"}}{{$x | printf "%q"}}{{end}}
+ The same, but pipelined.
+
+Functions
+
+During execution functions are found in two function maps: first in the
+template, then in the global function map. By default, no functions are defined
+in the template but the Funcs method can be used to add them.
+
+Predefined global functions are named as follows.
+
+ and
+ Returns the boolean AND of its arguments by returning the
+ first empty argument or the last argument, that is,
+ "and x y" behaves as "if x then y else x". All the
+ arguments are evaluated.
+ call
+ Returns the result of calling the first argument, which
+ must be a function, with the remaining arguments as parameters.
+ Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
+ Y is a func-valued field, map entry, or the like.
+ The first argument must be the result of an evaluation
+ that yields a value of function type (as distinct from
+ a predefined function such as print). The function must
+ return either one or two result values, the second of which
+ is of type error. If the arguments don't match the function
+ or the returned error value is non-nil, execution stops.
+ html
+ Returns the escaped HTML equivalent of the textual
+ representation of its arguments.
+ index
+ Returns the result of indexing its first argument by the
+ following arguments. Thus "index x 1 2 3" is, in Go syntax,
+ x[1][2][3]. Each indexed item must be a map, slice, or array.
+ js
+ Returns the escaped JavaScript equivalent of the textual
+ representation of its arguments.
+ len
+ Returns the integer length of its argument.
+ not
+ Returns the boolean negation of its single argument.
+ or
+ Returns the boolean OR of its arguments by returning the
+ first non-empty argument or the last argument, that is,
+ "or x y" behaves as "if x then x else y". All the
+ arguments are evaluated.
+ print
+ An alias for fmt.Sprint
+ printf
+ An alias for fmt.Sprintf
+ println
+ An alias for fmt.Sprintln
+ urlquery
+ Returns the escaped value of the textual representation of
+ its arguments in a form suitable for embedding in a URL query.
+
+The boolean functions take any zero value to be false and a non-zero
+value to be true.
+
+There is also a set of binary comparison operators defined as
+functions:
+
+ eq
+ Returns the boolean truth of arg1 == arg2
+ ne
+ Returns the boolean truth of arg1 != arg2
+ lt
+ Returns the boolean truth of arg1 < arg2
+ le
+ Returns the boolean truth of arg1 <= arg2
+ gt
+ Returns the boolean truth of arg1 > arg2
+ ge
+ Returns the boolean truth of arg1 >= arg2
+
+For simpler multi-way equality tests, eq (only) accepts two or more
+arguments and compares the second and subsequent to the first,
+returning in effect
+
+ arg1==arg2 || arg1==arg3 || arg1==arg4 ...
+
+(Unlike with || in Go, however, eq is a function call and all the
+arguments will be evaluated.)
+
+The comparison functions work on basic types only (or named basic
+types, such as "type Celsius float32"). They implement the Go rules
+for comparison of values, except that size and exact type are
+ignored, so any integer value may be compared with any other integer
+value, any unsigned integer value may be compared with any other
+unsigned integer value, and so on. However, as usual, one may not
+compare an int with a float32 and so on.
+
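Editorial aside (not part of the patch): a sketch of the multi-argument form of eq described above; the Code field is invented for illustration.

	package main

	import (
		"os"
		"text/template"
	)

	func main() {
		// eq compares its first argument against each of the remaining ones.
		const src = "{{if eq .Code 200 201 204}}ok{{else}}error {{.Code}}{{end}}\n"
		t := template.Must(template.New("status").Parse(src))
		if err := t.Execute(os.Stdout, struct{ Code int }{Code: 201}); err != nil {
			panic(err)
		}
		// Output: ok
	}
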
+Associated templates
+
+Each template is named by a string specified when it is created. Also, each
+template is associated with zero or more other templates that it may invoke by
+name; such associations are transitive and form a name space of templates.
+
+A template may use a template invocation to instantiate another associated
+template; see the explanation of the "template" action above. The name must be
+that of a template associated with the template that contains the invocation.
+
+Nested template definitions
+
+When parsing a template, another template may be defined and associated with the
+template being parsed. Template definitions must appear at the top level of the
+template, much like global variables in a Go program.
+
+The syntax of such definitions is to surround each template declaration with a
+"define" and "end" action.
+
+The define action names the template being created by providing a string
+constant. Here is a simple example:
+
+ `{{define "T1"}}ONE{{end}}
+ {{define "T2"}}TWO{{end}}
+ {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
+ {{template "T3"}}`
+
+This defines two templates, T1 and T2, and a third T3 that invokes the other two
+when it is executed. Finally it invokes T3. If executed this template will
+produce the text
+
+ ONE TWO
+
+By construction, a template may reside in only one association. If it's
+necessary to have a template addressable from multiple associations, the
+template definition must be parsed multiple times to create distinct *Template
+values, or must be copied with the Clone or AddParseTree method.
+
+Parse may be called multiple times to assemble the various associated templates;
+see the ParseFiles and ParseGlob functions and methods for simple ways to parse
+related templates stored in files.
+
+A template may be executed directly or through ExecuteTemplate, which executes
+an associated template identified by name. To invoke our example above, we
+might write,
+
+ err := tmpl.Execute(os.Stdout, "no data needed")
+ if err != nil {
+ log.Fatalf("execution failed: %s", err)
+ }
+
+or to invoke a particular template explicitly by name,
+
+ err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
+ if err != nil {
+ log.Fatalf("execution failed: %s", err)
+ }
+
+*/
+package template
diff --git a/src/text/template/example_test.go b/src/text/template/example_test.go
new file mode 100644
index 000000000..de1d51851
--- /dev/null
+++ b/src/text/template/example_test.go
@@ -0,0 +1,71 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+ "log"
+ "os"
+ "text/template"
+)
+
+func ExampleTemplate() {
+ // Define a template.
+ const letter = `
+Dear {{.Name}},
+{{if .Attended}}
+It was a pleasure to see you at the wedding.{{else}}
+It is a shame you couldn't make it to the wedding.{{end}}
+{{with .Gift}}Thank you for the lovely {{.}}.
+{{end}}
+Best wishes,
+Josie
+`
+
+ // Prepare some data to insert into the template.
+ type Recipient struct {
+ Name, Gift string
+ Attended bool
+ }
+ var recipients = []Recipient{
+ {"Aunt Mildred", "bone china tea set", true},
+ {"Uncle John", "moleskin pants", false},
+ {"Cousin Rodney", "", false},
+ }
+
+ // Create a new template and parse the letter into it.
+ t := template.Must(template.New("letter").Parse(letter))
+
+ // Execute the template for each recipient.
+ for _, r := range recipients {
+ err := t.Execute(os.Stdout, r)
+ if err != nil {
+ log.Println("executing template:", err)
+ }
+ }
+
+ // Output:
+ // Dear Aunt Mildred,
+ //
+ // It was a pleasure to see you at the wedding.
+ // Thank you for the lovely bone china tea set.
+ //
+ // Best wishes,
+ // Josie
+ //
+ // Dear Uncle John,
+ //
+ // It is a shame you couldn't make it to the wedding.
+ // Thank you for the lovely moleskin pants.
+ //
+ // Best wishes,
+ // Josie
+ //
+ // Dear Cousin Rodney,
+ //
+ // It is a shame you couldn't make it to the wedding.
+ //
+ // Best wishes,
+ // Josie
+}
diff --git a/src/text/template/examplefiles_test.go b/src/text/template/examplefiles_test.go
new file mode 100644
index 000000000..a15c7a62a
--- /dev/null
+++ b/src/text/template/examplefiles_test.go
@@ -0,0 +1,182 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "text/template"
+)
+
+// templateFile defines the contents of a template to be stored in a file, for testing.
+type templateFile struct {
+ name string
+ contents string
+}
+
+func createTestDir(files []templateFile) string {
+ dir, err := ioutil.TempDir("", "template")
+ if err != nil {
+ log.Fatal(err)
+ }
+ for _, file := range files {
+ f, err := os.Create(filepath.Join(dir, file.name))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer f.Close()
+ _, err = io.WriteString(f, file.contents)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+ return dir
+}
+
+// Here we demonstrate loading a set of templates from a directory.
+func ExampleTemplate_glob() {
+ // Here we create a temporary directory and populate it with our sample
+ // template definition files; usually the template files would already
+ // exist in some location known to the program.
+ dir := createTestDir([]templateFile{
+ // T0.tmpl is a plain template file that just invokes T1.
+ {"T0.tmpl", `T0 invokes T1: ({{template "T1"}})`},
+ // T1.tmpl defines a template, T1 that invokes T2.
+ {"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+ // T2.tmpl defines a template T2.
+ {"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
+ })
+ // Clean up after the test; another quirk of running as an example.
+ defer os.RemoveAll(dir)
+
+ // pattern is the glob pattern used to find all the template files.
+ pattern := filepath.Join(dir, "*.tmpl")
+
+ // Here starts the example proper.
+ // T0.tmpl is the first name matched, so it becomes the starting template,
+ // the value returned by ParseGlob.
+ tmpl := template.Must(template.ParseGlob(pattern))
+
+ err := tmpl.Execute(os.Stdout, nil)
+ if err != nil {
+ log.Fatalf("template execution: %s", err)
+ }
+ // Output:
+ // T0 invokes T1: (T1 invokes T2: (This is T2))
+}
+
+// This example demonstrates one way to share some templates
+// and use them in different contexts. In this variant we add multiple driver
+// templates by hand to an existing bundle of templates.
+func ExampleTemplate_helpers() {
+ // Here we create a temporary directory and populate it with our sample
+ // template definition files; usually the template files would already
+ // exist in some location known to the program.
+ dir := createTestDir([]templateFile{
+ // T1.tmpl defines a template, T1 that invokes T2.
+ {"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+ // T2.tmpl defines a template T2.
+ {"T2.tmpl", `{{define "T2"}}This is T2{{end}}`},
+ })
+ // Clean up after the test; another quirk of running as an example.
+ defer os.RemoveAll(dir)
+
+ // pattern is the glob pattern used to find all the template files.
+ pattern := filepath.Join(dir, "*.tmpl")
+
+ // Here starts the example proper.
+ // Load the helpers.
+ templates := template.Must(template.ParseGlob(pattern))
+ // Add one driver template to the bunch; we do this with an explicit template definition.
+ _, err := templates.Parse("{{define `driver1`}}Driver 1 calls T1: ({{template `T1`}})\n{{end}}")
+ if err != nil {
+ log.Fatal("parsing driver1: ", err)
+ }
+ // Add another driver template.
+ _, err = templates.Parse("{{define `driver2`}}Driver 2 calls T2: ({{template `T2`}})\n{{end}}")
+ if err != nil {
+ log.Fatal("parsing driver2: ", err)
+ }
+ // We load all the templates before execution. This package does not require
+ // that behavior but html/template's escaping does, so it's a good habit.
+ err = templates.ExecuteTemplate(os.Stdout, "driver1", nil)
+ if err != nil {
+ log.Fatalf("driver1 execution: %s", err)
+ }
+ err = templates.ExecuteTemplate(os.Stdout, "driver2", nil)
+ if err != nil {
+ log.Fatalf("driver2 execution: %s", err)
+ }
+ // Output:
+ // Driver 1 calls T1: (T1 invokes T2: (This is T2))
+ // Driver 2 calls T2: (This is T2)
+}
+
+// This example demonstrates how to use one group of driver
+// templates with distinct sets of helper templates.
+func ExampleTemplate_share() {
+ // Here we create a temporary directory and populate it with our sample
+ // template definition files; usually the template files would already
+ // exist in some location known to the program.
+ dir := createTestDir([]templateFile{
+ // T0.tmpl is a plain template file that just invokes T1.
+ {"T0.tmpl", "T0 ({{.}} version) invokes T1: ({{template `T1`}})\n"},
+ // T1.tmpl defines a template, T1 that invokes T2. Note T2 is not defined
+ {"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`},
+ })
+ // Clean up after the test; another quirk of running as an example.
+ defer os.RemoveAll(dir)
+
+ // pattern is the glob pattern used to find all the template files.
+ pattern := filepath.Join(dir, "*.tmpl")
+
+ // Here starts the example proper.
+ // Load the drivers.
+ drivers := template.Must(template.ParseGlob(pattern))
+
+ // We must define an implementation of the T2 template. First we clone
+ // the drivers, then add a definition of T2 to the template name space.
+
+ // 1. Clone the helper set to create a new name space from which to run them.
+ first, err := drivers.Clone()
+ if err != nil {
+ log.Fatal("cloning helpers: ", err)
+ }
+ // 2. Define T2, version A, and parse it.
+ _, err = first.Parse("{{define `T2`}}T2, version A{{end}}")
+ if err != nil {
+ log.Fatal("parsing T2: ", err)
+ }
+
+ // Now repeat the whole thing, using a different version of T2.
+ // 1. Clone the drivers.
+ second, err := drivers.Clone()
+ if err != nil {
+ log.Fatal("cloning drivers: ", err)
+ }
+ // 2. Define T2, version B, and parse it.
+ _, err = second.Parse("{{define `T2`}}T2, version B{{end}}")
+ if err != nil {
+ log.Fatal("parsing T2: ", err)
+ }
+
+ // Execute the templates in the reverse order to verify the
+ // first is unaffected by the second.
+ err = second.ExecuteTemplate(os.Stdout, "T0.tmpl", "second")
+ if err != nil {
+ log.Fatalf("second execution: %s", err)
+ }
+ err = first.ExecuteTemplate(os.Stdout, "T0.tmpl", "first")
+ if err != nil {
+		log.Fatalf("first execution: %s", err)
+ }
+
+ // Output:
+ // T0 (second version) invokes T1: (T1 invokes T2: (T2, version B))
+ // T0 (first version) invokes T1: (T1 invokes T2: (T2, version A))
+}
diff --git a/src/text/template/examplefunc_test.go b/src/text/template/examplefunc_test.go
new file mode 100644
index 000000000..080b5e3a0
--- /dev/null
+++ b/src/text/template/examplefunc_test.go
@@ -0,0 +1,54 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template_test
+
+import (
+ "log"
+ "os"
+ "strings"
+ "text/template"
+)
+
+// This example demonstrates a custom function to process template text.
+// It installs the strings.Title function and uses it to
+// Make Title Text Look Good In Our Template's Output.
+func ExampleTemplate_func() {
+ // First we create a FuncMap with which to register the function.
+ funcMap := template.FuncMap{
+ // The name "title" is what the function will be called in the template text.
+ "title": strings.Title,
+ }
+
+ // A simple template definition to test our function.
+ // We print the input text several ways:
+ // - the original
+ // - title-cased
+ // - title-cased and then printed with %q
+ // - printed with %q and then title-cased.
+ const templateText = `
+Input: {{printf "%q" .}}
+Output 0: {{title .}}
+Output 1: {{title . | printf "%q"}}
+Output 2: {{printf "%q" . | title}}
+`
+
+ // Create a template, add the function map, and parse the text.
+ tmpl, err := template.New("titleTest").Funcs(funcMap).Parse(templateText)
+ if err != nil {
+ log.Fatalf("parsing: %s", err)
+ }
+
+ // Run the template to verify the output.
+ err = tmpl.Execute(os.Stdout, "the go programming language")
+ if err != nil {
+ log.Fatalf("execution: %s", err)
+ }
+
+ // Output:
+ // Input: "the go programming language"
+ // Output 0: The Go Programming Language
+ // Output 1: "The Go Programming Language"
+ // Output 2: "The Go Programming Language"
+}
diff --git a/src/text/template/exec.go b/src/text/template/exec.go
new file mode 100644
index 000000000..8e155d478
--- /dev/null
+++ b/src/text/template/exec.go
@@ -0,0 +1,842 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+ "text/template/parse"
+)
+
+// state represents the state of an execution. It's not part of the
+// template so that multiple executions of the same template
+// can execute in parallel.
+type state struct {
+ tmpl *Template
+ wr io.Writer
+ node parse.Node // current node, for errors
+ vars []variable // push-down stack of variable values.
+}
+
+// variable holds the dynamic value of a variable such as $, $x etc.
+type variable struct {
+ name string
+ value reflect.Value
+}
+
+// push pushes a new variable on the stack.
+func (s *state) push(name string, value reflect.Value) {
+ s.vars = append(s.vars, variable{name, value})
+}
+
+// mark returns the length of the variable stack.
+func (s *state) mark() int {
+ return len(s.vars)
+}
+
+// pop pops the variable stack up to the mark.
+func (s *state) pop(mark int) {
+ s.vars = s.vars[0:mark]
+}
+
+// setVar overwrites the top-nth variable on the stack. Used by range iterations.
+func (s *state) setVar(n int, value reflect.Value) {
+ s.vars[len(s.vars)-n].value = value
+}
+
+// varValue returns the value of the named variable.
+func (s *state) varValue(name string) reflect.Value {
+ for i := s.mark() - 1; i >= 0; i-- {
+ if s.vars[i].name == name {
+ return s.vars[i].value
+ }
+ }
+ s.errorf("undefined variable: %s", name)
+ return zero
+}
+
+var zero reflect.Value
+
+// at marks the state to be on node n, for error reporting.
+func (s *state) at(node parse.Node) {
+ s.node = node
+}
+
+// doublePercent returns the string with %'s replaced by %%, if necessary,
+// so it can be used safely inside a Printf format string.
+func doublePercent(str string) string {
+ if strings.Contains(str, "%") {
+ str = strings.Replace(str, "%", "%%", -1)
+ }
+ return str
+}
+
+// errorf formats the error and terminates processing.
+func (s *state) errorf(format string, args ...interface{}) {
+ name := doublePercent(s.tmpl.Name())
+ if s.node == nil {
+ format = fmt.Sprintf("template: %s: %s", name, format)
+ } else {
+ location, context := s.tmpl.ErrorContext(s.node)
+ format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
+ }
+ panic(fmt.Errorf(format, args...))
+}
+
+// errRecover is the handler that turns panics into returns from the top
+// level of Execute.
+func errRecover(errp *error) {
+ e := recover()
+ if e != nil {
+ switch err := e.(type) {
+ case runtime.Error:
+ panic(e)
+ case error:
+ *errp = err
+ default:
+ panic(e)
+ }
+ }
+}
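
Editorial aside (not part of the patch): errorf and errRecover together implement the common Go pattern of raising failures as panics internally and converting them back into an ordinary error return at the exported boundary. A generic, self-contained sketch of that pattern follows; the names are invented, and the real code above additionally re-panics on runtime errors.

	package main

	import (
		"errors"
		"fmt"
	)

	// doWork reports failures by panicking internally; the deferred
	// handler converts such panics into a returned error.
	func doWork(fail bool) (err error) {
		defer func() {
			if e := recover(); e != nil {
				if eerr, ok := e.(error); ok {
					err = eerr
					return
				}
				panic(e) // not one of our errors; re-raise
			}
		}()
		if fail {
			panic(errors.New("internal failure"))
		}
		return nil
	}

	func main() {
		fmt.Println(doWork(true))  // internal failure
		fmt.Println(doWork(false)) // <nil>
	}
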
+
+// ExecuteTemplate applies the template associated with t that has the given name
+// to the specified data object and writes the output to wr.
+// If an error occurs executing the template or writing its output,
+// execution stops, but partial results may already have been written to
+// the output writer.
+// A template may be executed safely in parallel.
+func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
+ tmpl := t.tmpl[name]
+ if tmpl == nil {
+ return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
+ }
+ return tmpl.Execute(wr, data)
+}
+
+// Execute applies a parsed template to the specified data object,
+// and writes the output to wr.
+// If an error occurs executing the template or writing its output,
+// execution stops, but partial results may already have been written to
+// the output writer.
+// A template may be executed safely in parallel.
+func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
+ defer errRecover(&err)
+ value := reflect.ValueOf(data)
+ state := &state{
+ tmpl: t,
+ wr: wr,
+ vars: []variable{{"$", value}},
+ }
+ t.init()
+ if t.Tree == nil || t.Root == nil {
+ var b bytes.Buffer
+ for name, tmpl := range t.tmpl {
+ if tmpl.Tree == nil || tmpl.Root == nil {
+ continue
+ }
+ if b.Len() > 0 {
+ b.WriteString(", ")
+ }
+ fmt.Fprintf(&b, "%q", name)
+ }
+ var s string
+ if b.Len() > 0 {
+ s = "; defined templates are: " + b.String()
+ }
+ state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
+ }
+ state.walk(value, t.Root)
+ return
+}
+
+// Walk functions step through the major pieces of the template structure,
+// generating output as they go.
+func (s *state) walk(dot reflect.Value, node parse.Node) {
+ s.at(node)
+ switch node := node.(type) {
+ case *parse.ActionNode:
+ // Do not pop variables so they persist until next end.
+ // Also, if the action declares variables, don't print the result.
+ val := s.evalPipeline(dot, node.Pipe)
+ if len(node.Pipe.Decl) == 0 {
+ s.printValue(node, val)
+ }
+ case *parse.IfNode:
+ s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
+ case *parse.ListNode:
+ for _, node := range node.Nodes {
+ s.walk(dot, node)
+ }
+ case *parse.RangeNode:
+ s.walkRange(dot, node)
+ case *parse.TemplateNode:
+ s.walkTemplate(dot, node)
+ case *parse.TextNode:
+ if _, err := s.wr.Write(node.Text); err != nil {
+ s.errorf("%s", err)
+ }
+ case *parse.WithNode:
+ s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
+ default:
+ s.errorf("unknown node: %s", node)
+ }
+}
+
+// walkIfOrWith walks an 'if' or 'with' node. The two control structures
+// are identical in behavior except that 'with' sets dot.
+func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
+ defer s.pop(s.mark())
+ val := s.evalPipeline(dot, pipe)
+ truth, ok := isTrue(val)
+ if !ok {
+ s.errorf("if/with can't use %v", val)
+ }
+ if truth {
+ if typ == parse.NodeWith {
+ s.walk(val, list)
+ } else {
+ s.walk(dot, list)
+ }
+ } else if elseList != nil {
+ s.walk(dot, elseList)
+ }
+}
+
+// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
+// and whether the value has a meaningful truth value.
+func isTrue(val reflect.Value) (truth, ok bool) {
+ if !val.IsValid() {
+ // Something like var x interface{}, never set. It's a form of nil.
+ return false, true
+ }
+ switch val.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ truth = val.Len() > 0
+ case reflect.Bool:
+ truth = val.Bool()
+ case reflect.Complex64, reflect.Complex128:
+ truth = val.Complex() != 0
+ case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
+ truth = !val.IsNil()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ truth = val.Int() != 0
+ case reflect.Float32, reflect.Float64:
+ truth = val.Float() != 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ truth = val.Uint() != 0
+ case reflect.Struct:
+ truth = true // Struct values are always true.
+ default:
+ return
+ }
+ return truth, true
+}
+
+func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
+ s.at(r)
+ defer s.pop(s.mark())
+ val, _ := indirect(s.evalPipeline(dot, r.Pipe))
+ // mark top of stack before any variables in the body are pushed.
+ mark := s.mark()
+ oneIteration := func(index, elem reflect.Value) {
+ // Set top var (lexically the second if there are two) to the element.
+ if len(r.Pipe.Decl) > 0 {
+ s.setVar(1, elem)
+ }
+ // Set next var (lexically the first if there are two) to the index.
+ if len(r.Pipe.Decl) > 1 {
+ s.setVar(2, index)
+ }
+ s.walk(elem, r.List)
+ s.pop(mark)
+ }
+ switch val.Kind() {
+ case reflect.Array, reflect.Slice:
+ if val.Len() == 0 {
+ break
+ }
+ for i := 0; i < val.Len(); i++ {
+ oneIteration(reflect.ValueOf(i), val.Index(i))
+ }
+ return
+ case reflect.Map:
+ if val.Len() == 0 {
+ break
+ }
+ for _, key := range sortKeys(val.MapKeys()) {
+ oneIteration(key, val.MapIndex(key))
+ }
+ return
+ case reflect.Chan:
+ if val.IsNil() {
+ break
+ }
+ i := 0
+ for ; ; i++ {
+ elem, ok := val.Recv()
+ if !ok {
+ break
+ }
+ oneIteration(reflect.ValueOf(i), elem)
+ }
+ if i == 0 {
+ break
+ }
+ return
+ case reflect.Invalid:
+ break // An invalid value is likely a nil map, etc. and acts like an empty map.
+ default:
+ s.errorf("range can't iterate over %v", val)
+ }
+ if r.ElseList != nil {
+ s.walk(dot, r.ElseList)
+ }
+}
+
+func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
+ s.at(t)
+ tmpl := s.tmpl.tmpl[t.Name]
+ if tmpl == nil {
+ s.errorf("template %q not defined", t.Name)
+ }
+ // Variables declared by the pipeline persist.
+ dot = s.evalPipeline(dot, t.Pipe)
+ newState := *s
+ newState.tmpl = tmpl
+ // No dynamic scoping: template invocations inherit no variables.
+ newState.vars = []variable{{"$", dot}}
+ newState.walk(dot, tmpl.Root)
+}
+
+// Eval functions evaluate pipelines, commands, and their elements and extract
+// values from the data structure by examining fields, calling methods, and so on.
+// The printing of those values happens only through walk functions.
+
+// evalPipeline returns the value acquired by evaluating a pipeline. If the
+// pipeline has a variable declaration, the variable will be pushed on the
+// stack. Callers should therefore pop the stack after they are finished
+// executing commands depending on the pipeline value.
+func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
+ if pipe == nil {
+ return
+ }
+ s.at(pipe)
+ for _, cmd := range pipe.Cmds {
+ value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
+ // If the object has type interface{}, dig down one level to the thing inside.
+ if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
+ value = reflect.ValueOf(value.Interface()) // lovely!
+ }
+ }
+ for _, variable := range pipe.Decl {
+ s.push(variable.Ident[0], value)
+ }
+ return value
+}
+
+func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
+ if len(args) > 1 || final.IsValid() {
+ s.errorf("can't give argument to non-function %s", args[0])
+ }
+}
+
+func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
+ firstWord := cmd.Args[0]
+ switch n := firstWord.(type) {
+ case *parse.FieldNode:
+ return s.evalFieldNode(dot, n, cmd.Args, final)
+ case *parse.ChainNode:
+ return s.evalChainNode(dot, n, cmd.Args, final)
+ case *parse.IdentifierNode:
+ // Must be a function.
+ return s.evalFunction(dot, n, cmd, cmd.Args, final)
+ case *parse.PipeNode:
+ // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
+ return s.evalPipeline(dot, n)
+ case *parse.VariableNode:
+ return s.evalVariableNode(dot, n, cmd.Args, final)
+ }
+ s.at(firstWord)
+ s.notAFunction(cmd.Args, final)
+ switch word := firstWord.(type) {
+ case *parse.BoolNode:
+ return reflect.ValueOf(word.True)
+ case *parse.DotNode:
+ return dot
+ case *parse.NilNode:
+ s.errorf("nil is not a command")
+ case *parse.NumberNode:
+ return s.idealConstant(word)
+ case *parse.StringNode:
+ return reflect.ValueOf(word.Text)
+ }
+ s.errorf("can't evaluate command %q", firstWord)
+ panic("not reached")
+}
+
+// idealConstant is called to return the value of a number in a context where
+// we don't know the type. In that case, the syntax of the number tells us
+// its type, and we use Go rules to resolve. Note there is no such thing as
+// a uint ideal constant in this situation - the value must be of int type.
+func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
+ // These are ideal constants but we don't know the type
+ // and we have no context. (If it was a method argument,
+ // we'd know what we need.) The syntax guides us to some extent.
+ s.at(constant)
+ switch {
+ case constant.IsComplex:
+ return reflect.ValueOf(constant.Complex128) // incontrovertible.
+ case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
+ return reflect.ValueOf(constant.Float64)
+ case constant.IsInt:
+ n := int(constant.Int64)
+ if int64(n) != constant.Int64 {
+ s.errorf("%s overflows int", constant.Text)
+ }
+ return reflect.ValueOf(n)
+ case constant.IsUint:
+ s.errorf("%s overflows int", constant.Text)
+ }
+ return zero
+}
+
+func isHexConstant(s string) bool {
+ return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
+}
+
+func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
+ s.at(field)
+ return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
+}
+
+func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
+ s.at(chain)
+ // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
+ pipe := s.evalArg(dot, nil, chain.Node)
+ if len(chain.Field) == 0 {
+ s.errorf("internal error: no fields in evalChainNode")
+ }
+ return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
+}
+
+func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
+ // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
+ s.at(variable)
+ value := s.varValue(variable.Ident[0])
+ if len(variable.Ident) == 1 {
+ s.notAFunction(args, final)
+ return value
+ }
+ return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
+}
+
+// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
+// dot is the environment in which to evaluate arguments, while
+// receiver is the value being walked along the chain.
+func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
+ n := len(ident)
+ for i := 0; i < n-1; i++ {
+ receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
+ }
+ // Now if it's a method, it gets the arguments.
+ return s.evalField(dot, ident[n-1], node, args, final, receiver)
+}
+
+func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
+ s.at(node)
+ name := node.Ident
+ function, ok := findFunction(name, s.tmpl)
+ if !ok {
+ s.errorf("%q is not a defined function", name)
+ }
+ return s.evalCall(dot, function, cmd, name, args, final)
+}
+
+// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
+// The 'final' argument represents the return value from the preceding
+// value of the pipeline, if any.
+func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
+ if !receiver.IsValid() {
+ return zero
+ }
+ typ := receiver.Type()
+ receiver, _ = indirect(receiver)
+ // Unless it's an interface, need to get to a value of type *T to guarantee
+ // we see all methods of T and *T.
+ ptr := receiver
+ if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
+ ptr = ptr.Addr()
+ }
+ if method := ptr.MethodByName(fieldName); method.IsValid() {
+ return s.evalCall(dot, method, node, fieldName, args, final)
+ }
+ hasArgs := len(args) > 1 || final.IsValid()
+ // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
+ receiver, isNil := indirect(receiver)
+ if isNil {
+ s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
+ }
+ switch receiver.Kind() {
+ case reflect.Struct:
+ tField, ok := receiver.Type().FieldByName(fieldName)
+ if ok {
+ field := receiver.FieldByIndex(tField.Index)
+ if tField.PkgPath != "" { // field is unexported
+ s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
+ }
+ // If it's a function, we must call it.
+ if hasArgs {
+ s.errorf("%s has arguments but cannot be invoked as function", fieldName)
+ }
+ return field
+ }
+ s.errorf("%s is not a field of struct type %s", fieldName, typ)
+ case reflect.Map:
+ // If it's a map, attempt to use the field name as a key.
+ nameVal := reflect.ValueOf(fieldName)
+ if nameVal.Type().AssignableTo(receiver.Type().Key()) {
+ if hasArgs {
+ s.errorf("%s is not a method but has arguments", fieldName)
+ }
+ return receiver.MapIndex(nameVal)
+ }
+ }
+ s.errorf("can't evaluate field %s in type %s", fieldName, typ)
+ panic("not reached")
+}
+
+var (
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+ fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+)
+
+// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
+// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
+// as the function itself.
+func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
+ if args != nil {
+ args = args[1:] // Zeroth arg is function name/node; not passed to function.
+ }
+ typ := fun.Type()
+ numIn := len(args)
+ if final.IsValid() {
+ numIn++
+ }
+ numFixed := len(args)
+ if typ.IsVariadic() {
+ numFixed = typ.NumIn() - 1 // last arg is the variadic one.
+ if numIn < numFixed {
+ s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
+ }
+ } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
+ s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
+ }
+ if !goodFunc(typ) {
+ // TODO: This could still be a confusing error; maybe goodFunc should provide info.
+ s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
+ }
+ // Build the arg list.
+ argv := make([]reflect.Value, numIn)
+ // Args must be evaluated. Fixed args first.
+ i := 0
+ for ; i < numFixed; i++ {
+ argv[i] = s.evalArg(dot, typ.In(i), args[i])
+ }
+ // Now the ... args.
+ if typ.IsVariadic() {
+ argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
+ for ; i < len(args); i++ {
+ argv[i] = s.evalArg(dot, argType, args[i])
+ }
+ }
+ // Add final value if necessary.
+ if final.IsValid() {
+ t := typ.In(typ.NumIn() - 1)
+ if typ.IsVariadic() {
+ t = t.Elem()
+ }
+ argv[i] = s.validateType(final, t)
+ }
+ result := fun.Call(argv)
+ // If we have an error that is not nil, stop execution and return that error to the caller.
+ if len(result) == 2 && !result[1].IsNil() {
+ s.at(node)
+ s.errorf("error calling %s: %s", name, result[1].Interface().(error))
+ }
+ return result[0]
+}
+
+// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
+func canBeNil(typ reflect.Type) bool {
+ switch typ.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return true
+ }
+ return false
+}
+
+// validateType guarantees that the value is valid and assignable to the type.
+func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
+ if !value.IsValid() {
+ if typ == nil || canBeNil(typ) {
+ // An untyped nil interface{}. Accept as a proper nil value.
+ return reflect.Zero(typ)
+ }
+ s.errorf("invalid value; expected %s", typ)
+ }
+ if typ != nil && !value.Type().AssignableTo(typ) {
+ if value.Kind() == reflect.Interface && !value.IsNil() {
+ value = value.Elem()
+ if value.Type().AssignableTo(typ) {
+ return value
+ }
+ // fallthrough
+ }
+ // Does one dereference or indirection work? We could do more, as we
+ // do with method receivers, but that gets messy and method receivers
+ // are much more constrained, so it makes more sense there than here.
+ // Besides, one is almost always all you need.
+ switch {
+ case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
+ value = value.Elem()
+ if !value.IsValid() {
+ s.errorf("dereference of nil pointer of type %s", typ)
+ }
+ case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
+ value = value.Addr()
+ default:
+ s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
+ }
+ }
+ return value
+}
+
+func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
+ s.at(n)
+ switch arg := n.(type) {
+ case *parse.DotNode:
+ return s.validateType(dot, typ)
+ case *parse.NilNode:
+ if canBeNil(typ) {
+ return reflect.Zero(typ)
+ }
+ s.errorf("cannot assign nil to %s", typ)
+ case *parse.FieldNode:
+ return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
+ case *parse.VariableNode:
+ return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
+ case *parse.PipeNode:
+ return s.validateType(s.evalPipeline(dot, arg), typ)
+ case *parse.IdentifierNode:
+ return s.evalFunction(dot, arg, arg, nil, zero)
+ }
+ switch typ.Kind() {
+ case reflect.Bool:
+ return s.evalBool(typ, n)
+ case reflect.Complex64, reflect.Complex128:
+ return s.evalComplex(typ, n)
+ case reflect.Float32, reflect.Float64:
+ return s.evalFloat(typ, n)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return s.evalInteger(typ, n)
+ case reflect.Interface:
+ if typ.NumMethod() == 0 {
+ return s.evalEmptyInterface(dot, n)
+ }
+ case reflect.String:
+ return s.evalString(typ, n)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return s.evalUnsignedInteger(typ, n)
+ }
+ s.errorf("can't handle %s for arg of type %s", n, typ)
+ panic("not reached")
+}
+
+func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
+ s.at(n)
+ if n, ok := n.(*parse.BoolNode); ok {
+ value := reflect.New(typ).Elem()
+ value.SetBool(n.True)
+ return value
+ }
+ s.errorf("expected bool; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
+ s.at(n)
+ if n, ok := n.(*parse.StringNode); ok {
+ value := reflect.New(typ).Elem()
+ value.SetString(n.Text)
+ return value
+ }
+ s.errorf("expected string; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
+ s.at(n)
+ if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
+ value := reflect.New(typ).Elem()
+ value.SetInt(n.Int64)
+ return value
+ }
+ s.errorf("expected integer; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
+ s.at(n)
+ if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
+ value := reflect.New(typ).Elem()
+ value.SetUint(n.Uint64)
+ return value
+ }
+ s.errorf("expected unsigned integer; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
+ s.at(n)
+ if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
+ value := reflect.New(typ).Elem()
+ value.SetFloat(n.Float64)
+ return value
+ }
+ s.errorf("expected float; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
+ if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
+ value := reflect.New(typ).Elem()
+ value.SetComplex(n.Complex128)
+ return value
+ }
+ s.errorf("expected complex; found %s", n)
+ panic("not reached")
+}
+
+func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
+ s.at(n)
+ switch n := n.(type) {
+ case *parse.BoolNode:
+ return reflect.ValueOf(n.True)
+ case *parse.DotNode:
+ return dot
+ case *parse.FieldNode:
+ return s.evalFieldNode(dot, n, nil, zero)
+ case *parse.IdentifierNode:
+ return s.evalFunction(dot, n, n, nil, zero)
+ case *parse.NilNode:
+ // NilNode is handled in evalArg, the only place that calls here.
+ s.errorf("evalEmptyInterface: nil (can't happen)")
+ case *parse.NumberNode:
+ return s.idealConstant(n)
+ case *parse.StringNode:
+ return reflect.ValueOf(n.Text)
+ case *parse.VariableNode:
+ return s.evalVariableNode(dot, n, nil, zero)
+ case *parse.PipeNode:
+ return s.evalPipeline(dot, n)
+ }
+ s.errorf("can't handle assignment of %s to empty interface argument", n)
+ panic("not reached")
+}
+
+// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
+// We indirect through pointers and empty interfaces (only) because
+// non-empty interfaces have methods we might need.
+func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
+ for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
+ if v.IsNil() {
+ return v, true
+ }
+ if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
+ break
+ }
+ }
+ return v, false
+}
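
The "(only)" in the comment above is the load-bearing part: unwrapping a non-empty interface would lose the method set that later method lookups rely on. A minimal standalone sketch of the same rule, written against the public reflect API (the helper name unwrap and the sample values are invented for illustration):

package main

import (
	"bytes"
	"fmt"
	"reflect"
)

// unwrap mirrors the rule above: follow pointers and empty interfaces,
// stopping at nil and at interfaces that carry methods.
func unwrap(v reflect.Value) (reflect.Value, bool) {
	for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
		if v.IsNil() {
			return v, true
		}
		if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
			break
		}
	}
	return v, false
}

func main() {
	n := 42
	var empty interface{} = &n
	v, _ := unwrap(reflect.ValueOf(&empty).Elem())
	fmt.Println(v.Kind()) // int: the pointer and the empty interface are both unwrapped

	var s fmt.Stringer = bytes.NewBufferString("hi")
	v, _ = unwrap(reflect.ValueOf(&s).Elem())
	fmt.Println(v.Kind()) // interface: a non-empty interface is left alone
}
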
+
+// printValue writes the textual representation of the value to the output of
+// the template.
+func (s *state) printValue(n parse.Node, v reflect.Value) {
+ s.at(n)
+ iface, ok := printableValue(v)
+ if !ok {
+ s.errorf("can't print %s of type %s", n, v.Type())
+ }
+ fmt.Fprint(s.wr, iface)
+}
+
+// printableValue returns the, possibly indirected, interface value inside v that
+// is best for a call to a formatted printer.
+func printableValue(v reflect.Value) (interface{}, bool) {
+ if v.Kind() == reflect.Ptr {
+ v, _ = indirect(v) // fmt.Fprint handles nil.
+ }
+ if !v.IsValid() {
+ return "<no value>", true
+ }
+
+ if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
+ if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
+ v = v.Addr()
+ } else {
+ switch v.Kind() {
+ case reflect.Chan, reflect.Func:
+ return nil, false
+ }
+ }
+ }
+ return v.Interface(), true
+}
+
+// Types to help sort the keys in a map for reproducible output.
+
+type rvs []reflect.Value
+
+func (x rvs) Len() int { return len(x) }
+func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+type rvInts struct{ rvs }
+
+func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }
+
+type rvUints struct{ rvs }
+
+func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }
+
+type rvFloats struct{ rvs }
+
+func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }
+
+type rvStrings struct{ rvs }
+
+func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
+
+// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
+func sortKeys(v []reflect.Value) []reflect.Value {
+ if len(v) <= 1 {
+ return v
+ }
+ switch v[0].Kind() {
+ case reflect.Float32, reflect.Float64:
+ sort.Sort(rvFloats{v})
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ sort.Sort(rvInts{v})
+ case reflect.String:
+ sort.Sort(rvStrings{v})
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ sort.Sort(rvUints{v})
+ }
+ return v
+}
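
sortKeys is what makes {{range}} over a map deterministic for basic key kinds: keys are visited in sorted order instead of Go's randomized map iteration order. A short sketch of the observable effect using only the public API (the data and the expected output line are illustrative, not taken from this file):

package main

import (
	"os"
	"text/template"
)

func main() {
	t := template.Must(template.New("m").Parse(
		`{{range $k, $v := .}}{{$k}}={{$v}} {{end}}`))
	data := map[string]int{"two": 2, "one": 1, "three": 3}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// Expected output: one=1 three=3 two=2
}
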
diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
new file mode 100644
index 000000000..663aaf3af
--- /dev/null
+++ b/src/text/template/exec_test.go
@@ -0,0 +1,994 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+var debug = flag.Bool("debug", false, "show the errors produced by the tests")
+
+// T has lots of interesting pieces to use to test execution.
+type T struct {
+ // Basics
+ True bool
+ I int
+ U16 uint16
+ X string
+ FloatZero float64
+ ComplexZero complex128
+ // Nested structs.
+ U *U
+ // Struct with String method.
+ V0 V
+ V1, V2 *V
+ // Struct with Error method.
+ W0 W
+ W1, W2 *W
+ // Slices
+ SI []int
+ SIEmpty []int
+ SB []bool
+ // Maps
+ MSI map[string]int
+ MSIone map[string]int // one element, for deterministic output
+ MSIEmpty map[string]int
+ MXI map[interface{}]int
+ MII map[int]int
+ SMSI []map[string]int
+ // Empty interfaces; used to see if we can dig inside one.
+ Empty0 interface{} // nil
+ Empty1 interface{}
+ Empty2 interface{}
+ Empty3 interface{}
+ Empty4 interface{}
+ // Non-empty interface.
+ NonEmptyInterface I
+ // Stringer.
+ Str fmt.Stringer
+ Err error
+ // Pointers
+ PI *int
+ PS *string
+ PSI *[]int
+ NIL *int
+ // Function (not method)
+ BinaryFunc func(string, string) string
+ VariadicFunc func(...string) string
+ VariadicFuncInt func(int, ...string) string
+ NilOKFunc func(*int) bool
+ ErrFunc func() (string, error)
+ // Template to test evaluation of templates.
+ Tmpl *Template
+ // Unexported field; cannot be accessed by template.
+ unexported int
+}
+
+type U struct {
+ V string
+}
+
+type V struct {
+ j int
+}
+
+func (v *V) String() string {
+ if v == nil {
+ return "nilV"
+ }
+ return fmt.Sprintf("<%d>", v.j)
+}
+
+type W struct {
+ k int
+}
+
+func (w *W) Error() string {
+ if w == nil {
+ return "nilW"
+ }
+ return fmt.Sprintf("[%d]", w.k)
+}
+
+var tVal = &T{
+ True: true,
+ I: 17,
+ U16: 16,
+ X: "x",
+ U: &U{"v"},
+ V0: V{6666},
+ V1: &V{7777}, // leave V2 as nil
+ W0: W{888},
+ W1: &W{999}, // leave W2 as nil
+ SI: []int{3, 4, 5},
+ SB: []bool{true, false},
+ MSI: map[string]int{"one": 1, "two": 2, "three": 3},
+ MSIone: map[string]int{"one": 1},
+ MXI: map[interface{}]int{"one": 1},
+ MII: map[int]int{1: 1},
+ SMSI: []map[string]int{
+ {"one": 1, "two": 2},
+ {"eleven": 11, "twelve": 12},
+ },
+ Empty1: 3,
+ Empty2: "empty2",
+ Empty3: []int{7, 8},
+ Empty4: &U{"UinEmpty"},
+ NonEmptyInterface: new(T),
+ Str: bytes.NewBuffer([]byte("foozle")),
+ Err: errors.New("erroozle"),
+ PI: newInt(23),
+ PS: newString("a string"),
+ PSI: newIntSlice(21, 22, 23),
+ BinaryFunc: func(a, b string) string { return fmt.Sprintf("[%s=%s]", a, b) },
+ VariadicFunc: func(s ...string) string { return fmt.Sprint("<", strings.Join(s, "+"), ">") },
+ VariadicFuncInt: func(a int, s ...string) string { return fmt.Sprint(a, "=<", strings.Join(s, "+"), ">") },
+ NilOKFunc: func(s *int) bool { return s == nil },
+ ErrFunc: func() (string, error) { return "bla", nil },
+ Tmpl: Must(New("x").Parse("test template")), // "x" is the value of .X
+}
+
+// A non-empty interface.
+type I interface {
+ Method0() string
+}
+
+var iVal I = tVal
+
+// Helpers for creation.
+func newInt(n int) *int {
+ return &n
+}
+
+func newString(s string) *string {
+ return &s
+}
+
+func newIntSlice(n ...int) *[]int {
+ p := new([]int)
+ *p = make([]int, len(n))
+ copy(*p, n)
+ return p
+}
+
+// Simple methods with and without arguments.
+func (t *T) Method0() string {
+ return "M0"
+}
+
+func (t *T) Method1(a int) int {
+ return a
+}
+
+func (t *T) Method2(a uint16, b string) string {
+ return fmt.Sprintf("Method2: %d %s", a, b)
+}
+
+func (t *T) Method3(v interface{}) string {
+ return fmt.Sprintf("Method3: %v", v)
+}
+
+func (t *T) MAdd(a int, b []int) []int {
+ v := make([]int, len(b))
+ for i, x := range b {
+ v[i] = x + a
+ }
+ return v
+}
+
+var myError = errors.New("my error")
+
+// MyError returns a value and an error according to its argument.
+func (t *T) MyError(error bool) (bool, error) {
+ if error {
+ return true, myError
+ }
+ return false, nil
+}
+
+// A few methods to test chaining.
+func (t *T) GetU() *U {
+ return t.U
+}
+
+func (u *U) TrueFalse(b bool) string {
+ if b {
+ return "true"
+ }
+ return ""
+}
+
+func typeOf(arg interface{}) string {
+ return fmt.Sprintf("%T", arg)
+}
+
+type execTest struct {
+ name string
+ input string
+ output string
+ data interface{}
+ ok bool
+}
+
+// bigInt and bigUint are hex strings representing numbers on either side
+// of the max int boundary.
+// We do it this way so the test doesn't depend on ints being 32 bits.
+var (
+ bigInt = fmt.Sprintf("0x%x", int(1<<uint(reflect.TypeOf(0).Bits()-1)-1))
+ bigUint = fmt.Sprintf("0x%x", uint(1<<uint(reflect.TypeOf(0).Bits()-1)))
+)
+
+var execTests = []execTest{
+ // Trivial cases.
+ {"empty", "", "", nil, true},
+ {"text", "some text", "some text", nil, true},
+ {"nil action", "{{nil}}", "", nil, false},
+
+ // Ideal constants.
+ {"ideal int", "{{typeOf 3}}", "int", 0, true},
+ {"ideal float", "{{typeOf 1.0}}", "float64", 0, true},
+ {"ideal exp float", "{{typeOf 1e1}}", "float64", 0, true},
+ {"ideal complex", "{{typeOf 1i}}", "complex128", 0, true},
+ {"ideal int", "{{typeOf " + bigInt + "}}", "int", 0, true},
+ {"ideal too big", "{{typeOf " + bigUint + "}}", "", 0, false},
+ {"ideal nil without type", "{{nil}}", "", 0, false},
+
+ // Fields of structs.
+ {".X", "-{{.X}}-", "-x-", tVal, true},
+ {".U.V", "-{{.U.V}}-", "-v-", tVal, true},
+ {".unexported", "{{.unexported}}", "", tVal, false},
+
+ // Fields on maps.
+ {"map .one", "{{.MSI.one}}", "1", tVal, true},
+ {"map .two", "{{.MSI.two}}", "2", tVal, true},
+ {"map .NO", "{{.MSI.NO}}", "<no value>", tVal, true},
+ {"map .one interface", "{{.MXI.one}}", "1", tVal, true},
+ {"map .WRONG args", "{{.MSI.one 1}}", "", tVal, false},
+ {"map .WRONG type", "{{.MII.one}}", "", tVal, false},
+
+ // Dots of all kinds to test basic evaluation.
+ {"dot int", "<{{.}}>", "<13>", 13, true},
+ {"dot uint", "<{{.}}>", "<14>", uint(14), true},
+ {"dot float", "<{{.}}>", "<15.1>", 15.1, true},
+ {"dot bool", "<{{.}}>", "<true>", true, true},
+ {"dot complex", "<{{.}}>", "<(16.2-17i)>", 16.2 - 17i, true},
+ {"dot string", "<{{.}}>", "<hello>", "hello", true},
+ {"dot slice", "<{{.}}>", "<[-1 -2 -3]>", []int{-1, -2, -3}, true},
+ {"dot map", "<{{.}}>", "<map[two:22]>", map[string]int{"two": 22}, true},
+ {"dot struct", "<{{.}}>", "<{7 seven}>", struct {
+ a int
+ b string
+ }{7, "seven"}, true},
+
+ // Variables.
+ {"$ int", "{{$}}", "123", 123, true},
+ {"$.I", "{{$.I}}", "17", tVal, true},
+ {"$.U.V", "{{$.U.V}}", "v", tVal, true},
+ {"declare in action", "{{$x := $.U.V}}{{$x}}", "v", tVal, true},
+
+ // Type with String method.
+ {"V{6666}.String()", "-{{.V0}}-", "-<6666>-", tVal, true},
+ {"&V{7777}.String()", "-{{.V1}}-", "-<7777>-", tVal, true},
+ {"(*V)(nil).String()", "-{{.V2}}-", "-nilV-", tVal, true},
+
+ // Type with Error method.
+ {"W{888}.Error()", "-{{.W0}}-", "-[888]-", tVal, true},
+ {"&W{999}.Error()", "-{{.W1}}-", "-[999]-", tVal, true},
+ {"(*W)(nil).Error()", "-{{.W2}}-", "-nilW-", tVal, true},
+
+ // Pointers.
+ {"*int", "{{.PI}}", "23", tVal, true},
+ {"*string", "{{.PS}}", "a string", tVal, true},
+ {"*[]int", "{{.PSI}}", "[21 22 23]", tVal, true},
+ {"*[]int[1]", "{{index .PSI 1}}", "22", tVal, true},
+ {"NIL", "{{.NIL}}", "<nil>", tVal, true},
+
+ // Empty interfaces holding values.
+ {"empty nil", "{{.Empty0}}", "<no value>", tVal, true},
+ {"empty with int", "{{.Empty1}}", "3", tVal, true},
+ {"empty with string", "{{.Empty2}}", "empty2", tVal, true},
+ {"empty with slice", "{{.Empty3}}", "[7 8]", tVal, true},
+ {"empty with struct", "{{.Empty4}}", "{UinEmpty}", tVal, true},
+ {"empty with struct, field", "{{.Empty4.V}}", "UinEmpty", tVal, true},
+
+ // Method calls.
+ {".Method0", "-{{.Method0}}-", "-M0-", tVal, true},
+ {".Method1(1234)", "-{{.Method1 1234}}-", "-1234-", tVal, true},
+ {".Method1(.I)", "-{{.Method1 .I}}-", "-17-", tVal, true},
+ {".Method2(3, .X)", "-{{.Method2 3 .X}}-", "-Method2: 3 x-", tVal, true},
+ {".Method2(.U16, `str`)", "-{{.Method2 .U16 `str`}}-", "-Method2: 16 str-", tVal, true},
+ {".Method2(.U16, $x)", "{{if $x := .X}}-{{.Method2 .U16 $x}}{{end}}-", "-Method2: 16 x-", tVal, true},
+ {".Method3(nil constant)", "-{{.Method3 nil}}-", "-Method3: <nil>-", tVal, true},
+ {".Method3(nil value)", "-{{.Method3 .MXI.unset}}-", "-Method3: <nil>-", tVal, true},
+ {"method on var", "{{if $x := .}}-{{$x.Method2 .U16 $x.X}}{{end}}-", "-Method2: 16 x-", tVal, true},
+ {"method on chained var",
+ "{{range .MSIone}}{{if $.U.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+ "true", tVal, true},
+ {"chained method",
+ "{{range .MSIone}}{{if $.GetU.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
+ "true", tVal, true},
+ {"chained method on variable",
+ "{{with $x := .}}{{with .SI}}{{$.GetU.TrueFalse $.True}}{{end}}{{end}}",
+ "true", tVal, true},
+ {".NilOKFunc not nil", "{{call .NilOKFunc .PI}}", "false", tVal, true},
+ {".NilOKFunc nil", "{{call .NilOKFunc nil}}", "true", tVal, true},
+
+ // Function call builtin.
+ {".BinaryFunc", "{{call .BinaryFunc `1` `2`}}", "[1=2]", tVal, true},
+ {".VariadicFunc0", "{{call .VariadicFunc}}", "<>", tVal, true},
+ {".VariadicFunc2", "{{call .VariadicFunc `he` `llo`}}", "<he+llo>", tVal, true},
+ {".VariadicFuncInt", "{{call .VariadicFuncInt 33 `he` `llo`}}", "33=<he+llo>", tVal, true},
+ {"if .BinaryFunc call", "{{ if .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{end}}", "[1=2]", tVal, true},
+ {"if not .BinaryFunc call", "{{ if not .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{else}}No{{end}}", "No", tVal, true},
+ {"Interface Call", `{{stringer .S}}`, "foozle", map[string]interface{}{"S": bytes.NewBufferString("foozle")}, true},
+ {".ErrFunc", "{{call .ErrFunc}}", "bla", tVal, true},
+
+ // Erroneous function calls (check args).
+ {".BinaryFuncTooFew", "{{call .BinaryFunc `1`}}", "", tVal, false},
+ {".BinaryFuncTooMany", "{{call .BinaryFunc `1` `2` `3`}}", "", tVal, false},
+ {".BinaryFuncBad0", "{{call .BinaryFunc 1 3}}", "", tVal, false},
+ {".BinaryFuncBad1", "{{call .BinaryFunc `1` 3}}", "", tVal, false},
+ {".VariadicFuncBad0", "{{call .VariadicFunc 3}}", "", tVal, false},
+ {".VariadicFuncIntBad0", "{{call .VariadicFuncInt}}", "", tVal, false},
+ {".VariadicFuncIntBad`", "{{call .VariadicFuncInt `x`}}", "", tVal, false},
+ {".VariadicFuncNilBad", "{{call .VariadicFunc nil}}", "", tVal, false},
+
+ // Pipelines.
+ {"pipeline", "-{{.Method0 | .Method2 .U16}}-", "-Method2: 16 M0-", tVal, true},
+ {"pipeline func", "-{{call .VariadicFunc `llo` | call .VariadicFunc `he` }}-", "-<he+<llo>>-", tVal, true},
+
+ // Parenthesized expressions
+ {"parens in pipeline", "{{printf `%d %d %d` (1) (2 | add 3) (add 4 (add 5 6))}}", "1 5 15", tVal, true},
+
+ // Parenthesized expressions with field accesses
+ {"parens: $ in paren", "{{($).X}}", "x", tVal, true},
+ {"parens: $.GetU in paren", "{{($.GetU).V}}", "v", tVal, true},
+ {"parens: $ in paren in pipe", "{{($ | echo).X}}", "x", tVal, true},
+ {"parens: spaces and args", `{{(makemap "up" "down" "left" "right").left}}`, "right", tVal, true},
+
+ // If.
+ {"if true", "{{if true}}TRUE{{end}}", "TRUE", tVal, true},
+ {"if false", "{{if false}}TRUE{{else}}FALSE{{end}}", "FALSE", tVal, true},
+ {"if nil", "{{if nil}}TRUE{{end}}", "", tVal, false},
+ {"if 1", "{{if 1}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+ {"if 0", "{{if 0}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"if 1.5", "{{if 1.5}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+ {"if 0.0", "{{if .FloatZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"if 1.5i", "{{if 1.5i}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
+ {"if 0.0i", "{{if .ComplexZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"if emptystring", "{{if ``}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"if string", "{{if `notempty`}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+ {"if emptyslice", "{{if .SIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"if slice", "{{if .SI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+ {"if emptymap", "{{if .MSIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"if map", "{{if .MSI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
+ {"if map unset", "{{if .MXI.none}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"if map not unset", "{{if not .MXI.none}}ZERO{{else}}NON-ZERO{{end}}", "ZERO", tVal, true},
+ {"if $x with $y int", "{{if $x := true}}{{with $y := .I}}{{$x}},{{$y}}{{end}}{{end}}", "true,17", tVal, true},
+ {"if $x with $x int", "{{if $x := true}}{{with $x := .I}}{{$x}},{{end}}{{$x}}{{end}}", "17,true", tVal, true},
+ {"if else if", "{{if false}}FALSE{{else if true}}TRUE{{end}}", "TRUE", tVal, true},
+ {"if else chain", "{{if eq 1 3}}1{{else if eq 2 3}}2{{else if eq 3 3}}3{{end}}", "3", tVal, true},
+
+ // Print etc.
+ {"print", `{{print "hello, print"}}`, "hello, print", tVal, true},
+ {"print 123", `{{print 1 2 3}}`, "1 2 3", tVal, true},
+ {"print nil", `{{print nil}}`, "<nil>", tVal, true},
+ {"println", `{{println 1 2 3}}`, "1 2 3\n", tVal, true},
+ {"printf int", `{{printf "%04x" 127}}`, "007f", tVal, true},
+ {"printf float", `{{printf "%g" 3.5}}`, "3.5", tVal, true},
+ {"printf complex", `{{printf "%g" 1+7i}}`, "(1+7i)", tVal, true},
+ {"printf string", `{{printf "%s" "hello"}}`, "hello", tVal, true},
+ {"printf function", `{{printf "%#q" zeroArgs}}`, "`zeroArgs`", tVal, true},
+ {"printf field", `{{printf "%s" .U.V}}`, "v", tVal, true},
+ {"printf method", `{{printf "%s" .Method0}}`, "M0", tVal, true},
+ {"printf dot", `{{with .I}}{{printf "%d" .}}{{end}}`, "17", tVal, true},
+ {"printf var", `{{with $x := .I}}{{printf "%d" $x}}{{end}}`, "17", tVal, true},
+ {"printf lots", `{{printf "%d %s %g %s" 127 "hello" 7-3i .Method0}}`, "127 hello (7-3i) M0", tVal, true},
+
+ // HTML.
+ {"html", `{{html "<script>alert(\"XSS\");</script>"}}`,
+ "&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
+ {"html pipeline", `{{printf "<script>alert(\"XSS\");</script>" | html}}`,
+ "&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
+ {"html", `{{html .PS}}`, "a string", tVal, true},
+
+ // JavaScript.
+ {"js", `{{js .}}`, `It\'d be nice.`, `It'd be nice.`, true},
+
+ // URL query.
+ {"urlquery", `{{"http://www.example.org/"|urlquery}}`, "http%3A%2F%2Fwww.example.org%2F", nil, true},
+
+ // Booleans
+ {"not", "{{not true}} {{not false}}", "false true", nil, true},
+ {"and", "{{and false 0}} {{and 1 0}} {{and 0 true}} {{and 1 1}}", "false 0 0 1", nil, true},
+ {"or", "{{or 0 0}} {{or 1 0}} {{or 0 true}} {{or 1 1}}", "0 1 true 1", nil, true},
+ {"boolean if", "{{if and true 1 `hi`}}TRUE{{else}}FALSE{{end}}", "TRUE", tVal, true},
+ {"boolean if not", "{{if and true 1 `hi` | not}}TRUE{{else}}FALSE{{end}}", "FALSE", nil, true},
+
+ // Indexing.
+ {"slice[0]", "{{index .SI 0}}", "3", tVal, true},
+ {"slice[1]", "{{index .SI 1}}", "4", tVal, true},
+ {"slice[HUGE]", "{{index .SI 10}}", "", tVal, false},
+ {"slice[WRONG]", "{{index .SI `hello`}}", "", tVal, false},
+ {"map[one]", "{{index .MSI `one`}}", "1", tVal, true},
+ {"map[two]", "{{index .MSI `two`}}", "2", tVal, true},
+ {"map[NO]", "{{index .MSI `XXX`}}", "0", tVal, true},
+ {"map[nil]", "{{index .MSI nil}}", "0", tVal, true},
+ {"map[WRONG]", "{{index .MSI 10}}", "", tVal, false},
+ {"double index", "{{index .SMSI 1 `eleven`}}", "11", tVal, true},
+
+ // Len.
+ {"slice", "{{len .SI}}", "3", tVal, true},
+ {"map", "{{len .MSI }}", "3", tVal, true},
+ {"len of int", "{{len 3}}", "", tVal, false},
+ {"len of nothing", "{{len .Empty0}}", "", tVal, false},
+
+ // With.
+ {"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true},
+ {"with false", "{{with false}}{{.}}{{else}}FALSE{{end}}", "FALSE", tVal, true},
+ {"with 1", "{{with 1}}{{.}}{{else}}ZERO{{end}}", "1", tVal, true},
+ {"with 0", "{{with 0}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"with 1.5", "{{with 1.5}}{{.}}{{else}}ZERO{{end}}", "1.5", tVal, true},
+ {"with 0.0", "{{with .FloatZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"with 1.5i", "{{with 1.5i}}{{.}}{{else}}ZERO{{end}}", "(0+1.5i)", tVal, true},
+ {"with 0.0i", "{{with .ComplexZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true},
+ {"with emptystring", "{{with ``}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"with string", "{{with `notempty`}}{{.}}{{else}}EMPTY{{end}}", "notempty", tVal, true},
+ {"with emptyslice", "{{with .SIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"with slice", "{{with .SI}}{{.}}{{else}}EMPTY{{end}}", "[3 4 5]", tVal, true},
+ {"with emptymap", "{{with .MSIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"with map", "{{with .MSIone}}{{.}}{{else}}EMPTY{{end}}", "map[one:1]", tVal, true},
+ {"with empty interface, struct field", "{{with .Empty4}}{{.V}}{{end}}", "UinEmpty", tVal, true},
+ {"with $x int", "{{with $x := .I}}{{$x}}{{end}}", "17", tVal, true},
+ {"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true},
+ {"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true},
+
+ // Range.
+ {"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true},
+ {"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+ {"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
+ {"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
+ {"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
+ {"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true},
+ {"range empty map no else", "{{range .MSIEmpty}}-{{.}}-{{end}}", "", tVal, true},
+ {"range map else", "{{range .MSI}}-{{.}}-{{else}}EMPTY{{end}}", "-1--3--2-", tVal, true},
+ {"range empty map else", "{{range .MSIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
+ {"range empty interface", "{{range .Empty3}}-{{.}}-{{else}}EMPTY{{end}}", "-7--8-", tVal, true},
+ {"range empty nil", "{{range .Empty0}}-{{.}}-{{end}}", "", tVal, true},
+ {"range $x SI", "{{range $x := .SI}}<{{$x}}>{{end}}", "<3><4><5>", tVal, true},
+ {"range $x $y SI", "{{range $x, $y := .SI}}<{{$x}}={{$y}}>{{end}}", "<0=3><1=4><2=5>", tVal, true},
+ {"range $x MSIone", "{{range $x := .MSIone}}<{{$x}}>{{end}}", "<1>", tVal, true},
+ {"range $x $y MSIone", "{{range $x, $y := .MSIone}}<{{$x}}={{$y}}>{{end}}", "<one=1>", tVal, true},
+ {"range $x PSI", "{{range $x := .PSI}}<{{$x}}>{{end}}", "<21><22><23>", tVal, true},
+ {"declare in range", "{{range $x := .PSI}}<{{$foo:=$x}}{{$x}}>{{end}}", "<21><22><23>", tVal, true},
+ {"range count", `{{range $i, $x := count 5}}[{{$i}}]{{$x}}{{end}}`, "[0]a[1]b[2]c[3]d[4]e", tVal, true},
+ {"range nil count", `{{range $i, $x := count 0}}{{else}}empty{{end}}`, "empty", tVal, true},
+
+ // Cute examples.
+ {"or as if true", `{{or .SI "slice is empty"}}`, "[3 4 5]", tVal, true},
+ {"or as if false", `{{or .SIEmpty "slice is empty"}}`, "slice is empty", tVal, true},
+
+ // Error handling.
+ {"error method, error", "{{.MyError true}}", "", tVal, false},
+ {"error method, no error", "{{.MyError false}}", "false", tVal, true},
+
+ // Fixed bugs.
+ // Must separate dot and receiver; otherwise args are evaluated with dot set to variable.
+ {"bug0", "{{range .MSIone}}{{if $.Method1 .}}X{{end}}{{end}}", "X", tVal, true},
+ // Do not loop endlessly in indirect for non-empty interfaces.
+ // The bug appears with *interface only; looped forever.
+ {"bug1", "{{.Method0}}", "M0", &iVal, true},
+ // Was taking address of interface field, so method set was empty.
+ {"bug2", "{{$.NonEmptyInterface.Method0}}", "M0", tVal, true},
+ // Struct values were not legal in with - mere oversight.
+ {"bug3", "{{with $}}{{.Method0}}{{end}}", "M0", tVal, true},
+ // Nil interface values in if.
+ {"bug4", "{{if .Empty0}}non-nil{{else}}nil{{end}}", "nil", tVal, true},
+ // Stringer.
+ {"bug5", "{{.Str}}", "foozle", tVal, true},
+ {"bug5a", "{{.Err}}", "erroozle", tVal, true},
+ // Args need to be indirected and dereferenced sometimes.
+ {"bug6a", "{{vfunc .V0 .V1}}", "vfunc", tVal, true},
+ {"bug6b", "{{vfunc .V0 .V0}}", "vfunc", tVal, true},
+ {"bug6c", "{{vfunc .V1 .V0}}", "vfunc", tVal, true},
+ {"bug6d", "{{vfunc .V1 .V1}}", "vfunc", tVal, true},
+ // Legal parse but illegal execution: non-function should have no arguments.
+ {"bug7a", "{{3 2}}", "", tVal, false},
+ {"bug7b", "{{$x := 1}}{{$x 2}}", "", tVal, false},
+ {"bug7c", "{{$x := 1}}{{3 | $x}}", "", tVal, false},
+ // Pipelined arg was not being type-checked.
+ {"bug8a", "{{3|oneArg}}", "", tVal, false},
+ {"bug8b", "{{4|dddArg 3}}", "", tVal, false},
+ // A bug was introduced that broke map lookups for lower-case names.
+ {"bug9", "{{.cause}}", "neglect", map[string]string{"cause": "neglect"}, true},
+ // Field chain starting with function did not work.
+ {"bug10", "{{mapOfThree.three}}-{{(mapOfThree).three}}", "3-3", 0, true},
+ // Dereferencing nil pointer while evaluating function arguments should not panic. Issue 7333.
+ {"bug11", "{{valueString .PS}}", "", T{}, false},
+ // 0xef gave constant type float64. Issue 8622.
+ {"bug12xe", "{{printf `%T` 0xef}}", "int", T{}, true},
+ {"bug12xE", "{{printf `%T` 0xEE}}", "int", T{}, true},
+ {"bug12Xe", "{{printf `%T` 0Xef}}", "int", T{}, true},
+ {"bug12XE", "{{printf `%T` 0XEE}}", "int", T{}, true},
+}
+
+func zeroArgs() string {
+ return "zeroArgs"
+}
+
+func oneArg(a string) string {
+ return "oneArg=" + a
+}
+
+func dddArg(a int, b ...string) string {
+ return fmt.Sprintln(a, b)
+}
+
+// count returns a channel that will deliver n sequential 1-letter strings starting at "a"
+func count(n int) chan string {
+ if n == 0 {
+ return nil
+ }
+ c := make(chan string)
+ go func() {
+ for i := 0; i < n; i++ {
+ c <- "abcdefghijklmnop"[i : i+1]
+ }
+ close(c)
+ }()
+ return c
+}
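
count exists so the tests can exercise {{range}} over a channel, which receives values until the channel is closed. The same idea with the public API might look like this (the channel contents are chosen only for illustration):

package main

import (
	"os"
	"text/template"
)

func main() {
	letters := make(chan string)
	go func() {
		for _, s := range []string{"a", "b", "c"} {
			letters <- s
		}
		close(letters)
	}()
	t := template.Must(template.New("ch").Parse(`{{range .}}[{{.}}]{{end}}`))
	if err := t.Execute(os.Stdout, letters); err != nil {
		panic(err)
	}
	// Expected output: [a][b][c]
}
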
+
+// vfunc takes a V and a *V
+func vfunc(V, *V) string {
+ return "vfunc"
+}
+
+// valueString takes a string, not a pointer.
+func valueString(v string) string {
+ return "value is ignored"
+}
+
+func add(args ...int) int {
+ sum := 0
+ for _, x := range args {
+ sum += x
+ }
+ return sum
+}
+
+func echo(arg interface{}) interface{} {
+ return arg
+}
+
+func makemap(arg ...string) map[string]string {
+ if len(arg)%2 != 0 {
+ panic("bad makemap")
+ }
+ m := make(map[string]string)
+ for i := 0; i < len(arg); i += 2 {
+ m[arg[i]] = arg[i+1]
+ }
+ return m
+}
+
+func stringer(s fmt.Stringer) string {
+ return s.String()
+}
+
+func mapOfThree() interface{} {
+ return map[string]int{"three": 3}
+}
+
+func testExecute(execTests []execTest, template *Template, t *testing.T) {
+ b := new(bytes.Buffer)
+ funcs := FuncMap{
+ "add": add,
+ "count": count,
+ "dddArg": dddArg,
+ "echo": echo,
+ "makemap": makemap,
+ "mapOfThree": mapOfThree,
+ "oneArg": oneArg,
+ "stringer": stringer,
+ "typeOf": typeOf,
+ "valueString": valueString,
+ "vfunc": vfunc,
+ "zeroArgs": zeroArgs,
+ }
+ for _, test := range execTests {
+ var tmpl *Template
+ var err error
+ if template == nil {
+ tmpl, err = New(test.name).Funcs(funcs).Parse(test.input)
+ } else {
+ tmpl, err = template.New(test.name).Funcs(funcs).Parse(test.input)
+ }
+ if err != nil {
+ t.Errorf("%s: parse error: %s", test.name, err)
+ continue
+ }
+ b.Reset()
+ err = tmpl.Execute(b, test.data)
+ switch {
+ case !test.ok && err == nil:
+ t.Errorf("%s: expected error; got none", test.name)
+ continue
+ case test.ok && err != nil:
+ t.Errorf("%s: unexpected execute error: %s", test.name, err)
+ continue
+ case !test.ok && err != nil:
+ // expected error, got one
+ if *debug {
+ fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+ }
+ }
+ result := b.String()
+ if result != test.output {
+ t.Errorf("%s: expected\n\t%q\ngot\n\t%q", test.name, test.output, result)
+ }
+ }
+}
+
+func TestExecute(t *testing.T) {
+ testExecute(execTests, nil, t)
+}
+
+var delimPairs = []string{
+ "", "", // default
+ "{{", "}}", // same as default
+ "<<", ">>", // distinct
+ "|", "|", // same
+ "(日)", "(本)", // peculiar
+}
+
+func TestDelims(t *testing.T) {
+ const hello = "Hello, world"
+ var value = struct{ Str string }{hello}
+ for i := 0; i < len(delimPairs); i += 2 {
+ text := ".Str"
+ left := delimPairs[i+0]
+ trueLeft := left
+ right := delimPairs[i+1]
+ trueRight := right
+ if left == "" { // default case
+ trueLeft = "{{"
+ }
+ if right == "" { // default case
+ trueRight = "}}"
+ }
+ text = trueLeft + text + trueRight
+ // Now add a comment
+ text += trueLeft + "/*comment*/" + trueRight
+ // Now add an action containing a string.
+ text += trueLeft + `"` + trueLeft + `"` + trueRight
+ // At this point text looks like `{{.Str}}{{/*comment*/}}{{"{{"}}`.
+ tmpl, err := New("delims").Delims(left, right).Parse(text)
+ if err != nil {
+ t.Fatalf("delim %q text %q parse err %s", left, text, err)
+ }
+ var b = new(bytes.Buffer)
+ err = tmpl.Execute(b, value)
+ if err != nil {
+ t.Fatalf("delim %q exec err %s", left, err)
+ }
+ if b.String() != hello+trueLeft {
+ t.Errorf("expected %q got %q", hello+trueLeft, b.String())
+ }
+ }
+}
+
+// Check that an error from a method flows back to the top.
+func TestExecuteError(t *testing.T) {
+ b := new(bytes.Buffer)
+ tmpl := New("error")
+ _, err := tmpl.Parse("{{.MyError true}}")
+ if err != nil {
+ t.Fatalf("parse error: %s", err)
+ }
+ err = tmpl.Execute(b, tVal)
+ if err == nil {
+ t.Errorf("expected error; got none")
+ } else if !strings.Contains(err.Error(), myError.Error()) {
+ if *debug {
+ fmt.Printf("test execute error: %s\n", err)
+ }
+ t.Errorf("expected myError; got %s", err)
+ }
+}
+
+const execErrorText = `line 1
+line 2
+line 3
+{{template "one" .}}
+{{define "one"}}{{template "two" .}}{{end}}
+{{define "two"}}{{template "three" .}}{{end}}
+{{define "three"}}{{index "hi" $}}{{end}}`
+
+// Check that an error from a nested template contains all the relevant information.
+func TestExecError(t *testing.T) {
+ tmpl, err := New("top").Parse(execErrorText)
+ if err != nil {
+ t.Fatal("parse error:", err)
+ }
+ var b bytes.Buffer
+ err = tmpl.Execute(&b, 5) // 5 is out of range indexing "hi"
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ const want = `template: top:7:20: executing "three" at <index "hi" $>: error calling index: index out of range: 5`
+ got := err.Error()
+ if got != want {
+ t.Errorf("expected\n%q\ngot\n%q", want, got)
+ }
+}
+
+func TestJSEscaping(t *testing.T) {
+ testCases := []struct {
+ in, exp string
+ }{
+ {`a`, `a`},
+ {`'foo`, `\'foo`},
+ {`Go "jump" \`, `Go \"jump\" \\`},
+ {`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`},
+ {"unprintable \uFDFF", `unprintable \uFDFF`},
+ {`<html>`, `\x3Chtml\x3E`},
+ }
+ for _, tc := range testCases {
+ s := JSEscapeString(tc.in)
+ if s != tc.exp {
+ t.Errorf("JS escaping [%s] got [%s] want [%s]", tc.in, s, tc.exp)
+ }
+ }
+}
+
+// A nice example: walk a binary tree.
+
+type Tree struct {
+ Val int
+ Left, Right *Tree
+}
+
+// Use different delimiters to test Delims.
+const treeTemplate = `
+ (define "tree")
+ [
+ (.Val)
+ (with .Left)
+ (template "tree" .)
+ (end)
+ (with .Right)
+ (template "tree" .)
+ (end)
+ ]
+ (end)
+`
+
+func TestTree(t *testing.T) {
+ var tree = &Tree{
+ 1,
+ &Tree{
+ 2, &Tree{
+ 3,
+ &Tree{
+ 4, nil, nil,
+ },
+ nil,
+ },
+ &Tree{
+ 5,
+ &Tree{
+ 6, nil, nil,
+ },
+ nil,
+ },
+ },
+ &Tree{
+ 7,
+ &Tree{
+ 8,
+ &Tree{
+ 9, nil, nil,
+ },
+ nil,
+ },
+ &Tree{
+ 10,
+ &Tree{
+ 11, nil, nil,
+ },
+ nil,
+ },
+ },
+ }
+ tmpl, err := New("root").Delims("(", ")").Parse(treeTemplate)
+ if err != nil {
+ t.Fatal("parse error:", err)
+ }
+ var b bytes.Buffer
+ stripSpace := func(r rune) rune {
+ if r == '\t' || r == '\n' {
+ return -1
+ }
+ return r
+ }
+ const expect = "[1[2[3[4]][5[6]]][7[8[9]][10[11]]]]"
+ // First by looking up the template.
+ err = tmpl.Lookup("tree").Execute(&b, tree)
+ if err != nil {
+ t.Fatal("exec error:", err)
+ }
+ result := strings.Map(stripSpace, b.String())
+ if result != expect {
+ t.Errorf("expected %q got %q", expect, result)
+ }
+ // Then direct to execution.
+ b.Reset()
+ err = tmpl.ExecuteTemplate(&b, "tree", tree)
+ if err != nil {
+ t.Fatal("exec error:", err)
+ }
+ result = strings.Map(stripSpace, b.String())
+ if result != expect {
+ t.Errorf("expected %q got %q", expect, result)
+ }
+}
+
+func TestExecuteOnNewTemplate(t *testing.T) {
+ // This is issue 3872.
+ _ = New("Name").Templates()
+}
+
+const testTemplates = `{{define "one"}}one{{end}}{{define "two"}}two{{end}}`
+
+func TestMessageForExecuteEmpty(t *testing.T) {
+ // Test a truly empty template.
+ tmpl := New("empty")
+ var b bytes.Buffer
+ err := tmpl.Execute(&b, 0)
+ if err == nil {
+ t.Fatal("expected initial error")
+ }
+ got := err.Error()
+ want := `template: empty: "empty" is an incomplete or empty template`
+ if got != want {
+ t.Errorf("expected error %s got %s", want, got)
+ }
+ // Add a non-empty template to check that the error is helpful.
+ tests, err := New("").Parse(testTemplates)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tmpl.AddParseTree("secondary", tests.Tree)
+ err = tmpl.Execute(&b, 0)
+ if err == nil {
+ t.Fatal("expected second error")
+ }
+ got = err.Error()
+ want = `template: empty: "empty" is an incomplete or empty template; defined templates are: "secondary"`
+ if got != want {
+ t.Errorf("expected error %s got %s", want, got)
+ }
+ // Make sure we can execute the secondary.
+ err = tmpl.ExecuteTemplate(&b, "secondary", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+type cmpTest struct {
+ expr string
+ truth string
+ ok bool
+}
+
+var cmpTests = []cmpTest{
+ {"eq true true", "true", true},
+ {"eq true false", "false", true},
+ {"eq 1+2i 1+2i", "true", true},
+ {"eq 1+2i 1+3i", "false", true},
+ {"eq 1.5 1.5", "true", true},
+ {"eq 1.5 2.5", "false", true},
+ {"eq 1 1", "true", true},
+ {"eq 1 2", "false", true},
+ {"eq `xy` `xy`", "true", true},
+ {"eq `xy` `xyz`", "false", true},
+ {"eq .Xuint .Xuint", "true", true},
+ {"eq .Xuint .Yuint", "false", true},
+ {"eq 3 4 5 6 3", "true", true},
+ {"eq 3 4 5 6 7", "false", true},
+ {"ne true true", "false", true},
+ {"ne true false", "true", true},
+ {"ne 1+2i 1+2i", "false", true},
+ {"ne 1+2i 1+3i", "true", true},
+ {"ne 1.5 1.5", "false", true},
+ {"ne 1.5 2.5", "true", true},
+ {"ne 1 1", "false", true},
+ {"ne 1 2", "true", true},
+ {"ne `xy` `xy`", "false", true},
+ {"ne `xy` `xyz`", "true", true},
+ {"ne .Xuint .Xuint", "false", true},
+ {"ne .Xuint .Yuint", "true", true},
+ {"lt 1.5 1.5", "false", true},
+ {"lt 1.5 2.5", "true", true},
+ {"lt 1 1", "false", true},
+ {"lt 1 2", "true", true},
+ {"lt `xy` `xy`", "false", true},
+ {"lt `xy` `xyz`", "true", true},
+ {"lt .Xuint .Xuint", "false", true},
+ {"lt .Xuint .Yuint", "true", true},
+ {"le 1.5 1.5", "true", true},
+ {"le 1.5 2.5", "true", true},
+ {"le 2.5 1.5", "false", true},
+ {"le 1 1", "true", true},
+ {"le 1 2", "true", true},
+ {"le 2 1", "false", true},
+ {"le `xy` `xy`", "true", true},
+ {"le `xy` `xyz`", "true", true},
+ {"le `xyz` `xy`", "false", true},
+ {"le .Xuint .Xuint", "true", true},
+ {"le .Xuint .Yuint", "true", true},
+ {"le .Yuint .Xuint", "false", true},
+ {"gt 1.5 1.5", "false", true},
+ {"gt 1.5 2.5", "false", true},
+ {"gt 1 1", "false", true},
+ {"gt 2 1", "true", true},
+ {"gt 1 2", "false", true},
+ {"gt `xy` `xy`", "false", true},
+ {"gt `xy` `xyz`", "false", true},
+ {"gt .Xuint .Xuint", "false", true},
+ {"gt .Xuint .Yuint", "false", true},
+ {"gt .Yuint .Xuint", "true", true},
+ {"ge 1.5 1.5", "true", true},
+ {"ge 1.5 2.5", "false", true},
+ {"ge 2.5 1.5", "true", true},
+ {"ge 1 1", "true", true},
+ {"ge 1 2", "false", true},
+ {"ge 2 1", "true", true},
+ {"ge `xy` `xy`", "true", true},
+ {"ge `xy` `xyz`", "false", true},
+ {"ge `xyz` `xy`", "true", true},
+ {"ge .Xuint .Xuint", "true", true},
+ {"ge .Xuint .Yuint", "false", true},
+ {"ge .Yuint .Xuint", "true", true},
+ // Errors
+ {"eq `xy` 1", "", false}, // Different types.
+ {"lt true true", "", false}, // Unordered types.
+ {"lt 1+0i 1+0i", "", false}, // Unordered types.
+}
+
+func TestComparison(t *testing.T) {
+ b := new(bytes.Buffer)
+ var cmpStruct = struct {
+ Xuint, Yuint uint
+ }{3, 4}
+ for _, test := range cmpTests {
+ text := fmt.Sprintf("{{if %s}}true{{else}}false{{end}}", test.expr)
+ tmpl, err := New("empty").Parse(text)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b.Reset()
+ err = tmpl.Execute(b, &cmpStruct)
+ if test.ok && err != nil {
+ t.Errorf("%s errored incorrectly: %s", test.expr, err)
+ continue
+ }
+ if !test.ok && err == nil {
+ t.Errorf("%s did not error", test.expr)
+ continue
+ }
+ if b.String() != test.truth {
+ t.Errorf("%s: want %s; got %s", test.expr, test.truth, b.String())
+ }
+ }
+}
diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go
new file mode 100644
index 000000000..e85412262
--- /dev/null
+++ b/src/text/template/funcs.go
@@ -0,0 +1,580 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// FuncMap is the type of the map defining the mapping from names to functions.
+// Each function must have either a single return value, or two return values of
+// which the second has type error. In that case, if the second (error)
+// return value evaluates to non-nil during execution, execution terminates and
+// Execute returns that error.
+type FuncMap map[string]interface{}
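
A small sketch of installing custom functions through a FuncMap, showing both allowed shapes: a single result, and a value-plus-error pair whose non-nil error aborts execution. The function names shout and div are invented for the example:

package main

import (
	"errors"
	"os"
	"strings"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		"shout": strings.ToUpper, // one result
		"div": func(a, b int) (int, error) { // two results; a non-nil error stops execution
			if b == 0 {
				return 0, errors.New("division by zero")
			}
			return a / b, nil
		},
	}
	t := template.Must(template.New("demo").Funcs(funcs).Parse(
		`{{shout "hi"}} {{div 10 2}}`))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// Expected output: HI 5
}
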
+
+var builtins = FuncMap{
+ "and": and,
+ "call": call,
+ "html": HTMLEscaper,
+ "index": index,
+ "js": JSEscaper,
+ "len": length,
+ "not": not,
+ "or": or,
+ "print": fmt.Sprint,
+ "printf": fmt.Sprintf,
+ "println": fmt.Sprintln,
+ "urlquery": URLQueryEscaper,
+
+ // Comparisons
+ "eq": eq, // ==
+ "ge": ge, // >=
+ "gt": gt, // >
+ "le": le, // <=
+ "lt": lt, // <
+ "ne": ne, // !=
+}
+
+var builtinFuncs = createValueFuncs(builtins)
+
+// createValueFuncs turns a FuncMap into a map[string]reflect.Value
+func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
+ m := make(map[string]reflect.Value)
+ addValueFuncs(m, funcMap)
+ return m
+}
+
+// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
+func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
+ for name, fn := range in {
+ v := reflect.ValueOf(fn)
+ if v.Kind() != reflect.Func {
+ panic("value for " + name + " not a function")
+ }
+ if !goodFunc(v.Type()) {
+ panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
+ }
+ out[name] = v
+ }
+}
+
+// addFuncs adds to values the functions in funcs. It does no checking of the input -
+// call addValueFuncs first.
+func addFuncs(out, in FuncMap) {
+ for name, fn := range in {
+ out[name] = fn
+ }
+}
+
+// goodFunc checks that the function or method has the right result signature.
+func goodFunc(typ reflect.Type) bool {
+ // We allow functions with 1 result or 2 results where the second is an error.
+ switch {
+ case typ.NumOut() == 1:
+ return true
+ case typ.NumOut() == 2 && typ.Out(1) == errorType:
+ return true
+ }
+ return false
+}
+
+// findFunction looks for a function in the template's function map, then in the global map.
+func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
+ if tmpl != nil && tmpl.common != nil {
+ if fn := tmpl.execFuncs[name]; fn.IsValid() {
+ return fn, true
+ }
+ }
+ if fn := builtinFuncs[name]; fn.IsValid() {
+ return fn, true
+ }
+ return reflect.Value{}, false
+}
+
+// Indexing.
+
+// index returns the result of indexing its first argument by the following
+// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
+// indexed item must be a map, slice, or array.
+func index(item interface{}, indices ...interface{}) (interface{}, error) {
+ v := reflect.ValueOf(item)
+ for _, i := range indices {
+ index := reflect.ValueOf(i)
+ var isNil bool
+ if v, isNil = indirect(v); isNil {
+ return nil, fmt.Errorf("index of nil pointer")
+ }
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.String:
+ var x int64
+ switch index.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x = index.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x = int64(index.Uint())
+ default:
+ return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
+ }
+ if x < 0 || x >= int64(v.Len()) {
+ return nil, fmt.Errorf("index out of range: %d", x)
+ }
+ v = v.Index(int(x))
+ case reflect.Map:
+ if !index.IsValid() {
+ index = reflect.Zero(v.Type().Key())
+ }
+ if !index.Type().AssignableTo(v.Type().Key()) {
+ return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
+ }
+ if x := v.MapIndex(index); x.IsValid() {
+ v = x
+ } else {
+ v = reflect.Zero(v.Type().Elem())
+ }
+ default:
+ return nil, fmt.Errorf("can't index item of type %s", v.Type())
+ }
+ }
+ return v.Interface(), nil
+}
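
As a usage sketch, index chains its arguments across maps and slices, and a missing map key yields the zero value of the element type (the data below is invented for illustration):

package main

import (
	"os"
	"text/template"
)

func main() {
	data := map[string][]int{"primes": {2, 3, 5, 7}}
	t := template.Must(template.New("idx").Parse(
		`{{index . "primes" 2}} {{index . "missing"}}`))
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// Expected output: 5 []
}
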
+
+// Length
+
+// length returns the length of the item, with an error if it has no defined length.
+func length(item interface{}) (int, error) {
+ v, isNil := indirect(reflect.ValueOf(item))
+ if isNil {
+ return 0, fmt.Errorf("len of nil pointer")
+ }
+ switch v.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len(), nil
+ }
+ return 0, fmt.Errorf("len of type %s", v.Type())
+}
+
+// Function invocation
+
+// call returns the result of evaluating the first argument as a function.
+// The function must return 1 result, or 2 results, the second of which is an error.
+func call(fn interface{}, args ...interface{}) (interface{}, error) {
+ v := reflect.ValueOf(fn)
+ typ := v.Type()
+ if typ.Kind() != reflect.Func {
+ return nil, fmt.Errorf("non-function of type %s", typ)
+ }
+ if !goodFunc(typ) {
+		return nil, fmt.Errorf("function has %d results; should be 1 or 2", typ.NumOut())
+ }
+ numIn := typ.NumIn()
+ var dddType reflect.Type
+ if typ.IsVariadic() {
+ if len(args) < numIn-1 {
+ return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
+ }
+ dddType = typ.In(numIn - 1).Elem()
+ } else {
+ if len(args) != numIn {
+ return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
+ }
+ }
+ argv := make([]reflect.Value, len(args))
+ for i, arg := range args {
+ value := reflect.ValueOf(arg)
+ // Compute the expected type. Clumsy because of variadics.
+ var argType reflect.Type
+ if !typ.IsVariadic() || i < numIn-1 {
+ argType = typ.In(i)
+ } else {
+ argType = dddType
+ }
+ if !value.IsValid() && canBeNil(argType) {
+ value = reflect.Zero(argType)
+ }
+ if !value.Type().AssignableTo(argType) {
+ return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
+ }
+ argv[i] = value
+ }
+ result := v.Call(argv)
+ if len(result) == 2 && !result[1].IsNil() {
+ return result[0].Interface(), result[1].Interface().(error)
+ }
+ return result[0].Interface(), nil
+}
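
A sketch of the call builtin, which is needed when the function is itself data (a field or variable) rather than a name registered in a FuncMap; the field name Join below is hypothetical:

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	data := struct {
		Join func(sep string, parts ...string) string
	}{
		Join: func(sep string, parts ...string) string { return strings.Join(parts, sep) },
	}
	t := template.Must(template.New("call").Parse(`{{call .Join "-" "a" "b" "c"}}`))
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// Expected output: a-b-c
}
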
+
+// Boolean logic.
+
+func truth(a interface{}) bool {
+ t, _ := isTrue(reflect.ValueOf(a))
+ return t
+}
+
+// and computes the Boolean AND of its arguments, returning
+// the first false argument it encounters, or the last argument.
+func and(arg0 interface{}, args ...interface{}) interface{} {
+ if !truth(arg0) {
+ return arg0
+ }
+ for i := range args {
+ arg0 = args[i]
+ if !truth(arg0) {
+ break
+ }
+ }
+ return arg0
+}
+
+// or computes the Boolean OR of its arguments, returning
+// the first true argument it encounters, or the last argument.
+func or(arg0 interface{}, args ...interface{}) interface{} {
+ if truth(arg0) {
+ return arg0
+ }
+ for i := range args {
+ arg0 = args[i]
+ if truth(arg0) {
+ break
+ }
+ }
+ return arg0
+}
+
+// not returns the Boolean negation of its argument.
+func not(arg interface{}) (truth bool) {
+ truth, _ = isTrue(reflect.ValueOf(arg))
+ return !truth
+}
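
Because and and or return one of their arguments rather than a bool, a template can use or as a default-value idiom, as the "cute examples" in exec_test.go earlier in this diff do. A minimal sketch (the field name Nickname is invented):

package main

import (
	"os"
	"text/template"
)

func main() {
	t := template.Must(template.New("bool").Parse(`{{or .Nickname "anonymous"}}`))
	data := struct{ Nickname string }{""} // an empty string counts as false
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// Expected output: anonymous
}
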
+
+// Comparison.
+
+// TODO: Perhaps allow comparison between signed and unsigned integers.
+
+var (
+ errBadComparisonType = errors.New("invalid type for comparison")
+ errBadComparison = errors.New("incompatible types for comparison")
+ errNoComparison = errors.New("missing argument for comparison")
+)
+
+type kind int
+
+const (
+ invalidKind kind = iota
+ boolKind
+ complexKind
+ intKind
+ floatKind
+ integerKind
+ stringKind
+ uintKind
+)
+
+func basicKind(v reflect.Value) (kind, error) {
+ switch v.Kind() {
+ case reflect.Bool:
+ return boolKind, nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return intKind, nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return uintKind, nil
+ case reflect.Float32, reflect.Float64:
+ return floatKind, nil
+ case reflect.Complex64, reflect.Complex128:
+ return complexKind, nil
+ case reflect.String:
+ return stringKind, nil
+ }
+ return invalidKind, errBadComparisonType
+}
+
+// eq evaluates the comparison a == b || a == c || ...
+func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
+ v1 := reflect.ValueOf(arg1)
+ k1, err := basicKind(v1)
+ if err != nil {
+ return false, err
+ }
+ if len(arg2) == 0 {
+ return false, errNoComparison
+ }
+ for _, arg := range arg2 {
+ v2 := reflect.ValueOf(arg)
+ k2, err := basicKind(v2)
+ if err != nil {
+ return false, err
+ }
+ if k1 != k2 {
+ return false, errBadComparison
+ }
+ truth := false
+ switch k1 {
+ case boolKind:
+ truth = v1.Bool() == v2.Bool()
+ case complexKind:
+ truth = v1.Complex() == v2.Complex()
+ case floatKind:
+ truth = v1.Float() == v2.Float()
+ case intKind:
+ truth = v1.Int() == v2.Int()
+ case stringKind:
+ truth = v1.String() == v2.String()
+ case uintKind:
+ truth = v1.Uint() == v2.Uint()
+ default:
+ panic("invalid kind")
+ }
+ if truth {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// ne evaluates the comparison a != b.
+func ne(arg1, arg2 interface{}) (bool, error) {
+ // != is the inverse of ==.
+ equal, err := eq(arg1, arg2)
+ return !equal, err
+}
+
+// lt evaluates the comparison a < b.
+func lt(arg1, arg2 interface{}) (bool, error) {
+ v1 := reflect.ValueOf(arg1)
+ k1, err := basicKind(v1)
+ if err != nil {
+ return false, err
+ }
+ v2 := reflect.ValueOf(arg2)
+ k2, err := basicKind(v2)
+ if err != nil {
+ return false, err
+ }
+ if k1 != k2 {
+ return false, errBadComparison
+ }
+ truth := false
+ switch k1 {
+ case boolKind, complexKind:
+ return false, errBadComparisonType
+ case floatKind:
+ truth = v1.Float() < v2.Float()
+ case intKind:
+ truth = v1.Int() < v2.Int()
+ case stringKind:
+ truth = v1.String() < v2.String()
+ case uintKind:
+ truth = v1.Uint() < v2.Uint()
+ default:
+ panic("invalid kind")
+ }
+ return truth, nil
+}
+
+// le evaluates the comparison a <= b.
+func le(arg1, arg2 interface{}) (bool, error) {
+ // <= is < or ==.
+ lessThan, err := lt(arg1, arg2)
+ if lessThan || err != nil {
+ return lessThan, err
+ }
+ return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 interface{}) (bool, error) {
+ // > is the inverse of <=.
+ lessOrEqual, err := le(arg1, arg2)
+ if err != nil {
+ return false, err
+ }
+ return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 interface{}) (bool, error) {
+ // >= is the inverse of <.
+ lessThan, err := lt(arg1, arg2)
+ if err != nil {
+ return false, err
+ }
+ return !lessThan, nil
+}
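
The comparison builtins require operands of the same basic kind, so mixing, say, a string and an int is an execution error rather than a silent false. A quick sketch of both the normal case and the error case (the field name Age is invented):

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	ok := template.Must(template.New("cmp").Parse(
		`{{if lt .Age 18}}minor{{else}}adult{{end}}`))
	// Prints "adult".
	if err := ok.Execute(os.Stdout, struct{ Age int }{42}); err != nil {
		panic(err)
	}
	fmt.Println()

	// Mixing kinds is reported as an execution error.
	bad := template.Must(template.New("bad").Parse(`{{eq "1" 1}}`))
	if err := bad.Execute(os.Stdout, nil); err != nil {
		fmt.Println("comparison error:", err) // incompatible types for comparison
	}
}
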
+
+// HTML escaping.
+
+var (
+ htmlQuot = []byte("&#34;") // shorter than "&quot;"
+ htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+ htmlAmp = []byte("&amp;")
+ htmlLt = []byte("&lt;")
+ htmlGt = []byte("&gt;")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+ last := 0
+ for i, c := range b {
+ var html []byte
+ switch c {
+ case '"':
+ html = htmlQuot
+ case '\'':
+ html = htmlApos
+ case '&':
+ html = htmlAmp
+ case '<':
+ html = htmlLt
+ case '>':
+ html = htmlGt
+ default:
+ continue
+ }
+ w.Write(b[last:i])
+ w.Write(html)
+ last = i + 1
+ }
+ w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+ // Avoid allocation if we can.
+ if strings.IndexAny(s, `'"&<>`) < 0 {
+ return s
+ }
+ var b bytes.Buffer
+ HTMLEscape(&b, []byte(s))
+ return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+ return HTMLEscapeString(evalArgs(args))
+}
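
A sketch of the HTML escaping helpers from outside the package; HTMLEscapeString is the string form, and the html builtin applies the same escaping inside a template:

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	fmt.Println(template.HTMLEscapeString(`<a href="x">O'Neill & co</a>`))
	// &lt;a href=&#34;x&#34;&gt;O&#39;Neill &amp; co&lt;/a&gt;

	t := template.Must(template.New("h").Parse(`{{html .}}`))
	if err := t.Execute(os.Stdout, "<b>bold</b>"); err != nil {
		panic(err)
	}
	// &lt;b&gt;bold&lt;/b&gt;
}
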
+
+// JavaScript escaping.
+
+var (
+ jsLowUni = []byte(`\u00`)
+ hex = []byte("0123456789ABCDEF")
+
+ jsBackslash = []byte(`\\`)
+ jsApos = []byte(`\'`)
+ jsQuot = []byte(`\"`)
+ jsLt = []byte(`\x3C`)
+ jsGt = []byte(`\x3E`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+ last := 0
+ for i := 0; i < len(b); i++ {
+ c := b[i]
+
+ if !jsIsSpecial(rune(c)) {
+ // fast path: nothing to do
+ continue
+ }
+ w.Write(b[last:i])
+
+ if c < utf8.RuneSelf {
+ // Quotes, slashes and angle brackets get quoted.
+ // Control characters get written as \u00XX.
+ switch c {
+ case '\\':
+ w.Write(jsBackslash)
+ case '\'':
+ w.Write(jsApos)
+ case '"':
+ w.Write(jsQuot)
+ case '<':
+ w.Write(jsLt)
+ case '>':
+ w.Write(jsGt)
+ default:
+ w.Write(jsLowUni)
+ t, b := c>>4, c&0x0f
+ w.Write(hex[t : t+1])
+ w.Write(hex[b : b+1])
+ }
+ } else {
+ // Unicode rune.
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.IsPrint(r) {
+ w.Write(b[i : i+size])
+ } else {
+ fmt.Fprintf(w, "\\u%04X", r)
+ }
+ i += size - 1
+ }
+ last = i + 1
+ }
+ w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+ // Avoid allocation if we can.
+ if strings.IndexFunc(s, jsIsSpecial) < 0 {
+ return s
+ }
+ var b bytes.Buffer
+ JSEscape(&b, []byte(s))
+ return b.String()
+}
+
+func jsIsSpecial(r rune) bool {
+ switch r {
+ case '\\', '\'', '"', '<', '>':
+ return true
+ }
+ return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+ return JSEscapeString(evalArgs(args))
+}
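
And the JavaScript counterpart, matching the TestJSEscaping cases earlier in this diff: quotes, backslashes and angle brackets are escaped, and unprintable runes become \uXXXX sequences:

package main

import (
	"fmt"
	"text/template"
)

func main() {
	fmt.Println(template.JSEscapeString(`Go "jump" \ <html>`))
	// Go \"jump\" \\ \x3Chtml\x3E
}
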
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+ return url.QueryEscape(evalArgs(args))
+}
+
+// evalArgs formats the list of arguments into a string. It is therefore equivalent to
+// fmt.Sprint(args...)
+// except that each argument is indirected (if a pointer), as required,
+// using the same rules as the default string evaluation during template
+// execution.
+func evalArgs(args []interface{}) string {
+ ok := false
+ var s string
+ // Fast path for simple common case.
+ if len(args) == 1 {
+ s, ok = args[0].(string)
+ }
+ if !ok {
+ for i, arg := range args {
+ a, ok := printableValue(reflect.ValueOf(arg))
+ if ok {
+ args[i] = a
+			} // else let fmt do its thing
+ }
+ s = fmt.Sprint(args...)
+ }
+ return s
+}
diff --git a/src/text/template/helper.go b/src/text/template/helper.go
new file mode 100644
index 000000000..3636fb54d
--- /dev/null
+++ b/src/text/template/helper.go
@@ -0,0 +1,108 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper functions to make constructing templates easier.
+
+package template
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+)
+
+// Functions and methods to parse templates.
+
+// Must is a helper that wraps a call to a function returning (*Template, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initializations such as
+// var t = template.Must(template.New("name").Parse("text"))
+func Must(t *Template, err error) *Template {
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+// ParseFiles creates a new Template and parses the template definitions from
+// the named files. The returned template's name will have the (base) name and
+// (parsed) contents of the first file. There must be at least one file.
+// If an error occurs, parsing stops and the returned *Template is nil.
+func ParseFiles(filenames ...string) (*Template, error) {
+ return parseFiles(nil, filenames...)
+}
+
+// ParseFiles parses the named files and associates the resulting templates with
+// t. If an error occurs, parsing stops and the returned template is nil;
+// otherwise it is t. There must be at least one file.
+func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
+ return parseFiles(t, filenames...)
+}
+
+// parseFiles is the helper for the method and function. If the argument
+// template is nil, it is created from the first file.
+func parseFiles(t *Template, filenames ...string) (*Template, error) {
+ if len(filenames) == 0 {
+ // Not really a problem, but be consistent.
+ return nil, fmt.Errorf("template: no files named in call to ParseFiles")
+ }
+ for _, filename := range filenames {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ s := string(b)
+ name := filepath.Base(filename)
+ // First template becomes return value if not already defined,
+ // and we use that one for subsequent New calls to associate
+ // all the templates together. Also, if this file has the same name
+ // as t, this file becomes the contents of t, so
+ // t, err := New(name).Funcs(xxx).ParseFiles(name)
+ // works. Otherwise we create a new template associated with t.
+ var tmpl *Template
+ if t == nil {
+ t = New(name)
+ }
+ if name == t.Name() {
+ tmpl = t
+ } else {
+ tmpl = t.New(name)
+ }
+ _, err = tmpl.Parse(s)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return t, nil
+}
+
+// ParseGlob creates a new Template and parses the template definitions from the
+// files identified by the pattern, which must match at least one file. The
+// returned template will have the (base) name and (parsed) contents of the
+// first file matched by the pattern. ParseGlob is equivalent to calling
+// ParseFiles with the list of files matched by the pattern.
+func ParseGlob(pattern string) (*Template, error) {
+ return parseGlob(nil, pattern)
+}
+
+// ParseGlob parses the template definitions in the files identified by the
+// pattern and associates the resulting templates with t. The pattern is
+// processed by filepath.Glob and must match at least one file. ParseGlob is
+// equivalent to calling t.ParseFiles with the list of files matched by the
+// pattern.
+func (t *Template) ParseGlob(pattern string) (*Template, error) {
+ return parseGlob(t, pattern)
+}
+
+// parseGlob is the implementation of the function and method ParseGlob.
+func parseGlob(t *Template, pattern string) (*Template, error) {
+ filenames, err := filepath.Glob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ if len(filenames) == 0 {
+ return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
+ }
+ return parseFiles(t, filenames...)
+}
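
A usage sketch tying the helpers together; the directory layout, the glob pattern, and the template name "header.tmpl" are hypothetical:

package main

import (
	"os"
	"text/template"
)

func main() {
	// Assume templates/*.tmpl each define one or more templates; the first
	// matched file also supplies the name and contents of the returned *Template.
	t, err := template.ParseGlob("templates/*.tmpl")
	if err != nil {
		panic(err) // e.g. the "pattern matches no files" error above
	}
	if err := t.ExecuteTemplate(os.Stdout, "header.tmpl", nil); err != nil {
		panic(err)
	}
}
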
diff --git a/src/text/template/multi_test.go b/src/text/template/multi_test.go
new file mode 100644
index 000000000..e4e804880
--- /dev/null
+++ b/src/text/template/multi_test.go
@@ -0,0 +1,292 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+// Tests for multiple-template parsing and execution.
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "testing"
+ "text/template/parse"
+)
+
+const (
+ noError = true
+ hasError = false
+)
+
+type multiParseTest struct {
+ name string
+ input string
+ ok bool
+ names []string
+ results []string
+}
+
+var multiParseTests = []multiParseTest{
+ {"empty", "", noError,
+ nil,
+ nil},
+ {"one", `{{define "foo"}} FOO {{end}}`, noError,
+ []string{"foo"},
+ []string{" FOO "}},
+ {"two", `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`, noError,
+ []string{"foo", "bar"},
+ []string{" FOO ", " BAR "}},
+ // errors
+ {"missing end", `{{define "foo"}} FOO `, hasError,
+ nil,
+ nil},
+ {"malformed name", `{{define "foo}} FOO `, hasError,
+ nil,
+ nil},
+}
+
+func TestMultiParse(t *testing.T) {
+ for _, test := range multiParseTests {
+ template, err := New("root").Parse(test.input)
+ switch {
+ case err == nil && !test.ok:
+ t.Errorf("%q: expected error; got none", test.name)
+ continue
+ case err != nil && test.ok:
+ t.Errorf("%q: unexpected error: %v", test.name, err)
+ continue
+ case err != nil && !test.ok:
+ // expected error, got one
+ if *debug {
+ fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+ }
+ continue
+ }
+ if template == nil {
+ continue
+ }
+ if len(template.tmpl) != len(test.names)+1 { // +1 for root
+ t.Errorf("%s: wrong number of templates; wanted %d got %d", test.name, len(test.names), len(template.tmpl))
+ continue
+ }
+ for i, name := range test.names {
+ tmpl, ok := template.tmpl[name]
+ if !ok {
+ t.Errorf("%s: can't find template %q", test.name, name)
+ continue
+ }
+ result := tmpl.Root.String()
+ if result != test.results[i] {
+ t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.results[i])
+ }
+ }
+ }
+}
+
+var multiExecTests = []execTest{
+ {"empty", "", "", nil, true},
+ {"text", "some text", "some text", nil, true},
+ {"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true},
+ {"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true},
+ {"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true},
+ {"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true},
+ {"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true},
+ {"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true},
+ {"variable declared by template", `{{template "nested" $x:=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true},
+
+ // User-defined function: test argument evaluator.
+ {"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true},
+ {"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true},
+}
+
+// These strings are also in testdata/*.
+const multiText1 = `
+ {{define "x"}}TEXT{{end}}
+ {{define "dotV"}}{{.V}}{{end}}
+`
+
+const multiText2 = `
+ {{define "dot"}}{{.}}{{end}}
+ {{define "nested"}}{{template "dot" .}}{{end}}
+`
+
+func TestMultiExecute(t *testing.T) {
+ // Declare a couple of templates first.
+ template, err := New("root").Parse(multiText1)
+ if err != nil {
+ t.Fatalf("parse error for 1: %s", err)
+ }
+ _, err = template.Parse(multiText2)
+ if err != nil {
+ t.Fatalf("parse error for 2: %s", err)
+ }
+ testExecute(multiExecTests, template, t)
+}
+
+func TestParseFiles(t *testing.T) {
+ _, err := ParseFiles("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ template := New("root")
+ _, err = template.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(multiExecTests, template, t)
+}
+
+func TestParseGlob(t *testing.T) {
+ _, err := ParseGlob("DOES NOT EXIST")
+ if err == nil {
+ t.Error("expected error for non-existent file; got none")
+ }
+ _, err = New("error").ParseGlob("[x")
+ if err == nil {
+ t.Error("expected error for bad pattern; got none")
+ }
+ template := New("root")
+ _, err = template.ParseGlob("testdata/file*.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(multiExecTests, template, t)
+}
+
+// In these tests, actual content (not just template definitions) comes from the parsed files.
+
+var templateFileExecTests = []execTest{
+ {"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\n\ny\ntemplate2\n\nx\n", 0, true},
+}
+
+func TestParseFilesWithData(t *testing.T) {
+ template, err := New("root").ParseFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(templateFileExecTests, template, t)
+}
+
+func TestParseGlobWithData(t *testing.T) {
+ template, err := New("root").ParseGlob("testdata/tmpl*.tmpl")
+ if err != nil {
+ t.Fatalf("error parsing files: %v", err)
+ }
+ testExecute(templateFileExecTests, template, t)
+}
+
+const (
+ cloneText1 = `{{define "a"}}{{template "b"}}{{template "c"}}{{end}}`
+ cloneText2 = `{{define "b"}}b{{end}}`
+ cloneText3 = `{{define "c"}}root{{end}}`
+ cloneText4 = `{{define "c"}}clone{{end}}`
+)
+
+func TestClone(t *testing.T) {
+ // Create some templates and clone the root.
+ root, err := New("root").Parse(cloneText1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = root.Parse(cloneText2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ clone := Must(root.Clone())
+ // Add variants to both.
+ _, err = root.Parse(cloneText3)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = clone.Parse(cloneText4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Verify that the clone is self-consistent.
+ for k, v := range clone.tmpl {
+ if k == clone.name && v.tmpl[k] != clone {
+ t.Error("clone does not contain root")
+ }
+ if v != v.tmpl[v.name] {
+ t.Errorf("clone does not contain self for %q", k)
+ }
+ }
+ // Execute root.
+ var b bytes.Buffer
+ err = root.ExecuteTemplate(&b, "a", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if b.String() != "broot" {
+ t.Errorf("expected %q got %q", "broot", b.String())
+ }
+ // Execute copy.
+ b.Reset()
+ err = clone.ExecuteTemplate(&b, "a", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if b.String() != "bclone" {
+ t.Errorf("expected %q got %q", "bclone", b.String())
+ }
+}
+
+func TestAddParseTree(t *testing.T) {
+ // Create some templates.
+ root, err := New("root").Parse(cloneText1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = root.Parse(cloneText2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Add a new parse tree.
+ tree, err := parse.Parse("cloneText3", cloneText3, "", "", nil, builtins)
+ if err != nil {
+ t.Fatal(err)
+ }
+	added, err := root.AddParseTree("c", tree["c"])
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Execute.
+ var b bytes.Buffer
+ err = added.ExecuteTemplate(&b, "a", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if b.String() != "broot" {
+ t.Errorf("expected %q got %q", "broot", b.String())
+ }
+}
+
+// Issue 7032
+func TestAddParseTreeToUnparsedTemplate(t *testing.T) {
+ master := "{{define \"master\"}}{{end}}"
+ tmpl := New("master")
+ tree, err := parse.Parse("master", master, "", "", nil)
+ if err != nil {
+ t.Fatalf("unexpected parse err: %v", err)
+ }
+ masterTree := tree["master"]
+ tmpl.AddParseTree("master", masterTree) // used to panic
+}
+
+func TestRedefinition(t *testing.T) {
+ var tmpl *Template
+ var err error
+ if tmpl, err = New("tmpl1").Parse(`{{define "test"}}foo{{end}}`); err != nil {
+ t.Fatalf("parse 1: %v", err)
+ }
+ if _, err = tmpl.Parse(`{{define "test"}}bar{{end}}`); err == nil {
+ t.Fatal("expected error")
+ }
+ if !strings.Contains(err.Error(), "redefinition") {
+ t.Fatalf("expected redefinition error; got %v", err)
+ }
+ if _, err = tmpl.New("tmpl2").Parse(`{{define "test"}}bar{{end}}`); err == nil {
+ t.Fatal("expected error")
+ }
+ if !strings.Contains(err.Error(), "redefinition") {
+ t.Fatalf("expected redefinition error; got %v", err)
+ }
+}
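
A minimal sketch of the pattern the tests above exercise: several {{define}} blocks parsed into one associated set, then invoked by name with {{template}}.

	package main

	import (
		"os"
		"text/template"
	)

	func main() {
		const defs = `{{define "x"}}TEXT{{end}}{{define "dot"}}{{.}}{{end}}`
		t := template.Must(template.New("root").Parse(defs))
		// {{template "dot" 17}} runs the "dot" template with . set to 17.
		t = template.Must(t.New("main").Parse(`{{template "x"}} and {{template "dot" 17}}`))
		if err := t.ExecuteTemplate(os.Stdout, "main", nil); err != nil {
			panic(err)
		}
		// Prints: TEXT and 17
	}
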
diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go
new file mode 100644
index 000000000..1674aaf9c
--- /dev/null
+++ b/src/text/template/parse/lex.go
@@ -0,0 +1,551 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+ typ itemType // The type of this item.
+ pos Pos // The starting position, in bytes, of this item in the input string.
+ val string // The value of this item.
+}
+
+func (i item) String() string {
+ switch {
+ case i.typ == itemEOF:
+ return "EOF"
+ case i.typ == itemError:
+ return i.val
+ case i.typ > itemKeyword:
+ return fmt.Sprintf("<%s>", i.val)
+ case len(i.val) > 10:
+ return fmt.Sprintf("%.10q...", i.val)
+ }
+ return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+ itemError itemType = iota // error occurred; value is text of error
+ itemBool // boolean constant
+ itemChar // printable ASCII character; grab bag for comma etc.
+ itemCharConstant // character constant
+ itemComplex // complex constant (1+2i); imaginary is just a number
+ itemColonEquals // colon-equals (':=') introducing a declaration
+ itemEOF
+ itemField // alphanumeric identifier starting with '.'
+ itemIdentifier // alphanumeric identifier not starting with '.'
+ itemLeftDelim // left action delimiter
+ itemLeftParen // '(' inside action
+ itemNumber // simple number, including imaginary
+ itemPipe // pipe symbol
+ itemRawString // raw quoted string (includes quotes)
+ itemRightDelim // right action delimiter
+ itemRightParen // ')' inside action
+ itemSpace // run of spaces separating arguments
+ itemString // quoted string (includes quotes)
+ itemText // plain text
+ itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
+ // Keywords appear after all the rest.
+ itemKeyword // used only to delimit the keywords
+ itemDot // the cursor, spelled '.'
+ itemDefine // define keyword
+ itemElse // else keyword
+ itemEnd // end keyword
+ itemIf // if keyword
+ itemNil // the untyped nil constant, easiest to treat as a keyword
+ itemRange // range keyword
+ itemTemplate // template keyword
+ itemWith // with keyword
+)
+
+var key = map[string]itemType{
+ ".": itemDot,
+ "define": itemDefine,
+ "else": itemElse,
+ "end": itemEnd,
+ "if": itemIf,
+ "range": itemRange,
+ "nil": itemNil,
+ "template": itemTemplate,
+ "with": itemWith,
+}
+
+const eof = -1
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+ name string // the name of the input; used only for error reports
+ input string // the string being scanned
+ leftDelim string // start of action
+ rightDelim string // end of action
+ state stateFn // the next lexing function to enter
+ pos Pos // current position in the input
+ start Pos // start position of this item
+ width Pos // width of last rune read from input
+ lastPos Pos // position of most recent item returned by nextItem
+ items chan item // channel of scanned items
+ parenDepth int // nesting depth of ( ) exprs
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() rune {
+ if int(l.pos) >= len(l.input) {
+ l.width = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.width = Pos(w)
+ l.pos += l.width
+ return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+ l.pos -= l.width
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+ l.items <- item{t, l.start, l.input[l.start:l.pos]}
+ l.start = l.pos
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+ l.start = l.pos
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+ if strings.IndexRune(valid, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+ for strings.IndexRune(valid, l.next()) >= 0 {
+ }
+ l.backup()
+}
+
+// lineNumber reports which line we're on, based on the position of
+// the previous item returned by nextItem. Doing it this way
+// means we don't have to worry about peek double counting.
+func (l *lexer) lineNumber() int {
+ return 1 + strings.Count(l.input[:l.lastPos], "\n")
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+ l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
+ return nil
+}
+
+// nextItem returns the next item from the input.
+func (l *lexer) nextItem() item {
+ item := <-l.items
+ l.lastPos = item.pos
+ return item
+}
+
+// lex creates a new scanner for the input string.
+func lex(name, input, left, right string) *lexer {
+ if left == "" {
+ left = leftDelim
+ }
+ if right == "" {
+ right = rightDelim
+ }
+ l := &lexer{
+ name: name,
+ input: input,
+ leftDelim: left,
+ rightDelim: right,
+ items: make(chan item),
+ }
+ go l.run()
+ return l
+}
+
+// run runs the state machine for the lexer.
+func (l *lexer) run() {
+ for l.state = lexText; l.state != nil; {
+ l.state = l.state(l)
+ }
+}
+
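
The run loop above drives the lexer entirely through state functions. A self-contained toy version of the same pattern (all names here are invented for illustration, not part of this package): each state consumes some input and returns the next state, or nil to stop.

	package main

	import (
		"fmt"
		"strings"
	)

	type wordLexer struct {
		input string
		pos   int
		words []string
	}

	// wordState mirrors stateFn above: a state that returns the next state.
	type wordState func(*wordLexer) wordState

	func skipSpaces(l *wordLexer) wordState {
		for l.pos < len(l.input) && l.input[l.pos] == ' ' {
			l.pos++
		}
		if l.pos >= len(l.input) {
			return nil // no more input: stop the machine
		}
		return scanWord
	}

	func scanWord(l *wordLexer) wordState {
		start := l.pos
		for l.pos < len(l.input) && l.input[l.pos] != ' ' {
			l.pos++
		}
		l.words = append(l.words, l.input[start:l.pos])
		return skipSpaces
	}

	func main() {
		l := &wordLexer{input: "lex with state functions"}
		for state := wordState(skipSpaces); state != nil; {
			state = state(l)
		}
		fmt.Println(strings.Join(l.words, "|")) // lex|with|state|functions
	}
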
+// state functions
+
+const (
+ leftDelim = "{{"
+ rightDelim = "}}"
+ leftComment = "/*"
+ rightComment = "*/"
+)
+
+// lexText scans until an opening action delimiter, "{{".
+func lexText(l *lexer) stateFn {
+ for {
+ if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
+ if l.pos > l.start {
+ l.emit(itemText)
+ }
+ return lexLeftDelim
+ }
+ if l.next() == eof {
+ break
+ }
+ }
+ // Correctly reached EOF.
+ if l.pos > l.start {
+ l.emit(itemText)
+ }
+ l.emit(itemEOF)
+ return nil
+}
+
+// lexLeftDelim scans the left delimiter, which is known to be present.
+func lexLeftDelim(l *lexer) stateFn {
+ l.pos += Pos(len(l.leftDelim))
+ if strings.HasPrefix(l.input[l.pos:], leftComment) {
+ return lexComment
+ }
+ l.emit(itemLeftDelim)
+ l.parenDepth = 0
+ return lexInsideAction
+}
+
+// lexComment scans a comment. The left comment marker is known to be present.
+func lexComment(l *lexer) stateFn {
+ l.pos += Pos(len(leftComment))
+ i := strings.Index(l.input[l.pos:], rightComment)
+ if i < 0 {
+ return l.errorf("unclosed comment")
+ }
+ l.pos += Pos(i + len(rightComment))
+ if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
+		return l.errorf("comment ends before closing delimiter")
+	}
+ l.pos += Pos(len(l.rightDelim))
+ l.ignore()
+ return lexText
+}
+
+// lexRightDelim scans the right delimiter, which is known to be present.
+func lexRightDelim(l *lexer) stateFn {
+ l.pos += Pos(len(l.rightDelim))
+ l.emit(itemRightDelim)
+ return lexText
+}
+
+// lexInsideAction scans the elements inside action delimiters.
+func lexInsideAction(l *lexer) stateFn {
+ // Either number, quoted string, or identifier.
+ // Spaces separate arguments; runs of spaces turn into itemSpace.
+ // Pipe symbols separate and are emitted.
+ if strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
+ if l.parenDepth == 0 {
+ return lexRightDelim
+ }
+ return l.errorf("unclosed left paren")
+ }
+ switch r := l.next(); {
+ case r == eof || isEndOfLine(r):
+ return l.errorf("unclosed action")
+ case isSpace(r):
+ return lexSpace
+ case r == ':':
+ if l.next() != '=' {
+ return l.errorf("expected :=")
+ }
+ l.emit(itemColonEquals)
+ case r == '|':
+ l.emit(itemPipe)
+ case r == '"':
+ return lexQuote
+ case r == '`':
+ return lexRawQuote
+ case r == '$':
+ return lexVariable
+ case r == '\'':
+ return lexChar
+ case r == '.':
+ // special look-ahead for ".field" so we don't break l.backup().
+ if l.pos < Pos(len(l.input)) {
+ r := l.input[l.pos]
+ if r < '0' || '9' < r {
+ return lexField
+ }
+ }
+ fallthrough // '.' can start a number.
+ case r == '+' || r == '-' || ('0' <= r && r <= '9'):
+ l.backup()
+ return lexNumber
+ case isAlphaNumeric(r):
+ l.backup()
+ return lexIdentifier
+ case r == '(':
+ l.emit(itemLeftParen)
+ l.parenDepth++
+ return lexInsideAction
+ case r == ')':
+ l.emit(itemRightParen)
+ l.parenDepth--
+ if l.parenDepth < 0 {
+ return l.errorf("unexpected right paren %#U", r)
+ }
+ return lexInsideAction
+ case r <= unicode.MaxASCII && unicode.IsPrint(r):
+ l.emit(itemChar)
+ return lexInsideAction
+ default:
+ return l.errorf("unrecognized character in action: %#U", r)
+ }
+ return lexInsideAction
+}
+
+// lexSpace scans a run of space characters.
+// One space has already been seen.
+func lexSpace(l *lexer) stateFn {
+ for isSpace(l.peek()) {
+ l.next()
+ }
+ l.emit(itemSpace)
+ return lexInsideAction
+}
+
+// lexIdentifier scans an alphanumeric.
+func lexIdentifier(l *lexer) stateFn {
+Loop:
+ for {
+ switch r := l.next(); {
+ case isAlphaNumeric(r):
+ // absorb.
+ default:
+ l.backup()
+ word := l.input[l.start:l.pos]
+ if !l.atTerminator() {
+ return l.errorf("bad character %#U", r)
+ }
+ switch {
+ case key[word] > itemKeyword:
+ l.emit(key[word])
+ case word[0] == '.':
+ l.emit(itemField)
+ case word == "true", word == "false":
+ l.emit(itemBool)
+ default:
+ l.emit(itemIdentifier)
+ }
+ break Loop
+ }
+ }
+ return lexInsideAction
+}
+
+// lexField scans a field: .Alphanumeric.
+// The . has been scanned.
+func lexField(l *lexer) stateFn {
+ return lexFieldOrVariable(l, itemField)
+}
+
+// lexVariable scans a Variable: $Alphanumeric.
+// The $ has been scanned.
+func lexVariable(l *lexer) stateFn {
+ if l.atTerminator() { // Nothing interesting follows -> "$".
+ l.emit(itemVariable)
+ return lexInsideAction
+ }
+ return lexFieldOrVariable(l, itemVariable)
+}
+
+// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
+// The . or $ has been scanned.
+func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
+ if l.atTerminator() { // Nothing interesting follows -> "." or "$".
+ if typ == itemVariable {
+ l.emit(itemVariable)
+ } else {
+ l.emit(itemDot)
+ }
+ return lexInsideAction
+ }
+ var r rune
+ for {
+ r = l.next()
+ if !isAlphaNumeric(r) {
+ l.backup()
+ break
+ }
+ }
+ if !l.atTerminator() {
+ return l.errorf("bad character %#U", r)
+ }
+ l.emit(typ)
+ return lexInsideAction
+}
+
+// atTerminator reports whether the input is at a valid termination character to
+// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
+// like "$x+2" not being acceptable without a space, in case we decide one
+// day to implement arithmetic.
+func (l *lexer) atTerminator() bool {
+ r := l.peek()
+ if isSpace(r) || isEndOfLine(r) {
+ return true
+ }
+ switch r {
+ case eof, '.', ',', '|', ':', ')', '(':
+ return true
+ }
+ // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
+ // succeed but should fail) but only in extremely rare cases caused by willfully
+ // bad choice of delimiter.
+ if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
+ return true
+ }
+ return false
+}
+
+// lexChar scans a character constant. The initial quote is already
+// scanned. Syntax checking is done by the parser.
+func lexChar(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != eof && r != '\n' {
+ break
+ }
+ fallthrough
+ case eof, '\n':
+ return l.errorf("unterminated character constant")
+ case '\'':
+ break Loop
+ }
+ }
+ l.emit(itemCharConstant)
+ return lexInsideAction
+}
+
+// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
+// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
+// and "089" - but when it's wrong the input is invalid and the parser (via
+// strconv) will notice.
+func lexNumber(l *lexer) stateFn {
+ if !l.scanNumber() {
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ if sign := l.peek(); sign == '+' || sign == '-' {
+ // Complex: 1+2i. No spaces, must end in 'i'.
+ if !l.scanNumber() || l.input[l.pos-1] != 'i' {
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ l.emit(itemComplex)
+ } else {
+ l.emit(itemNumber)
+ }
+ return lexInsideAction
+}
+
+func (l *lexer) scanNumber() bool {
+ // Optional leading sign.
+ l.accept("+-")
+ // Is it hex?
+ digits := "0123456789"
+ if l.accept("0") && l.accept("xX") {
+ digits = "0123456789abcdefABCDEF"
+ }
+ l.acceptRun(digits)
+ if l.accept(".") {
+ l.acceptRun(digits)
+ }
+ if l.accept("eE") {
+ l.accept("+-")
+ l.acceptRun("0123456789")
+ }
+ // Is it imaginary?
+ l.accept("i")
+ // Next thing mustn't be alphanumeric.
+ if isAlphaNumeric(l.peek()) {
+ l.next()
+ return false
+ }
+ return true
+}
+
+// lexQuote scans a quoted string.
+func lexQuote(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != eof && r != '\n' {
+ break
+ }
+ fallthrough
+ case eof, '\n':
+ return l.errorf("unterminated quoted string")
+ case '"':
+ break Loop
+ }
+ }
+ l.emit(itemString)
+ return lexInsideAction
+}
+
+// lexRawQuote scans a raw quoted string.
+func lexRawQuote(l *lexer) stateFn {
+Loop:
+ for {
+ switch l.next() {
+ case eof, '\n':
+ return l.errorf("unterminated raw quoted string")
+ case '`':
+ break Loop
+ }
+ }
+ l.emit(itemRawString)
+ return lexInsideAction
+}
+
+// isSpace reports whether r is a space character.
+func isSpace(r rune) bool {
+ return r == ' ' || r == '\t'
+}
+
+// isEndOfLine reports whether r is an end-of-line character.
+func isEndOfLine(r rune) bool {
+ return r == '\r' || r == '\n'
+}
+
+// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+func isAlphaNumeric(r rune) bool {
+ return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
+}
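
A sketch of how the lexer above is consumed. This fragment only works from inside package parse (lex and item are unexported), e.g. in a test; the loop is essentially what the parser's token-fetching code does: drain items until EOF or an error item appears.

	l := lex("sketch", `Hi {{printf "%d" 23}}`, "", "")
	for {
		it := l.nextItem()
		fmt.Printf("%v %q\n", it.typ, it.val)
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
	}
	// Items, in order: text "Hi ", "{{", identifier "printf", a space,
	// string "\"%d\"", a space, number "23", "}}", then EOF.
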
diff --git a/src/text/template/parse/lex_test.go b/src/text/template/parse/lex_test.go
new file mode 100644
index 000000000..d251ccffb
--- /dev/null
+++ b/src/text/template/parse/lex_test.go
@@ -0,0 +1,465 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "fmt"
+ "testing"
+)
+
+// Make the types prettyprint.
+var itemName = map[itemType]string{
+ itemError: "error",
+ itemBool: "bool",
+ itemChar: "char",
+ itemCharConstant: "charconst",
+ itemComplex: "complex",
+ itemColonEquals: ":=",
+ itemEOF: "EOF",
+ itemField: "field",
+ itemIdentifier: "identifier",
+ itemLeftDelim: "left delim",
+ itemLeftParen: "(",
+ itemNumber: "number",
+ itemPipe: "pipe",
+ itemRawString: "raw string",
+ itemRightDelim: "right delim",
+ itemRightParen: ")",
+ itemSpace: "space",
+ itemString: "string",
+ itemVariable: "variable",
+
+ // keywords
+ itemDot: ".",
+ itemDefine: "define",
+ itemElse: "else",
+ itemIf: "if",
+ itemEnd: "end",
+ itemNil: "nil",
+ itemRange: "range",
+ itemTemplate: "template",
+ itemWith: "with",
+}
+
+func (i itemType) String() string {
+ s := itemName[i]
+ if s == "" {
+ return fmt.Sprintf("item%d", int(i))
+ }
+ return s
+}
+
+type lexTest struct {
+ name string
+ input string
+ items []item
+}
+
+var (
+ tEOF = item{itemEOF, 0, ""}
+ tFor = item{itemIdentifier, 0, "for"}
+ tLeft = item{itemLeftDelim, 0, "{{"}
+ tLpar = item{itemLeftParen, 0, "("}
+ tPipe = item{itemPipe, 0, "|"}
+ tQuote = item{itemString, 0, `"abc \n\t\" "`}
+ tRange = item{itemRange, 0, "range"}
+ tRight = item{itemRightDelim, 0, "}}"}
+ tRpar = item{itemRightParen, 0, ")"}
+ tSpace = item{itemSpace, 0, " "}
+ raw = "`" + `abc\n\t\" ` + "`"
+ tRawQuote = item{itemRawString, 0, raw}
+)
+
+var lexTests = []lexTest{
+ {"empty", "", []item{tEOF}},
+ {"spaces", " \t\n", []item{{itemText, 0, " \t\n"}, tEOF}},
+ {"text", `now is the time`, []item{{itemText, 0, "now is the time"}, tEOF}},
+ {"text with comment", "hello-{{/* this is a comment */}}-world", []item{
+ {itemText, 0, "hello-"},
+ {itemText, 0, "-world"},
+ tEOF,
+ }},
+ {"punctuation", "{{,@% }}", []item{
+ tLeft,
+ {itemChar, 0, ","},
+ {itemChar, 0, "@"},
+ {itemChar, 0, "%"},
+ tSpace,
+ tRight,
+ tEOF,
+ }},
+ {"parens", "{{((3))}}", []item{
+ tLeft,
+ tLpar,
+ tLpar,
+ {itemNumber, 0, "3"},
+ tRpar,
+ tRpar,
+ tRight,
+ tEOF,
+ }},
+ {"empty action", `{{}}`, []item{tLeft, tRight, tEOF}},
+ {"for", `{{for}}`, []item{tLeft, tFor, tRight, tEOF}},
+ {"quote", `{{"abc \n\t\" "}}`, []item{tLeft, tQuote, tRight, tEOF}},
+ {"raw quote", "{{" + raw + "}}", []item{tLeft, tRawQuote, tRight, tEOF}},
+ {"numbers", "{{1 02 0x14 -7.2i 1e3 +1.2e-4 4.2i 1+2i}}", []item{
+ tLeft,
+ {itemNumber, 0, "1"},
+ tSpace,
+ {itemNumber, 0, "02"},
+ tSpace,
+ {itemNumber, 0, "0x14"},
+ tSpace,
+ {itemNumber, 0, "-7.2i"},
+ tSpace,
+ {itemNumber, 0, "1e3"},
+ tSpace,
+ {itemNumber, 0, "+1.2e-4"},
+ tSpace,
+ {itemNumber, 0, "4.2i"},
+ tSpace,
+ {itemComplex, 0, "1+2i"},
+ tRight,
+ tEOF,
+ }},
+ {"characters", `{{'a' '\n' '\'' '\\' '\u00FF' '\xFF' '本'}}`, []item{
+ tLeft,
+ {itemCharConstant, 0, `'a'`},
+ tSpace,
+ {itemCharConstant, 0, `'\n'`},
+ tSpace,
+ {itemCharConstant, 0, `'\''`},
+ tSpace,
+ {itemCharConstant, 0, `'\\'`},
+ tSpace,
+ {itemCharConstant, 0, `'\u00FF'`},
+ tSpace,
+ {itemCharConstant, 0, `'\xFF'`},
+ tSpace,
+ {itemCharConstant, 0, `'本'`},
+ tRight,
+ tEOF,
+ }},
+ {"bools", "{{true false}}", []item{
+ tLeft,
+ {itemBool, 0, "true"},
+ tSpace,
+ {itemBool, 0, "false"},
+ tRight,
+ tEOF,
+ }},
+ {"dot", "{{.}}", []item{
+ tLeft,
+ {itemDot, 0, "."},
+ tRight,
+ tEOF,
+ }},
+ {"nil", "{{nil}}", []item{
+ tLeft,
+ {itemNil, 0, "nil"},
+ tRight,
+ tEOF,
+ }},
+ {"dots", "{{.x . .2 .x.y.z}}", []item{
+ tLeft,
+ {itemField, 0, ".x"},
+ tSpace,
+ {itemDot, 0, "."},
+ tSpace,
+ {itemNumber, 0, ".2"},
+ tSpace,
+ {itemField, 0, ".x"},
+ {itemField, 0, ".y"},
+ {itemField, 0, ".z"},
+ tRight,
+ tEOF,
+ }},
+ {"keywords", "{{range if else end with}}", []item{
+ tLeft,
+ {itemRange, 0, "range"},
+ tSpace,
+ {itemIf, 0, "if"},
+ tSpace,
+ {itemElse, 0, "else"},
+ tSpace,
+ {itemEnd, 0, "end"},
+ tSpace,
+ {itemWith, 0, "with"},
+ tRight,
+ tEOF,
+ }},
+ {"variables", "{{$c := printf $ $hello $23 $ $var.Field .Method}}", []item{
+ tLeft,
+ {itemVariable, 0, "$c"},
+ tSpace,
+ {itemColonEquals, 0, ":="},
+ tSpace,
+ {itemIdentifier, 0, "printf"},
+ tSpace,
+ {itemVariable, 0, "$"},
+ tSpace,
+ {itemVariable, 0, "$hello"},
+ tSpace,
+ {itemVariable, 0, "$23"},
+ tSpace,
+ {itemVariable, 0, "$"},
+ tSpace,
+ {itemVariable, 0, "$var"},
+ {itemField, 0, ".Field"},
+ tSpace,
+ {itemField, 0, ".Method"},
+ tRight,
+ tEOF,
+ }},
+ {"variable invocation", "{{$x 23}}", []item{
+ tLeft,
+ {itemVariable, 0, "$x"},
+ tSpace,
+ {itemNumber, 0, "23"},
+ tRight,
+ tEOF,
+ }},
+ {"pipeline", `intro {{echo hi 1.2 |noargs|args 1 "hi"}} outro`, []item{
+ {itemText, 0, "intro "},
+ tLeft,
+ {itemIdentifier, 0, "echo"},
+ tSpace,
+ {itemIdentifier, 0, "hi"},
+ tSpace,
+ {itemNumber, 0, "1.2"},
+ tSpace,
+ tPipe,
+ {itemIdentifier, 0, "noargs"},
+ tPipe,
+ {itemIdentifier, 0, "args"},
+ tSpace,
+ {itemNumber, 0, "1"},
+ tSpace,
+ {itemString, 0, `"hi"`},
+ tRight,
+ {itemText, 0, " outro"},
+ tEOF,
+ }},
+ {"declaration", "{{$v := 3}}", []item{
+ tLeft,
+ {itemVariable, 0, "$v"},
+ tSpace,
+ {itemColonEquals, 0, ":="},
+ tSpace,
+ {itemNumber, 0, "3"},
+ tRight,
+ tEOF,
+ }},
+ {"2 declarations", "{{$v , $w := 3}}", []item{
+ tLeft,
+ {itemVariable, 0, "$v"},
+ tSpace,
+ {itemChar, 0, ","},
+ tSpace,
+ {itemVariable, 0, "$w"},
+ tSpace,
+ {itemColonEquals, 0, ":="},
+ tSpace,
+ {itemNumber, 0, "3"},
+ tRight,
+ tEOF,
+ }},
+ {"field of parenthesized expression", "{{(.X).Y}}", []item{
+ tLeft,
+ tLpar,
+ {itemField, 0, ".X"},
+ tRpar,
+ {itemField, 0, ".Y"},
+ tRight,
+ tEOF,
+ }},
+ // errors
+ {"badchar", "#{{\x01}}", []item{
+ {itemText, 0, "#"},
+ tLeft,
+ {itemError, 0, "unrecognized character in action: U+0001"},
+ }},
+ {"unclosed action", "{{\n}}", []item{
+ tLeft,
+ {itemError, 0, "unclosed action"},
+ }},
+ {"EOF in action", "{{range", []item{
+ tLeft,
+ tRange,
+ {itemError, 0, "unclosed action"},
+ }},
+ {"unclosed quote", "{{\"\n\"}}", []item{
+ tLeft,
+ {itemError, 0, "unterminated quoted string"},
+ }},
+ {"unclosed raw quote", "{{`xx\n`}}", []item{
+ tLeft,
+ {itemError, 0, "unterminated raw quoted string"},
+ }},
+ {"unclosed char constant", "{{'\n}}", []item{
+ tLeft,
+ {itemError, 0, "unterminated character constant"},
+ }},
+ {"bad number", "{{3k}}", []item{
+ tLeft,
+ {itemError, 0, `bad number syntax: "3k"`},
+ }},
+ {"unclosed paren", "{{(3}}", []item{
+ tLeft,
+ tLpar,
+ {itemNumber, 0, "3"},
+ {itemError, 0, `unclosed left paren`},
+ }},
+ {"extra right paren", "{{3)}}", []item{
+ tLeft,
+ {itemNumber, 0, "3"},
+ tRpar,
+ {itemError, 0, `unexpected right paren U+0029 ')'`},
+ }},
+
+ // Fixed bugs
+ // Many elements in an action blew the lookahead until
+ // we made lexInsideAction not loop.
+ {"long pipeline deadlock", "{{|||||}}", []item{
+ tLeft,
+ tPipe,
+ tPipe,
+ tPipe,
+ tPipe,
+ tPipe,
+ tRight,
+ tEOF,
+ }},
+ {"text with bad comment", "hello-{{/*/}}-world", []item{
+ {itemText, 0, "hello-"},
+ {itemError, 0, `unclosed comment`},
+ }},
+	{"text with comment close separated from delim", "hello-{{/* */ }}-world", []item{
+ {itemText, 0, "hello-"},
+ {itemError, 0, `comment ends before closing delimiter`},
+ }},
+ // This one is an error that we can't catch because it breaks templates with
+ // minimized JavaScript. Should have fixed it before Go 1.1.
+ {"unmatched right delimiter", "hello-{.}}-world", []item{
+ {itemText, 0, "hello-{.}}-world"},
+ tEOF,
+ }},
+}
+
+// collect gathers the emitted items into a slice.
+func collect(t *lexTest, left, right string) (items []item) {
+ l := lex(t.name, t.input, left, right)
+ for {
+ item := l.nextItem()
+ items = append(items, item)
+ if item.typ == itemEOF || item.typ == itemError {
+ break
+ }
+ }
+ return
+}
+
+func equal(i1, i2 []item, checkPos bool) bool {
+ if len(i1) != len(i2) {
+ return false
+ }
+ for k := range i1 {
+ if i1[k].typ != i2[k].typ {
+ return false
+ }
+ if i1[k].val != i2[k].val {
+ return false
+ }
+ if checkPos && i1[k].pos != i2[k].pos {
+ return false
+ }
+ }
+ return true
+}
+
+func TestLex(t *testing.T) {
+ for _, test := range lexTests {
+ items := collect(&test, "", "")
+ if !equal(items, test.items, false) {
+ t.Errorf("%s: got\n\t%+v\nexpected\n\t%v", test.name, items, test.items)
+ }
+ }
+}
+
+// Some easy cases from above, but with delimiters $$ and @@
+var lexDelimTests = []lexTest{
+ {"punctuation", "$$,@%{{}}@@", []item{
+ tLeftDelim,
+ {itemChar, 0, ","},
+ {itemChar, 0, "@"},
+ {itemChar, 0, "%"},
+ {itemChar, 0, "{"},
+ {itemChar, 0, "{"},
+ {itemChar, 0, "}"},
+ {itemChar, 0, "}"},
+ tRightDelim,
+ tEOF,
+ }},
+ {"empty action", `$$@@`, []item{tLeftDelim, tRightDelim, tEOF}},
+ {"for", `$$for@@`, []item{tLeftDelim, tFor, tRightDelim, tEOF}},
+ {"quote", `$$"abc \n\t\" "@@`, []item{tLeftDelim, tQuote, tRightDelim, tEOF}},
+ {"raw quote", "$$" + raw + "@@", []item{tLeftDelim, tRawQuote, tRightDelim, tEOF}},
+}
+
+var (
+ tLeftDelim = item{itemLeftDelim, 0, "$$"}
+ tRightDelim = item{itemRightDelim, 0, "@@"}
+)
+
+func TestDelims(t *testing.T) {
+ for _, test := range lexDelimTests {
+ items := collect(&test, "$$", "@@")
+ if !equal(items, test.items, false) {
+ t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
+ }
+ }
+}
+
+var lexPosTests = []lexTest{
+ {"empty", "", []item{tEOF}},
+ {"punctuation", "{{,@%#}}", []item{
+ {itemLeftDelim, 0, "{{"},
+ {itemChar, 2, ","},
+ {itemChar, 3, "@"},
+ {itemChar, 4, "%"},
+ {itemChar, 5, "#"},
+ {itemRightDelim, 6, "}}"},
+ {itemEOF, 8, ""},
+ }},
+ {"sample", "0123{{hello}}xyz", []item{
+ {itemText, 0, "0123"},
+ {itemLeftDelim, 4, "{{"},
+ {itemIdentifier, 6, "hello"},
+ {itemRightDelim, 11, "}}"},
+ {itemText, 13, "xyz"},
+ {itemEOF, 16, ""},
+ }},
+}
+
+// The other tests don't check position, to make the test cases easier to construct.
+// This one does.
+func TestPos(t *testing.T) {
+ for _, test := range lexPosTests {
+ items := collect(&test, "", "")
+ if !equal(items, test.items, true) {
+ t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items)
+ if len(items) == len(test.items) {
+ // Detailed print; avoid item.String() to expose the position value.
+ for i := range items {
+ if !equal(items[i:i+1], test.items[i:i+1], true) {
+ i1 := items[i]
+ i2 := test.items[i]
+ t.Errorf("\t#%d: got {%v %d %q} expected {%v %d %q}", i, i1.typ, i1.pos, i1.val, i2.typ, i2.pos, i2.val)
+ }
+ }
+ }
+ }
+ }
+}
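
TestDelims above exercises alternate delimiters at the lexer level; from the public API the same behavior is configured with Template.Delims. A short sketch (template name and text are illustrative):

	package main

	import (
		"os"
		"text/template"
	)

	func main() {
		// With "$$" and "@@" as delimiters, literal braces pass through as text.
		t := template.Must(template.New("alt").Delims("$$", "@@").Parse("value: $$.@@ and literal {{braces}}\n"))
		if err := t.Execute(os.Stdout, 42); err != nil {
			panic(err)
		}
		// Prints: value: 42 and literal {{braces}}
	}
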
diff --git a/src/text/template/parse/node.go b/src/text/template/parse/node.go
new file mode 100644
index 000000000..55c37f6db
--- /dev/null
+++ b/src/text/template/parse/node.go
@@ -0,0 +1,834 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parse nodes.
+
+package parse
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var textFormat = "%s" // Changed to "%q" in tests for better error messages.
+
+// A Node is an element in the parse tree. The interface is trivial.
+// The interface contains an unexported method so that only
+// types local to this package can satisfy it.
+type Node interface {
+ Type() NodeType
+ String() string
+ // Copy does a deep copy of the Node and all its components.
+ // To avoid type assertions, some XxxNodes also have specialized
+ // CopyXxx methods that return *XxxNode.
+ Copy() Node
+ Position() Pos // byte position of start of node in full original input string
+ // tree returns the containing *Tree.
+ // It is unexported so all implementations of Node are in this package.
+ tree() *Tree
+}
+
+// NodeType identifies the type of a parse tree node.
+type NodeType int
+
+// Pos represents a byte position in the original input text from which
+// this template was parsed.
+type Pos int
+
+func (p Pos) Position() Pos {
+ return p
+}
+
+// Type returns itself and provides an easy default implementation
+// for embedding in a Node. Embedded in all non-trivial Nodes.
+func (t NodeType) Type() NodeType {
+ return t
+}
+
+const (
+ NodeText NodeType = iota // Plain text.
+ NodeAction // A non-control action such as a field evaluation.
+ NodeBool // A boolean constant.
+ NodeChain // A sequence of field accesses.
+ NodeCommand // An element of a pipeline.
+ NodeDot // The cursor, dot.
+ nodeElse // An else action. Not added to tree.
+ nodeEnd // An end action. Not added to tree.
+ NodeField // A field or method name.
+ NodeIdentifier // An identifier; always a function name.
+ NodeIf // An if action.
+ NodeList // A list of Nodes.
+ NodeNil // An untyped nil constant.
+ NodeNumber // A numerical constant.
+ NodePipe // A pipeline of commands.
+ NodeRange // A range action.
+ NodeString // A string constant.
+ NodeTemplate // A template invocation action.
+ NodeVariable // A $ variable.
+ NodeWith // A with action.
+)
+
+// Nodes.
+
+// ListNode holds a sequence of nodes.
+type ListNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Nodes []Node // The element nodes in lexical order.
+}
+
+func (t *Tree) newList(pos Pos) *ListNode {
+ return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
+}
+
+func (l *ListNode) append(n Node) {
+ l.Nodes = append(l.Nodes, n)
+}
+
+func (l *ListNode) tree() *Tree {
+ return l.tr
+}
+
+func (l *ListNode) String() string {
+ b := new(bytes.Buffer)
+ for _, n := range l.Nodes {
+ fmt.Fprint(b, n)
+ }
+ return b.String()
+}
+
+func (l *ListNode) CopyList() *ListNode {
+ if l == nil {
+ return l
+ }
+ n := l.tr.newList(l.Pos)
+ for _, elem := range l.Nodes {
+ n.append(elem.Copy())
+ }
+ return n
+}
+
+func (l *ListNode) Copy() Node {
+ return l.CopyList()
+}
+
+// TextNode holds plain text.
+type TextNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Text []byte // The text; may span newlines.
+}
+
+func (t *Tree) newText(pos Pos, text string) *TextNode {
+ return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
+}
+
+func (t *TextNode) String() string {
+ return fmt.Sprintf(textFormat, t.Text)
+}
+
+func (t *TextNode) tree() *Tree {
+ return t.tr
+}
+
+func (t *TextNode) Copy() Node {
+ return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
+}
+
+// PipeNode holds a pipeline with optional declaration
+type PipeNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Line int // The line number in the input (deprecated; kept for compatibility)
+ Decl []*VariableNode // Variable declarations in lexical order.
+ Cmds []*CommandNode // The commands in lexical order.
+}
+
+func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
+ return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
+}
+
+func (p *PipeNode) append(command *CommandNode) {
+ p.Cmds = append(p.Cmds, command)
+}
+
+func (p *PipeNode) String() string {
+ s := ""
+ if len(p.Decl) > 0 {
+ for i, v := range p.Decl {
+ if i > 0 {
+ s += ", "
+ }
+ s += v.String()
+ }
+ s += " := "
+ }
+ for i, c := range p.Cmds {
+ if i > 0 {
+ s += " | "
+ }
+ s += c.String()
+ }
+ return s
+}
+
+func (p *PipeNode) tree() *Tree {
+ return p.tr
+}
+
+func (p *PipeNode) CopyPipe() *PipeNode {
+ if p == nil {
+ return p
+ }
+ var decl []*VariableNode
+ for _, d := range p.Decl {
+ decl = append(decl, d.Copy().(*VariableNode))
+ }
+ n := p.tr.newPipeline(p.Pos, p.Line, decl)
+ for _, c := range p.Cmds {
+ n.append(c.Copy().(*CommandNode))
+ }
+ return n
+}
+
+func (p *PipeNode) Copy() Node {
+ return p.CopyPipe()
+}
+
+// ActionNode holds an action (something bounded by delimiters).
+// Control actions have their own nodes; ActionNode represents simple
+// ones such as field evaluations and parenthesized pipelines.
+type ActionNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Line int // The line number in the input (deprecated; kept for compatibility)
+ Pipe *PipeNode // The pipeline in the action.
+}
+
+func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
+ return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
+}
+
+func (a *ActionNode) String() string {
+	return fmt.Sprintf("{{%s}}", a.Pipe)
+}
+
+func (a *ActionNode) tree() *Tree {
+ return a.tr
+}
+
+func (a *ActionNode) Copy() Node {
+	return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
+}
+
+// CommandNode holds a command (a pipeline inside an evaluating action).
+type CommandNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Args []Node // Arguments in lexical order: Identifier, field, or constant.
+}
+
+func (t *Tree) newCommand(pos Pos) *CommandNode {
+ return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
+}
+
+func (c *CommandNode) append(arg Node) {
+ c.Args = append(c.Args, arg)
+}
+
+func (c *CommandNode) String() string {
+ s := ""
+ for i, arg := range c.Args {
+ if i > 0 {
+ s += " "
+ }
+ if arg, ok := arg.(*PipeNode); ok {
+ s += "(" + arg.String() + ")"
+ continue
+ }
+ s += arg.String()
+ }
+ return s
+}
+
+func (c *CommandNode) tree() *Tree {
+ return c.tr
+}
+
+func (c *CommandNode) Copy() Node {
+ if c == nil {
+ return c
+ }
+ n := c.tr.newCommand(c.Pos)
+ for _, c := range c.Args {
+ n.append(c.Copy())
+ }
+ return n
+}
+
+// IdentifierNode holds an identifier.
+type IdentifierNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Ident string // The identifier's name.
+}
+
+// NewIdentifier returns a new IdentifierNode with the given identifier name.
+func NewIdentifier(ident string) *IdentifierNode {
+ return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
+}
+
+// SetPos sets the position. NewIdentifier is an exported function so we can't modify its signature.
+// Chained for convenience.
+// TODO: fix one day?
+func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
+ i.Pos = pos
+ return i
+}
+
+// SetTree sets the parent tree for the node. NewIdentifier is an exported function so we can't modify its signature.
+// Chained for convenience.
+// TODO: fix one day?
+func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
+ i.tr = t
+ return i
+}
+
+func (i *IdentifierNode) String() string {
+ return i.Ident
+}
+
+func (i *IdentifierNode) tree() *Tree {
+ return i.tr
+}
+
+func (i *IdentifierNode) Copy() Node {
+ return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
+}
+
+// VariableNode holds a list of variable names, possibly with chained field
+// accesses. The dollar sign is part of the (first) name.
+type VariableNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Ident []string // Variable name and fields in lexical order.
+}
+
+func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
+ return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
+}
+
+func (v *VariableNode) String() string {
+ s := ""
+ for i, id := range v.Ident {
+ if i > 0 {
+ s += "."
+ }
+ s += id
+ }
+ return s
+}
+
+func (v *VariableNode) tree() *Tree {
+ return v.tr
+}
+
+func (v *VariableNode) Copy() Node {
+ return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
+}
+
+// DotNode holds the special identifier '.'.
+type DotNode struct {
+ NodeType
+ Pos
+ tr *Tree
+}
+
+func (t *Tree) newDot(pos Pos) *DotNode {
+ return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
+}
+
+func (d *DotNode) Type() NodeType {
+ // Override method on embedded NodeType for API compatibility.
+ // TODO: Not really a problem; could change API without effect but
+ // api tool complains.
+ return NodeDot
+}
+
+func (d *DotNode) String() string {
+ return "."
+}
+
+func (d *DotNode) tree() *Tree {
+ return d.tr
+}
+
+func (d *DotNode) Copy() Node {
+ return d.tr.newDot(d.Pos)
+}
+
+// NilNode holds the special identifier 'nil' representing an untyped nil constant.
+type NilNode struct {
+ NodeType
+ Pos
+ tr *Tree
+}
+
+func (t *Tree) newNil(pos Pos) *NilNode {
+ return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
+}
+
+func (n *NilNode) Type() NodeType {
+ // Override method on embedded NodeType for API compatibility.
+ // TODO: Not really a problem; could change API without effect but
+ // api tool complains.
+ return NodeNil
+}
+
+func (n *NilNode) String() string {
+ return "nil"
+}
+
+func (n *NilNode) tree() *Tree {
+ return n.tr
+}
+
+func (n *NilNode) Copy() Node {
+ return n.tr.newNil(n.Pos)
+}
+
+// FieldNode holds a field (identifier starting with '.').
+// The names may be chained ('.x.y').
+// The period is dropped from each ident.
+type FieldNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Ident []string // The identifiers in lexical order.
+}
+
+func (t *Tree) newField(pos Pos, ident string) *FieldNode {
+ return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
+}
+
+func (f *FieldNode) String() string {
+ s := ""
+ for _, id := range f.Ident {
+ s += "." + id
+ }
+ return s
+}
+
+func (f *FieldNode) tree() *Tree {
+ return f.tr
+}
+
+func (f *FieldNode) Copy() Node {
+ return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
+}
+
+// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
+// The names may be chained ('.x.y').
+// The periods are dropped from each ident.
+type ChainNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Node Node
+ Field []string // The identifiers in lexical order.
+}
+
+func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
+ return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
+}
+
+// Add adds the named field (which should start with a period) to the end of the chain.
+func (c *ChainNode) Add(field string) {
+ if len(field) == 0 || field[0] != '.' {
+ panic("no dot in field")
+ }
+ field = field[1:] // Remove leading dot.
+ if field == "" {
+ panic("empty field")
+ }
+ c.Field = append(c.Field, field)
+}
+
+func (c *ChainNode) String() string {
+ s := c.Node.String()
+ if _, ok := c.Node.(*PipeNode); ok {
+ s = "(" + s + ")"
+ }
+ for _, field := range c.Field {
+ s += "." + field
+ }
+ return s
+}
+
+func (c *ChainNode) tree() *Tree {
+ return c.tr
+}
+
+func (c *ChainNode) Copy() Node {
+ return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
+}
+
+// BoolNode holds a boolean constant.
+type BoolNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ True bool // The value of the boolean constant.
+}
+
+func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
+ return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
+}
+
+func (b *BoolNode) String() string {
+ if b.True {
+ return "true"
+ }
+ return "false"
+}
+
+func (b *BoolNode) tree() *Tree {
+ return b.tr
+}
+
+func (b *BoolNode) Copy() Node {
+ return b.tr.newBool(b.Pos, b.True)
+}
+
+// NumberNode holds a number: signed or unsigned integer, float, or complex.
+// The value is parsed and stored under all the types that can represent the value.
+// This simulates in a small amount of code the behavior of Go's ideal constants.
+type NumberNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ IsInt bool // Number has an integral value.
+ IsUint bool // Number has an unsigned integral value.
+ IsFloat bool // Number has a floating-point value.
+ IsComplex bool // Number is complex.
+ Int64 int64 // The signed integer value.
+ Uint64 uint64 // The unsigned integer value.
+ Float64 float64 // The floating-point value.
+ Complex128 complex128 // The complex value.
+ Text string // The original textual representation from the input.
+}
+
+func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
+ n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
+ switch typ {
+ case itemCharConstant:
+ rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
+ if err != nil {
+ return nil, err
+ }
+ if tail != "'" {
+ return nil, fmt.Errorf("malformed character constant: %s", text)
+ }
+ n.Int64 = int64(rune)
+ n.IsInt = true
+ n.Uint64 = uint64(rune)
+ n.IsUint = true
+ n.Float64 = float64(rune) // odd but those are the rules.
+ n.IsFloat = true
+ return n, nil
+ case itemComplex:
+ // fmt.Sscan can parse the pair, so let it do the work.
+ if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
+ return nil, err
+ }
+ n.IsComplex = true
+ n.simplifyComplex()
+ return n, nil
+ }
+ // Imaginary constants can only be complex unless they are zero.
+ if len(text) > 0 && text[len(text)-1] == 'i' {
+ f, err := strconv.ParseFloat(text[:len(text)-1], 64)
+ if err == nil {
+ n.IsComplex = true
+ n.Complex128 = complex(0, f)
+ n.simplifyComplex()
+ return n, nil
+ }
+ }
+ // Do integer test first so we get 0x123 etc.
+ u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
+ if err == nil {
+ n.IsUint = true
+ n.Uint64 = u
+ }
+ i, err := strconv.ParseInt(text, 0, 64)
+ if err == nil {
+ n.IsInt = true
+ n.Int64 = i
+ if i == 0 {
+ n.IsUint = true // in case of -0.
+ n.Uint64 = u
+ }
+ }
+ // If an integer extraction succeeded, promote the float.
+ if n.IsInt {
+ n.IsFloat = true
+ n.Float64 = float64(n.Int64)
+ } else if n.IsUint {
+ n.IsFloat = true
+ n.Float64 = float64(n.Uint64)
+ } else {
+ f, err := strconv.ParseFloat(text, 64)
+ if err == nil {
+ n.IsFloat = true
+ n.Float64 = f
+ // If a floating-point extraction succeeded, extract the int if needed.
+ if !n.IsInt && float64(int64(f)) == f {
+ n.IsInt = true
+ n.Int64 = int64(f)
+ }
+ if !n.IsUint && float64(uint64(f)) == f {
+ n.IsUint = true
+ n.Uint64 = uint64(f)
+ }
+ }
+ }
+ if !n.IsInt && !n.IsUint && !n.IsFloat {
+ return nil, fmt.Errorf("illegal number syntax: %q", text)
+ }
+ return n, nil
+}
+
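
A sketch of the flags newNumber sets for a few representative inputs. This is a fragment as if written inside package parse (e.g. in a test); the values in the comments follow the code above.

	t := New("sketch")
	n, _ := t.newNumber(0, "'x'", itemCharConstant)
	fmt.Println(n.IsInt, n.IsUint, n.IsFloat, n.Int64) // true true true 120
	n, _ = t.newNumber(0, "1+0i", itemComplex)
	fmt.Println(n.IsComplex, n.IsFloat, n.IsInt) // true true true (imaginary part is zero)
	n, _ = t.newNumber(0, "0x7B", itemNumber)
	fmt.Println(n.IsUint, n.Int64, n.Float64) // true 123 123
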
+// simplifyComplex pulls out any other types that are represented by the complex number.
+// These all require that the imaginary part be zero.
+func (n *NumberNode) simplifyComplex() {
+ n.IsFloat = imag(n.Complex128) == 0
+ if n.IsFloat {
+ n.Float64 = real(n.Complex128)
+ n.IsInt = float64(int64(n.Float64)) == n.Float64
+ if n.IsInt {
+ n.Int64 = int64(n.Float64)
+ }
+ n.IsUint = float64(uint64(n.Float64)) == n.Float64
+ if n.IsUint {
+ n.Uint64 = uint64(n.Float64)
+ }
+ }
+}
+
+func (n *NumberNode) String() string {
+ return n.Text
+}
+
+func (n *NumberNode) tree() *Tree {
+ return n.tr
+}
+
+func (n *NumberNode) Copy() Node {
+ nn := new(NumberNode)
+ *nn = *n // Easy, fast, correct.
+ return nn
+}
+
+// StringNode holds a string constant. The value has been "unquoted".
+type StringNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Quoted string // The original text of the string, with quotes.
+ Text string // The string, after quote processing.
+}
+
+func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
+ return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
+}
+
+func (s *StringNode) String() string {
+ return s.Quoted
+}
+
+func (s *StringNode) tree() *Tree {
+ return s.tr
+}
+
+func (s *StringNode) Copy() Node {
+ return s.tr.newString(s.Pos, s.Quoted, s.Text)
+}
+
+// endNode represents an {{end}} action.
+// It does not appear in the final parse tree.
+type endNode struct {
+ NodeType
+ Pos
+ tr *Tree
+}
+
+func (t *Tree) newEnd(pos Pos) *endNode {
+ return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
+}
+
+func (e *endNode) String() string {
+ return "{{end}}"
+}
+
+func (e *endNode) tree() *Tree {
+ return e.tr
+}
+
+func (e *endNode) Copy() Node {
+ return e.tr.newEnd(e.Pos)
+}
+
+// elseNode represents an {{else}} action. Does not appear in the final tree.
+type elseNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Line int // The line number in the input (deprecated; kept for compatibility)
+}
+
+func (t *Tree) newElse(pos Pos, line int) *elseNode {
+ return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
+}
+
+func (e *elseNode) Type() NodeType {
+ return nodeElse
+}
+
+func (e *elseNode) String() string {
+ return "{{else}}"
+}
+
+func (e *elseNode) tree() *Tree {
+ return e.tr
+}
+
+func (e *elseNode) Copy() Node {
+ return e.tr.newElse(e.Pos, e.Line)
+}
+
+// BranchNode is the common representation of if, range, and with.
+type BranchNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Line int // The line number in the input (deprecated; kept for compatibility)
+ Pipe *PipeNode // The pipeline to be evaluated.
+ List *ListNode // What to execute if the value is non-empty.
+ ElseList *ListNode // What to execute if the value is empty (nil if absent).
+}
+
+func (b *BranchNode) String() string {
+ name := ""
+ switch b.NodeType {
+ case NodeIf:
+ name = "if"
+ case NodeRange:
+ name = "range"
+ case NodeWith:
+ name = "with"
+ default:
+ panic("unknown branch type")
+ }
+ if b.ElseList != nil {
+ return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
+ }
+ return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
+}
+
+func (b *BranchNode) tree() *Tree {
+ return b.tr
+}
+
+func (b *BranchNode) Copy() Node {
+ switch b.NodeType {
+ case NodeIf:
+ return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
+ case NodeRange:
+ return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
+ case NodeWith:
+ return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
+ default:
+ panic("unknown branch type")
+ }
+}
+
+// IfNode represents an {{if}} action and its commands.
+type IfNode struct {
+ BranchNode
+}
+
+func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
+ return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+func (i *IfNode) Copy() Node {
+ return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
+}
+
+// RangeNode represents a {{range}} action and its commands.
+type RangeNode struct {
+ BranchNode
+}
+
+func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
+ return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+func (r *RangeNode) Copy() Node {
+ return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
+}
+
+// WithNode represents a {{with}} action and its commands.
+type WithNode struct {
+ BranchNode
+}
+
+func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
+ return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
+}
+
+func (w *WithNode) Copy() Node {
+ return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
+}
+
+// TemplateNode represents a {{template}} action.
+type TemplateNode struct {
+ NodeType
+ Pos
+ tr *Tree
+ Line int // The line number in the input (deprecated; kept for compatibility)
+ Name string // The name of the template (unquoted).
+ Pipe *PipeNode // The command to evaluate as dot for the template.
+}
+
+func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
+ return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
+}
+
+func (t *TemplateNode) String() string {
+ if t.Pipe == nil {
+ return fmt.Sprintf("{{template %q}}", t.Name)
+ }
+ return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
+}
+
+func (t *TemplateNode) tree() *Tree {
+ return t.tr
+}
+
+func (t *TemplateNode) Copy() Node {
+ return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
+}
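
The String methods above are written so a parsed tree prints back as template source. A small sketch using the exported entry point (package text/template/parse is importable, though intended for text/template and html/template rather than general use):

	package main

	import (
		"fmt"
		"text/template/parse"
	)

	func main() {
		trees, err := parse.Parse("page", "Hi {{if .OK}}yes{{else}}no{{end}}", "", "", nil)
		if err != nil {
			panic(err)
		}
		fmt.Println(trees["page"].Root.String())
		// Prints the reconstructed source: Hi {{if .OK}}yes{{else}}no{{end}}
	}
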
diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go
new file mode 100644
index 000000000..af33880c1
--- /dev/null
+++ b/src/text/template/parse/parse.go
@@ -0,0 +1,677 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parse builds parse trees for templates as defined by text/template
+// and html/template. Clients should use those packages to construct templates
+// rather than this one, which provides shared internal data structures not
+// intended for general use.
+package parse
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Tree is the representation of a single parsed template.
+type Tree struct {
+ Name string // name of the template represented by the tree.
+ ParseName string // name of the top-level template during parsing, for error messages.
+ Root *ListNode // top-level root of the tree.
+ text string // text parsed to create the template (or its parent)
+ // Parsing only; cleared after parse.
+ funcs []map[string]interface{}
+ lex *lexer
+ token [3]item // three-token lookahead for parser.
+ peekCount int
+ vars []string // variables defined at the moment.
+}
+
+// Copy returns a copy of the Tree. Any parsing state is discarded.
+func (t *Tree) Copy() *Tree {
+ if t == nil {
+ return nil
+ }
+ return &Tree{
+ Name: t.Name,
+ ParseName: t.ParseName,
+ Root: t.Root.CopyList(),
+ text: t.text,
+ }
+}
+
+// Parse returns a map from template name to parse.Tree, created by parsing the
+// templates described in the argument string. The top-level template will be
+// given the specified name. If an error is encountered, parsing stops and an
+// empty map is returned with the error.
+func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
+ treeSet = make(map[string]*Tree)
+ t := New(name)
+ t.text = text
+ _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
+ return
+}
+
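
A sketch of the treeSet behavior documented for Parse above: each {{define}} block becomes its own named Tree in the returned map, alongside the top-level template.

	package main

	import (
		"fmt"
		"text/template/parse"
	)

	func main() {
		const text = `{{define "greet"}}Hello, {{.}}!{{end}}main body`
		trees, err := parse.Parse("root", text, "", "", nil)
		if err != nil {
			panic(err)
		}
		for name, tree := range trees {
			fmt.Printf("%s: %s\n", name, tree.Root)
		}
		// Two entries (map order is unspecified):
		//   root: main body
		//   greet: Hello, {{.}}!
	}
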
+// next returns the next token.
+func (t *Tree) next() item {
+ if t.peekCount > 0 {
+ t.peekCount--
+ } else {
+ t.token[0] = t.lex.nextItem()
+ }
+ return t.token[t.peekCount]
+}
+
+// backup backs the input stream up one token.
+func (t *Tree) backup() {
+ t.peekCount++
+}
+
+// backup2 backs the input stream up two tokens.
+// The zeroth token is already there.
+func (t *Tree) backup2(t1 item) {
+ t.token[1] = t1
+ t.peekCount = 2
+}
+
+// backup3 backs the input stream up three tokens.
+// The zeroth token is already there.
+func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
+ t.token[1] = t1
+ t.token[2] = t2
+ t.peekCount = 3
+}
+
+// peek returns but does not consume the next token.
+func (t *Tree) peek() item {
+ if t.peekCount > 0 {
+ return t.token[t.peekCount-1]
+ }
+ t.peekCount = 1
+ t.token[0] = t.lex.nextItem()
+ return t.token[0]
+}
+
+// nextNonSpace returns the next non-space token.
+func (t *Tree) nextNonSpace() (token item) {
+ for {
+ token = t.next()
+ if token.typ != itemSpace {
+ break
+ }
+ }
+ return token
+}
+
+// peekNonSpace returns but does not consume the next non-space token.
+func (t *Tree) peekNonSpace() (token item) {
+ for {
+ token = t.next()
+ if token.typ != itemSpace {
+ break
+ }
+ }
+ t.backup()
+ return token
+}
+
+// Parsing.
+
+// New allocates a new parse tree with the given name.
+func New(name string, funcs ...map[string]interface{}) *Tree {
+ return &Tree{
+ Name: name,
+ funcs: funcs,
+ }
+}
+
+// ErrorContext returns a textual representation of the location of the node in the input text.
+// The receiver is only used when the node does not have a pointer to the tree inside,
+// which can occur in old code.
+func (t *Tree) ErrorContext(n Node) (location, context string) {
+ pos := int(n.Position())
+ tree := n.tree()
+ if tree == nil {
+ tree = t
+ }
+ text := tree.text[:pos]
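+	// Convert pos, an offset into the full text, into a byte offset within the node's line.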
+ byteNum := strings.LastIndex(text, "\n")
+ if byteNum == -1 {
+ byteNum = pos // On first line.
+ } else {
+ byteNum++ // After the newline.
+ byteNum = pos - byteNum
+ }
+ lineNum := 1 + strings.Count(text, "\n")
+ context = n.String()
+ if len(context) > 20 {
+ context = fmt.Sprintf("%.20s...", context)
+ }
+ return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
+}
+
+// errorf formats the error and terminates processing.
+func (t *Tree) errorf(format string, args ...interface{}) {
+ t.Root = nil
+ format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
+ panic(fmt.Errorf(format, args...))
+}
+
+// error terminates processing.
+func (t *Tree) error(err error) {
+ t.errorf("%s", err)
+}
+
+// expect consumes the next token and guarantees it has the required type.
+func (t *Tree) expect(expected itemType, context string) item {
+ token := t.nextNonSpace()
+ if token.typ != expected {
+ t.unexpected(token, context)
+ }
+ return token
+}
+
+// expectOneOf consumes the next token and guarantees it has one of the required types.
+func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
+ token := t.nextNonSpace()
+ if token.typ != expected1 && token.typ != expected2 {
+ t.unexpected(token, context)
+ }
+ return token
+}
+
+// unexpected complains about the token and terminates processing.
+func (t *Tree) unexpected(token item, context string) {
+ t.errorf("unexpected %s in %s", token, context)
+}
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (t *Tree) recover(errp *error) {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(runtime.Error); ok {
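+			// A runtime error indicates a bug in the parser itself rather than bad input, so re-panic instead of reporting it as a parse error.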
+ panic(e)
+ }
+ if t != nil {
+ t.stopParse()
+ }
+ *errp = e.(error)
+ }
+ return
+}
+
+// startParse initializes the parser, using the lexer.
+func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
+ t.Root = nil
+ t.lex = lex
+ t.vars = []string{"$"}
+ t.funcs = funcs
+}
+
+// stopParse terminates parsing.
+func (t *Tree) stopParse() {
+ t.lex = nil
+ t.vars = nil
+ t.funcs = nil
+}
+
+// Parse parses the template definition string to construct a representation of
+// the template for execution. If either action delimiter string is empty, the
+// default ("{{" or "}}") is used. Embedded template definitions are added to
+// the treeSet map.
+func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
+ defer t.recover(&err)
+ t.ParseName = t.Name
+ t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
+ t.text = text
+ t.parse(treeSet)
+ t.add(treeSet)
+ t.stopParse()
+ return t, nil
+}
+
+// add adds tree to the treeSet.
+func (t *Tree) add(treeSet map[string]*Tree) {
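+	// Replace an existing tree for this name only if it is empty (nothing but space); defining the name twice with non-empty bodies is an error, and an empty redefinition is simply ignored.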
+ tree := treeSet[t.Name]
+ if tree == nil || IsEmptyTree(tree.Root) {
+ treeSet[t.Name] = t
+ return
+ }
+ if !IsEmptyTree(t.Root) {
+ t.errorf("template: multiple definition of template %q", t.Name)
+ }
+}
+
+// IsEmptyTree reports whether this tree (node) is empty of everything but space.
+func IsEmptyTree(n Node) bool {
+ switch n := n.(type) {
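+	// Only nil, all-space text, and lists whose elements are all empty count as empty; action, if, range, template, and with nodes fall through to the final return and make the tree non-empty.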
+ case nil:
+ return true
+ case *ActionNode:
+ case *IfNode:
+ case *ListNode:
+ for _, node := range n.Nodes {
+ if !IsEmptyTree(node) {
+ return false
+ }
+ }
+ return true
+ case *RangeNode:
+ case *TemplateNode:
+ case *TextNode:
+ return len(bytes.TrimSpace(n.Text)) == 0
+ case *WithNode:
+ default:
+ panic("unknown node: " + n.String())
+ }
+ return false
+}
+
+// parse is the top-level parser for a template, essentially the same
+// as itemList except it also parses {{define}} actions.
+// It runs to EOF.
+func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
+ t.Root = t.newList(t.peek().pos)
+ for t.peek().typ != itemEOF {
+ if t.peek().typ == itemLeftDelim {
+ delim := t.next()
+ if t.nextNonSpace().typ == itemDefine {
+ newT := New("definition") // name will be updated once we know it.
+ newT.text = t.text
+ newT.ParseName = t.ParseName
+ newT.startParse(t.funcs, t.lex)
+ newT.parseDefinition(treeSet)
+ continue
+ }
+ t.backup2(delim)
+ }
+ n := t.textOrAction()
+ if n.Type() == nodeEnd {
+ t.errorf("unexpected %s", n)
+ }
+ t.Root.append(n)
+ }
+ return nil
+}
+
+// parseDefinition parses a {{define}} ... {{end}} template definition and
+// installs the definition in the treeSet map. The "define" keyword has already
+// been scanned.
+func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
+ const context = "define clause"
+ name := t.expectOneOf(itemString, itemRawString, context)
+ var err error
+ t.Name, err = strconv.Unquote(name.val)
+ if err != nil {
+ t.error(err)
+ }
+ t.expect(itemRightDelim, context)
+ var end Node
+ t.Root, end = t.itemList()
+ if end.Type() != nodeEnd {
+ t.errorf("unexpected %s in %s", end, context)
+ }
+ t.add(treeSet)
+ t.stopParse()
+}
+
+// itemList:
+// textOrAction*
+// Terminates at {{end}} or {{else}}, returned separately.
+func (t *Tree) itemList() (list *ListNode, next Node) {
+ list = t.newList(t.peekNonSpace().pos)
+ for t.peekNonSpace().typ != itemEOF {
+ n := t.textOrAction()
+ switch n.Type() {
+ case nodeEnd, nodeElse:
+ return list, n
+ }
+ list.append(n)
+ }
+ t.errorf("unexpected EOF")
+ return
+}
+
+// textOrAction:
+// text | action
+func (t *Tree) textOrAction() Node {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemText:
+ return t.newText(token.pos, token.val)
+ case itemLeftDelim:
+ return t.action()
+ default:
+ t.unexpected(token, "input")
+ }
+ return nil
+}
+
+// Action:
+// control
+// command ("|" command)*
+// Left delim is past. Now get actions.
+// First word could be a keyword such as range.
+func (t *Tree) action() (n Node) {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemElse:
+ return t.elseControl()
+ case itemEnd:
+ return t.endControl()
+ case itemIf:
+ return t.ifControl()
+ case itemRange:
+ return t.rangeControl()
+ case itemTemplate:
+ return t.templateControl()
+ case itemWith:
+ return t.withControl()
+ }
+ t.backup()
+ // Do not pop variables; they persist until "end".
+ return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
+}
+
+// Pipeline:
+// declarations? command ('|' command)*
+func (t *Tree) pipeline(context string) (pipe *PipeNode) {
+ var decl []*VariableNode
+ pos := t.peekNonSpace().pos
+ // Are there declarations?
+ for {
+ if v := t.peekNonSpace(); v.typ == itemVariable {
+ t.next()
+ // Since space is a token, we need 3-token look-ahead here in the worst case:
+ // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
+ // argument variable rather than a declaration. So remember the token
+ // adjacent to the variable so we can push it back if necessary.
+ tokenAfterVariable := t.peek()
+ if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
+ t.nextNonSpace()
+ variable := t.newVariable(v.pos, v.val)
+ decl = append(decl, variable)
+ t.vars = append(t.vars, v.val)
+ if next.typ == itemChar && next.val == "," {
+ if context == "range" && len(decl) < 2 {
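+						// A range action may declare a second variable; loop back to read it.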
+ continue
+ }
+ t.errorf("too many declarations in %s", context)
+ }
+ } else if tokenAfterVariable.typ == itemSpace {
+ t.backup3(v, tokenAfterVariable)
+ } else {
+ t.backup2(v)
+ }
+ }
+ break
+ }
+ pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
+ for {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemRightDelim, itemRightParen:
+ if len(pipe.Cmds) == 0 {
+ t.errorf("missing value for %s", context)
+ }
+ if token.typ == itemRightParen {
+ t.backup()
+ }
+ return
+ case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
+ itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
+ t.backup()
+ pipe.append(t.command())
+ default:
+ t.unexpected(token, context)
+ }
+ }
+}
+
+func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
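+	// Variables declared by the pipeline are scoped to this control structure; trim the variable stack back to its current depth once the control has been parsed.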
+ defer t.popVars(len(t.vars))
+ line = t.lex.lineNumber()
+ pipe = t.pipeline(context)
+ var next Node
+ list, next = t.itemList()
+ switch next.Type() {
+	case nodeEnd: // done
+ case nodeElse:
+ if allowElseIf {
+ // Special case for "else if". If the "else" is followed immediately by an "if",
+ // the elseControl will have left the "if" token pending. Treat
+ // {{if a}}_{{else if b}}_{{end}}
+ // as
+ // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
+			// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
+ // is assumed. This technique works even for long if-else-if chains.
+ // TODO: Should we allow else-if in with and range?
+ if t.peek().typ == itemIf {
+ t.next() // Consume the "if" token.
+ elseList = t.newList(next.Position())
+ elseList.append(t.ifControl())
+ // Do not consume the next item - only one {{end}} required.
+ break
+ }
+ }
+ elseList, next = t.itemList()
+ if next.Type() != nodeEnd {
+ t.errorf("expected end; found %s", next)
+ }
+ }
+ return pipe.Position(), line, pipe, list, elseList
+}
+
+// If:
+// {{if pipeline}} itemList {{end}}
+// {{if pipeline}} itemList {{else}} itemList {{end}}
+// If keyword is past.
+func (t *Tree) ifControl() Node {
+ return t.newIf(t.parseControl(true, "if"))
+}
+
+// Range:
+// {{range pipeline}} itemList {{end}}
+// {{range pipeline}} itemList {{else}} itemList {{end}}
+// Range keyword is past.
+func (t *Tree) rangeControl() Node {
+ return t.newRange(t.parseControl(false, "range"))
+}
+
+// With:
+// {{with pipeline}} itemList {{end}}
+// {{with pipeline}} itemList {{else}} itemList {{end}}
+// With keyword is past.
+func (t *Tree) withControl() Node {
+ return t.newWith(t.parseControl(false, "with"))
+}
+
+// End:
+// {{end}}
+// End keyword is past.
+func (t *Tree) endControl() Node {
+ return t.newEnd(t.expect(itemRightDelim, "end").pos)
+}
+
+// Else:
+// {{else}}
+// Else keyword is past.
+func (t *Tree) elseControl() Node {
+ // Special case for "else if".
+ peek := t.peekNonSpace()
+ if peek.typ == itemIf {
+ // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
+ return t.newElse(peek.pos, t.lex.lineNumber())
+ }
+ return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
+}
+
+// Template:
+// {{template stringValue pipeline}}
+// Template keyword is past. The name must be something that can evaluate
+// to a string.
+func (t *Tree) templateControl() Node {
+ var name string
+ token := t.nextNonSpace()
+ switch token.typ {
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+ t.error(err)
+ }
+ name = s
+ default:
+ t.unexpected(token, "template invocation")
+ }
+ var pipe *PipeNode
+ if t.nextNonSpace().typ != itemRightDelim {
+ t.backup()
+ // Do not pop variables; they persist until "end".
+ pipe = t.pipeline("template")
+ }
+ return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
+}
+
+// command:
+// operand (space operand)*
+//	Space-separated arguments up to a pipeline character or right delimiter.
+//	We consume the pipe character but leave the right delim to terminate the action.
+func (t *Tree) command() *CommandNode {
+ cmd := t.newCommand(t.peekNonSpace().pos)
+ for {
+ t.peekNonSpace() // skip leading spaces.
+ operand := t.operand()
+ if operand != nil {
+ cmd.append(operand)
+ }
+ switch token := t.next(); token.typ {
+ case itemSpace:
+ continue
+ case itemError:
+ t.errorf("%s", token.val)
+ case itemRightDelim, itemRightParen:
+ t.backup()
+ case itemPipe:
+ default:
+ t.errorf("unexpected %s in operand; missing space?", token)
+ }
+ break
+ }
+ if len(cmd.Args) == 0 {
+ t.errorf("empty command")
+ }
+ return cmd
+}
+
+// operand:
+// term .Field*
+// An operand is a space-separated component of a command,
+// a term possibly followed by field accesses.
+// A nil return means the next item is not an operand.
+func (t *Tree) operand() Node {
+ node := t.term()
+ if node == nil {
+ return nil
+ }
+ if t.peek().typ == itemField {
+ chain := t.newChain(t.peek().pos, node)
+ for t.peek().typ == itemField {
+ chain.Add(t.next().val)
+ }
+ // Compatibility with original API: If the term is of type NodeField
+ // or NodeVariable, just put more fields on the original.
+ // Otherwise, keep the Chain node.
+ // TODO: Switch to Chains always when we can.
+ switch node.Type() {
+ case NodeField:
+ node = t.newField(chain.Position(), chain.String())
+ case NodeVariable:
+ node = t.newVariable(chain.Position(), chain.String())
+ default:
+ node = chain
+ }
+ }
+ return node
+}
+
+// term:
+// literal (number, string, nil, boolean)
+// function (identifier)
+// .
+// .Field
+// $
+// '(' pipeline ')'
+// A term is a simple "expression".
+// A nil return means the next item is not a term.
+func (t *Tree) term() Node {
+ switch token := t.nextNonSpace(); token.typ {
+ case itemError:
+ t.errorf("%s", token.val)
+ case itemIdentifier:
+ if !t.hasFunction(token.val) {
+ t.errorf("function %q not defined", token.val)
+ }
+ return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
+ case itemDot:
+ return t.newDot(token.pos)
+ case itemNil:
+ return t.newNil(token.pos)
+ case itemVariable:
+ return t.useVar(token.pos, token.val)
+ case itemField:
+ return t.newField(token.pos, token.val)
+ case itemBool:
+ return t.newBool(token.pos, token.val == "true")
+ case itemCharConstant, itemComplex, itemNumber:
+ number, err := t.newNumber(token.pos, token.val, token.typ)
+ if err != nil {
+ t.error(err)
+ }
+ return number
+ case itemLeftParen:
+ pipe := t.pipeline("parenthesized pipeline")
+ if token := t.next(); token.typ != itemRightParen {
+ t.errorf("unclosed right paren: unexpected %s", token)
+ }
+ return pipe
+ case itemString, itemRawString:
+ s, err := strconv.Unquote(token.val)
+ if err != nil {
+ t.error(err)
+ }
+ return t.newString(token.pos, token.val, s)
+ }
+ t.backup()
+ return nil
+}
+
+// hasFunction reports whether a function name exists in the Tree's maps.
+func (t *Tree) hasFunction(name string) bool {
+ for _, funcMap := range t.funcs {
+ if funcMap == nil {
+ continue
+ }
+ if funcMap[name] != nil {
+ return true
+ }
+ }
+ return false
+}
+
+// popVars trims the variable list to the specified length.
+func (t *Tree) popVars(n int) {
+ t.vars = t.vars[:n]
+}
+
+// useVar returns a node for a variable reference. It errors if the
+// variable is not defined.
+func (t *Tree) useVar(pos Pos, name string) Node {
+ v := t.newVariable(pos, name)
+ for _, varName := range t.vars {
+ if varName == v.Ident[0] {
+ return v
+ }
+ }
+ t.errorf("undefined variable %q", v.Ident[0])
+ return nil
+}
diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go
new file mode 100644
index 000000000..4a504fa7c
--- /dev/null
+++ b/src/text/template/parse/parse_test.go
@@ -0,0 +1,423 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parse
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+var debug = flag.Bool("debug", false, "show the errors produced by the main tests")
+
+type numberTest struct {
+ text string
+ isInt bool
+ isUint bool
+ isFloat bool
+ isComplex bool
+ int64
+ uint64
+ float64
+ complex128
+}
+
+var numberTests = []numberTest{
+ // basics
+ {"0", true, true, true, false, 0, 0, 0, 0},
+ {"-0", true, true, true, false, 0, 0, 0, 0}, // check that -0 is a uint.
+ {"73", true, true, true, false, 73, 73, 73, 0},
+ {"073", true, true, true, false, 073, 073, 073, 0},
+ {"0x73", true, true, true, false, 0x73, 0x73, 0x73, 0},
+ {"-73", true, false, true, false, -73, 0, -73, 0},
+ {"+73", true, false, true, false, 73, 0, 73, 0},
+ {"100", true, true, true, false, 100, 100, 100, 0},
+ {"1e9", true, true, true, false, 1e9, 1e9, 1e9, 0},
+ {"-1e9", true, false, true, false, -1e9, 0, -1e9, 0},
+ {"-1.2", false, false, true, false, 0, 0, -1.2, 0},
+ {"1e19", false, true, true, false, 0, 1e19, 1e19, 0},
+ {"-1e19", false, false, true, false, 0, 0, -1e19, 0},
+ {"4i", false, false, false, true, 0, 0, 0, 4i},
+ {"-1.2+4.2i", false, false, false, true, 0, 0, 0, -1.2 + 4.2i},
+ {"073i", false, false, false, true, 0, 0, 0, 73i}, // not octal!
+ // complex with 0 imaginary are float (and maybe integer)
+ {"0i", true, true, true, true, 0, 0, 0, 0},
+ {"-1.2+0i", false, false, true, true, 0, 0, -1.2, -1.2},
+ {"-12+0i", true, false, true, true, -12, 0, -12, -12},
+ {"13+0i", true, true, true, true, 13, 13, 13, 13},
+ // funny bases
+ {"0123", true, true, true, false, 0123, 0123, 0123, 0},
+ {"-0x0", true, true, true, false, 0, 0, 0, 0},
+ {"0xdeadbeef", true, true, true, false, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0},
+ // character constants
+ {`'a'`, true, true, true, false, 'a', 'a', 'a', 0},
+ {`'\n'`, true, true, true, false, '\n', '\n', '\n', 0},
+ {`'\\'`, true, true, true, false, '\\', '\\', '\\', 0},
+ {`'\''`, true, true, true, false, '\'', '\'', '\'', 0},
+ {`'\xFF'`, true, true, true, false, 0xFF, 0xFF, 0xFF, 0},
+ {`'パ'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ {`'\u30d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ {`'\U000030d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0},
+ // some broken syntax
+ {text: "+-2"},
+ {text: "0x123."},
+ {text: "1e."},
+ {text: "0xi."},
+ {text: "1+2."},
+ {text: "'x"},
+ {text: "'xx'"},
+ // Issue 8622 - 0xe parsed as floating point. Very embarrassing.
+ {"0xef", true, true, true, false, 0xef, 0xef, 0xef, 0},
+}
+
+func TestNumberParse(t *testing.T) {
+ for _, test := range numberTests {
+ // If fmt.Sscan thinks it's complex, it's complex. We can't trust the output
+ // because imaginary comes out as a number.
+ var c complex128
+ typ := itemNumber
+ var tree *Tree
+ if test.text[0] == '\'' {
+ typ = itemCharConstant
+ } else {
+ _, err := fmt.Sscan(test.text, &c)
+ if err == nil {
+ typ = itemComplex
+ }
+ }
+ n, err := tree.newNumber(0, test.text, typ)
+ ok := test.isInt || test.isUint || test.isFloat || test.isComplex
+ if ok && err != nil {
+ t.Errorf("unexpected error for %q: %s", test.text, err)
+ continue
+ }
+ if !ok && err == nil {
+ t.Errorf("expected error for %q", test.text)
+ continue
+ }
+ if !ok {
+ if *debug {
+ fmt.Printf("%s\n\t%s\n", test.text, err)
+ }
+ continue
+ }
+ if n.IsComplex != test.isComplex {
+ t.Errorf("complex incorrect for %q; should be %t", test.text, test.isComplex)
+ }
+ if test.isInt {
+ if !n.IsInt {
+ t.Errorf("expected integer for %q", test.text)
+ }
+ if n.Int64 != test.int64 {
+ t.Errorf("int64 for %q should be %d Is %d", test.text, test.int64, n.Int64)
+ }
+ } else if n.IsInt {
+ t.Errorf("did not expect integer for %q", test.text)
+ }
+ if test.isUint {
+ if !n.IsUint {
+ t.Errorf("expected unsigned integer for %q", test.text)
+ }
+ if n.Uint64 != test.uint64 {
+ t.Errorf("uint64 for %q should be %d Is %d", test.text, test.uint64, n.Uint64)
+ }
+ } else if n.IsUint {
+ t.Errorf("did not expect unsigned integer for %q", test.text)
+ }
+ if test.isFloat {
+ if !n.IsFloat {
+ t.Errorf("expected float for %q", test.text)
+ }
+ if n.Float64 != test.float64 {
+ t.Errorf("float64 for %q should be %g Is %g", test.text, test.float64, n.Float64)
+ }
+ } else if n.IsFloat {
+ t.Errorf("did not expect float for %q", test.text)
+ }
+ if test.isComplex {
+ if !n.IsComplex {
+ t.Errorf("expected complex for %q", test.text)
+ }
+ if n.Complex128 != test.complex128 {
+ t.Errorf("complex128 for %q should be %g Is %g", test.text, test.complex128, n.Complex128)
+ }
+ } else if n.IsComplex {
+ t.Errorf("did not expect complex for %q", test.text)
+ }
+ }
+}
+
+type parseTest struct {
+ name string
+ input string
+ ok bool
+ result string // what the user would see in an error message.
+}
+
+const (
+ noError = true
+ hasError = false
+)
+
+var parseTests = []parseTest{
+ {"empty", "", noError,
+ ``},
+ {"comment", "{{/*\n\n\n*/}}", noError,
+ ``},
+ {"spaces", " \t\n", noError,
+ `" \t\n"`},
+ {"text", "some text", noError,
+ `"some text"`},
+ {"emptyAction", "{{}}", hasError,
+ `{{}}`},
+ {"field", "{{.X}}", noError,
+ `{{.X}}`},
+ {"simple command", "{{printf}}", noError,
+ `{{printf}}`},
+ {"$ invocation", "{{$}}", noError,
+ "{{$}}"},
+ {"variable invocation", "{{with $x := 3}}{{$x 23}}{{end}}", noError,
+ "{{with $x := 3}}{{$x 23}}{{end}}"},
+ {"variable with fields", "{{$.I}}", noError,
+ "{{$.I}}"},
+ {"multi-word command", "{{printf `%d` 23}}", noError,
+ "{{printf `%d` 23}}"},
+ {"pipeline", "{{.X|.Y}}", noError,
+ `{{.X | .Y}}`},
+ {"pipeline with decl", "{{$x := .X|.Y}}", noError,
+ `{{$x := .X | .Y}}`},
+ {"nested pipeline", "{{.X (.Y .Z) (.A | .B .C) (.E)}}", noError,
+ `{{.X (.Y .Z) (.A | .B .C) (.E)}}`},
+ {"field applied to parentheses", "{{(.Y .Z).Field}}", noError,
+ `{{(.Y .Z).Field}}`},
+ {"simple if", "{{if .X}}hello{{end}}", noError,
+ `{{if .X}}"hello"{{end}}`},
+ {"if with else", "{{if .X}}true{{else}}false{{end}}", noError,
+ `{{if .X}}"true"{{else}}"false"{{end}}`},
+ {"if with else if", "{{if .X}}true{{else if .Y}}false{{end}}", noError,
+ `{{if .X}}"true"{{else}}{{if .Y}}"false"{{end}}{{end}}`},
+ {"if else chain", "+{{if .X}}X{{else if .Y}}Y{{else if .Z}}Z{{end}}+", noError,
+ `"+"{{if .X}}"X"{{else}}{{if .Y}}"Y"{{else}}{{if .Z}}"Z"{{end}}{{end}}{{end}}"+"`},
+ {"simple range", "{{range .X}}hello{{end}}", noError,
+ `{{range .X}}"hello"{{end}}`},
+ {"chained field range", "{{range .X.Y.Z}}hello{{end}}", noError,
+ `{{range .X.Y.Z}}"hello"{{end}}`},
+ {"nested range", "{{range .X}}hello{{range .Y}}goodbye{{end}}{{end}}", noError,
+ `{{range .X}}"hello"{{range .Y}}"goodbye"{{end}}{{end}}`},
+ {"range with else", "{{range .X}}true{{else}}false{{end}}", noError,
+ `{{range .X}}"true"{{else}}"false"{{end}}`},
+ {"range over pipeline", "{{range .X|.M}}true{{else}}false{{end}}", noError,
+ `{{range .X | .M}}"true"{{else}}"false"{{end}}`},
+ {"range []int", "{{range .SI}}{{.}}{{end}}", noError,
+ `{{range .SI}}{{.}}{{end}}`},
+ {"range 1 var", "{{range $x := .SI}}{{.}}{{end}}", noError,
+ `{{range $x := .SI}}{{.}}{{end}}`},
+ {"range 2 vars", "{{range $x, $y := .SI}}{{.}}{{end}}", noError,
+ `{{range $x, $y := .SI}}{{.}}{{end}}`},
+ {"constants", "{{range .SI 1 -3.2i true false 'a' nil}}{{end}}", noError,
+ `{{range .SI 1 -3.2i true false 'a' nil}}{{end}}`},
+ {"template", "{{template `x`}}", noError,
+ `{{template "x"}}`},
+ {"template with arg", "{{template `x` .Y}}", noError,
+ `{{template "x" .Y}}`},
+ {"with", "{{with .X}}hello{{end}}", noError,
+ `{{with .X}}"hello"{{end}}`},
+ {"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError,
+ `{{with .X}}"hello"{{else}}"goodbye"{{end}}`},
+ // Errors.
+ {"unclosed action", "hello{{range", hasError, ""},
+ {"unmatched end", "{{end}}", hasError, ""},
+ {"missing end", "hello{{range .x}}", hasError, ""},
+ {"missing end after else", "hello{{range .x}}{{else}}", hasError, ""},
+ {"undefined function", "hello{{undefined}}", hasError, ""},
+ {"undefined variable", "{{$x}}", hasError, ""},
+ {"variable undefined after end", "{{with $x := 4}}{{end}}{{$x}}", hasError, ""},
+ {"variable undefined in template", "{{template $v}}", hasError, ""},
+ {"declare with field", "{{with $x.Y := 4}}{{end}}", hasError, ""},
+ {"template with field ref", "{{template .X}}", hasError, ""},
+ {"template with var", "{{template $v}}", hasError, ""},
+ {"invalid punctuation", "{{printf 3, 4}}", hasError, ""},
+ {"multidecl outside range", "{{with $v, $u := 3}}{{end}}", hasError, ""},
+ {"too many decls in range", "{{range $u, $v, $w := 3}}{{end}}", hasError, ""},
+ {"dot applied to parentheses", "{{printf (printf .).}}", hasError, ""},
+ {"adjacent args", "{{printf 3`x`}}", hasError, ""},
+ {"adjacent args with .", "{{printf `x`.}}", hasError, ""},
+ {"extra end after if", "{{if .X}}a{{else if .Y}}b{{end}}{{end}}", hasError, ""},
+ // Equals (and other chars) do not assignments make (yet).
+ {"bug0a", "{{$x := 0}}{{$x}}", noError, "{{$x := 0}}{{$x}}"},
+ {"bug0b", "{{$x = 1}}{{$x}}", hasError, ""},
+ {"bug0c", "{{$x ! 2}}{{$x}}", hasError, ""},
+ {"bug0d", "{{$x % 3}}{{$x}}", hasError, ""},
+ // Check the parse fails for := rather than comma.
+ {"bug0e", "{{range $x := $y := 3}}{{end}}", hasError, ""},
+ // Another bug: variable read must ignore following punctuation.
+ {"bug1a", "{{$x:=.}}{{$x!2}}", hasError, ""}, // ! is just illegal here.
+ {"bug1b", "{{$x:=.}}{{$x+2}}", hasError, ""}, // $x+2 should not parse as ($x) (+2).
+ {"bug1c", "{{$x:=.}}{{$x +2}}", noError, "{{$x := .}}{{$x +2}}"}, // It's OK with a space.
+}
+
+var builtins = map[string]interface{}{
+ "printf": fmt.Sprintf,
+}
+
+func testParse(doCopy bool, t *testing.T) {
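+	// Quote the contents of text nodes so the expected results in parseTests are unambiguous; restore the default format when done.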
+ textFormat = "%q"
+ defer func() { textFormat = "%s" }()
+ for _, test := range parseTests {
+ tmpl, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree), builtins)
+ switch {
+ case err == nil && !test.ok:
+ t.Errorf("%q: expected error; got none", test.name)
+ continue
+ case err != nil && test.ok:
+ t.Errorf("%q: unexpected error: %v", test.name, err)
+ continue
+ case err != nil && !test.ok:
+ // expected error, got one
+ if *debug {
+ fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err)
+ }
+ continue
+ }
+ var result string
+ if doCopy {
+ result = tmpl.Root.Copy().String()
+ } else {
+ result = tmpl.Root.String()
+ }
+ if result != test.result {
+ t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result)
+ }
+ }
+}
+
+func TestParse(t *testing.T) {
+ testParse(false, t)
+}
+
+// Same as TestParse, but we copy the node first.
+func TestParseCopy(t *testing.T) {
+ testParse(true, t)
+}
+
+type isEmptyTest struct {
+ name string
+ input string
+ empty bool
+}
+
+var isEmptyTests = []isEmptyTest{
+ {"empty", ``, true},
+ {"nonempty", `hello`, false},
+ {"spaces only", " \t\n \t\n", true},
+ {"definition", `{{define "x"}}something{{end}}`, true},
+ {"definitions and space", "{{define `x`}}something{{end}}\n\n{{define `y`}}something{{end}}\n\n", true},
+ {"definitions and text", "{{define `x`}}something{{end}}\nx\n{{define `y`}}something{{end}}\ny\n", false},
+ {"definition and action", "{{define `x`}}something{{end}}{{if 3}}foo{{end}}", false},
+}
+
+func TestIsEmpty(t *testing.T) {
+ if !IsEmptyTree(nil) {
+ t.Errorf("nil tree is not empty")
+ }
+ for _, test := range isEmptyTests {
+ tree, err := New("root").Parse(test.input, "", "", make(map[string]*Tree), nil)
+ if err != nil {
+ t.Errorf("%q: unexpected error: %v", test.name, err)
+ continue
+ }
+ if empty := IsEmptyTree(tree.Root); empty != test.empty {
+ t.Errorf("%q: expected %t got %t", test.name, test.empty, empty)
+ }
+ }
+}
+
+func TestErrorContextWithTreeCopy(t *testing.T) {
+ tree, err := New("root").Parse("{{if true}}{{end}}", "", "", make(map[string]*Tree), nil)
+ if err != nil {
+ t.Fatalf("unexpected tree parse failure: %v", err)
+ }
+ treeCopy := tree.Copy()
+ wantLocation, wantContext := tree.ErrorContext(tree.Root.Nodes[0])
+ gotLocation, gotContext := treeCopy.ErrorContext(treeCopy.Root.Nodes[0])
+ if wantLocation != gotLocation {
+ t.Errorf("wrong error location want %q got %q", wantLocation, gotLocation)
+ }
+ if wantContext != gotContext {
+		t.Errorf("wrong error context want %q got %q", wantContext, gotContext)
+ }
+}
+
+// All failures, and the result is a string that must appear in the error message.
+var errorTests = []parseTest{
+ // Check line numbers are accurate.
+ {"unclosed1",
+ "line1\n{{",
+ hasError, `unclosed1:2: unexpected unclosed action in command`},
+ {"unclosed2",
+ "line1\n{{define `x`}}line2\n{{",
+ hasError, `unclosed2:3: unexpected unclosed action in command`},
+ // Specific errors.
+ {"function",
+ "{{foo}}",
+ hasError, `function "foo" not defined`},
+ {"comment",
+ "{{/*}}",
+ hasError, `unclosed comment`},
+ {"lparen",
+ "{{.X (1 2 3}}",
+ hasError, `unclosed left paren`},
+ {"rparen",
+ "{{.X 1 2 3)}}",
+ hasError, `unexpected ")"`},
+ {"space",
+ "{{`x`3}}",
+ hasError, `missing space?`},
+ {"idchar",
+ "{{a#}}",
+ hasError, `'#'`},
+ {"charconst",
+ "{{'a}}",
+ hasError, `unterminated character constant`},
+ {"stringconst",
+ `{{"a}}`,
+ hasError, `unterminated quoted string`},
+ {"rawstringconst",
+ "{{`a}}",
+ hasError, `unterminated raw quoted string`},
+ {"number",
+ "{{0xi}}",
+ hasError, `number syntax`},
+ {"multidefine",
+ "{{define `a`}}a{{end}}{{define `a`}}b{{end}}",
+ hasError, `multiple definition of template`},
+ {"eof",
+ "{{range .X}}",
+ hasError, `unexpected EOF`},
+ {"variable",
+ // Declare $x so it's defined, to avoid that error, and then check we don't parse a declaration.
+ "{{$x := 23}}{{with $x.y := 3}}{{$x 23}}{{end}}",
+ hasError, `unexpected ":="`},
+ {"multidecl",
+ "{{$a,$b,$c := 23}}",
+ hasError, `too many declarations`},
+ {"undefvar",
+ "{{$a}}",
+ hasError, `undefined variable`},
+}
+
+func TestErrors(t *testing.T) {
+ for _, test := range errorTests {
+ _, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree))
+ if err == nil {
+ t.Errorf("%q: expected error", test.name)
+ continue
+ }
+ if !strings.Contains(err.Error(), test.result) {
+ t.Errorf("%q: error %q does not contain %q", test.name, err, test.result)
+ }
+ }
+}
diff --git a/src/text/template/template.go b/src/text/template/template.go
new file mode 100644
index 000000000..249d0cbfb
--- /dev/null
+++ b/src/text/template/template.go
@@ -0,0 +1,217 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package template
+
+import (
+ "fmt"
+ "reflect"
+ "text/template/parse"
+)
+
+// common holds the information shared by related templates.
+type common struct {
+ tmpl map[string]*Template
+ // We use two maps, one for parsing and one for execution.
+ // This separation makes the API cleaner since it doesn't
+ // expose reflection to the client.
+ parseFuncs FuncMap
+ execFuncs map[string]reflect.Value
+}
+
+// Template is the representation of a parsed template. The *parse.Tree
+// field is exported only for use by html/template and should be treated
+// as unexported by all other clients.
+type Template struct {
+ name string
+ *parse.Tree
+ *common
+ leftDelim string
+ rightDelim string
+}
+
+// New allocates a new template with the given name.
+func New(name string) *Template {
+ return &Template{
+ name: name,
+ }
+}
+
+// Name returns the name of the template.
+func (t *Template) Name() string {
+ return t.name
+}
+
+// New allocates a new template associated with the given one and with the same
+// delimiters. The association, which is transitive, allows one template to
+// invoke another with a {{template}} action.
+func (t *Template) New(name string) *Template {
+ t.init()
+ return &Template{
+ name: name,
+ common: t.common,
+ leftDelim: t.leftDelim,
+ rightDelim: t.rightDelim,
+ }
+}
+
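+// init lazily allocates the common structure shared by t and its associated templates.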
+func (t *Template) init() {
+ if t.common == nil {
+ t.common = new(common)
+ t.tmpl = make(map[string]*Template)
+ t.parseFuncs = make(FuncMap)
+ t.execFuncs = make(map[string]reflect.Value)
+ }
+}
+
+// Clone returns a duplicate of the template, including all associated
+// templates. The actual representation is not copied, but the name space of
+// associated templates is, so further calls to Parse in the copy will add
+// templates to the copy but not to the original. Clone can be used to prepare
+// common templates and use them with variant definitions for other templates
+// by adding the variants after the clone is made.
+func (t *Template) Clone() (*Template, error) {
+ nt := t.copy(nil)
+ nt.init()
+ nt.tmpl[t.name] = nt
+ for k, v := range t.tmpl {
+ if k == t.name { // Already installed.
+ continue
+ }
+ // The associated templates share nt's common structure.
+ tmpl := v.copy(nt.common)
+ nt.tmpl[k] = tmpl
+ }
+ for k, v := range t.parseFuncs {
+ nt.parseFuncs[k] = v
+ }
+ for k, v := range t.execFuncs {
+ nt.execFuncs[k] = v
+ }
+ return nt, nil
+}
+
+// copy returns a shallow copy of t, with common set to the argument.
+func (t *Template) copy(c *common) *Template {
+ nt := New(t.name)
+ nt.Tree = t.Tree
+ nt.common = c
+ nt.leftDelim = t.leftDelim
+ nt.rightDelim = t.rightDelim
+ return nt
+}
+
+// AddParseTree creates a new template with the name and parse tree
+// and associates it with t.
+func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
+ if t.common != nil && t.tmpl[name] != nil {
+ return nil, fmt.Errorf("template: redefinition of template %q", name)
+ }
+ nt := t.New(name)
+ nt.Tree = tree
+ t.tmpl[name] = nt
+ return nt, nil
+}
+
+// Templates returns a slice of the templates associated with t, including t
+// itself.
+func (t *Template) Templates() []*Template {
+ if t.common == nil {
+ return nil
+ }
+ // Return a slice so we don't expose the map.
+ m := make([]*Template, 0, len(t.tmpl))
+ for _, v := range t.tmpl {
+ m = append(m, v)
+ }
+ return m
+}
+
+// Delims sets the action delimiters to the specified strings, to be used in
+// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
+// definitions will inherit the settings. An empty delimiter stands for the
+// corresponding default: {{ or }}.
+// The return value is the template, so calls can be chained.
+func (t *Template) Delims(left, right string) *Template {
+ t.leftDelim = left
+ t.rightDelim = right
+ return t
+}
+
+// Funcs adds the elements of the argument map to the template's function map.
+// It panics if a value in the map is not a function with appropriate return
+// type. However, it is legal to overwrite elements of the map. The return
+// value is the template, so calls can be chained.
+func (t *Template) Funcs(funcMap FuncMap) *Template {
+ t.init()
+ addValueFuncs(t.execFuncs, funcMap)
+ addFuncs(t.parseFuncs, funcMap)
+ return t
+}
+
+// Lookup returns the template with the given name that is associated with t,
+// or nil if there is no such template.
+func (t *Template) Lookup(name string) *Template {
+ if t.common == nil {
+ return nil
+ }
+ return t.tmpl[name]
+}
+
+// Parse parses a string into a template. Nested template definitions will be
+// associated with the top-level template t. Parse may be called multiple times
+// to parse definitions of templates to associate with t. It is an error if a
+// resulting template is non-empty (contains content other than template
+// definitions) and would replace a non-empty template with the same name.
+// (In multiple calls to Parse with the same receiver template, only one call
+// can contain text other than space, comments, and template definitions.)
+func (t *Template) Parse(text string) (*Template, error) {
+ t.init()
+ trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
+ if err != nil {
+ return nil, err
+ }
+ // Add the newly parsed trees, including the one for t, into our common structure.
+ for name, tree := range trees {
+ // If the name we parsed is the name of this template, overwrite this template.
+ // The associate method checks it's not a redefinition.
+ tmpl := t
+ if name != t.name {
+ tmpl = t.New(name)
+ }
+ // Even if t == tmpl, we need to install it in the common.tmpl map.
+ if replace, err := t.associate(tmpl, tree); err != nil {
+ return nil, err
+ } else if replace {
+ tmpl.Tree = tree
+ }
+ tmpl.leftDelim = t.leftDelim
+ tmpl.rightDelim = t.rightDelim
+ }
+ return t, nil
+}
+
+// associate installs the new template into the group of templates associated
+// with t. It is an error to reuse a name except to overwrite an empty
+// template. The two are already known to share the common structure.
+// The boolean return value reports whether to store this tree as t.Tree.
+func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
+ if new.common != t.common {
+ panic("internal error: associate not common")
+ }
+ name := new.name
+ if old := t.tmpl[name]; old != nil {
+ oldIsEmpty := parse.IsEmptyTree(old.Root)
+ newIsEmpty := parse.IsEmptyTree(tree.Root)
+ if newIsEmpty {
+ // Whether old is empty or not, new is empty; no reason to replace old.
+ return false, nil
+ }
+ if !oldIsEmpty {
+ return false, fmt.Errorf("template: redefinition of template %q", name)
+ }
+ }
+ t.tmpl[name] = new
+ return true, nil
+}
diff --git a/src/text/template/testdata/file1.tmpl b/src/text/template/testdata/file1.tmpl
new file mode 100644
index 000000000..febf9d9f8
--- /dev/null
+++ b/src/text/template/testdata/file1.tmpl
@@ -0,0 +1,2 @@
+{{define "x"}}TEXT{{end}}
+{{define "dotV"}}{{.V}}{{end}}
diff --git a/src/text/template/testdata/file2.tmpl b/src/text/template/testdata/file2.tmpl
new file mode 100644
index 000000000..39bf6fb9e
--- /dev/null
+++ b/src/text/template/testdata/file2.tmpl
@@ -0,0 +1,2 @@
+{{define "dot"}}{{.}}{{end}}
+{{define "nested"}}{{template "dot" .}}{{end}}
diff --git a/src/text/template/testdata/tmpl1.tmpl b/src/text/template/testdata/tmpl1.tmpl
new file mode 100644
index 000000000..b72b3a340
--- /dev/null
+++ b/src/text/template/testdata/tmpl1.tmpl
@@ -0,0 +1,3 @@
+template1
+{{define "x"}}x{{end}}
+{{template "y"}}
diff --git a/src/text/template/testdata/tmpl2.tmpl b/src/text/template/testdata/tmpl2.tmpl
new file mode 100644
index 000000000..16beba6e7
--- /dev/null
+++ b/src/text/template/testdata/tmpl2.tmpl
@@ -0,0 +1,3 @@
+template2
+{{define "y"}}y{{end}}
+{{template "x"}}