author     Russ Cox <rsc@golang.org>  2014-09-08 00:08:51 -0400
committer  Russ Cox <rsc@golang.org>  2014-09-08 00:08:51 -0400
commit     8528da672cc093d4dd06732819abc1f7b6b5a46e (patch)
tree       334be80d4a4c85b77db6f6fdb67cbf0528cba5f5 /src/go
parent     73bcb69f272cbf34ddcc9daa56427a8683b5a95d (diff)
build: move package sources from src/pkg to src
Preparation was in CL 134570043. This CL contains only the effect of 'hg mv src/pkg/* src'. For more about the move, see golang.org/s/go14nopkg.
Diffstat (limited to 'src/go')
-rw-r--r--  src/go/ast/ast.go  995
-rw-r--r--  src/go/ast/ast_test.go  50
-rw-r--r--  src/go/ast/commentmap.go  332
-rw-r--r--  src/go/ast/commentmap_test.go  143
-rw-r--r--  src/go/ast/example_test.go  210
-rw-r--r--  src/go/ast/filter.go  466
-rw-r--r--  src/go/ast/filter_test.go  86
-rw-r--r--  src/go/ast/import.go  196
-rw-r--r--  src/go/ast/print.go  251
-rw-r--r--  src/go/ast/print_test.go  97
-rw-r--r--  src/go/ast/resolve.go  174
-rw-r--r--  src/go/ast/scope.go  162
-rw-r--r--  src/go/ast/walk.go  386
-rw-r--r--  src/go/build/build.go  1364
-rw-r--r--  src/go/build/build_test.go  205
-rw-r--r--  src/go/build/deps_test.go  443
-rw-r--r--  src/go/build/doc.go  140
-rw-r--r--  src/go/build/read.go  238
-rw-r--r--  src/go/build/read_test.go  226
-rw-r--r--  src/go/build/syslist.go  8
-rw-r--r--  src/go/build/syslist_test.go  62
-rw-r--r--  src/go/build/testdata/other/file/file.go  5
-rw-r--r--  src/go/build/testdata/other/main.go  11
-rw-r--r--  src/go/doc/Makefile  7
-rw-r--r--  src/go/doc/comment.go  480
-rw-r--r--  src/go/doc/comment_test.go  207
-rw-r--r--  src/go/doc/doc.go  111
-rw-r--r--  src/go/doc/doc_test.go  146
-rw-r--r--  src/go/doc/example.go  355
-rw-r--r--  src/go/doc/example_test.go  191
-rw-r--r--  src/go/doc/exports.go  199
-rw-r--r--  src/go/doc/filter.go  105
-rw-r--r--  src/go/doc/headscan.go  114
-rw-r--r--  src/go/doc/reader.go  853
-rw-r--r--  src/go/doc/synopsis.go  82
-rw-r--r--  src/go/doc/synopsis_test.go  51
-rw-r--r--  src/go/doc/testdata/a.0.golden  52
-rw-r--r--  src/go/doc/testdata/a.1.golden  52
-rw-r--r--  src/go/doc/testdata/a.2.golden  52
-rw-r--r--  src/go/doc/testdata/a0.go  40
-rw-r--r--  src/go/doc/testdata/a1.go  12
-rw-r--r--  src/go/doc/testdata/b.0.golden  71
-rw-r--r--  src/go/doc/testdata/b.1.golden  83
-rw-r--r--  src/go/doc/testdata/b.2.golden  71
-rw-r--r--  src/go/doc/testdata/b.go  58
-rw-r--r--  src/go/doc/testdata/benchmark.go  293
-rw-r--r--  src/go/doc/testdata/bugpara.0.golden  20
-rw-r--r--  src/go/doc/testdata/bugpara.1.golden  20
-rw-r--r--  src/go/doc/testdata/bugpara.2.golden  20
-rw-r--r--  src/go/doc/testdata/bugpara.go  5
-rw-r--r--  src/go/doc/testdata/c.0.golden  48
-rw-r--r--  src/go/doc/testdata/c.1.golden  48
-rw-r--r--  src/go/doc/testdata/c.2.golden  48
-rw-r--r--  src/go/doc/testdata/c.go  62
-rw-r--r--  src/go/doc/testdata/d.0.golden  104
-rw-r--r--  src/go/doc/testdata/d.1.golden  104
-rw-r--r--  src/go/doc/testdata/d.2.golden  104
-rw-r--r--  src/go/doc/testdata/d1.go  57
-rw-r--r--  src/go/doc/testdata/d2.go  45
-rw-r--r--  src/go/doc/testdata/e.0.golden  109
-rw-r--r--  src/go/doc/testdata/e.1.golden  144
-rw-r--r--  src/go/doc/testdata/e.2.golden  130
-rw-r--r--  src/go/doc/testdata/e.go  147
-rw-r--r--  src/go/doc/testdata/error1.0.golden  30
-rw-r--r--  src/go/doc/testdata/error1.1.golden  32
-rw-r--r--  src/go/doc/testdata/error1.2.golden  30
-rw-r--r--  src/go/doc/testdata/error1.go  24
-rw-r--r--  src/go/doc/testdata/error2.0.golden  27
-rw-r--r--  src/go/doc/testdata/error2.1.golden  37
-rw-r--r--  src/go/doc/testdata/error2.2.golden  27
-rw-r--r--  src/go/doc/testdata/error2.go  29
-rw-r--r--  src/go/doc/testdata/example.go  81
-rw-r--r--  src/go/doc/testdata/f.0.golden  13
-rw-r--r--  src/go/doc/testdata/f.1.golden  16
-rw-r--r--  src/go/doc/testdata/f.2.golden  13
-rw-r--r--  src/go/doc/testdata/f.go  14
-rw-r--r--  src/go/doc/testdata/template.txt  68
-rw-r--r--  src/go/doc/testdata/testing.0.golden  156
-rw-r--r--  src/go/doc/testdata/testing.1.golden  298
-rw-r--r--  src/go/doc/testdata/testing.2.golden  156
-rw-r--r--  src/go/doc/testdata/testing.go  404
-rw-r--r--  src/go/format/format.go  199
-rw-r--r--  src/go/format/format_test.go  124
-rw-r--r--  src/go/parser/error_test.go  182
-rw-r--r--  src/go/parser/example_test.go  34
-rw-r--r--  src/go/parser/interface.go  198
-rw-r--r--  src/go/parser/parser.go  2460
-rw-r--r--  src/go/parser/parser_test.go  449
-rw-r--r--  src/go/parser/performance_test.go  30
-rw-r--r--  src/go/parser/short_test.go  103
-rw-r--r--  src/go/parser/testdata/commas.src  19
-rw-r--r--  src/go/parser/testdata/issue3106.src  46
-rw-r--r--  src/go/printer/example_test.go  67
-rw-r--r--  src/go/printer/nodes.go  1602
-rw-r--r--  src/go/printer/performance_test.go  58
-rw-r--r--  src/go/printer/printer.go  1292
-rw-r--r--  src/go/printer/printer_test.go  562
-rw-r--r--  src/go/printer/testdata/comments.golden  643
-rw-r--r--  src/go/printer/testdata/comments.input  648
-rw-r--r--  src/go/printer/testdata/comments.x  56
-rw-r--r--  src/go/printer/testdata/comments2.golden  105
-rw-r--r--  src/go/printer/testdata/comments2.input  105
-rw-r--r--  src/go/printer/testdata/declarations.golden  955
-rw-r--r--  src/go/printer/testdata/declarations.input  967
-rw-r--r--  src/go/printer/testdata/empty.golden  5
-rw-r--r--  src/go/printer/testdata/empty.input  5
-rw-r--r--  src/go/printer/testdata/expressions.golden  681
-rw-r--r--  src/go/printer/testdata/expressions.input  710
-rw-r--r--  src/go/printer/testdata/expressions.raw  681
-rw-r--r--  src/go/printer/testdata/linebreaks.golden  275
-rw-r--r--  src/go/printer/testdata/linebreaks.input  271
-rw-r--r--  src/go/printer/testdata/parser.go  2153
-rw-r--r--  src/go/printer/testdata/slow.golden  85
-rw-r--r--  src/go/printer/testdata/slow.input  85
-rw-r--r--  src/go/printer/testdata/statements.golden  644
-rw-r--r--  src/go/printer/testdata/statements.input  555
-rw-r--r--  src/go/scanner/errors.go  126
-rw-r--r--  src/go/scanner/example_test.go  46
-rw-r--r--  src/go/scanner/scanner.go  760
-rw-r--r--  src/go/scanner/scanner_test.go  775
-rw-r--r--  src/go/token/position.go  485
-rw-r--r--  src/go/token/position_test.go  297
-rw-r--r--  src/go/token/serialize.go  56
-rw-r--r--  src/go/token/serialize_test.go  111
-rw-r--r--  src/go/token/token.go  308
125 files changed, 32623 insertions, 0 deletions
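Note that this CL is purely a file move: standard-library import paths are not derived from the src/pkg prefix, so they are unchanged and consumers are unaffected. A minimal sketch (this program is hypothetical, not part of the CL):

package main

import (
	"fmt"
	"go/ast" // same import path before and after the src/pkg -> src move
)

func main() {
	fmt.Println(ast.IsExported("Ident")) // true
}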
diff --git a/src/go/ast/ast.go b/src/go/ast/ast.go
new file mode 100644
index 000000000..312e3d1b9
--- /dev/null
+++ b/src/go/ast/ast.go
@@ -0,0 +1,995 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ast declares the types used to represent syntax trees for Go
+// packages.
+//
+package ast
+
+import (
+ "go/token"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// ----------------------------------------------------------------------------
+// Interfaces
+//
+// There are 3 main classes of nodes: expression and type nodes,
+// statement nodes, and declaration nodes. The node names usually
+// match the names of the corresponding Go spec productions. The
+// node fields correspond to the individual parts of the respective
+// productions.
+//
+// All nodes contain position information marking the beginning of
+// the corresponding source text segment; it is accessible via the
+// Pos accessor method. Nodes may contain additional position info
+// for language constructs where comments may be found between parts
+// of the construct (typically any larger, parenthesized subpart).
+// That position information is needed to properly position comments
+// when printing the construct.
+
+// All node types implement the Node interface.
+type Node interface {
+ Pos() token.Pos // position of first character belonging to the node
+ End() token.Pos // position of first character immediately after the node
+}
+
+// All expression nodes implement the Expr interface.
+type Expr interface {
+ Node
+ exprNode()
+}
+
+// All statement nodes implement the Stmt interface.
+type Stmt interface {
+ Node
+ stmtNode()
+}
+
+// All declaration nodes implement the Decl interface.
+type Decl interface {
+ Node
+ declNode()
+}
+
+// ----------------------------------------------------------------------------
+// Comments
+
+// A Comment node represents a single //-style or /*-style comment.
+type Comment struct {
+ Slash token.Pos // position of "/" starting the comment
+ Text string // comment text (excluding '\n' for //-style comments)
+}
+
+func (c *Comment) Pos() token.Pos { return c.Slash }
+func (c *Comment) End() token.Pos { return token.Pos(int(c.Slash) + len(c.Text)) }
+
+// A CommentGroup represents a sequence of comments
+// with no other tokens and no empty lines between.
+//
+type CommentGroup struct {
+ List []*Comment // len(List) > 0
+}
+
+func (g *CommentGroup) Pos() token.Pos { return g.List[0].Pos() }
+func (g *CommentGroup) End() token.Pos { return g.List[len(g.List)-1].End() }
+
+func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' }
+
+func stripTrailingWhitespace(s string) string {
+ i := len(s)
+ for i > 0 && isWhitespace(s[i-1]) {
+ i--
+ }
+ return s[0:i]
+}
+
+// Text returns the text of the comment.
+// Comment markers (//, /*, and */), the first space of a line comment, and
+// leading and trailing empty lines are removed. Multiple empty lines are
+// reduced to one, and trailing space on lines is trimmed. Unless the result
+// is empty, it is newline-terminated.
+//
+func (g *CommentGroup) Text() string {
+ if g == nil {
+ return ""
+ }
+ comments := make([]string, len(g.List))
+ for i, c := range g.List {
+ comments[i] = string(c.Text)
+ }
+
+ lines := make([]string, 0, 10) // most comments are less than 10 lines
+ for _, c := range comments {
+ // Remove comment markers.
+ // The parser has given us exactly the comment text.
+ switch c[1] {
+ case '/':
+ //-style comment (no newline at the end)
+ c = c[2:]
+ // strip first space - required for Example tests
+ if len(c) > 0 && c[0] == ' ' {
+ c = c[1:]
+ }
+ case '*':
+ /*-style comment */
+ c = c[2 : len(c)-2]
+ }
+
+ // Split on newlines.
+ cl := strings.Split(c, "\n")
+
+ // Walk lines, stripping trailing white space and adding to list.
+ for _, l := range cl {
+ lines = append(lines, stripTrailingWhitespace(l))
+ }
+ }
+
+ // Remove leading blank lines; convert runs of
+ // interior blank lines to a single blank line.
+ n := 0
+ for _, line := range lines {
+ if line != "" || n > 0 && lines[n-1] != "" {
+ lines[n] = line
+ n++
+ }
+ }
+ lines = lines[0:n]
+
+ // Add final "" entry to get trailing newline from Join.
+ if n > 0 && lines[n-1] != "" {
+ lines = append(lines, "")
+ }
+
+ return strings.Join(lines, "\n")
+}
+
+// ----------------------------------------------------------------------------
+// Expressions and types
+
+// A Field represents a Field declaration list in a struct type,
+// a method list in an interface type, or a parameter/result declaration
+// in a signature.
+//
+type Field struct {
+ Doc *CommentGroup // associated documentation; or nil
+ Names []*Ident // field/method/parameter names; or nil if anonymous field
+ Type Expr // field/method/parameter type
+ Tag *BasicLit // field tag; or nil
+ Comment *CommentGroup // line comments; or nil
+}
+
+func (f *Field) Pos() token.Pos {
+ if len(f.Names) > 0 {
+ return f.Names[0].Pos()
+ }
+ return f.Type.Pos()
+}
+
+func (f *Field) End() token.Pos {
+ if f.Tag != nil {
+ return f.Tag.End()
+ }
+ return f.Type.End()
+}
+
+// A FieldList represents a list of Fields, enclosed by parentheses or braces.
+type FieldList struct {
+ Opening token.Pos // position of opening parenthesis/brace, if any
+ List []*Field // field list; or nil
+ Closing token.Pos // position of closing parenthesis/brace, if any
+}
+
+func (f *FieldList) Pos() token.Pos {
+ if f.Opening.IsValid() {
+ return f.Opening
+ }
+ // the list should not be empty in this case;
+ // be conservative and guard against bad ASTs
+ if len(f.List) > 0 {
+ return f.List[0].Pos()
+ }
+ return token.NoPos
+}
+
+func (f *FieldList) End() token.Pos {
+ if f.Closing.IsValid() {
+ return f.Closing + 1
+ }
+ // the list should not be empty in this case;
+ // be conservative and guard against bad ASTs
+ if n := len(f.List); n > 0 {
+ return f.List[n-1].End()
+ }
+ return token.NoPos
+}
+
+// NumFields returns the number of fields (named and anonymous) in a FieldList.
+func (f *FieldList) NumFields() int {
+ n := 0
+ if f != nil {
+ for _, g := range f.List {
+ m := len(g.Names)
+ if m == 0 {
+ m = 1 // anonymous field
+ }
+ n += m
+ }
+ }
+ return n
+}
+
+// An expression is represented by a tree consisting of one
+// or more of the following concrete expression nodes.
+//
+type (
+ // A BadExpr node is a placeholder for expressions containing
+ // syntax errors for which no correct expression nodes can be
+ // created.
+ //
+ BadExpr struct {
+ From, To token.Pos // position range of bad expression
+ }
+
+ // An Ident node represents an identifier.
+ Ident struct {
+ NamePos token.Pos // identifier position
+ Name string // identifier name
+ Obj *Object // denoted object; or nil
+ }
+
+ // An Ellipsis node stands for the "..." type in a
+ // parameter list or the "..." length in an array type.
+ //
+ Ellipsis struct {
+ Ellipsis token.Pos // position of "..."
+ Elt Expr // ellipsis element type (parameter lists only); or nil
+ }
+
+ // A BasicLit node represents a literal of basic type.
+ BasicLit struct {
+ ValuePos token.Pos // literal position
+ Kind token.Token // token.INT, token.FLOAT, token.IMAG, token.CHAR, or token.STRING
+ Value string // literal string; e.g. 42, 0x7f, 3.14, 1e-9, 2.4i, 'a', '\x7f', "foo" or `\m\n\o`
+ }
+
+ // A FuncLit node represents a function literal.
+ FuncLit struct {
+ Type *FuncType // function type
+ Body *BlockStmt // function body
+ }
+
+ // A CompositeLit node represents a composite literal.
+ CompositeLit struct {
+ Type Expr // literal type; or nil
+ Lbrace token.Pos // position of "{"
+ Elts []Expr // list of composite elements; or nil
+ Rbrace token.Pos // position of "}"
+ }
+
+ // A ParenExpr node represents a parenthesized expression.
+ ParenExpr struct {
+ Lparen token.Pos // position of "("
+ X Expr // parenthesized expression
+ Rparen token.Pos // position of ")"
+ }
+
+ // A SelectorExpr node represents an expression followed by a selector.
+ SelectorExpr struct {
+ X Expr // expression
+ Sel *Ident // field selector
+ }
+
+ // An IndexExpr node represents an expression followed by an index.
+ IndexExpr struct {
+ X Expr // expression
+ Lbrack token.Pos // position of "["
+ Index Expr // index expression
+ Rbrack token.Pos // position of "]"
+ }
+
+ // A SliceExpr node represents an expression followed by slice indices.
+ SliceExpr struct {
+ X Expr // expression
+ Lbrack token.Pos // position of "["
+ Low Expr // begin of slice range; or nil
+ High Expr // end of slice range; or nil
+ Max Expr // maximum capacity of slice; or nil
+ Slice3 bool // true if 3-index slice (2 colons present)
+ Rbrack token.Pos // position of "]"
+ }
+
+ // A TypeAssertExpr node represents an expression followed by a
+ // type assertion.
+ //
+ TypeAssertExpr struct {
+ X Expr // expression
+ Lparen token.Pos // position of "("
+ Type Expr // asserted type; nil means type switch X.(type)
+ Rparen token.Pos // position of ")"
+ }
+
+ // A CallExpr node represents an expression followed by an argument list.
+ CallExpr struct {
+ Fun Expr // function expression
+ Lparen token.Pos // position of "("
+ Args []Expr // function arguments; or nil
+ Ellipsis token.Pos // position of "...", if any
+ Rparen token.Pos // position of ")"
+ }
+
+ // A StarExpr node represents an expression of the form "*" Expression.
+ // Semantically it could be a unary "*" expression, or a pointer type.
+ //
+ StarExpr struct {
+ Star token.Pos // position of "*"
+ X Expr // operand
+ }
+
+ // A UnaryExpr node represents a unary expression.
+ // Unary "*" expressions are represented via StarExpr nodes.
+ //
+ UnaryExpr struct {
+ OpPos token.Pos // position of Op
+ Op token.Token // operator
+ X Expr // operand
+ }
+
+ // A BinaryExpr node represents a binary expression.
+ BinaryExpr struct {
+ X Expr // left operand
+ OpPos token.Pos // position of Op
+ Op token.Token // operator
+ Y Expr // right operand
+ }
+
+ // A KeyValueExpr node represents (key : value) pairs
+ // in composite literals.
+ //
+ KeyValueExpr struct {
+ Key Expr
+ Colon token.Pos // position of ":"
+ Value Expr
+ }
+)
+
+// The direction of a channel type is indicated by one
+// of the following constants.
+//
+type ChanDir int
+
+const (
+ SEND ChanDir = 1 << iota
+ RECV
+)
+
+// A type is represented by a tree consisting of one
+// or more of the following type-specific expression
+// nodes.
+//
+type (
+ // An ArrayType node represents an array or slice type.
+ ArrayType struct {
+ Lbrack token.Pos // position of "["
+ Len Expr // Ellipsis node for [...]T array types, nil for slice types
+ Elt Expr // element type
+ }
+
+ // A StructType node represents a struct type.
+ StructType struct {
+ Struct token.Pos // position of "struct" keyword
+ Fields *FieldList // list of field declarations
+ Incomplete bool // true if (source) fields are missing in the Fields list
+ }
+
+ // Pointer types are represented via StarExpr nodes.
+
+ // A FuncType node represents a function type.
+ FuncType struct {
+ Func token.Pos // position of "func" keyword (token.NoPos if there is no "func")
+ Params *FieldList // (incoming) parameters; non-nil
+ Results *FieldList // (outgoing) results; or nil
+ }
+
+ // An InterfaceType node represents an interface type.
+ InterfaceType struct {
+ Interface token.Pos // position of "interface" keyword
+ Methods *FieldList // list of methods
+ Incomplete bool // true if (source) methods are missing in the Methods list
+ }
+
+ // A MapType node represents a map type.
+ MapType struct {
+ Map token.Pos // position of "map" keyword
+ Key Expr
+ Value Expr
+ }
+
+ // A ChanType node represents a channel type.
+ ChanType struct {
+ Begin token.Pos // position of "chan" keyword or "<-" (whichever comes first)
+ Arrow token.Pos // position of "<-" (token.NoPos if there is no "<-")
+ Dir ChanDir // channel direction
+ Value Expr // value type
+ }
+)
+
+// Pos and End implementations for expression/type nodes.
+//
+func (x *BadExpr) Pos() token.Pos { return x.From }
+func (x *Ident) Pos() token.Pos { return x.NamePos }
+func (x *Ellipsis) Pos() token.Pos { return x.Ellipsis }
+func (x *BasicLit) Pos() token.Pos { return x.ValuePos }
+func (x *FuncLit) Pos() token.Pos { return x.Type.Pos() }
+func (x *CompositeLit) Pos() token.Pos {
+ if x.Type != nil {
+ return x.Type.Pos()
+ }
+ return x.Lbrace
+}
+func (x *ParenExpr) Pos() token.Pos { return x.Lparen }
+func (x *SelectorExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *IndexExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *SliceExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *TypeAssertExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *CallExpr) Pos() token.Pos { return x.Fun.Pos() }
+func (x *StarExpr) Pos() token.Pos { return x.Star }
+func (x *UnaryExpr) Pos() token.Pos { return x.OpPos }
+func (x *BinaryExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *KeyValueExpr) Pos() token.Pos { return x.Key.Pos() }
+func (x *ArrayType) Pos() token.Pos { return x.Lbrack }
+func (x *StructType) Pos() token.Pos { return x.Struct }
+func (x *FuncType) Pos() token.Pos {
+ if x.Func.IsValid() || x.Params == nil { // see issue 3870
+ return x.Func
+ }
+ return x.Params.Pos() // interface method declarations have no "func" keyword
+}
+func (x *InterfaceType) Pos() token.Pos { return x.Interface }
+func (x *MapType) Pos() token.Pos { return x.Map }
+func (x *ChanType) Pos() token.Pos { return x.Begin }
+
+func (x *BadExpr) End() token.Pos { return x.To }
+func (x *Ident) End() token.Pos { return token.Pos(int(x.NamePos) + len(x.Name)) }
+func (x *Ellipsis) End() token.Pos {
+ if x.Elt != nil {
+ return x.Elt.End()
+ }
+ return x.Ellipsis + 3 // len("...")
+}
+func (x *BasicLit) End() token.Pos { return token.Pos(int(x.ValuePos) + len(x.Value)) }
+func (x *FuncLit) End() token.Pos { return x.Body.End() }
+func (x *CompositeLit) End() token.Pos { return x.Rbrace + 1 }
+func (x *ParenExpr) End() token.Pos { return x.Rparen + 1 }
+func (x *SelectorExpr) End() token.Pos { return x.Sel.End() }
+func (x *IndexExpr) End() token.Pos { return x.Rbrack + 1 }
+func (x *SliceExpr) End() token.Pos { return x.Rbrack + 1 }
+func (x *TypeAssertExpr) End() token.Pos { return x.Rparen + 1 }
+func (x *CallExpr) End() token.Pos { return x.Rparen + 1 }
+func (x *StarExpr) End() token.Pos { return x.X.End() }
+func (x *UnaryExpr) End() token.Pos { return x.X.End() }
+func (x *BinaryExpr) End() token.Pos { return x.Y.End() }
+func (x *KeyValueExpr) End() token.Pos { return x.Value.End() }
+func (x *ArrayType) End() token.Pos { return x.Elt.End() }
+func (x *StructType) End() token.Pos { return x.Fields.End() }
+func (x *FuncType) End() token.Pos {
+ if x.Results != nil {
+ return x.Results.End()
+ }
+ return x.Params.End()
+}
+func (x *InterfaceType) End() token.Pos { return x.Methods.End() }
+func (x *MapType) End() token.Pos { return x.Value.End() }
+func (x *ChanType) End() token.Pos { return x.Value.End() }
+
+// exprNode() ensures that only expression/type nodes can be
+// assigned to an Expr.
+//
+func (*BadExpr) exprNode() {}
+func (*Ident) exprNode() {}
+func (*Ellipsis) exprNode() {}
+func (*BasicLit) exprNode() {}
+func (*FuncLit) exprNode() {}
+func (*CompositeLit) exprNode() {}
+func (*ParenExpr) exprNode() {}
+func (*SelectorExpr) exprNode() {}
+func (*IndexExpr) exprNode() {}
+func (*SliceExpr) exprNode() {}
+func (*TypeAssertExpr) exprNode() {}
+func (*CallExpr) exprNode() {}
+func (*StarExpr) exprNode() {}
+func (*UnaryExpr) exprNode() {}
+func (*BinaryExpr) exprNode() {}
+func (*KeyValueExpr) exprNode() {}
+
+func (*ArrayType) exprNode() {}
+func (*StructType) exprNode() {}
+func (*FuncType) exprNode() {}
+func (*InterfaceType) exprNode() {}
+func (*MapType) exprNode() {}
+func (*ChanType) exprNode() {}
+
+// ----------------------------------------------------------------------------
+// Convenience functions for Idents
+
+// NewIdent creates a new Ident without position.
+// Useful for ASTs generated by code other than the Go parser.
+//
+func NewIdent(name string) *Ident { return &Ident{token.NoPos, name, nil} }
+
+// IsExported reports whether name is an exported Go symbol
+// (that is, whether it begins with an uppercase letter).
+//
+func IsExported(name string) bool {
+ ch, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(ch)
+}
+
+// IsExported reports whether id is an exported Go symbol
+// (that is, whether it begins with an uppercase letter).
+//
+func (id *Ident) IsExported() bool { return IsExported(id.Name) }
+
+func (id *Ident) String() string {
+ if id != nil {
+ return id.Name
+ }
+ return "<nil>"
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+// A statement is represented by a tree consisting of one
+// or more of the following concrete statement nodes.
+//
+type (
+ // A BadStmt node is a placeholder for statements containing
+ // syntax errors for which no correct statement nodes can be
+ // created.
+ //
+ BadStmt struct {
+ From, To token.Pos // position range of bad statement
+ }
+
+ // A DeclStmt node represents a declaration in a statement list.
+ DeclStmt struct {
+ Decl Decl // *GenDecl with CONST, TYPE, or VAR token
+ }
+
+ // An EmptyStmt node represents an empty statement.
+ // The "position" of the empty statement is the position
+ // of the immediately preceding semicolon.
+ //
+ EmptyStmt struct {
+ Semicolon token.Pos // position of preceding ";"
+ }
+
+ // A LabeledStmt node represents a labeled statement.
+ LabeledStmt struct {
+ Label *Ident
+ Colon token.Pos // position of ":"
+ Stmt Stmt
+ }
+
+ // An ExprStmt node represents a (stand-alone) expression
+ // in a statement list.
+ //
+ ExprStmt struct {
+ X Expr // expression
+ }
+
+ // A SendStmt node represents a send statement.
+ SendStmt struct {
+ Chan Expr
+ Arrow token.Pos // position of "<-"
+ Value Expr
+ }
+
+ // An IncDecStmt node represents an increment or decrement statement.
+ IncDecStmt struct {
+ X Expr
+ TokPos token.Pos // position of Tok
+ Tok token.Token // INC or DEC
+ }
+
+ // An AssignStmt node represents an assignment or
+ // a short variable declaration.
+ //
+ AssignStmt struct {
+ Lhs []Expr
+ TokPos token.Pos // position of Tok
+ Tok token.Token // assignment token, DEFINE
+ Rhs []Expr
+ }
+
+ // A GoStmt node represents a go statement.
+ GoStmt struct {
+ Go token.Pos // position of "go" keyword
+ Call *CallExpr
+ }
+
+ // A DeferStmt node represents a defer statement.
+ DeferStmt struct {
+ Defer token.Pos // position of "defer" keyword
+ Call *CallExpr
+ }
+
+ // A ReturnStmt node represents a return statement.
+ ReturnStmt struct {
+ Return token.Pos // position of "return" keyword
+ Results []Expr // result expressions; or nil
+ }
+
+ // A BranchStmt node represents a break, continue, goto,
+ // or fallthrough statement.
+ //
+ BranchStmt struct {
+ TokPos token.Pos // position of Tok
+ Tok token.Token // keyword token (BREAK, CONTINUE, GOTO, FALLTHROUGH)
+ Label *Ident // label name; or nil
+ }
+
+ // A BlockStmt node represents a braced statement list.
+ BlockStmt struct {
+ Lbrace token.Pos // position of "{"
+ List []Stmt
+ Rbrace token.Pos // position of "}"
+ }
+
+ // An IfStmt node represents an if statement.
+ IfStmt struct {
+ If token.Pos // position of "if" keyword
+ Init Stmt // initialization statement; or nil
+ Cond Expr // condition
+ Body *BlockStmt
+ Else Stmt // else branch; or nil
+ }
+
+ // A CaseClause represents a case of an expression or type switch statement.
+ CaseClause struct {
+ Case token.Pos // position of "case" or "default" keyword
+ List []Expr // list of expressions or types; nil means default case
+ Colon token.Pos // position of ":"
+ Body []Stmt // statement list; or nil
+ }
+
+ // A SwitchStmt node represents an expression switch statement.
+ SwitchStmt struct {
+ Switch token.Pos // position of "switch" keyword
+ Init Stmt // initialization statement; or nil
+ Tag Expr // tag expression; or nil
+ Body *BlockStmt // CaseClauses only
+ }
+
+ // A TypeSwitchStmt node represents a type switch statement.
+ TypeSwitchStmt struct {
+ Switch token.Pos // position of "switch" keyword
+ Init Stmt // initialization statement; or nil
+ Assign Stmt // x := y.(type) or y.(type)
+ Body *BlockStmt // CaseClauses only
+ }
+
+ // A CommClause node represents a case of a select statement.
+ CommClause struct {
+ Case token.Pos // position of "case" or "default" keyword
+ Comm Stmt // send or receive statement; nil means default case
+ Colon token.Pos // position of ":"
+ Body []Stmt // statement list; or nil
+ }
+
+ // A SelectStmt node represents a select statement.
+ SelectStmt struct {
+ Select token.Pos // position of "select" keyword
+ Body *BlockStmt // CommClauses only
+ }
+
+ // A ForStmt represents a for statement.
+ ForStmt struct {
+ For token.Pos // position of "for" keyword
+ Init Stmt // initialization statement; or nil
+ Cond Expr // condition; or nil
+ Post Stmt // post iteration statement; or nil
+ Body *BlockStmt
+ }
+
+ // A RangeStmt represents a for statement with a range clause.
+ RangeStmt struct {
+ For token.Pos // position of "for" keyword
+ Key, Value Expr // Key, Value may be nil
+ TokPos token.Pos // position of Tok; invalid if Key == nil
+ Tok token.Token // ILLEGAL if Key == nil, ASSIGN, DEFINE
+ X Expr // value to range over
+ Body *BlockStmt
+ }
+)
+
+// Pos and End implementations for statement nodes.
+//
+func (s *BadStmt) Pos() token.Pos { return s.From }
+func (s *DeclStmt) Pos() token.Pos { return s.Decl.Pos() }
+func (s *EmptyStmt) Pos() token.Pos { return s.Semicolon }
+func (s *LabeledStmt) Pos() token.Pos { return s.Label.Pos() }
+func (s *ExprStmt) Pos() token.Pos { return s.X.Pos() }
+func (s *SendStmt) Pos() token.Pos { return s.Chan.Pos() }
+func (s *IncDecStmt) Pos() token.Pos { return s.X.Pos() }
+func (s *AssignStmt) Pos() token.Pos { return s.Lhs[0].Pos() }
+func (s *GoStmt) Pos() token.Pos { return s.Go }
+func (s *DeferStmt) Pos() token.Pos { return s.Defer }
+func (s *ReturnStmt) Pos() token.Pos { return s.Return }
+func (s *BranchStmt) Pos() token.Pos { return s.TokPos }
+func (s *BlockStmt) Pos() token.Pos { return s.Lbrace }
+func (s *IfStmt) Pos() token.Pos { return s.If }
+func (s *CaseClause) Pos() token.Pos { return s.Case }
+func (s *SwitchStmt) Pos() token.Pos { return s.Switch }
+func (s *TypeSwitchStmt) Pos() token.Pos { return s.Switch }
+func (s *CommClause) Pos() token.Pos { return s.Case }
+func (s *SelectStmt) Pos() token.Pos { return s.Select }
+func (s *ForStmt) Pos() token.Pos { return s.For }
+func (s *RangeStmt) Pos() token.Pos { return s.For }
+
+func (s *BadStmt) End() token.Pos { return s.To }
+func (s *DeclStmt) End() token.Pos { return s.Decl.End() }
+func (s *EmptyStmt) End() token.Pos {
+ return s.Semicolon + 1 /* len(";") */
+}
+func (s *LabeledStmt) End() token.Pos { return s.Stmt.End() }
+func (s *ExprStmt) End() token.Pos { return s.X.End() }
+func (s *SendStmt) End() token.Pos { return s.Value.End() }
+func (s *IncDecStmt) End() token.Pos {
+ return s.TokPos + 2 /* len("++") */
+}
+func (s *AssignStmt) End() token.Pos { return s.Rhs[len(s.Rhs)-1].End() }
+func (s *GoStmt) End() token.Pos { return s.Call.End() }
+func (s *DeferStmt) End() token.Pos { return s.Call.End() }
+func (s *ReturnStmt) End() token.Pos {
+ if n := len(s.Results); n > 0 {
+ return s.Results[n-1].End()
+ }
+ return s.Return + 6 // len("return")
+}
+func (s *BranchStmt) End() token.Pos {
+ if s.Label != nil {
+ return s.Label.End()
+ }
+ return token.Pos(int(s.TokPos) + len(s.Tok.String()))
+}
+func (s *BlockStmt) End() token.Pos { return s.Rbrace + 1 }
+func (s *IfStmt) End() token.Pos {
+ if s.Else != nil {
+ return s.Else.End()
+ }
+ return s.Body.End()
+}
+func (s *CaseClause) End() token.Pos {
+ if n := len(s.Body); n > 0 {
+ return s.Body[n-1].End()
+ }
+ return s.Colon + 1
+}
+func (s *SwitchStmt) End() token.Pos { return s.Body.End() }
+func (s *TypeSwitchStmt) End() token.Pos { return s.Body.End() }
+func (s *CommClause) End() token.Pos {
+ if n := len(s.Body); n > 0 {
+ return s.Body[n-1].End()
+ }
+ return s.Colon + 1
+}
+func (s *SelectStmt) End() token.Pos { return s.Body.End() }
+func (s *ForStmt) End() token.Pos { return s.Body.End() }
+func (s *RangeStmt) End() token.Pos { return s.Body.End() }
+
+// stmtNode() ensures that only statement nodes can be
+// assigned to a Stmt.
+//
+func (*BadStmt) stmtNode() {}
+func (*DeclStmt) stmtNode() {}
+func (*EmptyStmt) stmtNode() {}
+func (*LabeledStmt) stmtNode() {}
+func (*ExprStmt) stmtNode() {}
+func (*SendStmt) stmtNode() {}
+func (*IncDecStmt) stmtNode() {}
+func (*AssignStmt) stmtNode() {}
+func (*GoStmt) stmtNode() {}
+func (*DeferStmt) stmtNode() {}
+func (*ReturnStmt) stmtNode() {}
+func (*BranchStmt) stmtNode() {}
+func (*BlockStmt) stmtNode() {}
+func (*IfStmt) stmtNode() {}
+func (*CaseClause) stmtNode() {}
+func (*SwitchStmt) stmtNode() {}
+func (*TypeSwitchStmt) stmtNode() {}
+func (*CommClause) stmtNode() {}
+func (*SelectStmt) stmtNode() {}
+func (*ForStmt) stmtNode() {}
+func (*RangeStmt) stmtNode() {}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// A Spec node represents a single (non-parenthesized) import,
+// constant, type, or variable declaration.
+//
+type (
+ // The Spec type stands for any of *ImportSpec, *ValueSpec, and *TypeSpec.
+ Spec interface {
+ Node
+ specNode()
+ }
+
+ // An ImportSpec node represents a single package import.
+ ImportSpec struct {
+ Doc *CommentGroup // associated documentation; or nil
+ Name *Ident // local package name (including "."); or nil
+ Path *BasicLit // import path
+ Comment *CommentGroup // line comments; or nil
+ EndPos token.Pos // end of spec (overrides Path.Pos if nonzero)
+ }
+
+ // A ValueSpec node represents a constant or variable declaration
+ // (ConstSpec or VarSpec production).
+ //
+ ValueSpec struct {
+ Doc *CommentGroup // associated documentation; or nil
+ Names []*Ident // value names (len(Names) > 0)
+ Type Expr // value type; or nil
+ Values []Expr // initial values; or nil
+ Comment *CommentGroup // line comments; or nil
+ }
+
+ // A TypeSpec node represents a type declaration (TypeSpec production).
+ TypeSpec struct {
+ Doc *CommentGroup // associated documentation; or nil
+ Name *Ident // type name
+ Type Expr // *Ident, *ParenExpr, *SelectorExpr, *StarExpr, or any of the *XxxTypes
+ Comment *CommentGroup // line comments; or nil
+ }
+)
+
+// Pos and End implementations for spec nodes.
+//
+func (s *ImportSpec) Pos() token.Pos {
+ if s.Name != nil {
+ return s.Name.Pos()
+ }
+ return s.Path.Pos()
+}
+func (s *ValueSpec) Pos() token.Pos { return s.Names[0].Pos() }
+func (s *TypeSpec) Pos() token.Pos { return s.Name.Pos() }
+
+func (s *ImportSpec) End() token.Pos {
+ if s.EndPos != 0 {
+ return s.EndPos
+ }
+ return s.Path.End()
+}
+
+func (s *ValueSpec) End() token.Pos {
+ if n := len(s.Values); n > 0 {
+ return s.Values[n-1].End()
+ }
+ if s.Type != nil {
+ return s.Type.End()
+ }
+ return s.Names[len(s.Names)-1].End()
+}
+func (s *TypeSpec) End() token.Pos { return s.Type.End() }
+
+// specNode() ensures that only spec nodes can be
+// assigned to a Spec.
+//
+func (*ImportSpec) specNode() {}
+func (*ValueSpec) specNode() {}
+func (*TypeSpec) specNode() {}
+
+// A declaration is represented by one of the following declaration nodes.
+//
+type (
+ // A BadDecl node is a placeholder for declarations containing
+ // syntax errors for which no correct declaration nodes can be
+ // created.
+ //
+ BadDecl struct {
+ From, To token.Pos // position range of bad declaration
+ }
+
+ // A GenDecl node (generic declaration node) represents an import,
+ // constant, type or variable declaration. A valid Lparen position
+ // (Lparen.Line > 0) indicates a parenthesized declaration.
+ //
+ // Relationship between Tok value and Specs element type:
+ //
+ // token.IMPORT *ImportSpec
+ // token.CONST *ValueSpec
+ // token.TYPE *TypeSpec
+ // token.VAR *ValueSpec
+ //
+ GenDecl struct {
+ Doc *CommentGroup // associated documentation; or nil
+ TokPos token.Pos // position of Tok
+ Tok token.Token // IMPORT, CONST, TYPE, VAR
+ Lparen token.Pos // position of '(', if any
+ Specs []Spec
+ Rparen token.Pos // position of ')', if any
+ }
+
+ // A FuncDecl node represents a function declaration.
+ FuncDecl struct {
+ Doc *CommentGroup // associated documentation; or nil
+ Recv *FieldList // receiver (methods); or nil (functions)
+ Name *Ident // function/method name
+ Type *FuncType // function signature: parameters, results, and position of "func" keyword
+ Body *BlockStmt // function body; or nil (forward declaration)
+ }
+)
+
+// Pos and End implementations for declaration nodes.
+//
+func (d *BadDecl) Pos() token.Pos { return d.From }
+func (d *GenDecl) Pos() token.Pos { return d.TokPos }
+func (d *FuncDecl) Pos() token.Pos { return d.Type.Pos() }
+
+func (d *BadDecl) End() token.Pos { return d.To }
+func (d *GenDecl) End() token.Pos {
+ if d.Rparen.IsValid() {
+ return d.Rparen + 1
+ }
+ return d.Specs[0].End()
+}
+func (d *FuncDecl) End() token.Pos {
+ if d.Body != nil {
+ return d.Body.End()
+ }
+ return d.Type.End()
+}
+
+// declNode() ensures that only declaration nodes can be
+// assigned to a Decl.
+//
+func (*BadDecl) declNode() {}
+func (*GenDecl) declNode() {}
+func (*FuncDecl) declNode() {}
+
+// ----------------------------------------------------------------------------
+// Files and packages
+
+// A File node represents a Go source file.
+//
+// The Comments list contains all comments in the source file in order of
+// appearance, including the comments that are pointed to from other nodes
+// via Doc and Comment fields.
+//
+type File struct {
+ Doc *CommentGroup // associated documentation; or nil
+ Package token.Pos // position of "package" keyword
+ Name *Ident // package name
+ Decls []Decl // top-level declarations; or nil
+ Scope *Scope // package scope (this file only)
+ Imports []*ImportSpec // imports in this file
+ Unresolved []*Ident // unresolved identifiers in this file
+ Comments []*CommentGroup // list of all comments in the source file
+}
+
+func (f *File) Pos() token.Pos { return f.Package }
+func (f *File) End() token.Pos {
+ if n := len(f.Decls); n > 0 {
+ return f.Decls[n-1].End()
+ }
+ return f.Name.End()
+}
+
+// A Package node represents a set of source files
+// collectively building a Go package.
+//
+type Package struct {
+ Name string // package name
+ Scope *Scope // package scope across all files
+ Imports map[string]*Object // map of package id -> package object
+ Files map[string]*File // Go source files by filename
+}
+
+func (p *Package) Pos() token.Pos { return token.NoPos }
+func (p *Package) End() token.Pos { return token.NoPos }
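The Pos/End convention above (Pos is the node's first character, End the first character after it) makes it easy to recover a node's exact source text from a token.FileSet. A minimal sketch, separate from this CL (the file name and source string are made up):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	src := "package p\n\nfunc Hello() {}\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	for _, d := range f.Decls {
		// Convert the token.Pos values to 0-based file offsets;
		// the declaration's text is then src[start:end].
		start := fset.Position(d.Pos()).Offset
		end := fset.Position(d.End()).Offset
		fmt.Printf("%s: %q\n", fset.Position(d.Pos()), src[start:end])
	}
}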
diff --git a/src/go/ast/ast_test.go b/src/go/ast/ast_test.go
new file mode 100644
index 000000000..1a6a283f2
--- /dev/null
+++ b/src/go/ast/ast_test.go
@@ -0,0 +1,50 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "testing"
+)
+
+var comments = []struct {
+ list []string
+ text string
+}{
+ {[]string{"//"}, ""},
+ {[]string{"// "}, ""},
+ {[]string{"//", "//", "// "}, ""},
+ {[]string{"// foo "}, "foo\n"},
+ {[]string{"//", "//", "// foo"}, "foo\n"},
+ {[]string{"// foo bar "}, "foo bar\n"},
+ {[]string{"// foo", "// bar"}, "foo\nbar\n"},
+ {[]string{"// foo", "//", "//", "//", "// bar"}, "foo\n\nbar\n"},
+ {[]string{"// foo", "/* bar */"}, "foo\n bar\n"},
+ {[]string{"//", "//", "//", "// foo", "//", "//", "//"}, "foo\n"},
+
+ {[]string{"/**/"}, ""},
+ {[]string{"/* */"}, ""},
+ {[]string{"/**/", "/**/", "/* */"}, ""},
+ {[]string{"/* Foo */"}, " Foo\n"},
+ {[]string{"/* Foo Bar */"}, " Foo Bar\n"},
+ {[]string{"/* Foo*/", "/* Bar*/"}, " Foo\n Bar\n"},
+ {[]string{"/* Foo*/", "/**/", "/**/", "/**/", "// Bar"}, " Foo\n\nBar\n"},
+ {[]string{"/* Foo*/", "/*\n*/", "//", "/*\n*/", "// Bar"}, " Foo\n\nBar\n"},
+ {[]string{"/* Foo*/", "// Bar"}, " Foo\nBar\n"},
+ {[]string{"/* Foo\n Bar*/"}, " Foo\n Bar\n"},
+}
+
+func TestCommentText(t *testing.T) {
+ for i, c := range comments {
+ list := make([]*Comment, len(c.list))
+ for i, s := range c.list {
+ list[i] = &Comment{Text: s}
+ }
+
+ text := (&CommentGroup{list}).Text()
+ if text != c.text {
+ t.Errorf("case %d: got %q; expected %q", i, text, c.text)
+ }
+ }
+}
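The table above fully specifies Text()'s normalization. A short sketch, separate from this CL, exercising the {"// foo", "// bar"} row directly:

package main

import (
	"fmt"
	"go/ast"
)

func main() {
	g := &ast.CommentGroup{List: []*ast.Comment{
		{Text: "// foo "}, // comment marker and trailing space are stripped
		{Text: "// bar"},
	}}
	fmt.Printf("%q\n", g.Text()) // prints "foo\nbar\n"
}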
diff --git a/src/go/ast/commentmap.go b/src/go/ast/commentmap.go
new file mode 100644
index 000000000..ac999d627
--- /dev/null
+++ b/src/go/ast/commentmap.go
@@ -0,0 +1,332 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+ "sort"
+)
+
+type byPos []*CommentGroup
+
+func (a byPos) Len() int { return len(a) }
+func (a byPos) Less(i, j int) bool { return a[i].Pos() < a[j].Pos() }
+func (a byPos) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// sortComments sorts the list of comment groups in source order.
+//
+func sortComments(list []*CommentGroup) {
+ // TODO(gri): Does it make sense to check for sorted-ness
+ // first (because we know that sorted-ness is
+ // very likely)?
+ if orderedList := byPos(list); !sort.IsSorted(orderedList) {
+ sort.Sort(orderedList)
+ }
+}
+
+// A CommentMap maps an AST node to a list of comment groups
+// associated with it. See NewCommentMap for a description of
+// the association.
+//
+type CommentMap map[Node][]*CommentGroup
+
+func (cmap CommentMap) addComment(n Node, c *CommentGroup) {
+ list := cmap[n]
+ if len(list) == 0 {
+ list = []*CommentGroup{c}
+ } else {
+ list = append(list, c)
+ }
+ cmap[n] = list
+}
+
+type byInterval []Node
+
+func (a byInterval) Len() int { return len(a) }
+func (a byInterval) Less(i, j int) bool {
+ pi, pj := a[i].Pos(), a[j].Pos()
+ return pi < pj || pi == pj && a[i].End() > a[j].End()
+}
+func (a byInterval) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// nodeList returns the list of nodes of the AST n in source order.
+//
+func nodeList(n Node) []Node {
+ var list []Node
+ Inspect(n, func(n Node) bool {
+ // don't collect comments
+ switch n.(type) {
+ case nil, *CommentGroup, *Comment:
+ return false
+ }
+ list = append(list, n)
+ return true
+ })
+ // Note: The current implementation assumes that Inspect traverses the
+ // AST in depth-first and thus _source_ order. If AST traversal
+ // does not follow source order, the sorting call below will be
+ // required.
+ // sort.Sort(byInterval(list))
+ return list
+}
+
+// A commentListReader helps iterate through a list of comment groups.
+//
+type commentListReader struct {
+ fset *token.FileSet
+ list []*CommentGroup
+ index int
+ comment *CommentGroup // comment group at current index
+ pos, end token.Position // source interval of comment group at current index
+}
+
+func (r *commentListReader) eol() bool {
+ return r.index >= len(r.list)
+}
+
+func (r *commentListReader) next() {
+ if !r.eol() {
+ r.comment = r.list[r.index]
+ r.pos = r.fset.Position(r.comment.Pos())
+ r.end = r.fset.Position(r.comment.End())
+ r.index++
+ }
+}
+
+// A nodeStack keeps track of nested nodes.
+// A node lower on the stack lexically contains the nodes higher on the stack.
+//
+type nodeStack []Node
+
+// push pops all nodes that appear lexically before n
+// and then pushes n on the stack.
+//
+func (s *nodeStack) push(n Node) {
+ s.pop(n.Pos())
+ *s = append((*s), n)
+}
+
+// pop pops all nodes that appear lexically before pos
+// (i.e., whose lexical extent has ended before or at pos).
+// It returns the last node popped.
+//
+func (s *nodeStack) pop(pos token.Pos) (top Node) {
+ i := len(*s)
+ for i > 0 && (*s)[i-1].End() <= pos {
+ top = (*s)[i-1]
+ i--
+ }
+ *s = (*s)[0:i]
+ return top
+}
+
+// NewCommentMap creates a new comment map by associating comment groups
+// of the comments list with the nodes of the AST specified by node.
+//
+// A comment group g is associated with a node n if:
+//
+// - g starts on the same line as n ends
+// - g starts on the line immediately following n, and there is
+// at least one empty line after g and before the next node
+// - g starts before n and is not associated with the node before n
+// via the previous rules
+//
+// NewCommentMap tries to associate a comment group to the "largest"
+// node possible: For instance, if the comment is a line comment
+// trailing an assignment, the comment is associated with the entire
+// assignment rather than just the last operand in the assignment.
+//
+func NewCommentMap(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap {
+ if len(comments) == 0 {
+ return nil // no comments to map
+ }
+
+ cmap := make(CommentMap)
+
+ // set up comment reader r
+ tmp := make([]*CommentGroup, len(comments))
+ copy(tmp, comments) // don't change incoming comments
+ sortComments(tmp)
+ r := commentListReader{fset: fset, list: tmp} // !r.eol() because len(comments) > 0
+ r.next()
+
+ // create node list in lexical order
+ nodes := nodeList(node)
+ nodes = append(nodes, nil) // append sentinel
+
+ // set up iteration variables
+ var (
+ p Node // previous node
+ pend token.Position // end of p
+ pg Node // previous node group (enclosing nodes of "importance")
+ pgend token.Position // end of pg
+ stack nodeStack // stack of node groups
+ )
+
+ for _, q := range nodes {
+ var qpos token.Position
+ if q != nil {
+ qpos = fset.Position(q.Pos()) // current node position
+ } else {
+ // set fake sentinel position to infinity so that
+ // all comments get processed before the sentinel
+ const infinity = 1 << 30
+ qpos.Offset = infinity
+ qpos.Line = infinity
+ }
+
+ // process comments before current node
+ for r.end.Offset <= qpos.Offset {
+ // determine recent node group
+ if top := stack.pop(r.comment.Pos()); top != nil {
+ pg = top
+ pgend = fset.Position(pg.End())
+ }
+ // Try to associate a comment first with a node group
+ // (i.e., a node of "importance" such as a declaration);
+ // if that fails, try to associate it with the most recent
+ // node.
+ // TODO(gri) try to simplify the logic below
+ var assoc Node
+ switch {
+ case pg != nil &&
+ (pgend.Line == r.pos.Line ||
+ pgend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line):
+ // 1) comment starts on same line as previous node group ends, or
+ // 2) comment starts on the line immediately after the
+ // previous node group and there is an empty line before
+ // the current node
+ // => associate comment with previous node group
+ assoc = pg
+ case p != nil &&
+ (pend.Line == r.pos.Line ||
+ pend.Line+1 == r.pos.Line && r.end.Line+1 < qpos.Line ||
+ q == nil):
+ // same rules apply as above for p rather than pg,
+ // but also associate with p if we are at the end (q == nil)
+ assoc = p
+ default:
+ // otherwise, associate comment with current node
+ if q == nil {
+ // we can only reach here if there was no p
+ // which would imply that there were no nodes
+ panic("internal error: no comments should be associated with sentinel")
+ }
+ assoc = q
+ }
+ cmap.addComment(assoc, r.comment)
+ if r.eol() {
+ return cmap
+ }
+ r.next()
+ }
+
+ // update previous node
+ p = q
+ pend = fset.Position(p.End())
+
+ // update previous node group if we see an "important" node
+ switch q.(type) {
+ case *File, *Field, Decl, Spec, Stmt:
+ stack.push(q)
+ }
+ }
+
+ return cmap
+}
+
+// Update replaces an old node in the comment map with the new node
+// and returns the new node. Comments that were associated with the
+// old node are associated with the new node.
+//
+func (cmap CommentMap) Update(old, new Node) Node {
+ if list := cmap[old]; len(list) > 0 {
+ delete(cmap, old)
+ cmap[new] = append(cmap[new], list...)
+ }
+ return new
+}
+
+// Filter returns a new comment map consisting of only those
+// entries of cmap for which a corresponding node exists in
+// the AST specified by node.
+//
+func (cmap CommentMap) Filter(node Node) CommentMap {
+ umap := make(CommentMap)
+ Inspect(node, func(n Node) bool {
+ if g := cmap[n]; len(g) > 0 {
+ umap[n] = g
+ }
+ return true
+ })
+ return umap
+}
+
+// Comments returns the list of comment groups in the comment map.
+// The result is sorted in source order.
+//
+func (cmap CommentMap) Comments() []*CommentGroup {
+ list := make([]*CommentGroup, 0, len(cmap))
+ for _, e := range cmap {
+ list = append(list, e...)
+ }
+ sortComments(list)
+ return list
+}
+
+func summary(list []*CommentGroup) string {
+ const maxLen = 40
+ var buf bytes.Buffer
+
+ // collect comments text
+loop:
+ for _, group := range list {
+ // Note: CommentGroup.Text() does too much work for what we
+ // need and would only replace this innermost loop.
+ // Just do it explicitly.
+ for _, comment := range group.List {
+ if buf.Len() >= maxLen {
+ break loop
+ }
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ // truncate if too long
+ if buf.Len() > maxLen {
+ buf.Truncate(maxLen - 3)
+ buf.WriteString("...")
+ }
+
+ // replace any invisibles with blanks
+ bytes := buf.Bytes()
+ for i, b := range bytes {
+ switch b {
+ case '\t', '\n', '\r':
+ bytes[i] = ' '
+ }
+ }
+
+ return string(bytes)
+}
+
+func (cmap CommentMap) String() string {
+ var buf bytes.Buffer
+ fmt.Fprintln(&buf, "CommentMap {")
+ for node, comment := range cmap {
+ // print name of identifiers; print node type for other nodes
+ var s string
+ if ident, ok := node.(*Ident); ok {
+ s = ident.Name
+ } else {
+ s = fmt.Sprintf("%T", node)
+ }
+ fmt.Fprintf(&buf, "\t%p %20s: %s\n", node, s, summary(comment))
+ }
+ fmt.Fprintln(&buf, "}")
+ return buf.String()
+}
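Update is what keeps comments attached when a tool rewrites nodes. A minimal sketch, separate from this CL (the shallow copy merely stands in for a real transformation):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

func main() {
	src := "package p\n\n// v is documented.\nvar v = 1\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	cmap := ast.NewCommentMap(fset, f, f.Comments)

	// Swap in a copied declaration; Update moves the comment groups
	// associated with the old node over to the new one.
	old := f.Decls[0].(*ast.GenDecl)
	copied := *old
	f.Decls[0] = cmap.Update(old, &copied).(ast.Decl)

	for _, g := range cmap[&copied] {
		fmt.Print(g.Text()) // v is documented.
	}
}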
diff --git a/src/go/ast/commentmap_test.go b/src/go/ast/commentmap_test.go
new file mode 100644
index 000000000..e372eab74
--- /dev/null
+++ b/src/go/ast/commentmap_test.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// To avoid a cyclic dependency with go/parser, this file is in a separate package.
+
+package ast_test
+
+import (
+ "bytes"
+ "fmt"
+ . "go/ast"
+ "go/parser"
+ "go/token"
+ "sort"
+ "testing"
+)
+
+const src = `
+// the very first comment
+
+// package p
+package p /* the name is p */
+
+// imports
+import (
+ "bytes" // bytes
+ "fmt" // fmt
+ "go/ast"
+ "go/parser"
+)
+
+// T
+type T struct {
+ a, b, c int // associated with a, b, c
+ // associated with x, y
+ x, y float64 // float values
+ z complex128 // complex value
+}
+// also associated with T
+
+// x
+var x = 0 // x = 0
+// also associated with x
+
+// f1
+func f1() {
+ /* associated with s1 */
+ s1()
+ // also associated with s1
+
+ // associated with s2
+
+ // also associated with s2
+ s2() // line comment for s2
+}
+// associated with f1
+// also associated with f1
+
+// associated with f2
+
+// f2
+func f2() {
+}
+
+func f3() {
+ i := 1 /* 1 */ + 2 // addition
+ _ = i
+}
+
+// the very last comment
+`
+
+// res maps a key of the form "line number: node type"
+// to the associated comments' text.
+//
+var res = map[string]string{
+ " 5: *ast.File": "the very first comment\npackage p\n",
+ " 5: *ast.Ident": " the name is p\n",
+ " 8: *ast.GenDecl": "imports\n",
+ " 9: *ast.ImportSpec": "bytes\n",
+ "10: *ast.ImportSpec": "fmt\n",
+ "16: *ast.GenDecl": "T\nalso associated with T\n",
+ "17: *ast.Field": "associated with a, b, c\n",
+ "19: *ast.Field": "associated with x, y\nfloat values\n",
+ "20: *ast.Field": "complex value\n",
+ "25: *ast.GenDecl": "x\nx = 0\nalso associated with x\n",
+ "29: *ast.FuncDecl": "f1\nassociated with f1\nalso associated with f1\n",
+ "31: *ast.ExprStmt": " associated with s1\nalso associated with s1\n",
+ "37: *ast.ExprStmt": "associated with s2\nalso associated with s2\nline comment for s2\n",
+ "45: *ast.FuncDecl": "associated with f2\nf2\n",
+ "49: *ast.AssignStmt": "addition\n",
+ "49: *ast.BasicLit": " 1\n",
+ "50: *ast.Ident": "the very last comment\n",
+}
+
+func ctext(list []*CommentGroup) string {
+ var buf bytes.Buffer
+ for _, g := range list {
+ buf.WriteString(g.Text())
+ }
+ return buf.String()
+}
+
+func TestCommentMap(t *testing.T) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cmap := NewCommentMap(fset, f, f.Comments)
+
+ // verify the correct association of comments
+ for n, list := range cmap {
+ key := fmt.Sprintf("%2d: %T", fset.Position(n.Pos()).Line, n)
+ got := ctext(list)
+ want := res[key]
+ if got != want {
+ t.Errorf("%s: got %q; want %q", key, got, want)
+ }
+ }
+
+ // verify that no comments got lost
+ if n := len(cmap.Comments()); n != len(f.Comments) {
+ t.Errorf("got %d comment groups in map; want %d", n, len(f.Comments))
+ }
+
+ // support code to update test:
+ // set genMap to true to generate res map
+ const genMap = false
+ if genMap {
+ out := make([]string, 0, len(cmap))
+ for n, list := range cmap {
+ out = append(out, fmt.Sprintf("\t\"%2d: %T\":\t%q,", fset.Position(n.Pos()).Line, n, ctext(list)))
+ }
+ sort.Strings(out)
+ for _, s := range out {
+ fmt.Println(s)
+ }
+ }
+}
+
+// TODO(gri): add tests for Filter.
diff --git a/src/go/ast/example_test.go b/src/go/ast/example_test.go
new file mode 100644
index 000000000..d2e734f2c
--- /dev/null
+++ b/src/go/ast/example_test.go
@@ -0,0 +1,210 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast_test
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+)
+
+// This example demonstrates how to inspect the AST of a Go program.
+func ExampleInspect() {
+ // src is the input for which we want to inspect the AST.
+ src := `
+package p
+const c = 1.0
+var X = f(3.14)*2 + c
+`
+
+ // Create the AST by parsing src.
+ fset := token.NewFileSet() // positions are relative to fset
+ f, err := parser.ParseFile(fset, "src.go", src, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ // Inspect the AST and print all identifiers and literals.
+ ast.Inspect(f, func(n ast.Node) bool {
+ var s string
+ switch x := n.(type) {
+ case *ast.BasicLit:
+ s = x.Value
+ case *ast.Ident:
+ s = x.Name
+ }
+ if s != "" {
+ fmt.Printf("%s:\t%s\n", fset.Position(n.Pos()), s)
+ }
+ return true
+ })
+
+ // output:
+ // src.go:2:9: p
+ // src.go:3:7: c
+ // src.go:3:11: 1.0
+ // src.go:4:5: X
+ // src.go:4:9: f
+ // src.go:4:11: 3.14
+ // src.go:4:17: 2
+ // src.go:4:21: c
+}
+
+// This example shows what an AST looks like when printed for debugging.
+func ExamplePrint() {
+ // src is the input for which we want to print the AST.
+ src := `
+package main
+func main() {
+ println("Hello, World!")
+}
+`
+
+ // Create the AST by parsing src.
+ fset := token.NewFileSet() // positions are relative to fset
+ f, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ // Print the AST.
+ ast.Print(fset, f)
+
+ // output:
+ // 0 *ast.File {
+ // 1 . Package: 2:1
+ // 2 . Name: *ast.Ident {
+ // 3 . . NamePos: 2:9
+ // 4 . . Name: "main"
+ // 5 . }
+ // 6 . Decls: []ast.Decl (len = 1) {
+ // 7 . . 0: *ast.FuncDecl {
+ // 8 . . . Name: *ast.Ident {
+ // 9 . . . . NamePos: 3:6
+ // 10 . . . . Name: "main"
+ // 11 . . . . Obj: *ast.Object {
+ // 12 . . . . . Kind: func
+ // 13 . . . . . Name: "main"
+ // 14 . . . . . Decl: *(obj @ 7)
+ // 15 . . . . }
+ // 16 . . . }
+ // 17 . . . Type: *ast.FuncType {
+ // 18 . . . . Func: 3:1
+ // 19 . . . . Params: *ast.FieldList {
+ // 20 . . . . . Opening: 3:10
+ // 21 . . . . . Closing: 3:11
+ // 22 . . . . }
+ // 23 . . . }
+ // 24 . . . Body: *ast.BlockStmt {
+ // 25 . . . . Lbrace: 3:13
+ // 26 . . . . List: []ast.Stmt (len = 1) {
+ // 27 . . . . . 0: *ast.ExprStmt {
+ // 28 . . . . . . X: *ast.CallExpr {
+ // 29 . . . . . . . Fun: *ast.Ident {
+ // 30 . . . . . . . . NamePos: 4:2
+ // 31 . . . . . . . . Name: "println"
+ // 32 . . . . . . . }
+ // 33 . . . . . . . Lparen: 4:9
+ // 34 . . . . . . . Args: []ast.Expr (len = 1) {
+ // 35 . . . . . . . . 0: *ast.BasicLit {
+ // 36 . . . . . . . . . ValuePos: 4:10
+ // 37 . . . . . . . . . Kind: STRING
+ // 38 . . . . . . . . . Value: "\"Hello, World!\""
+ // 39 . . . . . . . . }
+ // 40 . . . . . . . }
+ // 41 . . . . . . . Ellipsis: -
+ // 42 . . . . . . . Rparen: 4:25
+ // 43 . . . . . . }
+ // 44 . . . . . }
+ // 45 . . . . }
+ // 46 . . . . Rbrace: 5:1
+ // 47 . . . }
+ // 48 . . }
+ // 49 . }
+ // 50 . Scope: *ast.Scope {
+ // 51 . . Objects: map[string]*ast.Object (len = 1) {
+ // 52 . . . "main": *(obj @ 11)
+ // 53 . . }
+ // 54 . }
+ // 55 . Unresolved: []*ast.Ident (len = 1) {
+ // 56 . . 0: *(obj @ 29)
+ // 57 . }
+ // 58 }
+}
+
+// This example illustrates how to remove a variable declaration
+// in a Go program while maintaining correct comment association
+// using an ast.CommentMap.
+func ExampleCommentMap() {
+ // src is the input for which we create the AST that we
+ // are going to manipulate.
+ src := `
+// This is the package comment.
+package main
+
+// This comment is associated with the hello constant.
+const hello = "Hello, World!" // line comment 1
+
+// This comment is associated with the foo variable.
+var foo = hello // line comment 2
+
+// This comment is associated with the main function.
+func main() {
+ fmt.Println(hello) // line comment 3
+}
+`
+
+ // Create the AST by parsing src.
+ fset := token.NewFileSet() // positions are relative to fset
+ f, err := parser.ParseFile(fset, "src.go", src, parser.ParseComments)
+ if err != nil {
+ panic(err)
+ }
+
+ // Create an ast.CommentMap from the ast.File's comments.
+ // This helps keep the association between comments
+ // and AST nodes.
+ cmap := ast.NewCommentMap(fset, f, f.Comments)
+
+ // Remove the first variable declaration from the list of declarations.
+ f.Decls = removeFirstVarDecl(f.Decls)
+
+ // Use the comment map to filter comments that don't belong anymore
+ // (the comments associated with the variable declaration), and create
+ // the new comments list.
+ f.Comments = cmap.Filter(f).Comments()
+
+ // Print the modified AST.
+ var buf bytes.Buffer
+ if err := format.Node(&buf, fset, f); err != nil {
+ panic(err)
+ }
+ fmt.Printf("%s", buf.Bytes())
+
+ // output:
+ // // This is the package comment.
+ // package main
+ //
+ // // This comment is associated with the hello constant.
+ // const hello = "Hello, World!" // line comment 1
+ //
+ // // This comment is associated with the main function.
+ // func main() {
+ // fmt.Println(hello) // line comment 3
+ // }
+}
+
+func removeFirstVarDecl(list []ast.Decl) []ast.Decl {
+ for i, decl := range list {
+ if gen, ok := decl.(*ast.GenDecl); ok && gen.Tok == token.VAR {
+ copy(list[i:], list[i+1:])
+ return list[:len(list)-1]
+ }
+ }
+ panic("variable declaration not found")
+}
diff --git a/src/go/ast/filter.go b/src/go/ast/filter.go
new file mode 100644
index 000000000..fc3eeb4a1
--- /dev/null
+++ b/src/go/ast/filter.go
@@ -0,0 +1,466 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "go/token"
+ "sort"
+)
+
+// ----------------------------------------------------------------------------
+// Export filtering
+
+// exportFilter is a special filter function to extract exported nodes.
+func exportFilter(name string) bool {
+ return IsExported(name)
+}
+
+// FileExports trims the AST for a Go source file in place such that
+// only exported nodes remain: all top-level identifiers which are not exported
+// and their associated information (such as type, initial value, or function
+// body) are removed. Non-exported fields and methods of exported types are
+// stripped. The File.Comments list is not changed.
+//
+// FileExports returns true if there are exported declarations;
+// it returns false otherwise.
+//
+func FileExports(src *File) bool {
+ return filterFile(src, exportFilter, true)
+}
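+
+// Illustrative sketch (not part of the original source): typical use of
+// FileExports from a client package; the file name "pkg.go" and the
+// source text src are hypothetical.
+//
+//	fset := token.NewFileSet()
+//	f, err := parser.ParseFile(fset, "pkg.go", src, 0)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if ast.FileExports(f) {
+//		format.Node(os.Stdout, fset, f) // only exported declarations remain
+//	}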
+
+// PackageExports trims the AST for a Go package in place such that
+// only exported nodes remain. The pkg.Files list is not changed, so that
+// file names and top-level package comments don't get lost.
+//
+// PackageExports returns true if there are exported declarations;
+// it returns false otherwise.
+//
+func PackageExports(pkg *Package) bool {
+ return filterPackage(pkg, exportFilter, true)
+}
+
+// ----------------------------------------------------------------------------
+// General filtering
+
+type Filter func(string) bool
+
+func filterIdentList(list []*Ident, f Filter) []*Ident {
+ j := 0
+ for _, x := range list {
+ if f(x.Name) {
+ list[j] = x
+ j++
+ }
+ }
+ return list[0:j]
+}
+
+// fieldName assumes that x is the type of an anonymous field and
+// returns the corresponding field name. If x is not an acceptable
+// anonymous field, the result is nil.
+//
+func fieldName(x Expr) *Ident {
+ switch t := x.(type) {
+ case *Ident:
+ return t
+ case *SelectorExpr:
+ if _, ok := t.X.(*Ident); ok {
+ return t.Sel
+ }
+ case *StarExpr:
+ return fieldName(t.X)
+ }
+ return nil
+}
+
+func filterFieldList(fields *FieldList, filter Filter, export bool) (removedFields bool) {
+ if fields == nil {
+ return false
+ }
+ list := fields.List
+ j := 0
+ for _, f := range list {
+ keepField := false
+ if len(f.Names) == 0 {
+ // anonymous field
+ name := fieldName(f.Type)
+ keepField = name != nil && filter(name.Name)
+ } else {
+ n := len(f.Names)
+ f.Names = filterIdentList(f.Names, filter)
+ if len(f.Names) < n {
+ removedFields = true
+ }
+ keepField = len(f.Names) > 0
+ }
+ if keepField {
+ if export {
+ filterType(f.Type, filter, export)
+ }
+ list[j] = f
+ j++
+ }
+ }
+ if j < len(list) {
+ removedFields = true
+ }
+ fields.List = list[0:j]
+ return
+}
+
+func filterParamList(fields *FieldList, filter Filter, export bool) bool {
+ if fields == nil {
+ return false
+ }
+ var b bool
+ for _, f := range fields.List {
+ if filterType(f.Type, filter, export) {
+ b = true
+ }
+ }
+ return b
+}
+
+func filterType(typ Expr, f Filter, export bool) bool {
+ switch t := typ.(type) {
+ case *Ident:
+ return f(t.Name)
+ case *ParenExpr:
+ return filterType(t.X, f, export)
+ case *ArrayType:
+ return filterType(t.Elt, f, export)
+ case *StructType:
+ if filterFieldList(t.Fields, f, export) {
+ t.Incomplete = true
+ }
+ return len(t.Fields.List) > 0
+ case *FuncType:
+ b1 := filterParamList(t.Params, f, export)
+ b2 := filterParamList(t.Results, f, export)
+ return b1 || b2
+ case *InterfaceType:
+ if filterFieldList(t.Methods, f, export) {
+ t.Incomplete = true
+ }
+ return len(t.Methods.List) > 0
+ case *MapType:
+ b1 := filterType(t.Key, f, export)
+ b2 := filterType(t.Value, f, export)
+ return b1 || b2
+ case *ChanType:
+ return filterType(t.Value, f, export)
+ }
+ return false
+}
+
+func filterSpec(spec Spec, f Filter, export bool) bool {
+ switch s := spec.(type) {
+ case *ValueSpec:
+ s.Names = filterIdentList(s.Names, f)
+ if len(s.Names) > 0 {
+ if export {
+ filterType(s.Type, f, export)
+ }
+ return true
+ }
+ case *TypeSpec:
+ if f(s.Name.Name) {
+ if export {
+ filterType(s.Type, f, export)
+ }
+ return true
+ }
+ if !export {
+ // For general filtering (not just exports),
+ // filter type even if name is not filtered
+ // out.
+ // If the type contains filtered elements,
+ // keep the declaration.
+ return filterType(s.Type, f, export)
+ }
+ }
+ return false
+}
+
+func filterSpecList(list []Spec, f Filter, export bool) []Spec {
+ j := 0
+ for _, s := range list {
+ if filterSpec(s, f, export) {
+ list[j] = s
+ j++
+ }
+ }
+ return list[0:j]
+}
+
+// FilterDecl trims the AST for a Go declaration in place by removing
+// all names (including struct field and interface method names, but
+// not from parameter lists) that don't pass through the filter f.
+//
+// FilterDecl returns true if there are any declared names left after
+// filtering; it returns false otherwise.
+//
+func FilterDecl(decl Decl, f Filter) bool {
+ return filterDecl(decl, f, false)
+}
+
+func filterDecl(decl Decl, f Filter, export bool) bool {
+ switch d := decl.(type) {
+ case *GenDecl:
+ d.Specs = filterSpecList(d.Specs, f, export)
+ return len(d.Specs) > 0
+ case *FuncDecl:
+ return f(d.Name.Name)
+ }
+ return false
+}
+
+// FilterFile trims the AST for a Go file in place by removing all
+// names from top-level declarations (including struct field and
+// interface method names, but not from parameter lists) that don't
+// pass through the filter f. If the declaration is empty afterwards,
+// the declaration is removed from the AST. The File.Comments list
+// is not changed.
+//
+// FilterFile returns true if there are any top-level declarations
+// left after filtering; it returns false otherwise.
+//
+func FilterFile(src *File, f Filter) bool {
+ return filterFile(src, f, false)
+}
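+
+// Illustrative sketch (not part of the original source): FilterFile with
+// a custom Filter that keeps only names starting with "Test"; f and fset
+// are assumed to come from a prior parser.ParseFile call.
+//
+//	keep := func(name string) bool { return strings.HasPrefix(name, "Test") }
+//	if ast.FilterFile(f, keep) {
+//		format.Node(os.Stdout, fset, f) // the surviving declarations
+//	}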
+
+func filterFile(src *File, f Filter, export bool) bool {
+ j := 0
+ for _, d := range src.Decls {
+ if filterDecl(d, f, export) {
+ src.Decls[j] = d
+ j++
+ }
+ }
+ src.Decls = src.Decls[0:j]
+ return j > 0
+}
+
+// FilterPackage trims the AST for a Go package in place by removing
+// all names from top-level declarations (including struct field and
+// interface method names, but not from parameter lists) that don't
+// pass through the filter f. If the declaration is empty afterwards,
+// the declaration is removed from the AST. The pkg.Files list is not
+// changed, so that file names and top-level package comments don't get
+// lost.
+//
+// FilterPackage returns true if there are any top-level declarations
+// left after filtering; it returns false otherwise.
+//
+func FilterPackage(pkg *Package, f Filter) bool {
+ return filterPackage(pkg, f, false)
+}
+
+func filterPackage(pkg *Package, f Filter, export bool) bool {
+ hasDecls := false
+ for _, src := range pkg.Files {
+ if filterFile(src, f, export) {
+ hasDecls = true
+ }
+ }
+ return hasDecls
+}
+
+// ----------------------------------------------------------------------------
+// Merging of package files
+
+// The MergeMode flags control the behavior of MergePackageFiles.
+type MergeMode uint
+
+const (
+ // If set, duplicate function declarations are excluded.
+ FilterFuncDuplicates MergeMode = 1 << iota
+ // If set, comments that are not associated with a specific
+ // AST node (as Doc or Comment) are excluded.
+ FilterUnassociatedComments
+ // If set, duplicate import declarations are excluded.
+ FilterImportDuplicates
+)
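+
+// Illustrative sketch (not part of the original source): the flags are
+// bits and combine with |. Given a pkg from ast.NewPackage:
+//
+//	merged := ast.MergePackageFiles(pkg,
+//		ast.FilterFuncDuplicates|ast.FilterImportDuplicates)
+//	// merged is a single *ast.File combining all files of pkg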
+
+// nameOf returns the function (foo) or method name (foo.bar) for
+// the given function declaration. If the AST is incorrect for the
+// receiver, it assumes a function instead.
+//
+func nameOf(f *FuncDecl) string {
+ if r := f.Recv; r != nil && len(r.List) == 1 {
+ // looks like a correct receiver declaration
+ t := r.List[0].Type
+ // dereference pointer receiver types
+ if p, _ := t.(*StarExpr); p != nil {
+ t = p.X
+ }
+ // the receiver type must be a type name
+ if p, _ := t.(*Ident); p != nil {
+ return p.Name + "." + f.Name.Name
+ }
+ // otherwise assume a function instead
+ }
+ return f.Name.Name
+}
+
+// separator is an empty //-style comment that is interspersed between
+ // different comment groups when they are concatenated into a single group.
+//
+var separator = &Comment{token.NoPos, "//"}
+
+// MergePackageFiles creates a file AST by merging the ASTs of the
+// files belonging to a package. The mode flags control merging behavior.
+//
+func MergePackageFiles(pkg *Package, mode MergeMode) *File {
+ // Count the number of package docs, comments and declarations across
+ // all package files. Also, compute sorted list of filenames, so that
+ // subsequent iterations can always iterate in the same order.
+ ndocs := 0
+ ncomments := 0
+ ndecls := 0
+ filenames := make([]string, len(pkg.Files))
+ i := 0
+ for filename, f := range pkg.Files {
+ filenames[i] = filename
+ i++
+ if f.Doc != nil {
+ ndocs += len(f.Doc.List) + 1 // +1 for separator
+ }
+ ncomments += len(f.Comments)
+ ndecls += len(f.Decls)
+ }
+ sort.Strings(filenames)
+
+ // Collect package comments from all package files into a single
+ // CommentGroup - the collected package documentation. In general
+ // there should be only one file with a package comment; but it's
+ // better to collect extra comments than drop them on the floor.
+ var doc *CommentGroup
+ var pos token.Pos
+ if ndocs > 0 {
+ list := make([]*Comment, ndocs-1) // -1: no separator before first group
+ i := 0
+ for _, filename := range filenames {
+ f := pkg.Files[filename]
+ if f.Doc != nil {
+ if i > 0 {
+ // not the first group - add separator
+ list[i] = separator
+ i++
+ }
+ for _, c := range f.Doc.List {
+ list[i] = c
+ i++
+ }
+ if f.Package > pos {
+ // Keep the maximum package clause position as
+ // position for the package clause of the merged
+ // files.
+ pos = f.Package
+ }
+ }
+ }
+ doc = &CommentGroup{list}
+ }
+
+ // Collect declarations from all package files.
+ var decls []Decl
+ if ndecls > 0 {
+ decls = make([]Decl, ndecls)
+ funcs := make(map[string]int) // map of func name -> decls index
+ i := 0 // current index
+ n := 0 // number of filtered entries
+ for _, filename := range filenames {
+ f := pkg.Files[filename]
+ for _, d := range f.Decls {
+ if mode&FilterFuncDuplicates != 0 {
+ // A language entity may be declared multiple
+ // times in different package files; declarations
+ // must be unique only at build time.
+ // For now, exclude multiple declarations of
+ // functions - keep the one with documentation.
+ //
+ // TODO(gri): Expand this filtering to other
+ // entities (const, type, vars) if
+ // multiple declarations are common.
+ if f, isFun := d.(*FuncDecl); isFun {
+ name := nameOf(f)
+ if j, exists := funcs[name]; exists {
+ // function declared already
+ if decls[j] != nil && decls[j].(*FuncDecl).Doc == nil {
+ // existing declaration has no documentation;
+ // ignore the existing declaration
+ decls[j] = nil
+ } else {
+ // ignore the new declaration
+ d = nil
+ }
+ n++ // filtered an entry
+ } else {
+ funcs[name] = i
+ }
+ }
+ }
+ decls[i] = d
+ i++
+ }
+ }
+
+ // Eliminate nil entries from the decls list if entries were
+ // filtered. We do this using a 2nd pass in order to not disturb
+ // the original declaration order in the source (otherwise, this
+ // would also invalidate the monotonically increasing position
+ // info within a single file).
+ if n > 0 {
+ i = 0
+ for _, d := range decls {
+ if d != nil {
+ decls[i] = d
+ i++
+ }
+ }
+ decls = decls[0:i]
+ }
+ }
+
+ // Collect import specs from all package files.
+ var imports []*ImportSpec
+ if mode&FilterImportDuplicates != 0 {
+ seen := make(map[string]bool)
+ for _, filename := range filenames {
+ f := pkg.Files[filename]
+ for _, imp := range f.Imports {
+ if path := imp.Path.Value; !seen[path] {
+ // TODO: consider handling cases where:
+ // - 2 imports exist with the same import path but
+ // have different local names (one should probably
+ // keep both of them)
+ // - 2 imports exist but only one has a comment
+ // - 2 imports exist and they both have (possibly
+ // different) comments
+ imports = append(imports, imp)
+ seen[path] = true
+ }
+ }
+ }
+ } else {
+ for _, f := range pkg.Files {
+ imports = append(imports, f.Imports...)
+ }
+ }
+
+ // Collect comments from all package files.
+ var comments []*CommentGroup
+ if mode&FilterUnassociatedComments == 0 {
+ comments = make([]*CommentGroup, ncomments)
+ i := 0
+ for _, f := range pkg.Files {
+ i += copy(comments[i:], f.Comments)
+ }
+ }
+
+ // TODO(gri) need to compute unresolved identifiers!
+ return &File{doc, pos, NewIdent(pkg.Name), decls, pkg.Scope, imports, nil, comments}
+}
diff --git a/src/go/ast/filter_test.go b/src/go/ast/filter_test.go
new file mode 100644
index 000000000..9fd86cb46
--- /dev/null
+++ b/src/go/ast/filter_test.go
@@ -0,0 +1,86 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// To avoid a cyclic dependency with go/parser, this file is in a separate package.
+
+package ast_test
+
+import (
+ "bytes"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "testing"
+)
+
+const input = `package p
+
+type t1 struct{}
+type t2 struct{}
+
+func f1() {}
+func f1() {}
+func f2() {}
+
+func (*t1) f1() {}
+func (t1) f1() {}
+func (t1) f2() {}
+
+func (t2) f1() {}
+func (t2) f2() {}
+func (x *t2) f2() {}
+`
+
+// Calling ast.MergePackageFiles with ast.FilterFuncDuplicates
+// keeps a duplicate entry with attached documentation in favor
+// of one without, and it favors duplicate entries appearing
+// later in the source over ones appearing earlier. This is why
+// (*t2).f2 is kept and t2.f2 is eliminated in this test case.
+//
+const golden = `package p
+
+type t1 struct{}
+type t2 struct{}
+
+func f1() {}
+func f2() {}
+
+func (t1) f1() {}
+func (t1) f2() {}
+
+func (t2) f1() {}
+
+func (x *t2) f2() {}
+`
+
+func TestFilterDuplicates(t *testing.T) {
+ // parse input
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "", input, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // create package
+ files := map[string]*ast.File{"": file}
+ pkg, err := ast.NewPackage(fset, files, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // filter
+ merged := ast.MergePackageFiles(pkg, ast.FilterFuncDuplicates)
+
+ // pretty-print
+ var buf bytes.Buffer
+ if err := format.Node(&buf, fset, merged); err != nil {
+ t.Fatal(err)
+ }
+ output := buf.String()
+
+ if output != golden {
+ t.Errorf("incorrect output:\n%s", output)
+ }
+}
diff --git a/src/go/ast/import.go b/src/go/ast/import.go
new file mode 100644
index 000000000..d2770d16c
--- /dev/null
+++ b/src/go/ast/import.go
@@ -0,0 +1,196 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "go/token"
+ "sort"
+ "strconv"
+)
+
+// SortImports sorts runs of consecutive import lines in import blocks in f.
+// It also removes duplicate imports when it is possible to do so without data loss.
+func SortImports(fset *token.FileSet, f *File) {
+ for _, d := range f.Decls {
+ d, ok := d.(*GenDecl)
+ if !ok || d.Tok != token.IMPORT {
+ // Not an import declaration, so we're done.
+ // Imports are always first.
+ break
+ }
+
+ if !d.Lparen.IsValid() {
+ // Not a block: sorted by default.
+ continue
+ }
+
+ // Identify and sort runs of specs on successive lines.
+ i := 0
+ specs := d.Specs[:0]
+ for j, s := range d.Specs {
+ if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
+ // j begins a new run. End this one.
+ specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
+ i = j
+ }
+ }
+ specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
+ d.Specs = specs
+
+ // Deduping can leave a blank line before the rparen; clean that up.
+ if len(d.Specs) > 0 {
+ lastSpec := d.Specs[len(d.Specs)-1]
+ lastLine := fset.Position(lastSpec.Pos()).Line
+ if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
+ fset.File(d.Rparen).MergeLine(rParenLine - 1)
+ }
+ }
+ }
+}
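+
+// Illustrative sketch (not part of the original source): SortImports is
+// typically applied between parsing and printing, as gofmt does; the
+// file name "x.go" and source src are hypothetical.
+//
+//	fset := token.NewFileSet()
+//	f, err := parser.ParseFile(fset, "x.go", src, parser.ParseComments)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	ast.SortImports(fset, f)
+//	printer.Fprint(os.Stdout, fset, f)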
+
+func importPath(s Spec) string {
+ t, err := strconv.Unquote(s.(*ImportSpec).Path.Value)
+ if err == nil {
+ return t
+ }
+ return ""
+}
+
+func importName(s Spec) string {
+ n := s.(*ImportSpec).Name
+ if n == nil {
+ return ""
+ }
+ return n.Name
+}
+
+func importComment(s Spec) string {
+ c := s.(*ImportSpec).Comment
+ if c == nil {
+ return ""
+ }
+ return c.Text()
+}
+
+// collapse indicates whether prev may be removed, leaving only next.
+func collapse(prev, next Spec) bool {
+ if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
+ return false
+ }
+ return prev.(*ImportSpec).Comment == nil
+}
+
+type posSpan struct {
+ Start token.Pos
+ End token.Pos
+}
+
+func sortSpecs(fset *token.FileSet, f *File, specs []Spec) []Spec {
+ // Can't short-circuit here even if specs are already sorted,
+ // since they might yet need deduplication.
+ // A lone import, however, may be safely ignored.
+ if len(specs) <= 1 {
+ return specs
+ }
+
+ // Record positions for specs.
+ pos := make([]posSpan, len(specs))
+ for i, s := range specs {
+ pos[i] = posSpan{s.Pos(), s.End()}
+ }
+
+ // Identify comments in this range.
+ // Any comment from pos[0].Start to the final line counts.
+ lastLine := fset.Position(pos[len(pos)-1].End).Line
+ cstart := len(f.Comments)
+ cend := len(f.Comments)
+ for i, g := range f.Comments {
+ if g.Pos() < pos[0].Start {
+ continue
+ }
+ if i < cstart {
+ cstart = i
+ }
+ if fset.Position(g.End()).Line > lastLine {
+ cend = i
+ break
+ }
+ }
+ comments := f.Comments[cstart:cend]
+
+ // Assign each comment to the import spec preceding it.
+ importComment := map[*ImportSpec][]*CommentGroup{}
+ specIndex := 0
+ for _, g := range comments {
+ for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
+ specIndex++
+ }
+ s := specs[specIndex].(*ImportSpec)
+ importComment[s] = append(importComment[s], g)
+ }
+
+ // Sort the import specs by import path.
+ // Remove duplicates, when possible without data loss.
+ // Reassign the import paths to have the same position sequence.
+ // Reassign each comment to abut the end of its spec.
+ // Sort the comments by new position.
+ sort.Sort(byImportSpec(specs))
+
+ // Dedup. Thanks to our sorting, we can just consider
+ // adjacent pairs of imports.
+ deduped := specs[:0]
+ for i, s := range specs {
+ if i == len(specs)-1 || !collapse(s, specs[i+1]) {
+ deduped = append(deduped, s)
+ } else {
+ p := s.Pos()
+ fset.File(p).MergeLine(fset.Position(p).Line)
+ }
+ }
+ specs = deduped
+
+ // Fix up comment positions
+ for i, s := range specs {
+ s := s.(*ImportSpec)
+ if s.Name != nil {
+ s.Name.NamePos = pos[i].Start
+ }
+ s.Path.ValuePos = pos[i].Start
+ s.EndPos = pos[i].End
+ for _, g := range importComment[s] {
+ for _, c := range g.List {
+ c.Slash = pos[i].End
+ }
+ }
+ }
+
+ sort.Sort(byCommentPos(comments))
+
+ return specs
+}
+
+type byImportSpec []Spec // slice of *ImportSpec
+
+func (x byImportSpec) Len() int { return len(x) }
+func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byImportSpec) Less(i, j int) bool {
+ ipath := importPath(x[i])
+ jpath := importPath(x[j])
+ if ipath != jpath {
+ return ipath < jpath
+ }
+ iname := importName(x[i])
+ jname := importName(x[j])
+ if iname != jname {
+ return iname < jname
+ }
+ return importComment(x[i]) < importComment(x[j])
+}
+
+type byCommentPos []*CommentGroup
+
+func (x byCommentPos) Len() int { return len(x) }
+func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }
diff --git a/src/go/ast/print.go b/src/go/ast/print.go
new file mode 100644
index 000000000..f15dc11dc
--- /dev/null
+++ b/src/go/ast/print.go
@@ -0,0 +1,251 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains printing support for ASTs.
+
+package ast
+
+import (
+ "fmt"
+ "go/token"
+ "io"
+ "os"
+ "reflect"
+)
+
+// A FieldFilter may be provided to Fprint to control the output.
+type FieldFilter func(name string, value reflect.Value) bool
+
+// NotNilFilter returns true for field values that are not nil;
+// it returns false otherwise.
+func NotNilFilter(_ string, v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return !v.IsNil()
+ }
+ return true
+}
+
+// Fprint prints the (sub-)tree starting at AST node x to w.
+// If fset != nil, position information is interpreted relative
+// to that file set. Otherwise positions are printed as integer
+// values (file set specific offsets).
+//
+// A non-nil FieldFilter f may be provided to control the output:
+// struct fields for which f(fieldname, fieldvalue) is true are
+// printed; all others are filtered from the output. Unexported
+// struct fields are never printed.
+//
+func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (err error) {
+ // setup printer
+ p := printer{
+ output: w,
+ fset: fset,
+ filter: f,
+ ptrmap: make(map[interface{}]int),
+ last: '\n', // force printing of line number on first line
+ }
+
+ // install error handler
+ defer func() {
+ if e := recover(); e != nil {
+ err = e.(localError).err // re-panics if it's not a localError
+ }
+ }()
+
+ // print x
+ if x == nil {
+ p.printf("nil\n")
+ return
+ }
+ p.print(reflect.ValueOf(x))
+ p.printf("\n")
+
+ return
+}
+
+// Print prints x to standard output, skipping nil fields.
+// Print(fset, x) is the same as Fprint(os.Stdout, fset, x, NotNilFilter).
+func Print(fset *token.FileSet, x interface{}) error {
+ return Fprint(os.Stdout, fset, x, NotNilFilter)
+}
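+
+// Illustrative sketch (not part of the original source): the usual
+// debugging idiom, dumping the AST of a hypothetical file "hello.go":
+//
+//	fset := token.NewFileSet()
+//	f, err := parser.ParseFile(fset, "hello.go", src, 0)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	ast.Print(fset, f) // positions print as file:line:column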
+
+type printer struct {
+ output io.Writer
+ fset *token.FileSet
+ filter FieldFilter
+ ptrmap map[interface{}]int // *T -> line number
+ indent int // current indentation level
+ last byte // the last byte processed by Write
+ line int // current line number
+}
+
+var indent = []byte(". ")
+
+func (p *printer) Write(data []byte) (n int, err error) {
+ var m int
+ for i, b := range data {
+ // invariant: data[0:n] has been written
+ if b == '\n' {
+ m, err = p.output.Write(data[n : i+1])
+ n += m
+ if err != nil {
+ return
+ }
+ p.line++
+ } else if p.last == '\n' {
+ _, err = fmt.Fprintf(p.output, "%6d ", p.line)
+ if err != nil {
+ return
+ }
+ for j := p.indent; j > 0; j-- {
+ _, err = p.output.Write(indent)
+ if err != nil {
+ return
+ }
+ }
+ }
+ p.last = b
+ }
+ if len(data) > n {
+ m, err = p.output.Write(data[n:])
+ n += m
+ }
+ return
+}
+
+// localError wraps locally caught errors so we can distinguish
+// them from genuine panics which we don't want to return as errors.
+type localError struct {
+ err error
+}
+
+// printf is a convenience wrapper that takes care of print errors.
+func (p *printer) printf(format string, args ...interface{}) {
+ if _, err := fmt.Fprintf(p, format, args...); err != nil {
+ panic(localError{err})
+ }
+}
+
+// Implementation note: Print is written for AST nodes but could be
+// used to print arbitrary data structures; such a version should
+// probably be in a different package.
+//
+// Note: This code detects (some) cycles created via pointers but
+// not cycles that are created via slices or maps containing the
+// same slice or map. Code for general data structures probably
+// should catch those as well.
+
+func (p *printer) print(x reflect.Value) {
+ if !NotNilFilter("", x) {
+ p.printf("nil")
+ return
+ }
+
+ switch x.Kind() {
+ case reflect.Interface:
+ p.print(x.Elem())
+
+ case reflect.Map:
+ p.printf("%s (len = %d) {", x.Type(), x.Len())
+ if x.Len() > 0 {
+ p.indent++
+ p.printf("\n")
+ for _, key := range x.MapKeys() {
+ p.print(key)
+ p.printf(": ")
+ p.print(x.MapIndex(key))
+ p.printf("\n")
+ }
+ p.indent--
+ }
+ p.printf("}")
+
+ case reflect.Ptr:
+ p.printf("*")
+ // type-checked ASTs may contain cycles - use ptrmap
+ // to keep track of objects that have been printed
+ // already and print the respective line number instead
+ ptr := x.Interface()
+ if line, exists := p.ptrmap[ptr]; exists {
+ p.printf("(obj @ %d)", line)
+ } else {
+ p.ptrmap[ptr] = p.line
+ p.print(x.Elem())
+ }
+
+ case reflect.Array:
+ p.printf("%s {", x.Type())
+ if x.Len() > 0 {
+ p.indent++
+ p.printf("\n")
+ for i, n := 0, x.Len(); i < n; i++ {
+ p.printf("%d: ", i)
+ p.print(x.Index(i))
+ p.printf("\n")
+ }
+ p.indent--
+ }
+ p.printf("}")
+
+ case reflect.Slice:
+ if s, ok := x.Interface().([]byte); ok {
+ p.printf("%#q", s)
+ return
+ }
+ p.printf("%s (len = %d) {", x.Type(), x.Len())
+ if x.Len() > 0 {
+ p.indent++
+ p.printf("\n")
+ for i, n := 0, x.Len(); i < n; i++ {
+ p.printf("%d: ", i)
+ p.print(x.Index(i))
+ p.printf("\n")
+ }
+ p.indent--
+ }
+ p.printf("}")
+
+ case reflect.Struct:
+ t := x.Type()
+ p.printf("%s {", t)
+ p.indent++
+ first := true
+ for i, n := 0, t.NumField(); i < n; i++ {
+ // exclude non-exported fields because their
+ // values cannot be accessed via reflection
+ if name := t.Field(i).Name; IsExported(name) {
+ value := x.Field(i)
+ if p.filter == nil || p.filter(name, value) {
+ if first {
+ p.printf("\n")
+ first = false
+ }
+ p.printf("%s: ", name)
+ p.print(value)
+ p.printf("\n")
+ }
+ }
+ }
+ p.indent--
+ p.printf("}")
+
+ default:
+ v := x.Interface()
+ switch v := v.(type) {
+ case string:
+ // print strings in quotes
+ p.printf("%q", v)
+ return
+ case token.Pos:
+ // position values can be printed nicely if we have a file set
+ if p.fset != nil {
+ p.printf("%s", p.fset.Position(v))
+ return
+ }
+ }
+ // default
+ p.printf("%v", v)
+ }
+}
diff --git a/src/go/ast/print_test.go b/src/go/ast/print_test.go
new file mode 100644
index 000000000..210f16430
--- /dev/null
+++ b/src/go/ast/print_test.go
@@ -0,0 +1,97 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+var tests = []struct {
+ x interface{} // x is printed as s
+ s string
+}{
+ // basic types
+ {nil, "0 nil"},
+ {true, "0 true"},
+ {42, "0 42"},
+ {3.14, "0 3.14"},
+ {1 + 2.718i, "0 (1+2.718i)"},
+ {"foobar", "0 \"foobar\""},
+
+ // maps
+ {map[Expr]string{}, `0 map[ast.Expr]string (len = 0) {}`},
+ {map[string]int{"a": 1},
+ `0 map[string]int (len = 1) {
+ 1 . "a": 1
+ 2 }`},
+
+ // pointers
+ {new(int), "0 *0"},
+
+ // arrays
+ {[0]int{}, `0 [0]int {}`},
+ {[3]int{1, 2, 3},
+ `0 [3]int {
+ 1 . 0: 1
+ 2 . 1: 2
+ 3 . 2: 3
+ 4 }`},
+ {[...]int{42},
+ `0 [1]int {
+ 1 . 0: 42
+ 2 }`},
+
+ // slices
+ {[]int{}, `0 []int (len = 0) {}`},
+ {[]int{1, 2, 3},
+ `0 []int (len = 3) {
+ 1 . 0: 1
+ 2 . 1: 2
+ 3 . 2: 3
+ 4 }`},
+
+ // structs
+ {struct{}{}, `0 struct {} {}`},
+ {struct{ x int }{007}, `0 struct { x int } {}`},
+ {struct{ X, y int }{42, 991},
+ `0 struct { X int; y int } {
+ 1 . X: 42
+ 2 }`},
+ {struct{ X, Y int }{42, 991},
+ `0 struct { X int; Y int } {
+ 1 . X: 42
+ 2 . Y: 991
+ 3 }`},
+}
+
+// Split s into lines, trim whitespace from all lines, and return
+// the concatenated non-empty lines.
+func trim(s string) string {
+ lines := strings.Split(s, "\n")
+ i := 0
+ for _, line := range lines {
+ line = strings.TrimSpace(line)
+ if line != "" {
+ lines[i] = line
+ i++
+ }
+ }
+ return strings.Join(lines[0:i], "\n")
+}
+
+func TestPrint(t *testing.T) {
+ var buf bytes.Buffer
+ for _, test := range tests {
+ buf.Reset()
+ if err := Fprint(&buf, nil, test.x, nil); err != nil {
+ t.Errorf("Fprint failed: %s", err)
+ }
+ if s, ts := trim(buf.String()), trim(test.s); s != ts {
+ t.Errorf("got:\n%s\nexpected:\n%s\n", s, ts)
+ }
+ }
+}
diff --git a/src/go/ast/resolve.go b/src/go/ast/resolve.go
new file mode 100644
index 000000000..0406bfc58
--- /dev/null
+++ b/src/go/ast/resolve.go
@@ -0,0 +1,174 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements NewPackage.
+
+package ast
+
+import (
+ "fmt"
+ "go/scanner"
+ "go/token"
+ "strconv"
+)
+
+type pkgBuilder struct {
+ fset *token.FileSet
+ errors scanner.ErrorList
+}
+
+func (p *pkgBuilder) error(pos token.Pos, msg string) {
+ p.errors.Add(p.fset.Position(pos), msg)
+}
+
+func (p *pkgBuilder) errorf(pos token.Pos, format string, args ...interface{}) {
+ p.error(pos, fmt.Sprintf(format, args...))
+}
+
+func (p *pkgBuilder) declare(scope, altScope *Scope, obj *Object) {
+ alt := scope.Insert(obj)
+ if alt == nil && altScope != nil {
+ // see if there is a conflicting declaration in altScope
+ alt = altScope.Lookup(obj.Name)
+ }
+ if alt != nil {
+ prevDecl := ""
+ if pos := alt.Pos(); pos.IsValid() {
+ prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.fset.Position(pos))
+ }
+ p.error(obj.Pos(), fmt.Sprintf("%s redeclared in this block%s", obj.Name, prevDecl))
+ }
+}
+
+func resolve(scope *Scope, ident *Ident) bool {
+ for ; scope != nil; scope = scope.Outer {
+ if obj := scope.Lookup(ident.Name); obj != nil {
+ ident.Obj = obj
+ return true
+ }
+ }
+ return false
+}
+
+// An Importer resolves import paths to package Objects.
+// The imports map records the packages already imported,
+// indexed by package id (canonical import path).
+// An Importer must determine the canonical import path and
+ // check the imports map to see if the package is already present.
+// If so, the Importer can return the map entry. Otherwise, the
+// Importer should load the package data for the given path into
+// a new *Object (pkg), record pkg in the imports map, and then
+// return pkg.
+type Importer func(imports map[string]*Object, path string) (pkg *Object, err error)
+
+// NewPackage creates a new Package node from a set of File nodes. It resolves
+// unresolved identifiers across files and updates each file's Unresolved list
+// accordingly. If a non-nil importer and universe scope are provided, they are
+// used to resolve identifiers not declared in any of the package files. Any
+// remaining unresolved identifiers are reported as undeclared. If the files
+// belong to different packages, one package name is selected and files with
+// different package names are reported and then ignored.
+// The result is a package node and a scanner.ErrorList if there were errors.
+//
+func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error) {
+ var p pkgBuilder
+ p.fset = fset
+
+ // complete package scope
+ pkgName := ""
+ pkgScope := NewScope(universe)
+ for _, file := range files {
+ // package names must match
+ switch name := file.Name.Name; {
+ case pkgName == "":
+ pkgName = name
+ case name != pkgName:
+ p.errorf(file.Package, "package %s; expected %s", name, pkgName)
+ continue // ignore this file
+ }
+
+ // collect top-level file objects in package scope
+ for _, obj := range file.Scope.Objects {
+ p.declare(pkgScope, nil, obj)
+ }
+ }
+
+ // package global mapping of imported package ids to package objects
+ imports := make(map[string]*Object)
+
+ // complete file scopes with imports and resolve identifiers
+ for _, file := range files {
+ // ignore file if it belongs to a different package
+ // (error has already been reported)
+ if file.Name.Name != pkgName {
+ continue
+ }
+
+ // build file scope by processing all imports
+ importErrors := false
+ fileScope := NewScope(pkgScope)
+ for _, spec := range file.Imports {
+ if importer == nil {
+ importErrors = true
+ continue
+ }
+ path, _ := strconv.Unquote(spec.Path.Value)
+ pkg, err := importer(imports, path)
+ if err != nil {
+ p.errorf(spec.Path.Pos(), "could not import %s (%s)", path, err)
+ importErrors = true
+ continue
+ }
+ // TODO(gri) If a local package name != "." is provided,
+ // global identifier resolution could proceed even if the
+ // import failed. Consider adjusting the logic here a bit.
+
+ // local name overrides imported package name
+ name := pkg.Name
+ if spec.Name != nil {
+ name = spec.Name.Name
+ }
+
+ // add import to file scope
+ if name == "." {
+ // merge imported scope with file scope
+ for _, obj := range pkg.Data.(*Scope).Objects {
+ p.declare(fileScope, pkgScope, obj)
+ }
+ } else if name != "_" {
+ // declare imported package object in file scope
+ // (do not re-use pkg in the file scope but create
+ // a new object instead; the Decl field is different
+ // for different files)
+ obj := NewObj(Pkg, name)
+ obj.Decl = spec
+ obj.Data = pkg.Data
+ p.declare(fileScope, pkgScope, obj)
+ }
+ }
+
+ // resolve identifiers
+ if importErrors {
+ // don't use the universe scope without correct imports
+ // (objects in the universe may be shadowed by imports;
+ // with missing imports, identifiers might get resolved
+ // incorrectly to universe objects)
+ pkgScope.Outer = nil
+ }
+ i := 0
+ for _, ident := range file.Unresolved {
+ if !resolve(fileScope, ident) {
+ p.errorf(ident.Pos(), "undeclared name: %s", ident.Name)
+ file.Unresolved[i] = ident
+ i++
+ }
+
+ }
+ file.Unresolved = file.Unresolved[0:i]
+ pkgScope.Outer = universe // reset universe scope
+ }
+
+ p.errors.Sort()
+ return &Package{pkgName, pkgScope, imports, files}, p.errors.Err()
+}
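+
+// Illustrative sketch (not part of the original source): resolving a
+// package parsed from one hypothetical file. With a nil importer,
+// identifiers from imported packages are reported as undeclared.
+//
+//	files := map[string]*ast.File{"x.go": f}
+//	pkg, err := ast.NewPackage(fset, files, nil, nil)
+//	if err != nil {
+//		log.Println(err) // e.g. "undeclared name: fmt"
+//	}
+//	fmt.Println(pkg.Name)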
diff --git a/src/go/ast/scope.go b/src/go/ast/scope.go
new file mode 100644
index 000000000..df1529d18
--- /dev/null
+++ b/src/go/ast/scope.go
@@ -0,0 +1,162 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements scopes and the objects they contain.
+
+package ast
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+)
+
+// A Scope maintains the set of named language entities declared
+// in the scope and a link to the immediately surrounding (outer)
+// scope.
+//
+type Scope struct {
+ Outer *Scope
+ Objects map[string]*Object
+}
+
+// NewScope creates a new scope nested in the outer scope.
+func NewScope(outer *Scope) *Scope {
+ const n = 4 // initial scope capacity
+ return &Scope{outer, make(map[string]*Object, n)}
+}
+
+// Lookup returns the object with the given name if it is
+// found in scope s, otherwise it returns nil. Outer scopes
+// are ignored.
+//
+func (s *Scope) Lookup(name string) *Object {
+ return s.Objects[name]
+}
+
+// Insert attempts to insert a named object obj into the scope s.
+// If the scope already contains an object alt with the same name,
+// Insert leaves the scope unchanged and returns alt. Otherwise
+// it inserts obj and returns nil.
+//
+func (s *Scope) Insert(obj *Object) (alt *Object) {
+ if alt = s.Objects[obj.Name]; alt == nil {
+ s.Objects[obj.Name] = obj
+ }
+ return
+}
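+
+// Illustrative sketch (not part of the original source): declaring an
+// object and detecting redeclaration via the Insert result.
+//
+//	s := ast.NewScope(nil)
+//	obj := ast.NewObj(ast.Var, "x")
+//	if alt := s.Insert(obj); alt != nil {
+//		// "x" was already declared; alt is the existing object
+//	}
+//	fmt.Println(s.Lookup("x") == obj) // true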
+
+// Debugging support
+func (s *Scope) String() string {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "scope %p {", s)
+ if s != nil && len(s.Objects) > 0 {
+ fmt.Fprintln(&buf)
+ for _, obj := range s.Objects {
+ fmt.Fprintf(&buf, "\t%s %s\n", obj.Kind, obj.Name)
+ }
+ }
+ fmt.Fprintf(&buf, "}\n")
+ return buf.String()
+}
+
+// ----------------------------------------------------------------------------
+// Objects
+
+// An Object describes a named language entity such as a package,
+// constant, type, variable, function (incl. methods), or label.
+//
+// The Data field contains object-specific data:
+//
+//	Kind    Data type         Data value
+//	Pkg     *types.Package    package scope
+//	Con     int               iota for the respective declaration
+//	Con     != nil            constant value
+//	Typ     *Scope            (used as method scope during type checking - transient)
+//
+type Object struct {
+ Kind ObjKind
+ Name string // declared name
+ Decl interface{} // corresponding Field, XxxSpec, FuncDecl, LabeledStmt, AssignStmt, Scope; or nil
+ Data interface{} // object-specific data; or nil
+ Type interface{} // placeholder for type information; may be nil
+}
+
+// NewObj creates a new object of a given kind and name.
+func NewObj(kind ObjKind, name string) *Object {
+ return &Object{Kind: kind, Name: name}
+}
+
+// Pos computes the source position of the declaration of an object name.
+// The result may be an invalid position if it cannot be computed
+// (obj.Decl may be nil or not correct).
+func (obj *Object) Pos() token.Pos {
+ name := obj.Name
+ switch d := obj.Decl.(type) {
+ case *Field:
+ for _, n := range d.Names {
+ if n.Name == name {
+ return n.Pos()
+ }
+ }
+ case *ImportSpec:
+ if d.Name != nil && d.Name.Name == name {
+ return d.Name.Pos()
+ }
+ return d.Path.Pos()
+ case *ValueSpec:
+ for _, n := range d.Names {
+ if n.Name == name {
+ return n.Pos()
+ }
+ }
+ case *TypeSpec:
+ if d.Name.Name == name {
+ return d.Name.Pos()
+ }
+ case *FuncDecl:
+ if d.Name.Name == name {
+ return d.Name.Pos()
+ }
+ case *LabeledStmt:
+ if d.Label.Name == name {
+ return d.Label.Pos()
+ }
+ case *AssignStmt:
+ for _, x := range d.Lhs {
+ if ident, isIdent := x.(*Ident); isIdent && ident.Name == name {
+ return ident.Pos()
+ }
+ }
+ case *Scope:
+ // predeclared object - nothing to do for now
+ }
+ return token.NoPos
+}
+
+// ObjKind describes what an object represents.
+type ObjKind int
+
+// The list of possible Object kinds.
+const (
+ Bad ObjKind = iota // for error handling
+ Pkg // package
+ Con // constant
+ Typ // type
+ Var // variable
+ Fun // function or method
+ Lbl // label
+)
+
+var objKindStrings = [...]string{
+ Bad: "bad",
+ Pkg: "package",
+ Con: "const",
+ Typ: "type",
+ Var: "var",
+ Fun: "func",
+ Lbl: "label",
+}
+
+func (kind ObjKind) String() string { return objKindStrings[kind] }
diff --git a/src/go/ast/walk.go b/src/go/ast/walk.go
new file mode 100644
index 000000000..73ac38647
--- /dev/null
+++ b/src/go/ast/walk.go
@@ -0,0 +1,386 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ast
+
+import "fmt"
+
+// A Visitor's Visit method is invoked for each node encountered by Walk.
+// If the result visitor w is not nil, Walk visits each of the children
+// of node with the visitor w, followed by a call of w.Visit(nil).
+type Visitor interface {
+ Visit(node Node) (w Visitor)
+}
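+
+// Illustrative sketch (not part of the original source): a minimal
+// Visitor that prints the position and concrete type of every node;
+// returning the receiver continues the traversal into children.
+//
+//	type dumper struct{ fset *token.FileSet }
+//
+//	func (d dumper) Visit(n ast.Node) ast.Visitor {
+//		if n != nil {
+//			fmt.Printf("%s %T\n", d.fset.Position(n.Pos()), n)
+//		}
+//		return d
+//	}
+//
+//	// ast.Walk(dumper{fset}, f) then visits the whole file f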
+
+// Helper functions for common node lists. They may be empty.
+
+func walkIdentList(v Visitor, list []*Ident) {
+ for _, x := range list {
+ Walk(v, x)
+ }
+}
+
+func walkExprList(v Visitor, list []Expr) {
+ for _, x := range list {
+ Walk(v, x)
+ }
+}
+
+func walkStmtList(v Visitor, list []Stmt) {
+ for _, x := range list {
+ Walk(v, x)
+ }
+}
+
+func walkDeclList(v Visitor, list []Decl) {
+ for _, x := range list {
+ Walk(v, x)
+ }
+}
+
+// TODO(gri): Investigate if providing a closure to Walk leads to
+// simpler use (and may help eliminate Inspect in turn).
+
+// Walk traverses an AST in depth-first order: It starts by calling
+// v.Visit(node); node must not be nil. If the visitor w returned by
+// v.Visit(node) is not nil, Walk is invoked recursively with visitor
+// w for each of the non-nil children of node, followed by a call of
+// w.Visit(nil).
+//
+func Walk(v Visitor, node Node) {
+ if v = v.Visit(node); v == nil {
+ return
+ }
+
+ // walk children
+ // (the order of the cases matches the order
+ // of the corresponding node types in ast.go)
+ switch n := node.(type) {
+ // Comments and fields
+ case *Comment:
+ // nothing to do
+
+ case *CommentGroup:
+ for _, c := range n.List {
+ Walk(v, c)
+ }
+
+ case *Field:
+ if n.Doc != nil {
+ Walk(v, n.Doc)
+ }
+ walkIdentList(v, n.Names)
+ Walk(v, n.Type)
+ if n.Tag != nil {
+ Walk(v, n.Tag)
+ }
+ if n.Comment != nil {
+ Walk(v, n.Comment)
+ }
+
+ case *FieldList:
+ for _, f := range n.List {
+ Walk(v, f)
+ }
+
+ // Expressions
+ case *BadExpr, *Ident, *BasicLit:
+ // nothing to do
+
+ case *Ellipsis:
+ if n.Elt != nil {
+ Walk(v, n.Elt)
+ }
+
+ case *FuncLit:
+ Walk(v, n.Type)
+ Walk(v, n.Body)
+
+ case *CompositeLit:
+ if n.Type != nil {
+ Walk(v, n.Type)
+ }
+ walkExprList(v, n.Elts)
+
+ case *ParenExpr:
+ Walk(v, n.X)
+
+ case *SelectorExpr:
+ Walk(v, n.X)
+ Walk(v, n.Sel)
+
+ case *IndexExpr:
+ Walk(v, n.X)
+ Walk(v, n.Index)
+
+ case *SliceExpr:
+ Walk(v, n.X)
+ if n.Low != nil {
+ Walk(v, n.Low)
+ }
+ if n.High != nil {
+ Walk(v, n.High)
+ }
+ if n.Max != nil {
+ Walk(v, n.Max)
+ }
+
+ case *TypeAssertExpr:
+ Walk(v, n.X)
+ if n.Type != nil {
+ Walk(v, n.Type)
+ }
+
+ case *CallExpr:
+ Walk(v, n.Fun)
+ walkExprList(v, n.Args)
+
+ case *StarExpr:
+ Walk(v, n.X)
+
+ case *UnaryExpr:
+ Walk(v, n.X)
+
+ case *BinaryExpr:
+ Walk(v, n.X)
+ Walk(v, n.Y)
+
+ case *KeyValueExpr:
+ Walk(v, n.Key)
+ Walk(v, n.Value)
+
+ // Types
+ case *ArrayType:
+ if n.Len != nil {
+ Walk(v, n.Len)
+ }
+ Walk(v, n.Elt)
+
+ case *StructType:
+ Walk(v, n.Fields)
+
+ case *FuncType:
+ if n.Params != nil {
+ Walk(v, n.Params)
+ }
+ if n.Results != nil {
+ Walk(v, n.Results)
+ }
+
+ case *InterfaceType:
+ Walk(v, n.Methods)
+
+ case *MapType:
+ Walk(v, n.Key)
+ Walk(v, n.Value)
+
+ case *ChanType:
+ Walk(v, n.Value)
+
+ // Statements
+ case *BadStmt:
+ // nothing to do
+
+ case *DeclStmt:
+ Walk(v, n.Decl)
+
+ case *EmptyStmt:
+ // nothing to do
+
+ case *LabeledStmt:
+ Walk(v, n.Label)
+ Walk(v, n.Stmt)
+
+ case *ExprStmt:
+ Walk(v, n.X)
+
+ case *SendStmt:
+ Walk(v, n.Chan)
+ Walk(v, n.Value)
+
+ case *IncDecStmt:
+ Walk(v, n.X)
+
+ case *AssignStmt:
+ walkExprList(v, n.Lhs)
+ walkExprList(v, n.Rhs)
+
+ case *GoStmt:
+ Walk(v, n.Call)
+
+ case *DeferStmt:
+ Walk(v, n.Call)
+
+ case *ReturnStmt:
+ walkExprList(v, n.Results)
+
+ case *BranchStmt:
+ if n.Label != nil {
+ Walk(v, n.Label)
+ }
+
+ case *BlockStmt:
+ walkStmtList(v, n.List)
+
+ case *IfStmt:
+ if n.Init != nil {
+ Walk(v, n.Init)
+ }
+ Walk(v, n.Cond)
+ Walk(v, n.Body)
+ if n.Else != nil {
+ Walk(v, n.Else)
+ }
+
+ case *CaseClause:
+ walkExprList(v, n.List)
+ walkStmtList(v, n.Body)
+
+ case *SwitchStmt:
+ if n.Init != nil {
+ Walk(v, n.Init)
+ }
+ if n.Tag != nil {
+ Walk(v, n.Tag)
+ }
+ Walk(v, n.Body)
+
+ case *TypeSwitchStmt:
+ if n.Init != nil {
+ Walk(v, n.Init)
+ }
+ Walk(v, n.Assign)
+ Walk(v, n.Body)
+
+ case *CommClause:
+ if n.Comm != nil {
+ Walk(v, n.Comm)
+ }
+ walkStmtList(v, n.Body)
+
+ case *SelectStmt:
+ Walk(v, n.Body)
+
+ case *ForStmt:
+ if n.Init != nil {
+ Walk(v, n.Init)
+ }
+ if n.Cond != nil {
+ Walk(v, n.Cond)
+ }
+ if n.Post != nil {
+ Walk(v, n.Post)
+ }
+ Walk(v, n.Body)
+
+ case *RangeStmt:
+ if n.Key != nil {
+ Walk(v, n.Key)
+ }
+ if n.Value != nil {
+ Walk(v, n.Value)
+ }
+ Walk(v, n.X)
+ Walk(v, n.Body)
+
+ // Declarations
+ case *ImportSpec:
+ if n.Doc != nil {
+ Walk(v, n.Doc)
+ }
+ if n.Name != nil {
+ Walk(v, n.Name)
+ }
+ Walk(v, n.Path)
+ if n.Comment != nil {
+ Walk(v, n.Comment)
+ }
+
+ case *ValueSpec:
+ if n.Doc != nil {
+ Walk(v, n.Doc)
+ }
+ walkIdentList(v, n.Names)
+ if n.Type != nil {
+ Walk(v, n.Type)
+ }
+ walkExprList(v, n.Values)
+ if n.Comment != nil {
+ Walk(v, n.Comment)
+ }
+
+ case *TypeSpec:
+ if n.Doc != nil {
+ Walk(v, n.Doc)
+ }
+ Walk(v, n.Name)
+ Walk(v, n.Type)
+ if n.Comment != nil {
+ Walk(v, n.Comment)
+ }
+
+ case *BadDecl:
+ // nothing to do
+
+ case *GenDecl:
+ if n.Doc != nil {
+ Walk(v, n.Doc)
+ }
+ for _, s := range n.Specs {
+ Walk(v, s)
+ }
+
+ case *FuncDecl:
+ if n.Doc != nil {
+ Walk(v, n.Doc)
+ }
+ if n.Recv != nil {
+ Walk(v, n.Recv)
+ }
+ Walk(v, n.Name)
+ Walk(v, n.Type)
+ if n.Body != nil {
+ Walk(v, n.Body)
+ }
+
+ // Files and packages
+ case *File:
+ if n.Doc != nil {
+ Walk(v, n.Doc)
+ }
+ Walk(v, n.Name)
+ walkDeclList(v, n.Decls)
+ // don't walk n.Comments - they have been
+ // visited already through the individual
+ // nodes
+
+ case *Package:
+ for _, f := range n.Files {
+ Walk(v, f)
+ }
+
+ default:
+ fmt.Printf("ast.Walk: unexpected node type %T", n)
+ panic("ast.Walk")
+ }
+
+ v.Visit(nil)
+}
+
+type inspector func(Node) bool
+
+func (f inspector) Visit(node Node) Visitor {
+ if f(node) {
+ return f
+ }
+ return nil
+}
+
+// Inspect traverses an AST in depth-first order: It starts by calling
+// f(node); node must not be nil. If f returns true, Inspect invokes f
+// for all the non-nil children of node, recursively.
+//
+func Inspect(node Node, f func(Node) bool) {
+ Walk(inspector(f), node)
+}
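+
+// Illustrative sketch (not part of the original source): Inspect is the
+// closure-based counterpart to Walk; this collects all identifier names
+// in a previously parsed file f.
+//
+//	var names []string
+//	ast.Inspect(f, func(n ast.Node) bool {
+//		if id, ok := n.(*ast.Ident); ok {
+//			names = append(names, id.Name)
+//		}
+//		return true // continue into children
+//	})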
diff --git a/src/go/build/build.go b/src/go/build/build.go
new file mode 100644
index 000000000..69cb4b2f6
--- /dev/null
+++ b/src/go/build/build.go
@@ -0,0 +1,1364 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/doc"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ pathpkg "path"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A Context specifies the supporting context for a build.
+type Context struct {
+ GOARCH string // target architecture
+ GOOS string // target operating system
+ GOROOT string // Go root
+ GOPATH string // Go path
+ CgoEnabled bool // whether cgo can be used
+ UseAllFiles bool // use files regardless of +build lines, file names
+ Compiler string // compiler to assume when computing target paths
+
+ // The build and release tags specify build constraints
+ // that should be considered satisfied when processing +build lines.
+ // Clients creating a new context may customize BuildTags, which
+ // defaults to empty, but it is usually an error to customize ReleaseTags,
+ // which defaults to the list of Go releases the current release is compatible with.
+ // In addition to the BuildTags and ReleaseTags, build constraints
+ // consider the values of GOARCH and GOOS as satisfied tags.
+ BuildTags []string
+ ReleaseTags []string
+
+ // The install suffix specifies a suffix to use in the name of the installation
+ // directory. By default it is empty, but custom builds that need to keep
+ // their outputs separate can set InstallSuffix to do so. For example, when
+ // using the race detector, the go command uses InstallSuffix = "race", so
+ // that on a Linux/386 system, packages are written to a directory named
+ // "linux_386_race" instead of the usual "linux_386".
+ InstallSuffix string
+
+ // By default, Import uses the operating system's file system calls
+ // to read directories and files. To read from other sources,
+ // callers can set the following functions. They all have default
+ // behaviors that use the local file system, so clients need only set
+ // the functions whose behaviors they wish to change.
+
+ // JoinPath joins the sequence of path fragments into a single path.
+ // If JoinPath is nil, Import uses filepath.Join.
+ JoinPath func(elem ...string) string
+
+ // SplitPathList splits the path list into a slice of individual paths.
+ // If SplitPathList is nil, Import uses filepath.SplitList.
+ SplitPathList func(list string) []string
+
+ // IsAbsPath reports whether path is an absolute path.
+ // If IsAbsPath is nil, Import uses filepath.IsAbs.
+ IsAbsPath func(path string) bool
+
+ // IsDir reports whether the path names a directory.
+ // If IsDir is nil, Import calls os.Stat and uses the result's IsDir method.
+ IsDir func(path string) bool
+
+ // HasSubdir reports whether dir is a subdirectory of
+ // (perhaps multiple levels below) root.
+ // If so, HasSubdir sets rel to a slash-separated path that
+ // can be joined to root to produce a path equivalent to dir.
+ // If HasSubdir is nil, Import uses an implementation built on
+ // filepath.EvalSymlinks.
+ HasSubdir func(root, dir string) (rel string, ok bool)
+
+ // ReadDir returns a slice of os.FileInfo, sorted by Name,
+ // describing the content of the named directory.
+ // If ReadDir is nil, Import uses ioutil.ReadDir.
+ ReadDir func(dir string) (fi []os.FileInfo, err error)
+
+ // OpenFile opens a file (not a directory) for reading.
+ // If OpenFile is nil, Import uses os.Open.
+ OpenFile func(path string) (r io.ReadCloser, err error)
+}
+
+// joinPath calls ctxt.JoinPath (if not nil) or else filepath.Join.
+func (ctxt *Context) joinPath(elem ...string) string {
+ if f := ctxt.JoinPath; f != nil {
+ return f(elem...)
+ }
+ return filepath.Join(elem...)
+}
+
+// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList.
+func (ctxt *Context) splitPathList(s string) []string {
+ if f := ctxt.SplitPathList; f != nil {
+ return f(s)
+ }
+ return filepath.SplitList(s)
+}
+
+// isAbsPath calls ctxt.IsAbsPath (if not nil) or else filepath.IsAbs.
+func (ctxt *Context) isAbsPath(path string) bool {
+ if f := ctxt.IsAbsPath; f != nil {
+ return f(path)
+ }
+ return filepath.IsAbs(path)
+}
+
+// isDir calls ctxt.IsDir (if not nil) or else uses os.Stat.
+func (ctxt *Context) isDir(path string) bool {
+ if f := ctxt.IsDir; f != nil {
+ return f(path)
+ }
+ fi, err := os.Stat(path)
+ return err == nil && fi.IsDir()
+}
+
+// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
+// the local file system to answer the question.
+func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
+ if f := ctxt.HasSubdir; f != nil {
+ return f(root, dir)
+ }
+
+ // Try using paths we received.
+ if rel, ok = hasSubdir(root, dir); ok {
+ return
+ }
+
+ // Try expanding symlinks and comparing
+ // expanded against unexpanded and
+ // expanded against expanded.
+ rootSym, _ := filepath.EvalSymlinks(root)
+ dirSym, _ := filepath.EvalSymlinks(dir)
+
+ if rel, ok = hasSubdir(rootSym, dir); ok {
+ return
+ }
+ if rel, ok = hasSubdir(root, dirSym); ok {
+ return
+ }
+ return hasSubdir(rootSym, dirSym)
+}
+
+func hasSubdir(root, dir string) (rel string, ok bool) {
+ const sep = string(filepath.Separator)
+ root = filepath.Clean(root)
+ if !strings.HasSuffix(root, sep) {
+ root += sep
+ }
+ dir = filepath.Clean(dir)
+ if !strings.HasPrefix(dir, root) {
+ return "", false
+ }
+ return filepath.ToSlash(dir[len(root):]), true
+}
+
+// readDir calls ctxt.ReadDir (if not nil) or else ioutil.ReadDir.
+func (ctxt *Context) readDir(path string) ([]os.FileInfo, error) {
+ if f := ctxt.ReadDir; f != nil {
+ return f(path)
+ }
+ return ioutil.ReadDir(path)
+}
+
+// openFile calls ctxt.OpenFile (if not nil) or else os.Open.
+func (ctxt *Context) openFile(path string) (io.ReadCloser, error) {
+ if fn := ctxt.OpenFile; fn != nil {
+ return fn(path)
+ }
+
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err // nil interface
+ }
+ return f, nil
+}
+
+// isFile determines whether path is a file by trying to open it.
+// It reuses openFile instead of adding another function to the
+// list in Context.
+func (ctxt *Context) isFile(path string) bool {
+ f, err := ctxt.openFile(path)
+ if err != nil {
+ return false
+ }
+ f.Close()
+ return true
+}
+
+// gopath returns the list of Go path directories.
+func (ctxt *Context) gopath() []string {
+ var all []string
+ for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
+ if p == "" || p == ctxt.GOROOT {
+ // Empty paths are uninteresting.
+ // If the path is the GOROOT, ignore it.
+ // People sometimes set GOPATH=$GOROOT.
+ // Do not get confused by this common mistake.
+ continue
+ }
+ if strings.HasPrefix(p, "~") {
+ // Path segments starting with ~ on Unix are almost always
+ // users who have incorrectly quoted ~ while setting GOPATH,
+ // preventing it from expanding to $HOME.
+ // The situation is made more confusing by the fact that
+ // bash allows quoted ~ in $PATH (most shells do not).
+ // Do not get confused by this, and do not try to use the path.
+ // It does not exist, and printing errors about it confuses
+ // those users even more, because they think "sure ~ exists!".
+ // The go command diagnoses this situation and prints a
+ // useful error.
+ // On Windows, ~ is used in short names, such as c:\progra~1
+ // for c:\program files.
+ continue
+ }
+ all = append(all, p)
+ }
+ return all
+}
+
+// SrcDirs returns a list of package source root directories.
+// It draws from the current Go root and Go path but omits directories
+// that do not exist.
+func (ctxt *Context) SrcDirs() []string {
+ var all []string
+ if ctxt.GOROOT != "" {
+ dir := ctxt.joinPath(ctxt.GOROOT, "src")
+ if ctxt.isDir(dir) {
+ all = append(all, dir)
+ }
+ }
+ for _, p := range ctxt.gopath() {
+ dir := ctxt.joinPath(p, "src")
+ if ctxt.isDir(dir) {
+ all = append(all, dir)
+ }
+ }
+ return all
+}
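+
+// Illustrative sketch (not part of the original source): listing the
+// source roots seen by the default build context.
+//
+//	for _, dir := range build.Default.SrcDirs() {
+//		fmt.Println(dir)
+//	}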
+
+// Default is the default Context for builds.
+// It uses the GOARCH, GOOS, GOROOT, and GOPATH environment variables
+// if set, or else the compiled code's GOARCH, GOOS, and GOROOT.
+var Default Context = defaultContext()
+
+var cgoEnabled = map[string]bool{
+ "darwin/386": true,
+ "darwin/amd64": true,
+ "dragonfly/386": true,
+ "dragonfly/amd64": true,
+ "freebsd/386": true,
+ "freebsd/amd64": true,
+ "freebsd/arm": true,
+ "linux/386": true,
+ "linux/amd64": true,
+ "linux/arm": true,
+ "android/386": true,
+ "android/amd64": true,
+ "android/arm": true,
+ "netbsd/386": true,
+ "netbsd/amd64": true,
+ "netbsd/arm": true,
+ "openbsd/386": true,
+ "openbsd/amd64": true,
+ "windows/386": true,
+ "windows/amd64": true,
+}
+
+func defaultContext() Context {
+ var c Context
+
+ c.GOARCH = envOr("GOARCH", runtime.GOARCH)
+ c.GOOS = envOr("GOOS", runtime.GOOS)
+ c.GOROOT = runtime.GOROOT()
+ c.GOPATH = envOr("GOPATH", "")
+ c.Compiler = runtime.Compiler
+
+ // Each major Go release in the Go 1.x series should add a tag here.
+ // Old tags should not be removed. That is, the go1.x tag is present
+ // in all releases >= Go 1.x. Code that requires Go 1.x or later should
+ // say "+build go1.x", and code that should only be built before Go 1.x
+ // (perhaps it is the stub to use in that case) should say "+build !go1.x".
+ //
+ // When we reach Go 1.4 the line will read
+ // c.ReleaseTags = []string{"go1.1", "go1.2", "go1.3", "go1.4"}
+ // and so on.
+ c.ReleaseTags = []string{"go1.1", "go1.2", "go1.3"}
+
+ switch os.Getenv("CGO_ENABLED") {
+ case "1":
+ c.CgoEnabled = true
+ case "0":
+ c.CgoEnabled = false
+ default:
+ // cgo must be explicitly enabled for cross compilation builds
+ if runtime.GOARCH == c.GOARCH && runtime.GOOS == c.GOOS {
+ c.CgoEnabled = cgoEnabled[c.GOOS+"/"+c.GOARCH]
+ break
+ }
+ c.CgoEnabled = false
+ }
+
+ return c
+}
+
+func envOr(name, def string) string {
+ s := os.Getenv(name)
+ if s == "" {
+ return def
+ }
+ return s
+}
+
+// An ImportMode controls the behavior of the Import method.
+type ImportMode uint
+
+const (
+ // If FindOnly is set, Import stops after locating the directory
+ // that should contain the sources for a package. It does not
+ // read any files in the directory.
+ FindOnly ImportMode = 1 << iota
+
+ // If AllowBinary is set, Import can be satisfied by a compiled
+ // package object without corresponding sources.
+ AllowBinary
+
+ // If ImportComment is set, parse import comments on package statements.
+ // Import returns an error if it finds a comment it cannot understand
+ // or finds conflicting comments in multiple source files.
+ // See golang.org/s/go14customimport for more information.
+ ImportComment
+)
+
+// A Package describes the Go package found in a directory.
+type Package struct {
+ Dir string // directory containing package sources
+ Name string // package name
+ ImportComment string // path in import comment on package statement
+ Doc string // documentation synopsis
+ ImportPath string // import path of package ("" if unknown)
+ Root string // root of Go tree where this package lives
+ SrcRoot string // package source root directory ("" if unknown)
+ PkgRoot string // package install root directory ("" if unknown)
+ BinDir string // command install directory ("" if unknown)
+ Goroot bool // package found in Go root
+ PkgObj string // installed .a file
+ AllTags []string // tags that can influence file selection in this directory
+ ConflictDir string // this directory shadows Dir in $GOPATH
+
+ // Source files
+ GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
+ CgoFiles []string // .go source files that import "C"
+ IgnoredGoFiles []string // .go source files ignored for this build
+ CFiles []string // .c source files
+ CXXFiles []string // .cc, .cpp and .cxx source files
+ MFiles []string // .m (Objective-C) source files
+ HFiles []string // .h, .hh, .hpp and .hxx source files
+ SFiles []string // .s source files
+ SwigFiles []string // .swig files
+ SwigCXXFiles []string // .swigcxx files
+ SysoFiles []string // .syso system object files to add to archive
+
+ // Cgo directives
+ CgoCFLAGS []string // Cgo CFLAGS directives
+ CgoCPPFLAGS []string // Cgo CPPFLAGS directives
+ CgoCXXFLAGS []string // Cgo CXXFLAGS directives
+ CgoLDFLAGS []string // Cgo LDFLAGS directives
+ CgoPkgConfig []string // Cgo pkg-config directives
+
+ // Dependency information
+ Imports []string // imports from GoFiles, CgoFiles
+ ImportPos map[string][]token.Position // line information for Imports
+
+ // Test information
+ TestGoFiles []string // _test.go files in package
+ TestImports []string // imports from TestGoFiles
+ TestImportPos map[string][]token.Position // line information for TestImports
+ XTestGoFiles []string // _test.go files outside package
+ XTestImports []string // imports from XTestGoFiles
+ XTestImportPos map[string][]token.Position // line information for XTestImports
+}
+
+// IsCommand reports whether the package is considered a
+// command to be installed (not just a library).
+// Packages named "main" are treated as commands.
+func (p *Package) IsCommand() bool {
+ return p.Name == "main"
+}
+
+// ImportDir is like Import but processes the Go package found in
+// the named directory.
+func (ctxt *Context) ImportDir(dir string, mode ImportMode) (*Package, error) {
+ return ctxt.Import(".", dir, mode)
+}
+
+// NoGoError is the error used by Import to describe a directory
+// containing no buildable Go source files. (It may still contain
+// test files, files hidden by build tags, and so on.)
+type NoGoError struct {
+ Dir string
+}
+
+func (e *NoGoError) Error() string {
+ return "no buildable Go source files in " + e.Dir
+}
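+
+// Callers that want to treat "no Go files" differently from other failures
+// can use a type assertion:
+//
+//	if _, ok := err.(*NoGoError); ok {
+//		// The directory exists but holds no buildable Go source files.
+//	}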
+
+func nameExt(name string) string {
+ i := strings.LastIndex(name, ".")
+ if i < 0 {
+ return ""
+ }
+ return name[i:]
+}
+
+// Import returns details about the Go package named by the import path,
+// interpreting local import paths relative to the srcDir directory.
+// If the path is a local import path naming a package that can be imported
+// using a standard import path, the returned package will set p.ImportPath
+// to that path.
+//
+// In the directory containing the package, .go, .c, .h, and .s files are
+// considered part of the package except for:
+//
+// - .go files in package documentation
+// - files starting with _ or . (likely editor temporary files)
+// - files with build constraints not satisfied by the context
+//
+// If an error occurs, Import returns a non-nil error and a non-nil
+// *Package containing partial information.
+//
+func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Package, error) {
+ p := &Package{
+ ImportPath: path,
+ }
+ if path == "" {
+ return p, fmt.Errorf("import %q: invalid import path", path)
+ }
+
+ var pkga string
+ var pkgerr error
+ switch ctxt.Compiler {
+ case "gccgo":
+ dir, elem := pathpkg.Split(p.ImportPath)
+ pkga = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + "/" + dir + "lib" + elem + ".a"
+ case "gc":
+ suffix := ""
+ if ctxt.InstallSuffix != "" {
+ suffix = "_" + ctxt.InstallSuffix
+ }
+ pkga = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix + "/" + p.ImportPath + ".a"
+ default:
+ // Save error for end of function.
+ pkgerr = fmt.Errorf("import %q: unknown compiler %q", path, ctxt.Compiler)
+ }
+
+ binaryOnly := false
+ if IsLocalImport(path) {
+ pkga = "" // local imports have no installed path
+ if srcDir == "" {
+ return p, fmt.Errorf("import %q: import relative to unknown directory", path)
+ }
+ if !ctxt.isAbsPath(path) {
+ p.Dir = ctxt.joinPath(srcDir, path)
+ }
+ // Determine canonical import path, if any.
+ if ctxt.GOROOT != "" {
+ root := ctxt.joinPath(ctxt.GOROOT, "src")
+ if sub, ok := ctxt.hasSubdir(root, p.Dir); ok {
+ p.Goroot = true
+ p.ImportPath = sub
+ p.Root = ctxt.GOROOT
+ goto Found
+ }
+ }
+ all := ctxt.gopath()
+ for i, root := range all {
+ rootsrc := ctxt.joinPath(root, "src")
+ if sub, ok := ctxt.hasSubdir(rootsrc, p.Dir); ok {
+ // We found a potential import path for dir,
+ // but check that using it wouldn't find something
+ // else first.
+ if ctxt.GOROOT != "" {
+ if dir := ctxt.joinPath(ctxt.GOROOT, "src", sub); ctxt.isDir(dir) {
+ p.ConflictDir = dir
+ goto Found
+ }
+ }
+ for _, earlyRoot := range all[:i] {
+ if dir := ctxt.joinPath(earlyRoot, "src", sub); ctxt.isDir(dir) {
+ p.ConflictDir = dir
+ goto Found
+ }
+ }
+
+ // sub would not name some other directory instead of this one.
+ // Record it.
+ p.ImportPath = sub
+ p.Root = root
+ goto Found
+ }
+ }
+ // It's okay that we didn't find a root containing dir.
+ // Keep going with the information we have.
+ } else {
+ if strings.HasPrefix(path, "/") {
+ return p, fmt.Errorf("import %q: cannot import absolute path", path)
+ }
+
+ // tried records the location of unsuccessful package lookups
+ var tried struct {
+ goroot string
+ gopath []string
+ }
+
+ // Determine directory from import path.
+ if ctxt.GOROOT != "" {
+ dir := ctxt.joinPath(ctxt.GOROOT, "src", path)
+ isDir := ctxt.isDir(dir)
+ binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga))
+ if isDir || binaryOnly {
+ p.Dir = dir
+ p.Goroot = true
+ p.Root = ctxt.GOROOT
+ goto Found
+ }
+ tried.goroot = dir
+ }
+ for _, root := range ctxt.gopath() {
+ dir := ctxt.joinPath(root, "src", path)
+ isDir := ctxt.isDir(dir)
+ binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(root, pkga))
+ if isDir || binaryOnly {
+ p.Dir = dir
+ p.Root = root
+ goto Found
+ }
+ tried.gopath = append(tried.gopath, dir)
+ }
+
+ // package was not found
+ var paths []string
+ if tried.goroot != "" {
+ paths = append(paths, fmt.Sprintf("\t%s (from $GOROOT)", tried.goroot))
+ } else {
+ paths = append(paths, "\t($GOROOT not set)")
+ }
+ var i int
+ var format = "\t%s (from $GOPATH)"
+ for ; i < len(tried.gopath); i++ {
+ if i > 0 {
+ format = "\t%s"
+ }
+ paths = append(paths, fmt.Sprintf(format, tried.gopath[i]))
+ }
+ if i == 0 {
+ paths = append(paths, "\t($GOPATH not set)")
+ }
+ return p, fmt.Errorf("cannot find package %q in any of:\n%s", path, strings.Join(paths, "\n"))
+ }
+
+Found:
+ if p.Root != "" {
+ p.SrcRoot = ctxt.joinPath(p.Root, "src")
+ p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
+ p.BinDir = ctxt.joinPath(p.Root, "bin")
+ if pkga != "" {
+ p.PkgObj = ctxt.joinPath(p.Root, pkga)
+ }
+ }
+
+ if mode&FindOnly != 0 {
+ return p, pkgerr
+ }
+ if binaryOnly && (mode&AllowBinary) != 0 {
+ return p, pkgerr
+ }
+
+ dirs, err := ctxt.readDir(p.Dir)
+ if err != nil {
+ return p, err
+ }
+
+ var Sfiles []string // files with ".S" (capital S)
+ var firstFile, firstCommentFile string
+ imported := make(map[string][]token.Position)
+ testImported := make(map[string][]token.Position)
+ xTestImported := make(map[string][]token.Position)
+ allTags := make(map[string]bool)
+ fset := token.NewFileSet()
+ for _, d := range dirs {
+ if d.IsDir() {
+ continue
+ }
+
+ name := d.Name()
+ ext := nameExt(name)
+
+ match, data, filename, err := ctxt.matchFile(p.Dir, name, true, allTags)
+ if err != nil {
+ return p, err
+ }
+ if !match {
+ if ext == ".go" {
+ p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+ }
+ continue
+ }
+
+ // Going to save the file. For non-Go files, can stop here.
+ switch ext {
+ case ".c":
+ p.CFiles = append(p.CFiles, name)
+ continue
+ case ".cc", ".cpp", ".cxx":
+ p.CXXFiles = append(p.CXXFiles, name)
+ continue
+ case ".m":
+ p.MFiles = append(p.MFiles, name)
+ continue
+ case ".h", ".hh", ".hpp", ".hxx":
+ p.HFiles = append(p.HFiles, name)
+ continue
+ case ".s":
+ p.SFiles = append(p.SFiles, name)
+ continue
+ case ".S":
+ Sfiles = append(Sfiles, name)
+ continue
+ case ".swig":
+ p.SwigFiles = append(p.SwigFiles, name)
+ continue
+ case ".swigcxx":
+ p.SwigCXXFiles = append(p.SwigCXXFiles, name)
+ continue
+ case ".syso":
+ // binary objects to add to package archive
+ // Likely of the form foo_windows.syso, but
+ // the name was vetted above with goodOSArchFile.
+ p.SysoFiles = append(p.SysoFiles, name)
+ continue
+ }
+
+ pf, err := parser.ParseFile(fset, filename, data, parser.ImportsOnly|parser.ParseComments)
+ if err != nil {
+ return p, err
+ }
+
+ pkg := pf.Name.Name
+ if pkg == "documentation" {
+ p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+ continue
+ }
+
+ isTest := strings.HasSuffix(name, "_test.go")
+ isXTest := false
+ if isTest && strings.HasSuffix(pkg, "_test") {
+ isXTest = true
+ pkg = pkg[:len(pkg)-len("_test")]
+ }
+
+ if p.Name == "" {
+ p.Name = pkg
+ firstFile = name
+ } else if pkg != p.Name {
+ return p, fmt.Errorf("found packages %s (%s) and %s (%s) in %s", p.Name, firstFile, pkg, name, p.Dir)
+ }
+ if pf.Doc != nil && p.Doc == "" {
+ p.Doc = doc.Synopsis(pf.Doc.Text())
+ }
+
+ if mode&ImportComment != 0 {
+ qcom, line := findImportComment(data)
+ if line != 0 {
+ com, err := strconv.Unquote(qcom)
+ if err != nil {
+ return p, fmt.Errorf("%s:%d: cannot parse import comment", filename, line)
+ }
+ if p.ImportComment == "" {
+ p.ImportComment = com
+ firstCommentFile = name
+ } else if p.ImportComment != com {
+ return p, fmt.Errorf("found import comments %q (%s) and %q (%s) in %s", p.ImportComment, firstCommentFile, com, name, p.Dir)
+ }
+ }
+ }
+
+ // Record imports and information about cgo.
+ isCgo := false
+ for _, decl := range pf.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ for _, dspec := range d.Specs {
+ spec, ok := dspec.(*ast.ImportSpec)
+ if !ok {
+ continue
+ }
+ quoted := spec.Path.Value
+ path, err := strconv.Unquote(quoted)
+ if err != nil {
+ log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
+ }
+ if isXTest {
+ xTestImported[path] = append(xTestImported[path], fset.Position(spec.Pos()))
+ } else if isTest {
+ testImported[path] = append(testImported[path], fset.Position(spec.Pos()))
+ } else {
+ imported[path] = append(imported[path], fset.Position(spec.Pos()))
+ }
+ if path == "C" {
+ if isTest {
+ return p, fmt.Errorf("use of cgo in test %s not supported", filename)
+ }
+ cg := spec.Doc
+ if cg == nil && len(d.Specs) == 1 {
+ cg = d.Doc
+ }
+ if cg != nil {
+ if err := ctxt.saveCgo(filename, p, cg); err != nil {
+ return p, err
+ }
+ }
+ isCgo = true
+ }
+ }
+ }
+ if isCgo {
+ allTags["cgo"] = true
+ if ctxt.CgoEnabled {
+ p.CgoFiles = append(p.CgoFiles, name)
+ } else {
+ p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+ }
+ } else if isXTest {
+ p.XTestGoFiles = append(p.XTestGoFiles, name)
+ } else if isTest {
+ p.TestGoFiles = append(p.TestGoFiles, name)
+ } else {
+ p.GoFiles = append(p.GoFiles, name)
+ }
+ }
+ if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
+ return p, &NoGoError{p.Dir}
+ }
+
+ for tag := range allTags {
+ p.AllTags = append(p.AllTags, tag)
+ }
+ sort.Strings(p.AllTags)
+
+ p.Imports, p.ImportPos = cleanImports(imported)
+ p.TestImports, p.TestImportPos = cleanImports(testImported)
+ p.XTestImports, p.XTestImportPos = cleanImports(xTestImported)
+
+ // add the .S files only if we are using cgo
+ // (which means gcc will compile them).
+ // The standard assemblers expect .s files.
+ if len(p.CgoFiles) > 0 {
+ p.SFiles = append(p.SFiles, Sfiles...)
+ sort.Strings(p.SFiles)
+ }
+
+ return p, pkgerr
+}
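+
+// A minimal use of Import, assuming a standard installation (the directories
+// reported depend on the local GOROOT and GOPATH):
+//
+//	p, err := Default.Import("fmt", "", 0)
+//	if err == nil {
+//		fmt.Println(p.Dir)     // e.g. /usr/local/go/src/fmt
+//		fmt.Println(p.GoFiles) // the buildable .go files in that directory
+//	}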
+
+func findImportComment(data []byte) (s string, line int) {
+ // expect keyword package
+ word, data := parseWord(data)
+ if string(word) != "package" {
+ return "", 0
+ }
+
+ // expect package name
+ _, data = parseWord(data)
+
+ // now ready for import comment, a // or /* */ comment
+ // beginning and ending on the current line.
+ for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') {
+ data = data[1:]
+ }
+
+ var comment []byte
+ switch {
+ case bytes.HasPrefix(data, slashSlash):
+ i := bytes.Index(data, newline)
+ if i < 0 {
+ i = len(data)
+ }
+ comment = data[2:i]
+ case bytes.HasPrefix(data, slashStar):
+ data = data[2:]
+ i := bytes.Index(data, starSlash)
+ if i < 0 {
+ // malformed comment
+ return "", 0
+ }
+ comment = data[:i]
+ if bytes.Contains(comment, newline) {
+ return "", 0
+ }
+ }
+ comment = bytes.TrimSpace(comment)
+
+ // split comment into `import`, `"pkg"`
+ word, arg := parseWord(comment)
+ if string(word) != "import" {
+ return "", 0
+ }
+
+ line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline)
+ return strings.TrimSpace(string(arg)), line
+}
+
+var (
+ slashSlash = []byte("//")
+ slashStar = []byte("/*")
+ starSlash = []byte("*/")
+ newline = []byte("\n")
+)
+
+// skipSpaceOrComment returns data with any leading spaces or comments removed.
+func skipSpaceOrComment(data []byte) []byte {
+ for len(data) > 0 {
+ switch data[0] {
+ case ' ', '\t', '\r', '\n':
+ data = data[1:]
+ continue
+ case '/':
+ if bytes.HasPrefix(data, slashSlash) {
+ i := bytes.Index(data, newline)
+ if i < 0 {
+ return nil
+ }
+ data = data[i+1:]
+ continue
+ }
+ if bytes.HasPrefix(data, slashStar) {
+ data = data[2:]
+ i := bytes.Index(data, starSlash)
+ if i < 0 {
+ return nil
+ }
+ data = data[i+2:]
+ continue
+ }
+ }
+ break
+ }
+ return data
+}
+
+// parseWord skips any leading spaces or comments in data
+// and then parses the beginning of data as an identifier or keyword,
+// returning that word and what remains after the word.
+func parseWord(data []byte) (word, rest []byte) {
+ data = skipSpaceOrComment(data)
+
+ // Parse past leading word characters.
+ rest = data
+ for {
+ r, size := utf8.DecodeRune(rest)
+ if unicode.IsLetter(r) || '0' <= r && r <= '9' || r == '_' {
+ rest = rest[size:]
+ continue
+ }
+ break
+ }
+
+ word = data[:len(data)-len(rest)]
+ if len(word) == 0 {
+ return nil, nil
+ }
+
+ return word, rest
+}
+
+// MatchFile reports whether the file with the given name in the given directory
+// matches the context and would be included in a Package created by ImportDir
+// of that directory.
+//
+// MatchFile considers the name of the file and may use ctxt.OpenFile to
+// read some or all of the file's content.
+func (ctxt *Context) MatchFile(dir, name string) (match bool, err error) {
+ match, _, _, err = ctxt.matchFile(dir, name, false, nil)
+ return
+}
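+
+// For example, in the default context on linux/amd64 (directory and file
+// names are hypothetical):
+//
+//	ok, err := Default.MatchFile("/home/user/src/foo", "foo_windows.go")
+//	// ok is false: the _windows suffix does not match GOOS.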
+
+// matchFile determines whether the file with the given name in the given directory
+// should be included in the package being constructed.
+// It returns the data read from the file.
+// If returnImports is true and name denotes a Go program, matchFile reads
+// until the end of the imports (and returns that data) even though it only
+// considers text until the first non-comment.
+// If allTags is non-nil, matchFile records any encountered build tag
+// by setting allTags[tag] = true.
+func (ctxt *Context) matchFile(dir, name string, returnImports bool, allTags map[string]bool) (match bool, data []byte, filename string, err error) {
+ if strings.HasPrefix(name, "_") ||
+ strings.HasPrefix(name, ".") {
+ return
+ }
+
+ i := strings.LastIndex(name, ".")
+ if i < 0 {
+ i = len(name)
+ }
+ ext := name[i:]
+
+ if !ctxt.goodOSArchFile(name, allTags) && !ctxt.UseAllFiles {
+ return
+ }
+
+ switch ext {
+ case ".go", ".c", ".cc", ".cxx", ".cpp", ".m", ".s", ".h", ".hh", ".hpp", ".hxx", ".S", ".swig", ".swigcxx":
+ // tentatively okay - read to make sure
+ case ".syso":
+ // binary, no reading
+ match = true
+ return
+ default:
+ // skip
+ return
+ }
+
+ filename = ctxt.joinPath(dir, name)
+ f, err := ctxt.openFile(filename)
+ if err != nil {
+ return
+ }
+
+ if strings.HasSuffix(filename, ".go") {
+ data, err = readImports(f, false)
+ } else {
+ data, err = readComments(f)
+ }
+ f.Close()
+ if err != nil {
+ err = fmt.Errorf("read %s: %v", filename, err)
+ return
+ }
+
+ // Look for +build comments to accept or reject the file.
+ if !ctxt.shouldBuild(data, allTags) && !ctxt.UseAllFiles {
+ return
+ }
+
+ match = true
+ return
+}
+
+func cleanImports(m map[string][]token.Position) ([]string, map[string][]token.Position) {
+ all := make([]string, 0, len(m))
+ for path := range m {
+ all = append(all, path)
+ }
+ sort.Strings(all)
+ return all, m
+}
+
+// Import is shorthand for Default.Import.
+func Import(path, srcDir string, mode ImportMode) (*Package, error) {
+ return Default.Import(path, srcDir, mode)
+}
+
+// ImportDir is shorthand for Default.ImportDir.
+func ImportDir(dir string, mode ImportMode) (*Package, error) {
+ return Default.ImportDir(dir, mode)
+}
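+
+// For example, to import the package in the current directory:
+//
+//	p, err := ImportDir(".", 0)
+//
+// which is equivalent to Default.ImportDir(".", 0).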
+
+var slashslash = []byte("//")
+
+// shouldBuild reports whether it is okay to use this file.
+// The rule is that in the file's leading run of // comments
+// and blank lines, which must be followed by a blank line
+// (to avoid including a Go package clause doc comment),
+// lines beginning with '// +build' are taken as build directives.
+//
+// The file is accepted only if each such line lists something
+// matching the file. For example:
+//
+// // +build windows linux
+//
+// marks the file as applicable only on Windows and Linux.
+//
+func (ctxt *Context) shouldBuild(content []byte, allTags map[string]bool) bool {
+ // Pass 1. Identify leading run of // comments and blank lines,
+ // which must be followed by a blank line.
+ end := 0
+ p := content
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if len(line) == 0 { // Blank line
+ end = len(content) - len(p)
+ continue
+ }
+ if !bytes.HasPrefix(line, slashslash) { // Not comment line
+ break
+ }
+ }
+ content = content[:end]
+
+ // Pass 2. Process each line in the run.
+ p = content
+ allok := true
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if bytes.HasPrefix(line, slashslash) {
+ line = bytes.TrimSpace(line[len(slashslash):])
+ if len(line) > 0 && line[0] == '+' {
+			// Looks like a "+build" comment line.
+ f := strings.Fields(string(line))
+ if f[0] == "+build" {
+ ok := false
+ for _, tok := range f[1:] {
+ if ctxt.match(tok, allTags) {
+ ok = true
+ }
+ }
+ if !ok {
+ allok = false
+ }
+ }
+ }
+ }
+ }
+
+ return allok
+}
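+
+// For example, with GOOS=linux and cgo disabled, a file beginning
+//
+//	// +build linux darwin
+//	// +build !cgo
+//
+//	package foo
+//
+// is accepted: the first line matches linux and the second matches because
+// cgo is off. Building with cgo enabled would reject the file.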
+
+// saveCgo saves the information from the #cgo lines in the import "C" comment.
+// These lines set CFLAGS, CPPFLAGS, CXXFLAGS and LDFLAGS and pkg-config directives
+// that affect the way cgo's C code is built.
+//
+// TODO(rsc): This duplicates code in cgo.
+// Once the dust settles, remove this code from cgo.
+func (ctxt *Context) saveCgo(filename string, di *Package, cg *ast.CommentGroup) error {
+ text := cg.Text()
+ for _, line := range strings.Split(text, "\n") {
+ orig := line
+
+ // Line is
+ // #cgo [GOOS/GOARCH...] LDFLAGS: stuff
+ //
+ line = strings.TrimSpace(line)
+ if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
+ continue
+ }
+
+ // Split at colon.
+ line = strings.TrimSpace(line[4:])
+ i := strings.Index(line, ":")
+ if i < 0 {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+ line, argstr := line[:i], line[i+1:]
+
+ // Parse GOOS/GOARCH stuff.
+ f := strings.Fields(line)
+ if len(f) < 1 {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+
+ cond, verb := f[:len(f)-1], f[len(f)-1]
+ if len(cond) > 0 {
+ ok := false
+ for _, c := range cond {
+ if ctxt.match(c, nil) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ continue
+ }
+ }
+
+ args, err := splitQuoted(argstr)
+ if err != nil {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+ for _, arg := range args {
+ if !safeCgoName(arg) {
+ return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg)
+ }
+ }
+
+ switch verb {
+ case "CFLAGS":
+ di.CgoCFLAGS = append(di.CgoCFLAGS, args...)
+ case "CPPFLAGS":
+ di.CgoCPPFLAGS = append(di.CgoCPPFLAGS, args...)
+ case "CXXFLAGS":
+ di.CgoCXXFLAGS = append(di.CgoCXXFLAGS, args...)
+ case "LDFLAGS":
+ di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...)
+ case "pkg-config":
+ di.CgoPkgConfig = append(di.CgoPkgConfig, args...)
+ default:
+ return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig)
+ }
+ }
+ return nil
+}
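+
+// For example, the directive
+//
+//	#cgo linux LDFLAGS: -lm
+//
+// appends "-lm" to di.CgoLDFLAGS, but only when the build context matches
+// the "linux" condition.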
+
+// NOTE: $ is not safe for the shell, but it is allowed here because of linker options like -Wl,$ORIGIN.
+// We never pass these arguments to a shell (just to programs we construct argv for), so this should be okay.
+// See golang.org/issue/6038.
+var safeBytes = []byte("+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:$")
+
+func safeCgoName(s string) bool {
+ if s == "" {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; c < 0x80 && bytes.IndexByte(safeBytes, c) < 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// splitQuoted splits the string s around each instance of one or more consecutive
+// white space characters while taking into account quotes and escaping, and
+// returns an array of substrings of s or an empty list if s contains only white space.
+// Single quotes and double quotes are recognized to prevent splitting within the
+// quoted region, and are removed from the resulting substrings. If a quote in s
+// isn't closed, err will be set and r will have the unclosed argument as the
+// last element. The backslash is used for escaping.
+//
+// For example, the following string:
+//
+// a b:"c d" 'e''f' "g\""
+//
+// Would be parsed as:
+//
+// []string{"a", "b:c d", "ef", `g"`}
+//
+func splitQuoted(s string) (r []string, err error) {
+ var args []string
+ arg := make([]rune, len(s))
+ escaped := false
+ quoted := false
+ quote := '\x00'
+ i := 0
+ for _, rune := range s {
+ switch {
+ case escaped:
+ escaped = false
+ case rune == '\\':
+ escaped = true
+ continue
+ case quote != '\x00':
+ if rune == quote {
+ quote = '\x00'
+ continue
+ }
+ case rune == '"' || rune == '\'':
+ quoted = true
+ quote = rune
+ continue
+ case unicode.IsSpace(rune):
+ if quoted || i > 0 {
+ quoted = false
+ args = append(args, string(arg[:i]))
+ i = 0
+ }
+ continue
+ }
+ arg[i] = rune
+ i++
+ }
+ if quoted || i > 0 {
+ args = append(args, string(arg[:i]))
+ }
+ if quote != 0 {
+ err = errors.New("unclosed quote")
+ } else if escaped {
+ err = errors.New("unfinished escaping")
+ }
+ return args, err
+}
+
+// match returns true if the name is one of:
+//
+// $GOOS
+// $GOARCH
+// cgo (if cgo is enabled)
+// !cgo (if cgo is disabled)
+// ctxt.Compiler
+// !ctxt.Compiler
+// tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags)
+// !tag (if tag is not listed in ctxt.BuildTags or ctxt.ReleaseTags)
+// a comma-separated list of any of these
+//
+func (ctxt *Context) match(name string, allTags map[string]bool) bool {
+ if name == "" {
+ if allTags != nil {
+ allTags[name] = true
+ }
+ return false
+ }
+ if i := strings.Index(name, ","); i >= 0 {
+ // comma-separated list
+ ok1 := ctxt.match(name[:i], allTags)
+ ok2 := ctxt.match(name[i+1:], allTags)
+ return ok1 && ok2
+ }
+ if strings.HasPrefix(name, "!!") { // bad syntax, reject always
+ return false
+ }
+ if strings.HasPrefix(name, "!") { // negation
+ return len(name) > 1 && !ctxt.match(name[1:], allTags)
+ }
+
+ if allTags != nil {
+ allTags[name] = true
+ }
+
+ // Tags must be letters, digits, underscores or dots.
+ // Unlike in Go identifiers, all digits are fine (e.g., "386").
+ for _, c := range name {
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' {
+ return false
+ }
+ }
+
+ // special tags
+ if ctxt.CgoEnabled && name == "cgo" {
+ return true
+ }
+ if name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler {
+ return true
+ }
+ if ctxt.GOOS == "android" && name == "linux" {
+ return true
+ }
+
+ // other tags
+ for _, tag := range ctxt.BuildTags {
+ if tag == name {
+ return true
+ }
+ }
+ for _, tag := range ctxt.ReleaseTags {
+ if tag == name {
+ return true
+ }
+ }
+
+ return false
+}
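+
+// For example, in a context with GOOS=linux, GOARCH=amd64 and cgo enabled:
+//
+//	ctxt.match("linux,amd64", nil) // true: both comma-separated terms match
+//	ctxt.match("!windows", nil)    // true: negation of a non-matching tag
+//	ctxt.match("linux,386", nil)   // false: 386 does not match GOARCH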
+
+// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH
+// suffix which does not match the current system.
+// The recognized name formats are:
+//
+// name_$(GOOS).*
+// name_$(GOARCH).*
+// name_$(GOOS)_$(GOARCH).*
+// name_$(GOOS)_test.*
+// name_$(GOARCH)_test.*
+// name_$(GOOS)_$(GOARCH)_test.*
+//
+// An exception: if GOOS=android, then files with GOOS=linux are also matched.
+func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool {
+ if dot := strings.Index(name, "."); dot != -1 {
+ name = name[:dot]
+ }
+ l := strings.Split(name, "_")
+ if n := len(l); n > 0 && l[n-1] == "test" {
+ l = l[:n-1]
+ }
+ n := len(l)
+ if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] {
+ if allTags != nil {
+ allTags[l[n-2]] = true
+ allTags[l[n-1]] = true
+ }
+ if l[n-1] != ctxt.GOARCH {
+ return false
+ }
+ if ctxt.GOOS == "android" && l[n-2] == "linux" {
+ return true
+ }
+ return l[n-2] == ctxt.GOOS
+ }
+ if n >= 1 && knownOS[l[n-1]] {
+ if allTags != nil {
+ allTags[l[n-1]] = true
+ }
+ if ctxt.GOOS == "android" && l[n-1] == "linux" {
+ return true
+ }
+ return l[n-1] == ctxt.GOOS
+ }
+ if n >= 1 && knownArch[l[n-1]] {
+ if allTags != nil {
+ allTags[l[n-1]] = true
+ }
+ return l[n-1] == ctxt.GOARCH
+ }
+ return true
+}
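+
+// For example, with GOOS=linux and GOARCH=arm:
+//
+//	ctxt.goodOSArchFile("foo_linux.go", nil)       // true
+//	ctxt.goodOSArchFile("foo_windows_386.go", nil) // false: wrong GOOS/GOARCH
+//	ctxt.goodOSArchFile("foo_arm_test.go", nil)    // true: _test is stripped first
+//	ctxt.goodOSArchFile("foo.go", nil)             // true: no constraint in the name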
+
+var knownOS = make(map[string]bool)
+var knownArch = make(map[string]bool)
+
+func init() {
+ for _, v := range strings.Fields(goosList) {
+ knownOS[v] = true
+ }
+ for _, v := range strings.Fields(goarchList) {
+ knownArch[v] = true
+ }
+}
+
+// ToolDir is the directory containing build tools.
+var ToolDir = filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH)
+
+// IsLocalImport reports whether the import path is
+// a local import path, like ".", "..", "./foo", or "../foo".
+func IsLocalImport(path string) bool {
+ return path == "." || path == ".." ||
+ strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../")
+}
+
+// ArchChar returns the architecture character for the given goarch.
+// For example, ArchChar("amd64") returns "6".
+func ArchChar(goarch string) (string, error) {
+ switch goarch {
+ case "386":
+ return "8", nil
+ case "amd64", "amd64p32":
+ return "6", nil
+ case "arm":
+ return "5", nil
+ }
+ return "", errors.New("unsupported GOARCH " + goarch)
+}
diff --git a/src/go/build/build_test.go b/src/go/build/build_test.go
new file mode 100644
index 000000000..004010113
--- /dev/null
+++ b/src/go/build/build_test.go
@@ -0,0 +1,205 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestMatch(t *testing.T) {
+ ctxt := Default
+ what := "default"
+ match := func(tag string, want map[string]bool) {
+ m := make(map[string]bool)
+ if !ctxt.match(tag, m) {
+ t.Errorf("%s context should match %s, does not", what, tag)
+ }
+ if !reflect.DeepEqual(m, want) {
+ t.Errorf("%s tags = %v, want %v", tag, m, want)
+ }
+ }
+ nomatch := func(tag string, want map[string]bool) {
+ m := make(map[string]bool)
+ if ctxt.match(tag, m) {
+ t.Errorf("%s context should NOT match %s, does", what, tag)
+ }
+ if !reflect.DeepEqual(m, want) {
+ t.Errorf("%s tags = %v, want %v", tag, m, want)
+ }
+ }
+
+ match(runtime.GOOS+","+runtime.GOARCH, map[string]bool{runtime.GOOS: true, runtime.GOARCH: true})
+ match(runtime.GOOS+","+runtime.GOARCH+",!foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true})
+ nomatch(runtime.GOOS+","+runtime.GOARCH+",foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true})
+
+ what = "modified"
+ ctxt.BuildTags = []string{"foo"}
+ match(runtime.GOOS+","+runtime.GOARCH, map[string]bool{runtime.GOOS: true, runtime.GOARCH: true})
+ match(runtime.GOOS+","+runtime.GOARCH+",foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true})
+ nomatch(runtime.GOOS+","+runtime.GOARCH+",!foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true})
+ match(runtime.GOOS+","+runtime.GOARCH+",!bar", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "bar": true})
+ nomatch(runtime.GOOS+","+runtime.GOARCH+",bar", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "bar": true})
+ nomatch("!", map[string]bool{})
+}
+
+func TestDotSlashImport(t *testing.T) {
+ p, err := ImportDir("testdata/other", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(p.Imports) != 1 || p.Imports[0] != "./file" {
+ t.Fatalf("testdata/other: Imports=%v, want [./file]", p.Imports)
+ }
+
+ p1, err := Import("./file", "testdata/other", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p1.Name != "file" {
+ t.Fatalf("./file: Name=%q, want %q", p1.Name, "file")
+ }
+ dir := filepath.Clean("testdata/other/file") // Clean to use \ on Windows
+ if p1.Dir != dir {
+		t.Fatalf("./file: Dir=%q, want %q", p1.Dir, dir)
+ }
+}
+
+func TestEmptyImport(t *testing.T) {
+ p, err := Import("", Default.GOROOT, FindOnly)
+ if err == nil {
+ t.Fatal(`Import("") returned nil error.`)
+ }
+ if p == nil {
+ t.Fatal(`Import("") returned nil package.`)
+ }
+ if p.ImportPath != "" {
+ t.Fatalf("ImportPath=%q, want %q.", p.ImportPath, "")
+ }
+}
+
+func TestLocalDirectory(t *testing.T) {
+ cwd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ p, err := ImportDir(cwd, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p.ImportPath != "go/build" {
+ t.Fatalf("ImportPath=%q, want %q", p.ImportPath, "go/build")
+ }
+}
+
+func TestShouldBuild(t *testing.T) {
+ const file1 = "// +build tag1\n\n" +
+ "package main\n"
+ want1 := map[string]bool{"tag1": true}
+
+ const file2 = "// +build cgo\n\n" +
+ "// This package implements parsing of tags like\n" +
+ "// +build tag1\n" +
+ "package build"
+ want2 := map[string]bool{"cgo": true}
+
+ const file3 = "// Copyright The Go Authors.\n\n" +
+ "package build\n\n" +
+ "// shouldBuild checks tags given by lines of the form\n" +
+ "// +build tag\n" +
+ "func shouldBuild(content []byte)\n"
+ want3 := map[string]bool{}
+
+ ctx := &Context{BuildTags: []string{"tag1"}}
+ m := map[string]bool{}
+ if !ctx.shouldBuild([]byte(file1), m) {
+ t.Errorf("shouldBuild(file1) = false, want true")
+ }
+ if !reflect.DeepEqual(m, want1) {
+		t.Errorf("shouldBuild(file1) tags = %v, want %v", m, want1)
+ }
+
+ m = map[string]bool{}
+ if ctx.shouldBuild([]byte(file2), m) {
+		t.Errorf("shouldBuild(file2) = true, want false")
+ }
+ if !reflect.DeepEqual(m, want2) {
+		t.Errorf("shouldBuild(file2) tags = %v, want %v", m, want2)
+ }
+
+ m = map[string]bool{}
+ ctx = &Context{BuildTags: nil}
+ if !ctx.shouldBuild([]byte(file3), m) {
+ t.Errorf("shouldBuild(file3) = false, want true")
+ }
+ if !reflect.DeepEqual(m, want3) {
+		t.Errorf("shouldBuild(file3) tags = %v, want %v", m, want3)
+ }
+}
+
+type readNopCloser struct {
+ io.Reader
+}
+
+func (r readNopCloser) Close() error {
+ return nil
+}
+
+var (
+ ctxtP9 = Context{GOARCH: "arm", GOOS: "plan9"}
+ ctxtAndroid = Context{GOARCH: "arm", GOOS: "android"}
+)
+
+var matchFileTests = []struct {
+ ctxt Context
+ name string
+ data string
+ match bool
+}{
+ {ctxtP9, "foo_arm.go", "", true},
+ {ctxtP9, "foo1_arm.go", "// +build linux\n\npackage main\n", false},
+ {ctxtP9, "foo_darwin.go", "", false},
+ {ctxtP9, "foo.go", "", true},
+ {ctxtP9, "foo1.go", "// +build linux\n\npackage main\n", false},
+ {ctxtP9, "foo.badsuffix", "", false},
+ {ctxtAndroid, "foo_linux.go", "", true},
+ {ctxtAndroid, "foo_android.go", "", true},
+ {ctxtAndroid, "foo_plan9.go", "", false},
+}
+
+func TestMatchFile(t *testing.T) {
+ for _, tt := range matchFileTests {
+ ctxt := tt.ctxt
+ ctxt.OpenFile = func(path string) (r io.ReadCloser, err error) {
+ if path != "x+"+tt.name {
+ t.Fatalf("OpenFile asked for %q, expected %q", path, "x+"+tt.name)
+ }
+ return &readNopCloser{strings.NewReader(tt.data)}, nil
+ }
+ ctxt.JoinPath = func(elem ...string) string {
+ return strings.Join(elem, "+")
+ }
+ match, err := ctxt.MatchFile("x", tt.name)
+ if match != tt.match || err != nil {
+ t.Fatalf("MatchFile(%q) = %v, %v, want %v, nil", tt.name, match, err, tt.match)
+ }
+ }
+}
+
+func TestImportCmd(t *testing.T) {
+ p, err := Import("cmd/internal/objfile", "", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !strings.HasSuffix(filepath.ToSlash(p.Dir), "src/cmd/internal/objfile") {
+ t.Fatalf("Import cmd/internal/objfile returned Dir=%q, want %q", filepath.ToSlash(p.Dir), ".../src/cmd/internal/objfile")
+ }
+}
diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
new file mode 100644
index 000000000..b74595ea8
--- /dev/null
+++ b/src/go/build/deps_test.go
@@ -0,0 +1,443 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file exercises the import parser but also checks that
+// some low-level packages do not have new dependencies added.
+
+package build
+
+import (
+ "runtime"
+ "sort"
+ "testing"
+)
+
+// pkgDeps defines the expected dependencies between packages in
+// the Go source tree. It is a statement of policy.
+// Changes should not be made to this map without prior discussion.
+//
+// The map contains two kinds of entries:
+// 1) Lower-case keys are standard import paths and list the
+// allowed imports in that package.
+// 2) Upper-case keys define aliases for package sets, which can then
+// be used as dependencies by other rules.
+//
+// DO NOT CHANGE THIS DATA TO FIX BUILDS.
+//
+var pkgDeps = map[string][]string{
+ // L0 is the lowest level, core, nearly unavoidable packages.
+ "errors": {},
+ "io": {"errors", "sync"},
+ "runtime": {"unsafe"},
+ "sync": {"runtime", "sync/atomic", "unsafe"},
+ "sync/atomic": {"unsafe"},
+ "unsafe": {},
+
+ "L0": {
+ "errors",
+ "io",
+ "runtime",
+ "sync",
+ "sync/atomic",
+ "unsafe",
+ },
+
+ // L1 adds simple functions and strings processing,
+ // but not Unicode tables.
+ "math": {"unsafe"},
+ "math/cmplx": {"math"},
+ "math/rand": {"L0", "math"},
+ "sort": {},
+ "strconv": {"L0", "unicode/utf8", "math"},
+ "unicode/utf16": {},
+ "unicode/utf8": {},
+
+ "L1": {
+ "L0",
+ "math",
+ "math/cmplx",
+ "math/rand",
+ "sort",
+ "strconv",
+ "unicode/utf16",
+ "unicode/utf8",
+ },
+
+ // L2 adds Unicode and strings processing.
+ "bufio": {"L0", "unicode/utf8", "bytes"},
+ "bytes": {"L0", "unicode", "unicode/utf8"},
+ "path": {"L0", "unicode/utf8", "strings"},
+ "strings": {"L0", "unicode", "unicode/utf8"},
+ "unicode": {},
+
+ "L2": {
+ "L1",
+ "bufio",
+ "bytes",
+ "path",
+ "strings",
+ "unicode",
+ },
+
+ // L3 adds reflection and some basic utility packages
+ // and interface definitions, but nothing that makes
+ // system calls.
+ "crypto": {"L2", "hash"}, // interfaces
+ "crypto/cipher": {"L2", "crypto/subtle"}, // interfaces
+ "crypto/subtle": {},
+ "encoding/base32": {"L2"},
+ "encoding/base64": {"L2"},
+ "encoding/binary": {"L2", "reflect"},
+ "hash": {"L2"}, // interfaces
+ "hash/adler32": {"L2", "hash"},
+ "hash/crc32": {"L2", "hash"},
+ "hash/crc64": {"L2", "hash"},
+ "hash/fnv": {"L2", "hash"},
+ "image": {"L2", "image/color"}, // interfaces
+ "image/color": {"L2"}, // interfaces
+ "image/color/palette": {"L2", "image/color"},
+ "reflect": {"L2"},
+
+ "L3": {
+ "L2",
+ "crypto",
+ "crypto/cipher",
+ "crypto/subtle",
+ "encoding/base32",
+ "encoding/base64",
+ "encoding/binary",
+ "hash",
+ "hash/adler32",
+ "hash/crc32",
+ "hash/crc64",
+ "hash/fnv",
+ "image",
+ "image/color",
+ "image/color/palette",
+ "reflect",
+ },
+
+ // End of linear dependency definitions.
+
+ // Operating system access.
+ "syscall": {"L0", "unicode/utf16"},
+ "time": {"L0", "syscall"},
+ "os": {"L1", "os", "syscall", "time"},
+ "path/filepath": {"L2", "os", "syscall"},
+ "io/ioutil": {"L2", "os", "path/filepath", "time"},
+ "os/exec": {"L2", "os", "path/filepath", "syscall"},
+ "os/signal": {"L2", "os", "syscall"},
+
+ // OS enables basic operating system functionality,
+ // but not direct use of package syscall, nor os/signal.
+ "OS": {
+ "io/ioutil",
+ "os",
+ "os/exec",
+ "path/filepath",
+ "time",
+ },
+
+ // Formatted I/O: few dependencies (L1) but we must add reflect.
+ "fmt": {"L1", "os", "reflect"},
+ "log": {"L1", "os", "fmt", "time"},
+
+ // Packages used by testing must be low-level (L2+fmt).
+ "regexp": {"L2", "regexp/syntax"},
+ "regexp/syntax": {"L2"},
+ "runtime/debug": {"L2", "fmt", "io/ioutil", "os", "time"},
+ "runtime/pprof": {"L2", "fmt", "text/tabwriter"},
+ "text/tabwriter": {"L2"},
+
+ "testing": {"L2", "flag", "fmt", "os", "runtime/pprof", "time"},
+ "testing/iotest": {"L2", "log"},
+ "testing/quick": {"L2", "flag", "fmt", "reflect"},
+
+ // L4 is defined as L3+fmt+log+time, because in general once
+ // you're using L3 packages, use of fmt, log, or time is not a big deal.
+ "L4": {
+ "L3",
+ "fmt",
+ "log",
+ "time",
+ },
+
+ // Go parser.
+ "go/ast": {"L4", "OS", "go/scanner", "go/token"},
+ "go/doc": {"L4", "go/ast", "go/token", "regexp", "text/template"},
+ "go/parser": {"L4", "OS", "go/ast", "go/scanner", "go/token"},
+ "go/printer": {"L4", "OS", "go/ast", "go/scanner", "go/token", "text/tabwriter"},
+ "go/scanner": {"L4", "OS", "go/token"},
+ "go/token": {"L4"},
+
+ "GOPARSER": {
+ "go/ast",
+ "go/doc",
+ "go/parser",
+ "go/printer",
+ "go/scanner",
+ "go/token",
+ },
+
+ // One of a kind.
+ "archive/tar": {"L4", "OS", "syscall"},
+ "archive/zip": {"L4", "OS", "compress/flate"},
+ "compress/bzip2": {"L4"},
+ "compress/flate": {"L4"},
+ "compress/gzip": {"L4", "compress/flate"},
+ "compress/lzw": {"L4"},
+ "compress/zlib": {"L4", "compress/flate"},
+ "database/sql": {"L4", "container/list", "database/sql/driver"},
+ "database/sql/driver": {"L4", "time"},
+ "debug/dwarf": {"L4"},
+ "debug/elf": {"L4", "OS", "debug/dwarf"},
+ "debug/gosym": {"L4"},
+ "debug/macho": {"L4", "OS", "debug/dwarf"},
+ "debug/pe": {"L4", "OS", "debug/dwarf"},
+ "encoding": {"L4"},
+ "encoding/ascii85": {"L4"},
+ "encoding/asn1": {"L4", "math/big"},
+ "encoding/csv": {"L4"},
+ "encoding/gob": {"L4", "OS", "encoding"},
+ "encoding/hex": {"L4"},
+ "encoding/json": {"L4", "encoding"},
+ "encoding/pem": {"L4"},
+ "encoding/xml": {"L4", "encoding"},
+ "flag": {"L4", "OS"},
+ "go/build": {"L4", "OS", "GOPARSER"},
+ "html": {"L4"},
+ "image/draw": {"L4"},
+ "image/gif": {"L4", "compress/lzw", "image/color/palette", "image/draw"},
+ "image/jpeg": {"L4"},
+ "image/png": {"L4", "compress/zlib"},
+ "index/suffixarray": {"L4", "regexp"},
+ "math/big": {"L4"},
+ "mime": {"L4", "OS", "syscall"},
+ "net/url": {"L4"},
+ "text/scanner": {"L4", "OS"},
+ "text/template/parse": {"L4"},
+
+ "html/template": {
+ "L4", "OS", "encoding/json", "html", "text/template",
+ "text/template/parse",
+ },
+ "text/template": {
+ "L4", "OS", "net/url", "text/template/parse",
+ },
+
+ // Cgo.
+ "runtime/cgo": {"L0", "C"},
+ "CGO": {"C", "runtime/cgo"},
+
+ // Fake entry to satisfy the pseudo-import "C"
+ // that shows up in programs that use cgo.
+ "C": {},
+
+ // Plan 9 alone needs io/ioutil and os.
+ "os/user": {"L4", "CGO", "io/ioutil", "os", "syscall"},
+
+ // Basic networking.
+ // Because net must be used by any package that wants to
+ // do networking portably, it must have a small dependency set: just L1+basic os.
+ "net": {"L1", "CGO", "os", "syscall", "time"},
+
+ // NET enables use of basic network-related packages.
+ "NET": {
+ "net",
+ "mime",
+ "net/textproto",
+ "net/url",
+ },
+
+ // Uses of networking.
+ "log/syslog": {"L4", "OS", "net"},
+ "net/mail": {"L4", "NET", "OS"},
+ "net/textproto": {"L4", "OS", "net"},
+
+ // Core crypto.
+ "crypto/aes": {"L3"},
+ "crypto/des": {"L3"},
+ "crypto/hmac": {"L3"},
+ "crypto/md5": {"L3"},
+ "crypto/rc4": {"L3"},
+ "crypto/sha1": {"L3"},
+ "crypto/sha256": {"L3"},
+ "crypto/sha512": {"L3"},
+
+ "CRYPTO": {
+ "crypto/aes",
+ "crypto/des",
+ "crypto/hmac",
+ "crypto/md5",
+ "crypto/rc4",
+ "crypto/sha1",
+ "crypto/sha256",
+ "crypto/sha512",
+ },
+
+ // Random byte, number generation.
+ // This would be part of core crypto except that it imports
+ // math/big, which imports fmt.
+ "crypto/rand": {"L4", "CRYPTO", "OS", "math/big", "syscall", "internal/syscall"},
+
+ // Mathematical crypto: dependencies on fmt (L4) and math/big.
+ // We could avoid some of the fmt, but math/big imports fmt anyway.
+ "crypto/dsa": {"L4", "CRYPTO", "math/big"},
+ "crypto/ecdsa": {"L4", "CRYPTO", "crypto/elliptic", "math/big", "encoding/asn1"},
+ "crypto/elliptic": {"L4", "CRYPTO", "math/big"},
+ "crypto/rsa": {"L4", "CRYPTO", "crypto/rand", "math/big"},
+
+ "CRYPTO-MATH": {
+ "CRYPTO",
+ "crypto/dsa",
+ "crypto/ecdsa",
+ "crypto/elliptic",
+ "crypto/rand",
+ "crypto/rsa",
+ "encoding/asn1",
+ "math/big",
+ },
+
+ // SSL/TLS.
+ "crypto/tls": {
+ "L4", "CRYPTO-MATH", "CGO", "OS",
+ "container/list", "crypto/x509", "encoding/pem", "net", "syscall",
+ },
+ "crypto/x509": {
+ "L4", "CRYPTO-MATH", "OS", "CGO",
+ "crypto/x509/pkix", "encoding/pem", "encoding/hex", "net", "syscall",
+ },
+ "crypto/x509/pkix": {"L4", "CRYPTO-MATH"},
+
+ // Simple net+crypto-aware packages.
+ "mime/multipart": {"L4", "OS", "mime", "crypto/rand", "net/textproto"},
+ "net/smtp": {"L4", "CRYPTO", "NET", "crypto/tls"},
+
+ // HTTP, kingpin of dependencies.
+ "net/http": {
+ "L4", "NET", "OS",
+ "compress/gzip", "crypto/tls", "mime/multipart", "runtime/debug",
+ "net/http/internal",
+ },
+
+ // HTTP-using packages.
+ "expvar": {"L4", "OS", "encoding/json", "net/http"},
+ "net/http/cgi": {"L4", "NET", "OS", "crypto/tls", "net/http", "regexp"},
+ "net/http/fcgi": {"L4", "NET", "OS", "net/http", "net/http/cgi"},
+ "net/http/httptest": {"L4", "NET", "OS", "crypto/tls", "flag", "net/http"},
+ "net/http/httputil": {"L4", "NET", "OS", "net/http", "net/http/internal"},
+ "net/http/pprof": {"L4", "OS", "html/template", "net/http", "runtime/pprof"},
+ "net/rpc": {"L4", "NET", "encoding/gob", "html/template", "net/http"},
+ "net/rpc/jsonrpc": {"L4", "NET", "encoding/json", "net/rpc"},
+}
+
+// isMacro reports whether p is a package dependency macro
+// (uppercase name).
+func isMacro(p string) bool {
+ return 'A' <= p[0] && p[0] <= 'Z'
+}
+
+func allowed(pkg string) map[string]bool {
+ m := map[string]bool{}
+ var allow func(string)
+ allow = func(p string) {
+ if m[p] {
+ return
+ }
+ m[p] = true // set even for macros, to avoid loop on cycle
+
+ // Upper-case names are macro-expanded.
+ if isMacro(p) {
+ for _, pp := range pkgDeps[p] {
+ allow(pp)
+ }
+ }
+ }
+ for _, pp := range pkgDeps[pkg] {
+ allow(pp)
+ }
+ return m
+}
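+
+// For example, allowed("net") expands the "L1" and "CGO" macros, so the
+// returned set includes (among others) "errors", "io", "math", "strconv",
+// "runtime/cgo", "os", "syscall", and "time".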
+
+var bools = []bool{false, true}
+var geese = []string{"android", "darwin", "dragonfly", "freebsd", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows"}
+var goarches = []string{"386", "amd64", "arm"}
+
+type osPkg struct {
+ goos, pkg string
+}
+
+// allowedErrors are the operating systems and packages known to contain errors
+// (currently just "no Go source files").
+var allowedErrors = map[osPkg]bool{
+ osPkg{"windows", "log/syslog"}: true,
+ osPkg{"plan9", "log/syslog"}: true,
+}
+
+func TestDependencies(t *testing.T) {
+ if runtime.GOOS == "nacl" {
+ // NaCl tests run in a limited file system and we do not
+ // provide access to every source file.
+ t.Skip("skipping on NaCl")
+ }
+ var all []string
+
+ for k := range pkgDeps {
+ all = append(all, k)
+ }
+ sort.Strings(all)
+
+ ctxt := Default
+ test := func(mustImport bool) {
+ for _, pkg := range all {
+ if isMacro(pkg) {
+ continue
+ }
+ if pkg == "runtime/cgo" && !ctxt.CgoEnabled {
+ continue
+ }
+ p, err := ctxt.Import(pkg, "", 0)
+ if err != nil {
+ if allowedErrors[osPkg{ctxt.GOOS, pkg}] {
+ continue
+ }
+ if !ctxt.CgoEnabled && pkg == "runtime/cgo" {
+ continue
+ }
+ // Some of the combinations we try might not
+ // be reasonable (like arm,plan9,cgo), so ignore
+ // errors for the auto-generated combinations.
+ if !mustImport {
+ continue
+ }
+ t.Errorf("%s/%s/cgo=%v %v", ctxt.GOOS, ctxt.GOARCH, ctxt.CgoEnabled, err)
+ continue
+ }
+ ok := allowed(pkg)
+ var bad []string
+ for _, imp := range p.Imports {
+ if !ok[imp] {
+ bad = append(bad, imp)
+ }
+ }
+ if bad != nil {
+ t.Errorf("%s/%s/cgo=%v unexpected dependency: %s imports %v", ctxt.GOOS, ctxt.GOARCH, ctxt.CgoEnabled, pkg, bad)
+ }
+ }
+ }
+ test(true)
+
+ if testing.Short() {
+ t.Logf("skipping other systems")
+ return
+ }
+
+ for _, ctxt.GOOS = range geese {
+ for _, ctxt.GOARCH = range goarches {
+ for _, ctxt.CgoEnabled = range bools {
+ test(false)
+ }
+ }
+ }
+}
diff --git a/src/go/build/doc.go b/src/go/build/doc.go
new file mode 100644
index 000000000..d78ef3f1c
--- /dev/null
+++ b/src/go/build/doc.go
@@ -0,0 +1,140 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package build gathers information about Go packages.
+//
+// Go Path
+//
+// The Go path is a list of directory trees containing Go source code.
+// It is consulted to resolve imports that cannot be found in the standard
+// Go tree. The default path is the value of the GOPATH environment
+// variable, interpreted as a path list appropriate to the operating system
+// (on Unix, the variable is a colon-separated string;
+// on Windows, a semicolon-separated string;
+// on Plan 9, a list).
+//
+// Each directory listed in the Go path must have a prescribed structure:
+//
+// The src/ directory holds source code. The path below 'src' determines
+// the import path or executable name.
+//
+// The pkg/ directory holds installed package objects.
+// As in the Go tree, each target operating system and
+// architecture pair has its own subdirectory of pkg
+// (pkg/GOOS_GOARCH).
+//
+// If DIR is a directory listed in the Go path, a package with
+// source in DIR/src/foo/bar can be imported as "foo/bar" and
+// has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a"
+// (or, for gccgo, "DIR/pkg/gccgo/foo/libbar.a").
+//
+// The bin/ directory holds compiled commands.
+// Each command is named for its source directory, but only
+// using the final element, not the entire path. That is, the
+// command with source in DIR/src/foo/quux is installed into
+// DIR/bin/quux, not DIR/bin/foo/quux. The foo/ is stripped
+// so that you can add DIR/bin to your PATH to get at the
+// installed commands.
+//
+// Here's an example directory layout:
+//
+// GOPATH=/home/user/gocode
+//
+// /home/user/gocode/
+// src/
+// foo/
+// bar/ (go code in package bar)
+// x.go
+// quux/ (go code in package main)
+// y.go
+// bin/
+// quux (installed command)
+// pkg/
+// linux_amd64/
+// foo/
+// bar.a (installed package object)
+//
+// Build Constraints
+//
+// A build constraint, also known as a build tag, is a line comment that begins
+//
+// // +build
+//
+// that lists the conditions under which a file should be included in the package.
+// Constraints may appear in any kind of source file (not just Go), but
+// they must appear near the top of the file, preceded
+// only by blank lines and other line comments. These rules mean that in Go
+// files a build constraint must appear before the package clause.
+//
+// To distinguish build constraints from package documentation, a series of
+// build constraints must be followed by a blank line.
+//
+// A build constraint is evaluated as the OR of space-separated options;
+// each option evaluates as the AND of its comma-separated terms;
+// and each term is an alphanumeric word or, preceded by !, its negation.
+// That is, the build constraint:
+//
+// // +build linux,386 darwin,!cgo
+//
+// corresponds to the boolean formula:
+//
+// (linux AND 386) OR (darwin AND (NOT cgo))
+//
+// A file may have multiple build constraints. The overall constraint is the AND
+// of the individual constraints. That is, the build constraints:
+//
+// // +build linux darwin
+// // +build 386
+//
+// corresponds to the boolean formula:
+//
+// (linux OR darwin) AND 386
+//
+// During a particular build, the following words are satisfied:
+//
+// - the target operating system, as spelled by runtime.GOOS
+// - the target architecture, as spelled by runtime.GOARCH
+// - the compiler being used, either "gc" or "gccgo"
+// - "cgo", if ctxt.CgoEnabled is true
+// - "go1.1", from Go version 1.1 onward
+// - "go1.2", from Go version 1.2 onward
+// - "go1.3", from Go version 1.3 onward
+// - any additional words listed in ctxt.BuildTags
+//
+// If a file's name, after stripping the extension and a possible _test suffix,
+// matches any of the following patterns:
+// *_GOOS
+// *_GOARCH
+// *_GOOS_GOARCH
+// (example: source_windows_amd64.go) or the literals:
+// GOOS
+// GOARCH
+// (example: windows.go) where GOOS and GOARCH represent any known operating
+// system and architecture values respectively, then the file is considered to
+// have an implicit build constraint requiring those terms.
+//
+// To keep a file from being considered for the build:
+//
+// // +build ignore
+//
+// (any other unsatisfied word will work as well, but ``ignore'' is conventional.)
+//
+// To build a file only when using cgo, and only on Linux and OS X:
+//
+// // +build linux,cgo darwin,cgo
+//
+// Such a file is usually paired with another file implementing the
+// default functionality for other systems, which in this case would
+// carry the constraint:
+//
+// // +build !linux,!darwin !cgo
+//
+// Naming a file dns_windows.go will cause it to be included only when
+// building the package for Windows; similarly, math_386.s will be included
+// only when building the package for 32-bit x86.
+//
+// Using GOOS=android matches build tags and files as for GOOS=linux
+// in addition to android tags and files.
+//
+package build
diff --git a/src/go/build/read.go b/src/go/build/read.go
new file mode 100644
index 000000000..c8079dfd1
--- /dev/null
+++ b/src/go/build/read.go
@@ -0,0 +1,238 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+ "bufio"
+ "errors"
+ "io"
+)
+
+type importReader struct {
+ b *bufio.Reader
+ buf []byte
+ peek byte
+ err error
+ eof bool
+ nerr int
+}
+
+func isIdent(c byte) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= 0x80
+}
+
+var (
+ errSyntax = errors.New("syntax error")
+ errNUL = errors.New("unexpected NUL in input")
+)
+
+// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
+func (r *importReader) syntaxError() {
+ if r.err == nil {
+ r.err = errSyntax
+ }
+}
+
+// readByte reads the next byte from the input, saves it in buf, and returns it.
+// If an error occurs, readByte records the error in r.err and returns 0.
+func (r *importReader) readByte() byte {
+ c, err := r.b.ReadByte()
+ if err == nil {
+ r.buf = append(r.buf, c)
+ if c == 0 {
+ err = errNUL
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ r.eof = true
+ } else if r.err == nil {
+ r.err = err
+ }
+ c = 0
+ }
+ return c
+}
+
+// peekByte returns the next byte from the input reader but does not advance beyond it.
+// If skipSpace is set, peekByte skips leading spaces and comments.
+func (r *importReader) peekByte(skipSpace bool) byte {
+ if r.err != nil {
+ if r.nerr++; r.nerr > 10000 {
+ panic("go/build: import reader looping")
+ }
+ return 0
+ }
+
+ // Use r.peek as first input byte.
+ // Don't just return r.peek here: it might have been left by peekByte(false)
+ // and this might be peekByte(true).
+ c := r.peek
+ if c == 0 {
+ c = r.readByte()
+ }
+ for r.err == nil && !r.eof {
+ if skipSpace {
+ // For the purposes of this reader, semicolons are never necessary to
+ // understand the input and are treated as spaces.
+ switch c {
+ case ' ', '\f', '\t', '\r', '\n', ';':
+ c = r.readByte()
+ continue
+
+ case '/':
+ c = r.readByte()
+ if c == '/' {
+ for c != '\n' && r.err == nil && !r.eof {
+ c = r.readByte()
+ }
+ } else if c == '*' {
+ var c1 byte
+ for (c != '*' || c1 != '/') && r.err == nil {
+ if r.eof {
+ r.syntaxError()
+ }
+ c, c1 = c1, r.readByte()
+ }
+ } else {
+ r.syntaxError()
+ }
+ c = r.readByte()
+ continue
+ }
+ }
+ break
+ }
+ r.peek = c
+ return r.peek
+}
+
+// nextByte is like peekByte but advances beyond the returned byte.
+func (r *importReader) nextByte(skipSpace bool) byte {
+ c := r.peekByte(skipSpace)
+ r.peek = 0
+ return c
+}
+
+// readKeyword reads the given keyword from the input.
+// If the keyword is not present, readKeyword records a syntax error.
+func (r *importReader) readKeyword(kw string) {
+ r.peekByte(true)
+ for i := 0; i < len(kw); i++ {
+ if r.nextByte(false) != kw[i] {
+ r.syntaxError()
+ return
+ }
+ }
+ if isIdent(r.peekByte(false)) {
+ r.syntaxError()
+ }
+}
+
+// readIdent reads an identifier from the input.
+// If an identifier is not present, readIdent records a syntax error.
+func (r *importReader) readIdent() {
+ c := r.peekByte(true)
+ if !isIdent(c) {
+ r.syntaxError()
+ return
+ }
+ for isIdent(r.peekByte(false)) {
+ r.peek = 0
+ }
+}
+
+// readString reads a quoted string literal from the input.
+// If a quoted string literal is not present, readString records a syntax error.
+func (r *importReader) readString() {
+ switch r.nextByte(true) {
+ case '`':
+ for r.err == nil {
+ if r.nextByte(false) == '`' {
+ break
+ }
+ if r.eof {
+ r.syntaxError()
+ }
+ }
+ case '"':
+ for r.err == nil {
+ c := r.nextByte(false)
+ if c == '"' {
+ break
+ }
+ if r.eof || c == '\n' {
+ r.syntaxError()
+ }
+ if c == '\\' {
+ r.nextByte(false)
+ }
+ }
+ default:
+ r.syntaxError()
+ }
+}
+
+// readImport reads an import clause - optional identifier followed by quoted string -
+// from the input.
+func (r *importReader) readImport() {
+ c := r.peekByte(true)
+ if c == '.' {
+ r.peek = 0
+ } else if isIdent(c) {
+ r.readIdent()
+ }
+ r.readString()
+}
+
+// readComments is like ioutil.ReadAll, except that it only reads the leading
+// block of comments in the file.
+func readComments(f io.Reader) ([]byte, error) {
+ r := &importReader{b: bufio.NewReader(f)}
+ r.peekByte(true)
+ if r.err == nil && !r.eof {
+ // Didn't reach EOF, so must have found a non-space byte. Remove it.
+ r.buf = r.buf[:len(r.buf)-1]
+ }
+ return r.buf, r.err
+}
+
+// readImports is like ioutil.ReadAll, except that it expects a Go file as input
+// and stops reading the input once the imports have completed.
+func readImports(f io.Reader, reportSyntaxError bool) ([]byte, error) {
+ r := &importReader{b: bufio.NewReader(f)}
+
+ r.readKeyword("package")
+ r.readIdent()
+ for r.peekByte(true) == 'i' {
+ r.readKeyword("import")
+ if r.peekByte(true) == '(' {
+ r.nextByte(false)
+ for r.peekByte(true) != ')' && r.err == nil {
+ r.readImport()
+ }
+ r.nextByte(false)
+ } else {
+ r.readImport()
+ }
+ }
+
+ // If we stopped successfully before EOF, we read a byte that told us we were done.
+ // Return all but that last byte, which would cause a syntax error if we let it through.
+ if r.err == nil && !r.eof {
+ return r.buf[:len(r.buf)-1], nil
+ }
+
+ // If we stopped for a syntax error, consume the whole file so that
+ // we are sure we don't change the errors that go/parser returns.
+ if r.err == errSyntax && !reportSyntaxError {
+ r.err = nil
+ for r.err == nil && !r.eof {
+ r.readByte()
+ }
+ }
+
+ return r.buf, r.err
+}
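+
+// For example, given the input
+//
+//	package p
+//
+//	import "x"
+//
+//	var x = 1
+//
+// readImports stops at the 'v' of "var" and returns everything read before
+// that byte, including the package clause and the import.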
diff --git a/src/go/build/read_test.go b/src/go/build/read_test.go
new file mode 100644
index 000000000..2dcc1208f
--- /dev/null
+++ b/src/go/build/read_test.go
@@ -0,0 +1,226 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+ "io"
+ "strings"
+ "testing"
+)
+
+const quote = "`"
+
+type readTest struct {
+ // Test input contains ℙ where readImports should stop.
+ in string
+ err string
+}
+
+var readImportsTests = []readTest{
+ {
+ `package p`,
+ "",
+ },
+ {
+ `package p; import "x"`,
+ "",
+ },
+ {
+ `package p; import . "x"`,
+ "",
+ },
+ {
+ `package p; import "x";ℙvar x = 1`,
+ "",
+ },
+ {
+ `package p
+
+ // comment
+
+ import "x"
+ import _ "x"
+ import a "x"
+
+ /* comment */
+
+ import (
+ "x" /* comment */
+ _ "x"
+ a "x" // comment
+ ` + quote + `x` + quote + `
+ _ /*comment*/ ` + quote + `x` + quote + `
+ a ` + quote + `x` + quote + `
+ )
+ import (
+ )
+ import ()
+ import()import()import()
+ import();import();import()
+
+ ℙvar x = 1
+ `,
+ "",
+ },
+}
+
+var readCommentsTests = []readTest{
+ {
+ `ℙpackage p`,
+ "",
+ },
+ {
+ `ℙpackage p; import "x"`,
+ "",
+ },
+ {
+ `ℙpackage p; import . "x"`,
+ "",
+ },
+ {
+ `// foo
+
+ /* bar */
+
+ /* quux */ // baz
+
+ /*/ zot */
+
+ // asdf
+ ℙHello, world`,
+ "",
+ },
+}
+
+func testRead(t *testing.T, tests []readTest, read func(io.Reader) ([]byte, error)) {
+ for i, tt := range tests {
+ var in, testOut string
+ j := strings.Index(tt.in, "ℙ")
+ if j < 0 {
+ in = tt.in
+ testOut = tt.in
+ } else {
+ in = tt.in[:j] + tt.in[j+len("ℙ"):]
+ testOut = tt.in[:j]
+ }
+ r := strings.NewReader(in)
+ buf, err := read(r)
+ if err != nil {
+ if tt.err == "" {
+ t.Errorf("#%d: err=%q, expected success (%q)", i, err, string(buf))
+ continue
+ }
+ if !strings.Contains(err.Error(), tt.err) {
+ t.Errorf("#%d: err=%q, expected %q", i, err, tt.err)
+ continue
+ }
+ continue
+ }
+ if err == nil && tt.err != "" {
+ t.Errorf("#%d: success, expected %q", i, tt.err)
+ continue
+ }
+
+ out := string(buf)
+ if out != testOut {
+ t.Errorf("#%d: wrong output:\nhave %q\nwant %q\n", i, out, testOut)
+ }
+ }
+}
+
+func TestReadImports(t *testing.T) {
+ testRead(t, readImportsTests, func(r io.Reader) ([]byte, error) { return readImports(r, true) })
+}
+
+func TestReadComments(t *testing.T) {
+ testRead(t, readCommentsTests, readComments)
+}
+
+var readFailuresTests = []readTest{
+ {
+ `package`,
+ "syntax error",
+ },
+ {
+ "package p\n\x00\nimport `math`\n",
+ "unexpected NUL in input",
+ },
+ {
+ `package p; import`,
+ "syntax error",
+ },
+ {
+ `package p; import "`,
+ "syntax error",
+ },
+ {
+ "package p; import ` \n\n",
+ "syntax error",
+ },
+ {
+ `package p; import "x`,
+ "syntax error",
+ },
+ {
+ `package p; import _`,
+ "syntax error",
+ },
+ {
+ `package p; import _ "`,
+ "syntax error",
+ },
+ {
+ `package p; import _ "x`,
+ "syntax error",
+ },
+ {
+ `package p; import .`,
+ "syntax error",
+ },
+ {
+ `package p; import . "`,
+ "syntax error",
+ },
+ {
+ `package p; import . "x`,
+ "syntax error",
+ },
+ {
+ `package p; import (`,
+ "syntax error",
+ },
+ {
+ `package p; import ("`,
+ "syntax error",
+ },
+ {
+ `package p; import ("x`,
+ "syntax error",
+ },
+ {
+ `package p; import ("x"`,
+ "syntax error",
+ },
+}
+
+func TestReadFailures(t *testing.T) {
+ // Errors should be reported (true arg to readImports).
+ testRead(t, readFailuresTests, func(r io.Reader) ([]byte, error) { return readImports(r, true) })
+}
+
+func TestReadFailuresIgnored(t *testing.T) {
+ // Syntax errors should not be reported (false arg to readImports).
+ // Instead, entire file should be the output and no error.
+ // Convert tests not to return syntax errors.
+ tests := make([]readTest, len(readFailuresTests))
+ copy(tests, readFailuresTests)
+ for i := range tests {
+ tt := &tests[i]
+ if !strings.Contains(tt.err, "NUL") {
+ tt.err = ""
+ }
+ }
+ testRead(t, tests, func(r io.Reader) ([]byte, error) { return readImports(r, false) })
+}
diff --git a/src/go/build/syslist.go b/src/go/build/syslist.go
new file mode 100644
index 000000000..965f873df
--- /dev/null
+++ b/src/go/build/syslist.go
@@ -0,0 +1,8 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+const goosList = "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows "
+const goarchList = "386 amd64 amd64p32 arm "
diff --git a/src/go/build/syslist_test.go b/src/go/build/syslist_test.go
new file mode 100644
index 000000000..3be2928f5
--- /dev/null
+++ b/src/go/build/syslist_test.go
@@ -0,0 +1,62 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+ "runtime"
+ "testing"
+)
+
+var (
+ thisOS = runtime.GOOS
+ thisArch = runtime.GOARCH
+ otherOS = anotherOS()
+ otherArch = anotherArch()
+)
+
+func anotherOS() string {
+ if thisOS != "darwin" {
+ return "darwin"
+ }
+ return "linux"
+}
+
+func anotherArch() string {
+ if thisArch != "amd64" {
+ return "amd64"
+ }
+ return "386"
+}
+
+type GoodFileTest struct {
+ name string
+ result bool
+}
+
+var tests = []GoodFileTest{
+ {"file.go", true},
+ {"file.c", true},
+ {"file_foo.go", true},
+ {"file_" + thisArch + ".go", true},
+ {"file_" + otherArch + ".go", false},
+ {"file_" + thisOS + ".go", true},
+ {"file_" + otherOS + ".go", false},
+ {"file_" + thisOS + "_" + thisArch + ".go", true},
+ {"file_" + otherOS + "_" + thisArch + ".go", false},
+ {"file_" + thisOS + "_" + otherArch + ".go", false},
+ {"file_" + otherOS + "_" + otherArch + ".go", false},
+ {"file_foo_" + thisArch + ".go", true},
+ {"file_foo_" + otherArch + ".go", false},
+ {"file_" + thisOS + ".c", true},
+ {"file_" + otherOS + ".c", false},
+}
+
+func TestGoodOSArch(t *testing.T) {
+ for _, test := range tests {
+ if Default.goodOSArchFile(test.name, make(map[string]bool)) != test.result {
+ t.Fatalf("goodOSArchFile(%q) != %v", test.name, test.result)
+ }
+ }
+}
diff --git a/src/go/build/testdata/other/file/file.go b/src/go/build/testdata/other/file/file.go
new file mode 100644
index 000000000..bbfd3e9e5
--- /dev/null
+++ b/src/go/build/testdata/other/file/file.go
@@ -0,0 +1,5 @@
+// Test data - not compiled.
+
+package file
+
+func F() {}
diff --git a/src/go/build/testdata/other/main.go b/src/go/build/testdata/other/main.go
new file mode 100644
index 000000000..e0904357c
--- /dev/null
+++ b/src/go/build/testdata/other/main.go
@@ -0,0 +1,11 @@
+// Test data - not compiled.
+
+package main
+
+import (
+ "./file"
+)
+
+func main() {
+ file.F()
+}
diff --git a/src/go/doc/Makefile b/src/go/doc/Makefile
new file mode 100644
index 000000000..ca4948f91
--- /dev/null
+++ b/src/go/doc/Makefile
@@ -0,0 +1,7 @@
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Script to test heading detection heuristic
+headscan: headscan.go
+ go build headscan.go
diff --git a/src/go/doc/comment.go b/src/go/doc/comment.go
new file mode 100644
index 000000000..f414ca409
--- /dev/null
+++ b/src/go/doc/comment.go
@@ -0,0 +1,480 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Godoc comment extraction and comment -> HTML formatting.
+
+package doc
+
+import (
+ "io"
+ "regexp"
+ "strings"
+ "text/template" // for HTMLEscape
+ "unicode"
+ "unicode/utf8"
+)
+
+var (
+ ldquo = []byte("&ldquo;")
+ rdquo = []byte("&rdquo;")
+)
+
+// Escape comment text for HTML. If nice is set,
+// also turn `` into &ldquo; and '' into &rdquo;.
+func commentEscape(w io.Writer, text string, nice bool) {
+ last := 0
+ if nice {
+ for i := 0; i < len(text)-1; i++ {
+ ch := text[i]
+ if ch == text[i+1] && (ch == '`' || ch == '\'') {
+ template.HTMLEscape(w, []byte(text[last:i]))
+ last = i + 2
+ switch ch {
+ case '`':
+ w.Write(ldquo)
+ case '\'':
+ w.Write(rdquo)
+ }
+ i++ // loop will add one more
+ }
+ }
+ }
+ template.HTMLEscape(w, []byte(text[last:]))
+}
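+
+// For example, with nice set, commentEscape rewrites the text ``quoted''
+// as &ldquo;quoted&rdquo;, HTML-escaping everything else.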
+
+const (
+ // Regexp for Go identifiers
+ identRx = `[\pL_][\pL_0-9]*`
+
+ // Regexp for URLs
+ protocol = `https?|ftp|file|gopher|mailto|news|nntp|telnet|wais|prospero`
+ hostPart = `[a-zA-Z0-9_@\-]+`
+ filePart = `[a-zA-Z0-9_?%#~&/\-+=()]+` // parentheses may not be matching; see pairedParensPrefixLen
+ urlRx = `(` + protocol + `)://` + // http://
+ hostPart + `([.:]` + hostPart + `)*/?` + // //www.google.com:8080/
+ filePart + `([:.,]` + filePart + `)*`
+)
+
+var matchRx = regexp.MustCompile(`(` + urlRx + `)|(` + identRx + `)`)
+
+var (
+ html_a = []byte(`<a href="`)
+ html_aq = []byte(`">`)
+ html_enda = []byte("</a>")
+ html_i = []byte("<i>")
+ html_endi = []byte("</i>")
+ html_p = []byte("<p>\n")
+ html_endp = []byte("</p>\n")
+ html_pre = []byte("<pre>")
+ html_endpre = []byte("</pre>\n")
+ html_h = []byte(`<h3 id="`)
+ html_hq = []byte(`">`)
+ html_endh = []byte("</h3>\n")
+)
+
+// pairedParensPrefixLen returns the length of the longest prefix of s containing paired parentheses.
+func pairedParensPrefixLen(s string) int {
+ parens := 0
+ l := len(s)
+ for i, ch := range s {
+ switch ch {
+ case '(':
+ if parens == 0 {
+ l = i
+ }
+ parens++
+ case ')':
+ parens--
+ if parens == 0 {
+ l = len(s)
+ } else if parens < 0 {
+ return i
+ }
+ }
+ }
+ return l
+}
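+
+// For example (cf. TestPairedParensPrefixLen in comment_test.go):
+//
+//	pairedParensPrefixLen("foo(bar)") == 8 // fully paired
+//	pairedParensPrefixLen("foo)") == 3     // trailing unpaired ')' excluded
+//	pairedParensPrefixLen("(foo") == 0     // leading unpaired '(' excluded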
+
+// Emphasize and escape a line of text for HTML. URLs are converted into links;
+// if the URL also appears in the words map, the link is taken from the map (if
+// the corresponding map value is the empty string, the URL is not converted
+// into a link). Go identifiers that appear in the words map are italicized; if
+// the corresponding map value is not the empty string, it is considered a URL
+// and the word is converted into a link. If nice is set, the remaining text's
+// appearance is improved where it makes sense (e.g., `` is turned into &ldquo;
+// and '' into &rdquo;).
+func emphasize(w io.Writer, line string, words map[string]string, nice bool) {
+ for {
+ m := matchRx.FindStringSubmatchIndex(line)
+ if m == nil {
+ break
+ }
+ // m >= 6 (two parenthesized sub-regexps in matchRx, 1st one is urlRx)
+
+ // write text before match
+ commentEscape(w, line[0:m[0]], nice)
+
+ // adjust match if necessary
+ match := line[m[0]:m[1]]
+ if n := pairedParensPrefixLen(match); n < len(match) {
+ // match contains unpaired parentheses (rare);
+ // redo matching with shortened line for correct indices
+ m = matchRx.FindStringSubmatchIndex(line[:m[0]+n])
+ match = match[:n]
+ }
+
+ // analyze match
+ url := ""
+ italics := false
+ if words != nil {
+ url, italics = words[match]
+ }
+ if m[2] >= 0 {
+ // match against first parenthesized sub-regexp; must be match against urlRx
+ if !italics {
+ // no alternative URL in words list, use match instead
+ url = match
+ }
+ italics = false // don't italicize URLs
+ }
+
+ // write match
+ if len(url) > 0 {
+ w.Write(html_a)
+ template.HTMLEscape(w, []byte(url))
+ w.Write(html_aq)
+ }
+ if italics {
+ w.Write(html_i)
+ }
+ commentEscape(w, match, nice)
+ if italics {
+ w.Write(html_endi)
+ }
+ if len(url) > 0 {
+ w.Write(html_enda)
+ }
+
+ // advance
+ line = line[m[1]:]
+ }
+ commentEscape(w, line, nice)
+}
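+
+// For instance (cf. TestEmphasize in comment_test.go), with a nil words map
+// the line
+//
+//	Foo bar http://example.com/ quux!
+//
+// is emitted as
+//
+//	Foo bar <a href="http://example.com/">http://example.com/</a> quux!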
+
+func indentLen(s string) int {
+ i := 0
+ for i < len(s) && (s[i] == ' ' || s[i] == '\t') {
+ i++
+ }
+ return i
+}
+
+func isBlank(s string) bool {
+ return len(s) == 0 || (len(s) == 1 && s[0] == '\n')
+}
+
+func commonPrefix(a, b string) string {
+ i := 0
+ for i < len(a) && i < len(b) && a[i] == b[i] {
+ i++
+ }
+ return a[0:i]
+}
+
+func unindent(block []string) {
+ if len(block) == 0 {
+ return
+ }
+
+ // compute maximum common white prefix
+ prefix := block[0][0:indentLen(block[0])]
+ for _, line := range block {
+ if !isBlank(line) {
+ prefix = commonPrefix(prefix, line[0:indentLen(line)])
+ }
+ }
+ n := len(prefix)
+
+ // remove
+ for i, line := range block {
+ if !isBlank(line) {
+ block[i] = line[n:]
+ }
+ }
+}
+
+// heading returns the trimmed line if it passes as a section heading;
+// otherwise it returns the empty string.
+func heading(line string) string {
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ return ""
+ }
+
+ // a heading must start with an uppercase letter
+ r, _ := utf8.DecodeRuneInString(line)
+ if !unicode.IsLetter(r) || !unicode.IsUpper(r) {
+ return ""
+ }
+
+ // it must end in a letter or digit:
+ r, _ = utf8.DecodeLastRuneInString(line)
+ if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
+ return ""
+ }
+
+ // exclude lines with illegal characters
+ if strings.IndexAny(line, ",.;:!?+*/=()[]{}_^°&§~%#@<\">\\") >= 0 {
+ return ""
+ }
+
+ // allow "'" for possessive "'s" only
+ for b := line; ; {
+ i := strings.IndexRune(b, '\'')
+ if i < 0 {
+ break
+ }
+ if i+1 >= len(b) || b[i+1] != 's' || (i+2 < len(b) && b[i+2] != ' ') {
+ return "" // not followed by "s "
+ }
+ b = b[i+2:]
+ }
+
+ return line
+}
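+
+// For example (cf. the heading table in comment_test.go): "A typical usage"
+// and "Fermat's Last Sentence" pass as headings, while "A typical usage:",
+// "Use n+m", and "Ted 'Too' Bar" do not.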
+
+type op int
+
+const (
+ opPara op = iota
+ opHead
+ opPre
+)
+
+type block struct {
+ op op
+ lines []string
+}
+
+var nonAlphaNumRx = regexp.MustCompile(`[^a-zA-Z0-9]`)
+
+func anchorID(line string) string {
+ // Add a "hdr-" prefix to avoid conflicting with IDs used for package symbols.
+ return "hdr-" + nonAlphaNumRx.ReplaceAllString(line, "_")
+}
+
+// ToHTML converts comment text to formatted HTML.
+// The comment was prepared by DocReader,
+// so it is known not to have leading or trailing blank lines
+// nor trailing spaces at the end of lines.
+// The comment markers have already been removed.
+//
+// Each span of unindented non-blank lines is converted into
+// a single paragraph. There is one exception to the rule: a span that
+// consists of a single line, is followed by another paragraph span,
+// begins with a capital letter, and contains no punctuation
+// is formatted as a heading.
+//
+// A span of indented lines is converted into a <pre> block,
+// with the common indent prefix removed.
+//
+// URLs in the comment text are converted into links; if the URL also appears
+// in the words map, the link is taken from the map (if the corresponding map
+// value is the empty string, the URL is not converted into a link).
+//
+// Go identifiers that appear in the words map are italicized; if the corresponding
+// map value is not the empty string, it is considered a URL and the word is converted
+// into a link.
+func ToHTML(w io.Writer, text string, words map[string]string) {
+ for _, b := range blocks(text) {
+ switch b.op {
+ case opPara:
+ w.Write(html_p)
+ for _, line := range b.lines {
+ emphasize(w, line, words, true)
+ }
+ w.Write(html_endp)
+ case opHead:
+ w.Write(html_h)
+ id := ""
+ for _, line := range b.lines {
+ if id == "" {
+ id = anchorID(line)
+ w.Write([]byte(id))
+ w.Write(html_hq)
+ }
+ commentEscape(w, line, true)
+ }
+ if id == "" {
+ w.Write(html_hq)
+ }
+ w.Write(html_endh)
+ case opPre:
+ w.Write(html_pre)
+ for _, line := range b.lines {
+ emphasize(w, line, nil, false)
+ }
+ w.Write(html_endpre)
+ }
+ }
+}
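+
+// As a sketch of the mapping, the comment text
+//
+//	Intro text.
+//
+//	Heading
+//
+//	More text with http://golang.org/ in it.
+//
+//		indented line
+//
+// yields a <p> for the intro, <h3 id="hdr-Heading">Heading</h3> for the
+// heading, a <p> with the URL rendered as a link, and a <pre> block for
+// the indented line.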
+
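+// blocks splits comment text into a sequence of paragraph (opPara),
+// heading (opHead), and pre-formatted (opPre) blocks.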
+func blocks(text string) []block {
+ var (
+ out []block
+ para []string
+
+ lastWasBlank = false
+ lastWasHeading = false
+ )
+
+ close := func() {
+ if para != nil {
+ out = append(out, block{opPara, para})
+ para = nil
+ }
+ }
+
+ lines := strings.SplitAfter(text, "\n")
+ unindent(lines)
+ for i := 0; i < len(lines); {
+ line := lines[i]
+ if isBlank(line) {
+ // close paragraph
+ close()
+ i++
+ lastWasBlank = true
+ continue
+ }
+ if indentLen(line) > 0 {
+ // close paragraph
+ close()
+
+ // count indented or blank lines
+ j := i + 1
+ for j < len(lines) && (isBlank(lines[j]) || indentLen(lines[j]) > 0) {
+ j++
+ }
+ // but not trailing blank lines
+ for j > i && isBlank(lines[j-1]) {
+ j--
+ }
+ pre := lines[i:j]
+ i = j
+
+ unindent(pre)
+
+ // put those lines in a pre block
+ out = append(out, block{opPre, pre})
+ lastWasHeading = false
+ continue
+ }
+
+ if lastWasBlank && !lastWasHeading && i+2 < len(lines) &&
+ isBlank(lines[i+1]) && !isBlank(lines[i+2]) && indentLen(lines[i+2]) == 0 {
+ // current line is non-blank, surrounded by blank lines
+ // and the next non-blank line is not indented: this
+ // might be a heading.
+ if head := heading(line); head != "" {
+ close()
+ out = append(out, block{opHead, []string{head}})
+ i += 2
+ lastWasHeading = true
+ continue
+ }
+ }
+
+ // open paragraph
+ lastWasBlank = false
+ lastWasHeading = false
+ para = append(para, lines[i])
+ i++
+ }
+ close()
+
+ return out
+}
+
+// ToText prepares comment text for presentation in textual output.
+// It wraps paragraphs of text to width or fewer Unicode code points
+// and then prefixes each line with the indent. In preformatted sections
+// (such as program text), it prefixes each non-blank line with preIndent.
+func ToText(w io.Writer, text string, indent, preIndent string, width int) {
+ l := lineWrapper{
+ out: w,
+ width: width,
+ indent: indent,
+ }
+ for _, b := range blocks(text) {
+ switch b.op {
+ case opPara:
+ // l.write will add leading newline if required
+ for _, line := range b.lines {
+ l.write(line)
+ }
+ l.flush()
+ case opHead:
+ w.Write(nl)
+ for _, line := range b.lines {
+ l.write(line + "\n")
+ }
+ l.flush()
+ case opPre:
+ w.Write(nl)
+ for _, line := range b.lines {
+ if isBlank(line) {
+ w.Write([]byte("\n"))
+ } else {
+ w.Write([]byte(preIndent))
+ w.Write([]byte(line))
+ }
+ }
+ }
+ }
+}
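+
+// A minimal sketch of a call (writer, prefixes, and width are illustrative;
+// see TestToText in comment_test.go for exact output):
+//
+//	var buf bytes.Buffer
+//	ToText(&buf, text, "    ", "\t", 80)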
+
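+// A lineWrapper greedily fills lines up to width code points, writing indent
+// at the start of each line and deferring the inter-word space (pendSpace)
+// until the next word is known to fit.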
+type lineWrapper struct {
+ out io.Writer
+ printed bool
+ width int
+ indent string
+ n int
+ pendSpace int
+}
+
+var nl = []byte("\n")
+var space = []byte(" ")
+
+func (l *lineWrapper) write(text string) {
+ if l.n == 0 && l.printed {
+ l.out.Write(nl) // blank line before new paragraph
+ }
+ l.printed = true
+
+ for _, f := range strings.Fields(text) {
+ w := utf8.RuneCountInString(f)
+ // wrap if line is too long
+ if l.n > 0 && l.n+l.pendSpace+w > l.width {
+ l.out.Write(nl)
+ l.n = 0
+ l.pendSpace = 0
+ }
+ if l.n == 0 {
+ l.out.Write([]byte(l.indent))
+ }
+ l.out.Write(space[:l.pendSpace])
+ l.out.Write([]byte(f))
+ l.n += l.pendSpace + w
+ l.pendSpace = 1
+ }
+}
+
+func (l *lineWrapper) flush() {
+ if l.n == 0 {
+ return
+ }
+ l.out.Write(nl)
+ l.pendSpace = 0
+ l.n = 0
+}
diff --git a/src/go/doc/comment_test.go b/src/go/doc/comment_test.go
new file mode 100644
index 000000000..ad65c2a27
--- /dev/null
+++ b/src/go/doc/comment_test.go
@@ -0,0 +1,207 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+)
+
+var headingTests = []struct {
+ line string
+ ok bool
+}{
+ {"Section", true},
+ {"A typical usage", true},
+ {"ΔΛΞ is Greek", true},
+ {"Foo 42", true},
+ {"", false},
+ {"section", false},
+ {"A typical usage:", false},
+ {"This code:", false},
+ {"δ is Greek", false},
+ {"Foo §", false},
+ {"Fermat's Last Sentence", true},
+ {"Fermat's", true},
+ {"'sX", false},
+ {"Ted 'Too' Bar", false},
+ {"Use n+m", false},
+ {"Scanning:", false},
+ {"N:M", false},
+}
+
+func TestIsHeading(t *testing.T) {
+ for _, tt := range headingTests {
+ if h := heading(tt.line); (len(h) > 0) != tt.ok {
+ t.Errorf("isHeading(%q) = %v, want %v", tt.line, h, tt.ok)
+ }
+ }
+}
+
+var blocksTests = []struct {
+ in string
+ out []block
+ text string
+}{
+ {
+ in: `Para 1.
+Para 1 line 2.
+
+Para 2.
+
+Section
+
+Para 3.
+
+ pre
+ pre1
+
+Para 4.
+
+ pre
+ pre1
+
+ pre2
+
+Para 5.
+
+
+ pre
+
+
+ pre1
+ pre2
+
+Para 6.
+ pre
+ pre2
+`,
+ out: []block{
+ {opPara, []string{"Para 1.\n", "Para 1 line 2.\n"}},
+ {opPara, []string{"Para 2.\n"}},
+ {opHead, []string{"Section"}},
+ {opPara, []string{"Para 3.\n"}},
+ {opPre, []string{"pre\n", "pre1\n"}},
+ {opPara, []string{"Para 4.\n"}},
+ {opPre, []string{"pre\n", "pre1\n", "\n", "pre2\n"}},
+ {opPara, []string{"Para 5.\n"}},
+ {opPre, []string{"pre\n", "\n", "\n", "pre1\n", "pre2\n"}},
+ {opPara, []string{"Para 6.\n"}},
+ {opPre, []string{"pre\n", "pre2\n"}},
+ },
+ text: `. Para 1. Para 1 line 2.
+
+. Para 2.
+
+
+. Section
+
+. Para 3.
+
+$ pre
+$ pre1
+
+. Para 4.
+
+$ pre
+$ pre1
+
+$ pre2
+
+. Para 5.
+
+$ pre
+
+
+$ pre1
+$ pre2
+
+. Para 6.
+
+$ pre
+$ pre2
+`,
+ },
+}
+
+func TestBlocks(t *testing.T) {
+ for i, tt := range blocksTests {
+ b := blocks(tt.in)
+ if !reflect.DeepEqual(b, tt.out) {
+ t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, b, tt.out)
+ }
+ }
+}
+
+func TestToText(t *testing.T) {
+ var buf bytes.Buffer
+ for i, tt := range blocksTests {
+ ToText(&buf, tt.in, ". ", "$\t", 40)
+ if have := buf.String(); have != tt.text {
+ t.Errorf("#%d: mismatch\nhave: %s\nwant: %s\nhave vs want:\n%q\n%q", i, have, tt.text, have, tt.text)
+ }
+ buf.Reset()
+ }
+}
+
+var emphasizeTests = []struct {
+ in, out string
+}{
+ {"http://www.google.com/", `<a href="http://www.google.com/">http://www.google.com/</a>`},
+ {"https://www.google.com/", `<a href="https://www.google.com/">https://www.google.com/</a>`},
+ {"http://www.google.com/path.", `<a href="http://www.google.com/path">http://www.google.com/path</a>.`},
+ {"http://en.wikipedia.org/wiki/Camellia_(cipher)", `<a href="http://en.wikipedia.org/wiki/Camellia_(cipher)">http://en.wikipedia.org/wiki/Camellia_(cipher)</a>`},
+ {"(http://www.google.com/)", `(<a href="http://www.google.com/">http://www.google.com/</a>)`},
+ {"http://gmail.com)", `<a href="http://gmail.com">http://gmail.com</a>)`},
+ {"((http://gmail.com))", `((<a href="http://gmail.com">http://gmail.com</a>))`},
+ {"http://gmail.com ((http://gmail.com)) ()", `<a href="http://gmail.com">http://gmail.com</a> ((<a href="http://gmail.com">http://gmail.com</a>)) ()`},
+ {"Foo bar http://example.com/ quux!", `Foo bar <a href="http://example.com/">http://example.com/</a> quux!`},
+ {"Hello http://example.com/%2f/ /world.", `Hello <a href="http://example.com/%2f/">http://example.com/%2f/</a> /world.`},
+ {"Lorem http: ipsum //host/path", "Lorem http: ipsum //host/path"},
+ {"javascript://is/not/linked", "javascript://is/not/linked"},
+}
+
+func TestEmphasize(t *testing.T) {
+ for i, tt := range emphasizeTests {
+ var buf bytes.Buffer
+ emphasize(&buf, tt.in, nil, true)
+ out := buf.String()
+ if out != tt.out {
+ t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, out, tt.out)
+ }
+ }
+}
+
+var pairedParensPrefixLenTests = []struct {
+ in, out string
+}{
+ {"", ""},
+ {"foo", "foo"},
+ {"()", "()"},
+ {"foo()", "foo()"},
+ {"foo()()()", "foo()()()"},
+ {"foo()((()()))", "foo()((()()))"},
+ {"foo()((()()))bar", "foo()((()()))bar"},
+ {"foo)", "foo"},
+ {"foo))", "foo"},
+ {"foo)))))", "foo"},
+ {"(foo", ""},
+ {"((foo", ""},
+ {"(((((foo", ""},
+ {"(foo)", "(foo)"},
+ {"((((foo))))", "((((foo))))"},
+ {"foo()())", "foo()()"},
+ {"foo((()())", "foo"},
+ {"foo((()())) (() foo ", "foo((()())) "},
+}
+
+func TestPairedParensPrefixLen(t *testing.T) {
+ for i, tt := range pairedParensPrefixLenTests {
+ if out := tt.in[:pairedParensPrefixLen(tt.in)]; out != tt.out {
+ t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out)
+ }
+ }
+}
diff --git a/src/go/doc/doc.go b/src/go/doc/doc.go
new file mode 100644
index 000000000..4264940a0
--- /dev/null
+++ b/src/go/doc/doc.go
@@ -0,0 +1,111 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package doc extracts source code documentation from a Go AST.
+package doc
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+// Package is the documentation for an entire package.
+type Package struct {
+ Doc string
+ Name string
+ ImportPath string
+ Imports []string
+ Filenames []string
+ Notes map[string][]*Note
+ // DEPRECATED. For backward compatibility Bugs is still populated,
+ // but all new code should use Notes instead.
+ Bugs []string
+
+ // declarations
+ Consts []*Value
+ Types []*Type
+ Vars []*Value
+ Funcs []*Func
+}
+
+// Value is the documentation for a (possibly grouped) var or const declaration.
+type Value struct {
+ Doc string
+ Names []string // var or const names in declaration order
+ Decl *ast.GenDecl
+
+ order int
+}
+
+// Type is the documentation for a type declaration.
+type Type struct {
+ Doc string
+ Name string
+ Decl *ast.GenDecl
+
+ // associated declarations
+ Consts []*Value // sorted list of constants of (mostly) this type
+ Vars []*Value // sorted list of variables of (mostly) this type
+ Funcs []*Func // sorted list of functions returning this type
+ Methods []*Func // sorted list of methods (including embedded ones) of this type
+}
+
+// Func is the documentation for a func declaration.
+type Func struct {
+ Doc string
+ Name string
+ Decl *ast.FuncDecl
+
+ // methods
+ // (for functions, these fields have the respective zero value)
+ Recv string // actual receiver "T" or "*T"
+ Orig string // original receiver "T" or "*T"
+ Level int // embedding level; 0 means not embedded
+}
+
+// A Note represents a marked comment starting with "MARKER(uid): note body".
+// Any note with a marker of 2 or more upper case [A-Z] letters and a uid of
+// at least one character is recognized. The ":" following the uid is optional.
+// Notes are collected in the Package.Notes map indexed by the notes marker.
+type Note struct {
+ Pos, End token.Pos // position range of the comment containing the marker
+ UID string // uid found with the marker
+ Body string // note body text
+}
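+
+// For example, a comment "// BUG(rsc): rounding is wrong" is collected under
+// marker "BUG" with UID "rsc" and the remaining text as the note body.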
+
+// Mode values control the operation of New.
+type Mode int
+
+const (
+ // extract documentation for all package-level declarations,
+ // not just exported ones
+ AllDecls Mode = 1 << iota
+
+ // show all embedded methods, not just the ones of
+ // invisible (unexported) anonymous fields
+ AllMethods
+)
+
+// New computes the package documentation for the given package AST.
+// New takes ownership of the AST pkg and may edit or overwrite it.
+//
+func New(pkg *ast.Package, importPath string, mode Mode) *Package {
+ var r reader
+ r.readPackage(pkg, mode)
+ r.computeMethodSets()
+ r.cleanupTypes()
+ return &Package{
+ Doc: r.doc,
+ Name: pkg.Name,
+ ImportPath: importPath,
+ Imports: sortedKeys(r.imports),
+ Filenames: r.filenames,
+ Notes: r.notes,
+ Bugs: noteBodies(r.notes["BUG"]),
+ Consts: sortedValues(r.values, token.CONST),
+ Types: sortedTypes(r.types, mode&AllMethods != 0),
+ Vars: sortedValues(r.values, token.VAR),
+ Funcs: sortedFuncs(r.funcs, true),
+ }
+}
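+
+// A typical invocation, mirroring doc_test.go (the path is illustrative):
+//
+//	fset := token.NewFileSet()
+//	pkgs, _ := parser.ParseDir(fset, "path/to/pkg", nil, parser.ParseComments)
+//	d := New(pkgs["pkg"], "path/to/pkg", 0)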
diff --git a/src/go/doc/doc_test.go b/src/go/doc/doc_test.go
new file mode 100644
index 000000000..ad8ba5378
--- /dev/null
+++ b/src/go/doc/doc_test.go
@@ -0,0 +1,146 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+ "text/template"
+)
+
+var update = flag.Bool("update", false, "update golden (.out) files")
+var files = flag.String("files", "", "consider only Go test files matching this regular expression")
+
+const dataDir = "testdata"
+
+var templateTxt = readTemplate("template.txt")
+
+func readTemplate(filename string) *template.Template {
+ t := template.New(filename)
+ t.Funcs(template.FuncMap{
+ "node": nodeFmt,
+ "synopsis": synopsisFmt,
+ "indent": indentFmt,
+ })
+ return template.Must(t.ParseFiles(filepath.Join(dataDir, filename)))
+}
+
+func nodeFmt(node interface{}, fset *token.FileSet) string {
+ var buf bytes.Buffer
+ printer.Fprint(&buf, fset, node)
+ return strings.Replace(strings.TrimSpace(buf.String()), "\n", "\n\t", -1)
+}
+
+func synopsisFmt(s string) string {
+ const n = 64
+ if len(s) > n {
+ // cut off excess text and go back to a word boundary
+ s = s[0:n]
+ if i := strings.LastIndexAny(s, "\t\n "); i >= 0 {
+ s = s[0:i]
+ }
+ s = strings.TrimSpace(s) + " ..."
+ }
+ return "// " + strings.Replace(s, "\n", " ", -1)
+}
+
+func indentFmt(indent, s string) string {
+ end := ""
+ if strings.HasSuffix(s, "\n") {
+ end = "\n"
+ s = s[:len(s)-1]
+ }
+ return indent + strings.Replace(s, "\n", "\n"+indent, -1) + end
+}
+
+func isGoFile(fi os.FileInfo) bool {
+ name := fi.Name()
+ return !fi.IsDir() &&
+ len(name) > 0 && name[0] != '.' && // ignore .files
+ filepath.Ext(name) == ".go"
+}
+
+type bundle struct {
+ *Package
+ FSet *token.FileSet
+}
+
+func test(t *testing.T, mode Mode) {
+ // determine file filter
+ filter := isGoFile
+ if *files != "" {
+ rx, err := regexp.Compile(*files)
+ if err != nil {
+ t.Fatal(err)
+ }
+ filter = func(fi os.FileInfo) bool {
+ return isGoFile(fi) && rx.MatchString(fi.Name())
+ }
+ }
+
+ // get packages
+ fset := token.NewFileSet()
+ pkgs, err := parser.ParseDir(fset, dataDir, filter, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test packages
+ for _, pkg := range pkgs {
+ importpath := dataDir + "/" + pkg.Name
+ doc := New(pkg, importpath, mode)
+
+ // golden files always use / in filenames - canonicalize them
+ for i, filename := range doc.Filenames {
+ doc.Filenames[i] = filepath.ToSlash(filename)
+ }
+
+ // print documentation
+ var buf bytes.Buffer
+ if err := templateTxt.Execute(&buf, bundle{doc, fset}); err != nil {
+ t.Error(err)
+ continue
+ }
+ got := buf.Bytes()
+
+ // update golden file if necessary
+ golden := filepath.Join(dataDir, fmt.Sprintf("%s.%d.golden", pkg.Name, mode))
+ if *update {
+ err := ioutil.WriteFile(golden, got, 0644)
+ if err != nil {
+ t.Error(err)
+ }
+ continue
+ }
+
+ // get golden file
+ want, err := ioutil.ReadFile(golden)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ // compare
+ if !bytes.Equal(got, want) {
+ t.Errorf("package %s\n\tgot:\n%s\n\twant:\n%s", pkg.Name, got, want)
+ }
+ }
+}
+
+func Test(t *testing.T) {
+ test(t, 0)
+ test(t, AllDecls)
+ test(t, AllMethods)
+}
diff --git a/src/go/doc/example.go b/src/go/doc/example.go
new file mode 100644
index 000000000..c414e548c
--- /dev/null
+++ b/src/go/doc/example.go
@@ -0,0 +1,355 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Extract example functions from file ASTs.
+
+package doc
+
+import (
+ "go/ast"
+ "go/token"
+ "path"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// An Example represents an example function found in source files.
+type Example struct {
+ Name string // name of the item being exemplified
+ Doc string // example function doc string
+ Code ast.Node
+ Play *ast.File // a whole program version of the example
+ Comments []*ast.CommentGroup
+ Output string // expected output
+ EmptyOutput bool // expect empty output
+ Order int // original source code order
+}
+
+// Examples returns the examples found in the files, sorted by Name field.
+// The Order fields record the order in which the examples were encountered.
+//
+// Playable Examples must be in a package whose name ends in "_test".
+// An Example is "playable" (the Play field is non-nil) in either of these
+// circumstances:
+// - The example function is self-contained: the function references only
+// identifiers from other packages (or predeclared identifiers, such as
+// "int") and the test file does not include a dot import.
+// - The entire test file is the example: the file contains exactly one
+// example function, zero test or benchmark functions, and at least one
+// top-level function, type, variable, or constant declaration other
+// than the example function.
+func Examples(files ...*ast.File) []*Example {
+ var list []*Example
+ for _, file := range files {
+ hasTests := false // file contains tests or benchmarks
+ numDecl := 0 // number of non-import declarations in the file
+ var flist []*Example
+ for _, decl := range file.Decls {
+ if g, ok := decl.(*ast.GenDecl); ok && g.Tok != token.IMPORT {
+ numDecl++
+ continue
+ }
+ f, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ numDecl++
+ name := f.Name.Name
+ if isTest(name, "Test") || isTest(name, "Benchmark") {
+ hasTests = true
+ continue
+ }
+ if !isTest(name, "Example") {
+ continue
+ }
+ var doc string
+ if f.Doc != nil {
+ doc = f.Doc.Text()
+ }
+ output, hasOutput := exampleOutput(f.Body, file.Comments)
+ flist = append(flist, &Example{
+ Name: name[len("Example"):],
+ Doc: doc,
+ Code: f.Body,
+ Play: playExample(file, f.Body),
+ Comments: file.Comments,
+ Output: output,
+ EmptyOutput: output == "" && hasOutput,
+ Order: len(flist),
+ })
+ }
+ if !hasTests && numDecl > 1 && len(flist) == 1 {
+ // If this file only has one example function, some
+ // other top-level declarations, and no tests or
+ // benchmarks, use the whole file as the example.
+ flist[0].Code = file
+ flist[0].Play = playExampleFile(file)
+ }
+ list = append(list, flist...)
+ }
+ sort.Sort(exampleByName(list))
+ return list
+}
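+
+// Sketch of use (mirroring TestExamples in example_test.go):
+//
+//	fset := token.NewFileSet()
+//	f, err := parser.ParseFile(fset, "foo_test.go", src, parser.ParseComments)
+//	if err != nil { /* ... */ }
+//	for _, ex := range Examples(f) {
+//		_ = ex.Play // non-nil if the example is playable
+//	}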
+
+var outputPrefix = regexp.MustCompile(`(?i)^[[:space:]]*output:`)
+
+// exampleOutput extracts the expected output and reports whether a
+// valid "Output:" comment was found.
+func exampleOutput(b *ast.BlockStmt, comments []*ast.CommentGroup) (output string, ok bool) {
+ if _, last := lastComment(b, comments); last != nil {
+ // test that it begins with the correct prefix
+ text := last.Text()
+ if loc := outputPrefix.FindStringIndex(text); loc != nil {
+ text = text[loc[1]:]
+ // Strip zero or more spaces followed by \n or a single space.
+ text = strings.TrimLeft(text, " ")
+ if len(text) > 0 && text[0] == '\n' {
+ text = text[1:]
+ }
+ return text, true
+ }
+ }
+ return "", false // no suitable comment found
+}
+
+// isTest tells whether name looks like a test, example, or benchmark.
+// It is a Test (say) if there is a character after Test that is not a
+// lower-case letter. (We don't want Testiness.)
+func isTest(name, prefix string) bool {
+ if !strings.HasPrefix(name, prefix) {
+ return false
+ }
+ if len(name) == len(prefix) { // "Test" is ok
+ return true
+ }
+ rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
+ return !unicode.IsLower(rune)
+}
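+
+// For example, isTest("TestFoo", "Test") and isTest("Test", "Test") are true,
+// while isTest("Testiness", "Test") is false.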
+
+type exampleByName []*Example
+
+func (s exampleByName) Len() int { return len(s) }
+func (s exampleByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s exampleByName) Less(i, j int) bool { return s[i].Name < s[j].Name }
+
+// playExample synthesizes a new *ast.File based on the provided
+// file with the provided function body as the body of main.
+func playExample(file *ast.File, body *ast.BlockStmt) *ast.File {
+ if !strings.HasSuffix(file.Name.Name, "_test") {
+ // We don't support examples that are part of the
+ // greater package (yet).
+ return nil
+ }
+
+ // Find top-level declarations in the file.
+ topDecls := make(map[*ast.Object]bool)
+ for _, decl := range file.Decls {
+ switch d := decl.(type) {
+ case *ast.FuncDecl:
+ topDecls[d.Name.Obj] = true
+ case *ast.GenDecl:
+ for _, spec := range d.Specs {
+ switch s := spec.(type) {
+ case *ast.TypeSpec:
+ topDecls[s.Name.Obj] = true
+ case *ast.ValueSpec:
+ for _, id := range s.Names {
+ topDecls[id.Obj] = true
+ }
+ }
+ }
+ }
+ }
+
+ // Find unresolved identifiers and uses of top-level declarations.
+ unresolved := make(map[string]bool)
+ usesTopDecl := false
+ var inspectFunc func(ast.Node) bool
+ inspectFunc = func(n ast.Node) bool {
+ // For selector expressions, only inspect the left hand side.
+ // (For an expression like fmt.Println, only add "fmt" to the
+ // set of unresolved names, not "Println".)
+ if e, ok := n.(*ast.SelectorExpr); ok {
+ ast.Inspect(e.X, inspectFunc)
+ return false
+ }
+ // For key value expressions, only inspect the value
+ // as the key should be resolved by the type of the
+ // composite literal.
+ if e, ok := n.(*ast.KeyValueExpr); ok {
+ ast.Inspect(e.Value, inspectFunc)
+ return false
+ }
+ if id, ok := n.(*ast.Ident); ok {
+ if id.Obj == nil {
+ unresolved[id.Name] = true
+ } else if topDecls[id.Obj] {
+ usesTopDecl = true
+ }
+ }
+ return true
+ }
+ ast.Inspect(body, inspectFunc)
+ if usesTopDecl {
+ // We don't support examples that are not self-contained (yet).
+ return nil
+ }
+
+ // Remove predeclared identifiers from unresolved list.
+ for n := range unresolved {
+ if predeclaredTypes[n] || predeclaredConstants[n] || predeclaredFuncs[n] {
+ delete(unresolved, n)
+ }
+ }
+
+ // Use unresolved identifiers to determine the imports used by this
+ // example. The heuristic assumes package names match base import
+ // paths for imports w/o renames (should be good enough most of the time).
+ namedImports := make(map[string]string) // [name]path
+ var blankImports []ast.Spec // _ imports
+ for _, s := range file.Imports {
+ p, err := strconv.Unquote(s.Path.Value)
+ if err != nil {
+ continue
+ }
+ n := path.Base(p)
+ if s.Name != nil {
+ n = s.Name.Name
+ switch n {
+ case "_":
+ blankImports = append(blankImports, s)
+ continue
+ case ".":
+ // We can't resolve dot imports (yet).
+ return nil
+ }
+ }
+ if unresolved[n] {
+ namedImports[n] = p
+ delete(unresolved, n)
+ }
+ }
+
+ // If there are other unresolved identifiers, give up because this
+ // synthesized file is not going to build.
+ if len(unresolved) > 0 {
+ return nil
+ }
+
+ // Include documentation belonging to blank imports.
+ var comments []*ast.CommentGroup
+ for _, s := range blankImports {
+ if c := s.(*ast.ImportSpec).Doc; c != nil {
+ comments = append(comments, c)
+ }
+ }
+
+ // Include comments that are inside the function body.
+ for _, c := range file.Comments {
+ if body.Pos() <= c.Pos() && c.End() <= body.End() {
+ comments = append(comments, c)
+ }
+ }
+
+ // Strip "Output:" comment and adjust body end position.
+ body, comments = stripOutputComment(body, comments)
+
+ // Synthesize import declaration.
+ importDecl := &ast.GenDecl{
+ Tok: token.IMPORT,
+ Lparen: 1, // Need non-zero Lparen and Rparen so that printer
+ Rparen: 1, // treats this as a factored import.
+ }
+ for n, p := range namedImports {
+ s := &ast.ImportSpec{Path: &ast.BasicLit{Value: strconv.Quote(p)}}
+ if path.Base(p) != n {
+ s.Name = ast.NewIdent(n)
+ }
+ importDecl.Specs = append(importDecl.Specs, s)
+ }
+ importDecl.Specs = append(importDecl.Specs, blankImports...)
+
+ // Synthesize main function.
+ funcDecl := &ast.FuncDecl{
+ Name: ast.NewIdent("main"),
+ Type: &ast.FuncType{Params: &ast.FieldList{}}, // FuncType.Params must be non-nil
+ Body: body,
+ }
+
+ // Synthesize file.
+ return &ast.File{
+ Name: ast.NewIdent("main"),
+ Decls: []ast.Decl{importDecl, funcDecl},
+ Comments: comments,
+ }
+}
+
+// playExampleFile takes a whole file example and synthesizes a new *ast.File
+// such that the example is function main in package main.
+func playExampleFile(file *ast.File) *ast.File {
+ // Strip copyright comment if present.
+ comments := file.Comments
+ if len(comments) > 0 && strings.HasPrefix(comments[0].Text(), "Copyright") {
+ comments = comments[1:]
+ }
+
+ // Copy declaration slice, rewriting the ExampleX function to main.
+ var decls []ast.Decl
+ for _, d := range file.Decls {
+ if f, ok := d.(*ast.FuncDecl); ok && isTest(f.Name.Name, "Example") {
+ // Copy the FuncDecl, as it may be used elsewhere.
+ newF := *f
+ newF.Name = ast.NewIdent("main")
+ newF.Body, comments = stripOutputComment(f.Body, comments)
+ d = &newF
+ }
+ decls = append(decls, d)
+ }
+
+ // Copy the File, as it may be used elsewhere.
+ f := *file
+ f.Name = ast.NewIdent("main")
+ f.Decls = decls
+ f.Comments = comments
+ return &f
+}
+
+// stripOutputComment finds and removes an "Output:" comment from body
+// and comments, and adjusts the body block's end position.
+func stripOutputComment(body *ast.BlockStmt, comments []*ast.CommentGroup) (*ast.BlockStmt, []*ast.CommentGroup) {
+ // Do nothing if no "Output:" comment found.
+ i, last := lastComment(body, comments)
+ if last == nil || !outputPrefix.MatchString(last.Text()) {
+ return body, comments
+ }
+
+ // Copy body and comments, as the originals may be used elsewhere.
+ newBody := &ast.BlockStmt{
+ Lbrace: body.Lbrace,
+ List: body.List,
+ Rbrace: last.Pos(),
+ }
+ newComments := make([]*ast.CommentGroup, len(comments)-1)
+ copy(newComments, comments[:i])
+ copy(newComments[i:], comments[i+1:])
+ return newBody, newComments
+}
+
+// lastComment returns the last comment inside the provided block.
+func lastComment(b *ast.BlockStmt, c []*ast.CommentGroup) (i int, last *ast.CommentGroup) {
+ pos, end := b.Pos(), b.End()
+ for j, cg := range c {
+ if cg.Pos() < pos {
+ continue
+ }
+ if cg.End() > end {
+ break
+ }
+ i, last = j, cg
+ }
+ return
+}
diff --git a/src/go/doc/example_test.go b/src/go/doc/example_test.go
new file mode 100644
index 000000000..e154ea8bf
--- /dev/null
+++ b/src/go/doc/example_test.go
@@ -0,0 +1,191 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc_test
+
+import (
+ "bytes"
+ "go/doc"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "strings"
+ "testing"
+)
+
+const exampleTestFile = `
+package foo_test
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os/exec"
+)
+
+func ExampleHello() {
+ fmt.Println("Hello, world!")
+ // Output: Hello, world!
+}
+
+func ExampleImport() {
+ out, err := exec.Command("date").Output()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("The date is %s\n", out)
+}
+
+func ExampleKeyValue() {
+ v := struct {
+ a string
+ b int
+ }{
+ a: "A",
+ b: 1,
+ }
+ fmt.Print(v)
+ // Output: a: "A", b: 1
+}
+
+func ExampleKeyValueImport() {
+ f := flag.Flag{
+ Name: "play",
+ }
+ fmt.Print(f)
+ // Output: Name: "play"
+}
+
+var keyValueTopDecl = struct {
+ a string
+ b int
+}{
+ a: "B",
+ b: 2,
+}
+
+func ExampleKeyValueTopDecl() {
+ fmt.Print(keyValueTopDecl)
+}
+`
+
+var exampleTestCases = []struct {
+ Name, Play, Output string
+}{
+ {
+ Name: "Hello",
+ Play: exampleHelloPlay,
+ Output: "Hello, world!\n",
+ },
+ {
+ Name: "Import",
+ Play: exampleImportPlay,
+ },
+ {
+ Name: "KeyValue",
+ Play: exampleKeyValuePlay,
+ Output: "a: \"A\", b: 1\n",
+ },
+ {
+ Name: "KeyValueImport",
+ Play: exampleKeyValueImportPlay,
+ Output: "Name: \"play\"\n",
+ },
+ {
+ Name: "KeyValueTopDecl",
+ Play: "<nil>",
+ },
+}
+
+const exampleHelloPlay = `package main
+
+import (
+ "fmt"
+)
+
+func main() {
+ fmt.Println("Hello, world!")
+}
+`
+const exampleImportPlay = `package main
+
+import (
+ "fmt"
+ "log"
+ "os/exec"
+)
+
+func main() {
+ out, err := exec.Command("date").Output()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("The date is %s\n", out)
+}
+`
+
+const exampleKeyValuePlay = `package main
+
+import (
+ "fmt"
+)
+
+func main() {
+ v := struct {
+ a string
+ b int
+ }{
+ a: "A",
+ b: 1,
+ }
+ fmt.Print(v)
+}
+`
+
+const exampleKeyValueImportPlay = `package main
+
+import (
+ "flag"
+ "fmt"
+)
+
+func main() {
+ f := flag.Flag{
+ Name: "play",
+ }
+ fmt.Print(f)
+}
+`
+
+func TestExamples(t *testing.T) {
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "test.go", strings.NewReader(exampleTestFile), parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, e := range doc.Examples(file) {
+ c := exampleTestCases[i]
+ if e.Name != c.Name {
+ t.Errorf("got Name == %q, want %q", e.Name, c.Name)
+ }
+ if w := c.Play; w != "" {
+ var g string // rendered Play code, or "<nil>" if e.Play is nil
+ if e.Play == nil {
+ g = "<nil>"
+ } else {
+ var buf bytes.Buffer
+ if err := format.Node(&buf, fset, e.Play); err != nil {
+ t.Fatal(err)
+ }
+ g = buf.String()
+ }
+ if g != w {
+ t.Errorf("%s: got Play == %q, want %q", c.Name, g, w)
+ }
+ }
+ if g, w := e.Output, c.Output; g != w {
+ t.Errorf("%s: got Output == %q, want %q", c.Name, g, w)
+ }
+ }
+}
diff --git a/src/go/doc/exports.go b/src/go/doc/exports.go
new file mode 100644
index 000000000..ff01285d4
--- /dev/null
+++ b/src/go/doc/exports.go
@@ -0,0 +1,199 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements export filtering of an AST.
+
+package doc
+
+import "go/ast"
+
+// filterIdentList removes unexported names from list in place
+// and returns the resulting list.
+//
+func filterIdentList(list []*ast.Ident) []*ast.Ident {
+ j := 0
+ for _, x := range list {
+ if ast.IsExported(x.Name) {
+ list[j] = x
+ j++
+ }
+ }
+ return list[0:j]
+}
+
+// removeErrorField removes anonymous fields named "error" from an interface.
+// This is called when "error" has been determined to be a local name,
+// not the predeclared type.
+//
+func removeErrorField(ityp *ast.InterfaceType) {
+ list := ityp.Methods.List // we know that ityp.Methods != nil
+ j := 0
+ for _, field := range list {
+ keepField := true
+ if n := len(field.Names); n == 0 {
+ // anonymous field
+ if fname, _ := baseTypeName(field.Type); fname == "error" {
+ keepField = false
+ }
+ }
+ if keepField {
+ list[j] = field
+ j++
+ }
+ }
+ if j < len(list) {
+ ityp.Incomplete = true
+ }
+ ityp.Methods.List = list[0:j]
+}
+
+// filterFieldList removes unexported fields (field names) from the field list
+// in place and returns true if fields were removed. Anonymous fields are
+// recorded with the parent type. filterType is called with the types of
+// all remaining fields.
+//
+func (r *reader) filterFieldList(parent *namedType, fields *ast.FieldList, ityp *ast.InterfaceType) (removedFields bool) {
+ if fields == nil {
+ return
+ }
+ list := fields.List
+ j := 0
+ for _, field := range list {
+ keepField := false
+ if n := len(field.Names); n == 0 {
+ // anonymous field
+ fname := r.recordAnonymousField(parent, field.Type)
+ if ast.IsExported(fname) {
+ keepField = true
+ } else if ityp != nil && fname == "error" {
+ // possibly the predeclared error interface; keep
+ // it for now but remember this interface so that
+ // it can be fixed if error is also defined locally
+ keepField = true
+ r.remember(ityp)
+ }
+ } else {
+ field.Names = filterIdentList(field.Names)
+ if len(field.Names) < n {
+ removedFields = true
+ }
+ if len(field.Names) > 0 {
+ keepField = true
+ }
+ }
+ if keepField {
+ r.filterType(nil, field.Type)
+ list[j] = field
+ j++
+ }
+ }
+ if j < len(list) {
+ removedFields = true
+ }
+ fields.List = list[0:j]
+ return
+}
+
+// filterParamList applies filterType to each parameter type in fields.
+//
+func (r *reader) filterParamList(fields *ast.FieldList) {
+ if fields != nil {
+ for _, f := range fields.List {
+ r.filterType(nil, f.Type)
+ }
+ }
+}
+
+// filterType strips any unexported struct fields or method types from typ
+// in place. If fields (or methods) have been removed, the corresponding
+// struct or interface type has the Incomplete field set to true.
+//
+func (r *reader) filterType(parent *namedType, typ ast.Expr) {
+ switch t := typ.(type) {
+ case *ast.Ident:
+ // nothing to do
+ case *ast.ParenExpr:
+ r.filterType(nil, t.X)
+ case *ast.ArrayType:
+ r.filterType(nil, t.Elt)
+ case *ast.StructType:
+ if r.filterFieldList(parent, t.Fields, nil) {
+ t.Incomplete = true
+ }
+ case *ast.FuncType:
+ r.filterParamList(t.Params)
+ r.filterParamList(t.Results)
+ case *ast.InterfaceType:
+ if r.filterFieldList(parent, t.Methods, t) {
+ t.Incomplete = true
+ }
+ case *ast.MapType:
+ r.filterType(nil, t.Key)
+ r.filterType(nil, t.Value)
+ case *ast.ChanType:
+ r.filterType(nil, t.Value)
+ }
+}
+
+func (r *reader) filterSpec(spec ast.Spec) bool {
+ switch s := spec.(type) {
+ case *ast.ImportSpec:
+ // always keep imports so we can collect them
+ return true
+ case *ast.ValueSpec:
+ s.Names = filterIdentList(s.Names)
+ if len(s.Names) > 0 {
+ r.filterType(nil, s.Type)
+ return true
+ }
+ case *ast.TypeSpec:
+ if name := s.Name.Name; ast.IsExported(name) {
+ r.filterType(r.lookupType(s.Name.Name), s.Type)
+ return true
+ } else if name == "error" {
+ // special case: remember that error is declared locally
+ r.errorDecl = true
+ }
+ }
+ return false
+}
+
+func (r *reader) filterSpecList(list []ast.Spec) []ast.Spec {
+ j := 0
+ for _, s := range list {
+ if r.filterSpec(s) {
+ list[j] = s
+ j++
+ }
+ }
+ return list[0:j]
+}
+
+func (r *reader) filterDecl(decl ast.Decl) bool {
+ switch d := decl.(type) {
+ case *ast.GenDecl:
+ d.Specs = r.filterSpecList(d.Specs)
+ return len(d.Specs) > 0
+ case *ast.FuncDecl:
+ // ok to filter these methods early because any
+ // conflicting method will be filtered here, too -
+ // thus, removing these methods early will not lead
+ // to the false removal of possible conflicts
+ return ast.IsExported(d.Name.Name)
+ }
+ return false
+}
+
+// fileExports removes unexported declarations from src in place.
+//
+func (r *reader) fileExports(src *ast.File) {
+ j := 0
+ for _, d := range src.Decls {
+ if r.filterDecl(d) {
+ src.Decls[j] = d
+ j++
+ }
+ }
+ src.Decls = src.Decls[0:j]
+}
diff --git a/src/go/doc/filter.go b/src/go/doc/filter.go
new file mode 100644
index 000000000..a6f243f33
--- /dev/null
+++ b/src/go/doc/filter.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import "go/ast"
+
+type Filter func(string) bool
+
+func matchFields(fields *ast.FieldList, f Filter) bool {
+ if fields != nil {
+ for _, field := range fields.List {
+ for _, name := range field.Names {
+ if f(name.Name) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func matchDecl(d *ast.GenDecl, f Filter) bool {
+ for _, d := range d.Specs {
+ switch v := d.(type) {
+ case *ast.ValueSpec:
+ for _, name := range v.Names {
+ if f(name.Name) {
+ return true
+ }
+ }
+ case *ast.TypeSpec:
+ if f(v.Name.Name) {
+ return true
+ }
+ switch t := v.Type.(type) {
+ case *ast.StructType:
+ if matchFields(t.Fields, f) {
+ return true
+ }
+ case *ast.InterfaceType:
+ if matchFields(t.Methods, f) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func filterValues(a []*Value, f Filter) []*Value {
+ w := 0
+ for _, vd := range a {
+ if matchDecl(vd.Decl, f) {
+ a[w] = vd
+ w++
+ }
+ }
+ return a[0:w]
+}
+
+func filterFuncs(a []*Func, f Filter) []*Func {
+ w := 0
+ for _, fd := range a {
+ if f(fd.Name) {
+ a[w] = fd
+ w++
+ }
+ }
+ return a[0:w]
+}
+
+func filterTypes(a []*Type, f Filter) []*Type {
+ w := 0
+ for _, td := range a {
+ n := 0 // number of matches
+ if matchDecl(td.Decl, f) {
+ n = 1
+ } else {
+ // type name doesn't match, but we may have matching consts, vars, factories or methods
+ td.Consts = filterValues(td.Consts, f)
+ td.Vars = filterValues(td.Vars, f)
+ td.Funcs = filterFuncs(td.Funcs, f)
+ td.Methods = filterFuncs(td.Methods, f)
+ n += len(td.Consts) + len(td.Vars) + len(td.Funcs) + len(td.Methods)
+ }
+ if n > 0 {
+ a[w] = td
+ w++
+ }
+ }
+ return a[0:w]
+}
+
+// Filter eliminates documentation for names that don't pass through the filter f.
+// TODO(gri): Recognize "Type.Method" as a name.
+//
+func (p *Package) Filter(f Filter) {
+ p.Consts = filterValues(p.Consts, f)
+ p.Vars = filterValues(p.Vars, f)
+ p.Types = filterTypes(p.Types, f)
+ p.Funcs = filterFuncs(p.Funcs, f)
+ p.Doc = "" // don't show top-level package doc
+}
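+
+// Sketch of use (the predicate is illustrative):
+//
+//	p.Filter(func(name string) bool { return strings.HasPrefix(name, "Read") })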
diff --git a/src/go/doc/headscan.go b/src/go/doc/headscan.go
new file mode 100644
index 000000000..1ccaa1581
--- /dev/null
+++ b/src/go/doc/headscan.go
@@ -0,0 +1,114 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+ The headscan command extracts comment headings from package files;
+ it is used to detect false positives which may require an adjustment
+ to the comment formatting heuristics in comment.go.
+
+ Usage: headscan [-root root_directory]
+
+ By default, the $GOROOT/src directory is scanned.
+*/
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/doc"
+ "go/parser"
+ "go/token"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+var (
+ root = flag.String("root", filepath.Join(runtime.GOROOT(), "src"), "root of filesystem tree to scan")
+ verbose = flag.Bool("v", false, "verbose mode")
+)
+
+// ToHTML in comment.go assigns a (possibly blank) ID to each heading
+var html_h = regexp.MustCompile(`<h3 id="[^"]*">`)
+
+const html_endh = "</h3>\n"
+
+func isGoFile(fi os.FileInfo) bool {
+ return strings.HasSuffix(fi.Name(), ".go") &&
+ !strings.HasSuffix(fi.Name(), "_test.go")
+}
+
+func appendHeadings(list []string, comment string) []string {
+ var buf bytes.Buffer
+ doc.ToHTML(&buf, comment, nil)
+ for s := buf.String(); ; {
+ loc := html_h.FindStringIndex(s)
+ if len(loc) == 0 {
+ break
+ }
+ i := loc[1]
+ j := strings.Index(s, html_endh)
+ if j < 0 {
+ list = append(list, s[i:]) // incorrect HTML
+ break
+ }
+ list = append(list, s[i:j])
+ s = s[j+len(html_endh):]
+ }
+ return list
+}
+
+func main() {
+ flag.Parse()
+ fset := token.NewFileSet()
+ nheadings := 0
+ err := filepath.Walk(*root, func(path string, fi os.FileInfo, err error) error {
+ if !fi.IsDir() {
+ return nil
+ }
+ pkgs, err := parser.ParseDir(fset, path, isGoFile, parser.ParseComments)
+ if err != nil {
+ if *verbose {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ return nil
+ }
+ for _, pkg := range pkgs {
+ d := doc.New(pkg, path, doc.Mode(0))
+ list := appendHeadings(nil, d.Doc)
+ for _, d := range d.Consts {
+ list = appendHeadings(list, d.Doc)
+ }
+ for _, d := range d.Types {
+ list = appendHeadings(list, d.Doc)
+ }
+ for _, d := range d.Vars {
+ list = appendHeadings(list, d.Doc)
+ }
+ for _, d := range d.Funcs {
+ list = appendHeadings(list, d.Doc)
+ }
+ if len(list) > 0 {
+ // directories may contain multiple packages;
+ // print path and package name
+ fmt.Printf("%s (package %s)\n", path, pkg.Name)
+ for _, h := range list {
+ fmt.Printf("\t%s\n", h)
+ }
+ nheadings += len(list)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ fmt.Println(nheadings, "headings found")
+}
diff --git a/src/go/doc/reader.go b/src/go/doc/reader.go
new file mode 100644
index 000000000..ed82c47cd
--- /dev/null
+++ b/src/go/doc/reader.go
@@ -0,0 +1,853 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import (
+ "go/ast"
+ "go/token"
+ "regexp"
+ "sort"
+ "strconv"
+)
+
+// ----------------------------------------------------------------------------
+// function/method sets
+//
+// Internally, we treat functions like methods and collect them in method sets.
+
+// A methodSet describes a set of methods. Entries where Decl == nil are conflict
+// entries (more than one method with the same name at the same embedding level).
+//
+type methodSet map[string]*Func
+
+// recvString returns a string representation of recv of the
+// form "T", "*T", or "BADRECV" (if not a proper receiver type).
+//
+func recvString(recv ast.Expr) string {
+ switch t := recv.(type) {
+ case *ast.Ident:
+ return t.Name
+ case *ast.StarExpr:
+ return "*" + recvString(t.X)
+ }
+ return "BADRECV"
+}
+
+// set creates the corresponding Func for f and adds it to mset.
+// If there are multiple f's with the same name, set keeps the first
+// one with documentation; conflicts are ignored.
+//
+func (mset methodSet) set(f *ast.FuncDecl) {
+ name := f.Name.Name
+ if g := mset[name]; g != nil && g.Doc != "" {
+ // A function with the same name has already been registered;
+ // since it has documentation, assume f is simply another
+ // implementation and ignore it. This does not happen if the
+ // caller is using go/build.ScanDir to determine the list of
+ // files implementing a package.
+ return
+ }
+ // function doesn't exist or has no documentation; use f
+ recv := ""
+ if f.Recv != nil {
+ var typ ast.Expr
+ // be careful in case of incorrect ASTs
+ if list := f.Recv.List; len(list) == 1 {
+ typ = list[0].Type
+ }
+ recv = recvString(typ)
+ }
+ mset[name] = &Func{
+ Doc: f.Doc.Text(),
+ Name: name,
+ Decl: f,
+ Recv: recv,
+ Orig: recv,
+ }
+ f.Doc = nil // doc consumed - remove from AST
+}
+
+// add adds method m to the method set; m is ignored if the method set
+// already contains a method with the same name at the same or a higher
+// level than m.
+//
+func (mset methodSet) add(m *Func) {
+ old := mset[m.Name]
+ if old == nil || m.Level < old.Level {
+ mset[m.Name] = m
+ return
+ }
+ if old != nil && m.Level == old.Level {
+ // conflict - mark it using a method with nil Decl
+ mset[m.Name] = &Func{
+ Name: m.Name,
+ Level: m.Level,
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Named types
+
+// baseTypeName returns the name of the base type of x (or "")
+// and reports whether the type is imported.
+//
+func baseTypeName(x ast.Expr) (name string, imported bool) {
+ switch t := x.(type) {
+ case *ast.Ident:
+ return t.Name, false
+ case *ast.SelectorExpr:
+ if _, ok := t.X.(*ast.Ident); ok {
+ // only possible for qualified type names;
+ // assume type is imported
+ return t.Sel.Name, true
+ }
+ case *ast.StarExpr:
+ return baseTypeName(t.X)
+ }
+ return
+}
+
+// An embeddedSet describes a set of embedded types.
+type embeddedSet map[*namedType]bool
+
+// A namedType represents a named unqualified (package local, or possibly
+// predeclared) type. The namedType for a type name is always found via
+// reader.lookupType.
+//
+type namedType struct {
+ doc string // doc comment for type
+ name string // type name
+ decl *ast.GenDecl // nil if declaration hasn't been seen yet
+
+ isEmbedded bool // true if this type is embedded
+ isStruct bool // true if this type is a struct
+ embedded embeddedSet // set of embedded types; true if embedded via a pointer
+
+ // associated declarations
+ values []*Value // consts and vars
+ funcs methodSet
+ methods methodSet
+}
+
+// ----------------------------------------------------------------------------
+// AST reader
+
+// reader accumulates documentation for a single package.
+// It modifies the AST: Comments (declaration documentation)
+// that have been collected by the reader are set to nil
+// in the respective AST nodes so that they are not printed
+// twice (once when printing the documentation and once when
+// printing the corresponding AST node).
+//
+type reader struct {
+ mode Mode
+
+ // package properties
+ doc string // package documentation, if any
+ filenames []string
+ notes map[string][]*Note
+
+ // declarations
+ imports map[string]int
+ values []*Value // consts and vars
+ types map[string]*namedType
+ funcs methodSet
+
+ // support for package-local error type declarations
+ errorDecl bool // if set, type "error" was declared locally
+ fixlist []*ast.InterfaceType // list of interfaces containing anonymous field "error"
+}
+
+func (r *reader) isVisible(name string) bool {
+ return r.mode&AllDecls != 0 || ast.IsExported(name)
+}
+
+// lookupType returns the base type with the given name.
+// If the base type has not been encountered yet, a new
+// type with the given name but no associated declaration
+// is added to the type map.
+//
+func (r *reader) lookupType(name string) *namedType {
+ if name == "" || name == "_" {
+ return nil // no type docs for anonymous types
+ }
+ if typ, found := r.types[name]; found {
+ return typ
+ }
+ // type not found - add one without declaration
+ typ := &namedType{
+ name: name,
+ embedded: make(embeddedSet),
+ funcs: make(methodSet),
+ methods: make(methodSet),
+ }
+ r.types[name] = typ
+ return typ
+}
+
+// recordAnonymousField registers fieldType as the type of an
+// anonymous field in the parent type. If the field is imported
+// (qualified name) or the parent is nil, the field is ignored.
+// The function returns the field name.
+//
+func (r *reader) recordAnonymousField(parent *namedType, fieldType ast.Expr) (fname string) {
+ fname, imp := baseTypeName(fieldType)
+ if parent == nil || imp {
+ return
+ }
+ if ftype := r.lookupType(fname); ftype != nil {
+ ftype.isEmbedded = true
+ _, ptr := fieldType.(*ast.StarExpr)
+ parent.embedded[ftype] = ptr
+ }
+ return
+}
+
+func (r *reader) readDoc(comment *ast.CommentGroup) {
+ // By convention there should be only one package comment
+	// but collect all of them if there is more than one.
+ text := comment.Text()
+ if r.doc == "" {
+ r.doc = text
+ return
+ }
+ r.doc += "\n" + text
+}
+
+func (r *reader) remember(typ *ast.InterfaceType) {
+ r.fixlist = append(r.fixlist, typ)
+}
+
+func specNames(specs []ast.Spec) []string {
+ names := make([]string, 0, len(specs)) // reasonable estimate
+ for _, s := range specs {
+ // s guaranteed to be an *ast.ValueSpec by readValue
+ for _, ident := range s.(*ast.ValueSpec).Names {
+ names = append(names, ident.Name)
+ }
+ }
+ return names
+}
+
+// readValue processes a const or var declaration.
+//
+func (r *reader) readValue(decl *ast.GenDecl) {
+ // determine if decl should be associated with a type
+ // Heuristic: For each typed entry, determine the type name, if any.
+ // If there is exactly one type name that is sufficiently
+ // frequent, associate the decl with the respective type.
+ domName := ""
+ domFreq := 0
+ prev := ""
+ n := 0
+ for _, spec := range decl.Specs {
+ s, ok := spec.(*ast.ValueSpec)
+ if !ok {
+ continue // should not happen, but be conservative
+ }
+ name := ""
+ switch {
+ case s.Type != nil:
+ // a type is present; determine its name
+ if n, imp := baseTypeName(s.Type); !imp {
+ name = n
+ }
+ case decl.Tok == token.CONST:
+ // no type is present but we have a constant declaration;
+ // use the previous type name (w/o more type information
+ // we cannot handle the case of unnamed variables with
+ // initializer expressions except for some trivial cases)
+ name = prev
+ }
+ if name != "" {
+ // entry has a named type
+ if domName != "" && domName != name {
+ // more than one type name - do not associate
+ // with any type
+ domName = ""
+ break
+ }
+ domName = name
+ domFreq++
+ }
+ prev = name
+ n++
+ }
+
+ // nothing to do w/o a legal declaration
+ if n == 0 {
+ return
+ }
+
+ // determine values list with which to associate the Value for this decl
+ values := &r.values
+ const threshold = 0.75
+ if domName != "" && r.isVisible(domName) && domFreq >= int(float64(len(decl.Specs))*threshold) {
+ // typed entries are sufficiently frequent
+ if typ := r.lookupType(domName); typ != nil {
+ values = &typ.values // associate with that type
+ }
+ }
+
+ *values = append(*values, &Value{
+ Doc: decl.Doc.Text(),
+ Names: specNames(decl.Specs),
+ Decl: decl,
+ order: len(*values),
+ })
+ decl.Doc = nil // doc consumed - remove from AST
+}
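+
+// For example (editor's sketch, hypothetical declarations): in
+//
+//	type Weekday int
+//
+//	const (
+//		Sunday Weekday = iota
+//		Monday
+//	)
+//
+// the first spec is explicitly typed and the second inherits the name via
+// prev, so domFreq == len(decl.Specs) meets the 75% threshold and the whole
+// group is attached to type Weekday instead of the package-level list.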
+
+// fields returns a struct's fields or an interface's methods.
+//
+func fields(typ ast.Expr) (list []*ast.Field, isStruct bool) {
+ var fields *ast.FieldList
+ switch t := typ.(type) {
+ case *ast.StructType:
+ fields = t.Fields
+ isStruct = true
+ case *ast.InterfaceType:
+ fields = t.Methods
+ }
+ if fields != nil {
+ list = fields.List
+ }
+ return
+}
+
+// readType processes a type declaration.
+//
+func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) {
+ typ := r.lookupType(spec.Name.Name)
+ if typ == nil {
+ return // no name or blank name - ignore the type
+ }
+
+ // A type should be added at most once, so typ.decl
+ // should be nil - if it is not, simply overwrite it.
+ typ.decl = decl
+
+ // compute documentation
+ doc := spec.Doc
+ spec.Doc = nil // doc consumed - remove from AST
+ if doc == nil {
+ // no doc associated with the spec, use the declaration doc, if any
+ doc = decl.Doc
+ }
+ decl.Doc = nil // doc consumed - remove from AST
+ typ.doc = doc.Text()
+
+ // record anonymous fields (they may contribute methods)
+ // (some fields may have been recorded already when filtering
+ // exports, but that's ok)
+ var list []*ast.Field
+ list, typ.isStruct = fields(spec.Type)
+ for _, field := range list {
+ if len(field.Names) == 0 {
+ r.recordAnonymousField(typ, field.Type)
+ }
+ }
+}
+
+// readFunc processes a func or method declaration.
+//
+func (r *reader) readFunc(fun *ast.FuncDecl) {
+ // strip function body
+ fun.Body = nil
+
+ // associate methods with the receiver type, if any
+ if fun.Recv != nil {
+ // method
+ recvTypeName, imp := baseTypeName(fun.Recv.List[0].Type)
+ if imp {
+ // should not happen (incorrect AST);
+ // don't show this method
+ return
+ }
+ if typ := r.lookupType(recvTypeName); typ != nil {
+ typ.methods.set(fun)
+ }
+ // otherwise ignore the method
+ // TODO(gri): There may be exported methods of non-exported types
+ // that can be called because of exported values (consts, vars, or
+ // function results) of that type. Could determine if that is the
+ // case and then show those methods in an appropriate section.
+ return
+ }
+
+ // associate factory functions with the first visible result type, if any
+ if fun.Type.Results.NumFields() >= 1 {
+ res := fun.Type.Results.List[0]
+ if len(res.Names) <= 1 {
+ // exactly one (named or anonymous) result associated
+ // with the first type in result signature (there may
+ // be more than one result)
+ if n, imp := baseTypeName(res.Type); !imp && r.isVisible(n) {
+ if typ := r.lookupType(n); typ != nil {
+ // associate function with typ
+ typ.funcs.set(fun)
+ return
+ }
+ }
+ }
+ }
+
+ // just an ordinary function
+ r.funcs.set(fun)
+}
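+
+// For instance (see testdata/b.go below): func UintFactory() uint is
+// associated with type uint when that type is declared locally (and
+// visible), while func NotAFactory() int always stays at the package
+// level: int is never declared in the package, so cleanupTypes moves
+// the function back to the top level.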
+
+var (
+ noteMarker = `([A-Z][A-Z]+)\(([^)]+)\):?` // MARKER(uid), MARKER at least 2 chars, uid at least 1 char
+ noteMarkerRx = regexp.MustCompile(`^[ \t]*` + noteMarker) // MARKER(uid) at text start
+ noteCommentRx = regexp.MustCompile(`^/[/*][ \t]*` + noteMarker) // MARKER(uid) at comment start
+)
+
+// readNote collects a single note from a sequence of comments.
+//
+func (r *reader) readNote(list []*ast.Comment) {
+ text := (&ast.CommentGroup{List: list}).Text()
+ if m := noteMarkerRx.FindStringSubmatchIndex(text); m != nil {
+ // The note body starts after the marker.
+ // We remove any formatting so that we don't
+ // get spurious line breaks/indentation when
+ // showing the TODO body.
+ body := clean(text[m[1]:], keepNL)
+ if body != "" {
+ marker := text[m[2]:m[3]]
+ r.notes[marker] = append(r.notes[marker], &Note{
+ Pos: list[0].Pos(),
+ End: list[len(list)-1].End(),
+ UID: text[m[4]:m[5]],
+ Body: body,
+ })
+ }
+ }
+}
+
+// readNotes extracts notes from comments.
+// A note must start at the beginning of a comment with "MARKER(uid):"
+// and is followed by the note body (e.g., "// BUG(gri): fix this").
+// The note ends at the end of the comment group or at the start of
+// another note in the same comment group, whichever comes first.
+//
+func (r *reader) readNotes(comments []*ast.CommentGroup) {
+ for _, group := range comments {
+ i := -1 // comment index of most recent note start, valid if >= 0
+ list := group.List
+ for j, c := range list {
+ if noteCommentRx.MatchString(c.Text) {
+ if i >= 0 {
+ r.readNote(list[i:j])
+ }
+ i = j
+ }
+ }
+ if i >= 0 {
+ r.readNote(list[i:])
+ }
+ }
+}
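+
+// For example (editor's sketch): the comment group
+//
+//	// NOTE(foo): first note
+//	// continues here
+//	// NOTE(bar): second note
+//
+// produces two entries under r.notes["NOTE"]: UID "foo" with body
+// "first note\ncontinues here", and UID "bar" with body "second note".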
+
+// readFile adds the AST for a source file to the reader.
+//
+func (r *reader) readFile(src *ast.File) {
+ // add package documentation
+ if src.Doc != nil {
+ r.readDoc(src.Doc)
+ src.Doc = nil // doc consumed - remove from AST
+ }
+
+ // add all declarations
+ for _, decl := range src.Decls {
+ switch d := decl.(type) {
+ case *ast.GenDecl:
+ switch d.Tok {
+ case token.IMPORT:
+ // imports are handled individually
+ for _, spec := range d.Specs {
+ if s, ok := spec.(*ast.ImportSpec); ok {
+ if import_, err := strconv.Unquote(s.Path.Value); err == nil {
+ r.imports[import_] = 1
+ }
+ }
+ }
+ case token.CONST, token.VAR:
+ // constants and variables are always handled as a group
+ r.readValue(d)
+ case token.TYPE:
+ // types are handled individually
+ if len(d.Specs) == 1 && !d.Lparen.IsValid() {
+ // common case: single declaration w/o parentheses
+ // (if a single declaration is parenthesized,
+ // create a new fake declaration below, so that
+ // go/doc type declarations always appear w/o
+ // parentheses)
+ if s, ok := d.Specs[0].(*ast.TypeSpec); ok {
+ r.readType(d, s)
+ }
+ break
+ }
+ for _, spec := range d.Specs {
+ if s, ok := spec.(*ast.TypeSpec); ok {
+ // use an individual (possibly fake) declaration
+ // for each type; this also ensures that each type
+ // gets to (re-)use the declaration documentation
+ // if there's none associated with the spec itself
+ fake := &ast.GenDecl{
+ Doc: d.Doc,
+ // don't use the existing TokPos because it
+ // will lead to the wrong selection range for
+ // the fake declaration if there are more
+ // than one type in the group (this affects
+ // src/cmd/godoc/godoc.go's posLink_urlFunc)
+ TokPos: s.Pos(),
+ Tok: token.TYPE,
+ Specs: []ast.Spec{s},
+ }
+ r.readType(fake, s)
+ }
+ }
+ }
+ case *ast.FuncDecl:
+ r.readFunc(d)
+ }
+ }
+
+ // collect MARKER(...): annotations
+ r.readNotes(src.Comments)
+ src.Comments = nil // consumed unassociated comments - remove from AST
+}
+
+func (r *reader) readPackage(pkg *ast.Package, mode Mode) {
+ // initialize reader
+ r.filenames = make([]string, len(pkg.Files))
+ r.imports = make(map[string]int)
+ r.mode = mode
+ r.types = make(map[string]*namedType)
+ r.funcs = make(methodSet)
+ r.notes = make(map[string][]*Note)
+
+ // sort package files before reading them so that the
+ // result does not depend on map iteration order
+ i := 0
+ for filename := range pkg.Files {
+ r.filenames[i] = filename
+ i++
+ }
+ sort.Strings(r.filenames)
+
+ // process files in sorted order
+ for _, filename := range r.filenames {
+ f := pkg.Files[filename]
+ if mode&AllDecls == 0 {
+ r.fileExports(f)
+ }
+ r.readFile(f)
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+func customizeRecv(f *Func, recvTypeName string, embeddedIsPtr bool, level int) *Func {
+ if f == nil || f.Decl == nil || f.Decl.Recv == nil || len(f.Decl.Recv.List) != 1 {
+ return f // shouldn't happen, but be safe
+ }
+
+ // copy existing receiver field and set new type
+ newField := *f.Decl.Recv.List[0]
+ origPos := newField.Type.Pos()
+ _, origRecvIsPtr := newField.Type.(*ast.StarExpr)
+ newIdent := &ast.Ident{NamePos: origPos, Name: recvTypeName}
+ var typ ast.Expr = newIdent
+ if !embeddedIsPtr && origRecvIsPtr {
+ newIdent.NamePos++ // '*' is one character
+ typ = &ast.StarExpr{Star: origPos, X: newIdent}
+ }
+ newField.Type = typ
+
+ // copy existing receiver field list and set new receiver field
+ newFieldList := *f.Decl.Recv
+ newFieldList.List = []*ast.Field{&newField}
+
+ // copy existing function declaration and set new receiver field list
+ newFuncDecl := *f.Decl
+ newFuncDecl.Recv = &newFieldList
+
+ // copy existing function documentation and set new declaration
+ newF := *f
+ newF.Decl = &newFuncDecl
+ newF.Recv = recvString(typ)
+ // the Orig field never changes
+ newF.Level = level
+
+ return &newF
+}
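+
+// For instance (see testdata/e.go below): with type T5 struct{ T4 } and
+// func (*T4) M(), the embedded method is presented as func (*T5) M(),
+// since value embedding preserves the original receiver's pointer-ness.
+// With type V2 struct{ *V3 } reaching func (*V4) M() through pointer
+// embeddings, embeddedIsPtr is sticky and the method appears as func (V2) M().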
+
+// collectEmbeddedMethods collects the embedded methods of typ in mset.
+//
+func (r *reader) collectEmbeddedMethods(mset methodSet, typ *namedType, recvTypeName string, embeddedIsPtr bool, level int, visited embeddedSet) {
+ visited[typ] = true
+ for embedded, isPtr := range typ.embedded {
+ // Once an embedded type is embedded as a pointer type
+ // all embedded types in those types are treated like
+ // pointer types for the purpose of the receiver type
+ // computation; i.e., embeddedIsPtr is sticky for this
+ // embedding hierarchy.
+ thisEmbeddedIsPtr := embeddedIsPtr || isPtr
+ for _, m := range embedded.methods {
+ // only top-level methods are embedded
+ if m.Level == 0 {
+ mset.add(customizeRecv(m, recvTypeName, thisEmbeddedIsPtr, level))
+ }
+ }
+ if !visited[embedded] {
+ r.collectEmbeddedMethods(mset, embedded, recvTypeName, thisEmbeddedIsPtr, level+1, visited)
+ }
+ }
+ delete(visited, typ)
+}
+
+// computeMethodSets determines the actual method sets for each type encountered.
+//
+func (r *reader) computeMethodSets() {
+ for _, t := range r.types {
+ // collect embedded methods for t
+ if t.isStruct {
+ // struct
+ r.collectEmbeddedMethods(t.methods, t, t.name, false, 1, make(embeddedSet))
+ } else {
+ // interface
+ // TODO(gri) fix this
+ }
+ }
+
+	// if error was declared locally, don't treat it as an exported field anymore
+ if r.errorDecl {
+ for _, ityp := range r.fixlist {
+ removeErrorField(ityp)
+ }
+ }
+}
+
+// cleanupTypes removes the association of functions and methods with
+// types that have no declaration. Instead, these functions and methods
+// are shown at the package level. It also removes types with missing
+// declarations or which are not visible.
+//
+func (r *reader) cleanupTypes() {
+ for _, t := range r.types {
+ visible := r.isVisible(t.name)
+ if t.decl == nil && (predeclaredTypes[t.name] || t.isEmbedded && visible) {
+ // t.name is a predeclared type (and was not redeclared in this package),
+ // or it was embedded somewhere but its declaration is missing (because
+ // the AST is incomplete): move any associated values, funcs, and methods
+ // back to the top-level so that they are not lost.
+ // 1) move values
+ r.values = append(r.values, t.values...)
+ // 2) move factory functions
+ for name, f := range t.funcs {
+ // in a correct AST, package-level function names
+ // are all different - no need to check for conflicts
+ r.funcs[name] = f
+ }
+ // 3) move methods
+ for name, m := range t.methods {
+ // don't overwrite functions with the same name - drop them
+ if _, found := r.funcs[name]; !found {
+ r.funcs[name] = m
+ }
+ }
+ }
+ // remove types w/o declaration or which are not visible
+ if t.decl == nil || !visible {
+ delete(r.types, t.name)
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Sorting
+
+type data struct {
+ n int
+ swap func(i, j int)
+ less func(i, j int) bool
+}
+
+func (d *data) Len() int { return d.n }
+func (d *data) Swap(i, j int) { d.swap(i, j) }
+func (d *data) Less(i, j int) bool { return d.less(i, j) }
+
+// sortBy is a helper function for sorting
+func sortBy(less func(i, j int) bool, swap func(i, j int), n int) {
+ sort.Sort(&data{n, swap, less})
+}
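+
+// Typical use (as in sortedTypes below), avoiding a dedicated
+// sort.Interface implementation per slice type:
+//
+//	sortBy(
+//		func(i, j int) bool { return list[i].Name < list[j].Name },
+//		func(i, j int) { list[i], list[j] = list[j], list[i] },
+//		len(list),
+//	)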
+
+func sortedKeys(m map[string]int) []string {
+ list := make([]string, len(m))
+ i := 0
+ for key := range m {
+ list[i] = key
+ i++
+ }
+ sort.Strings(list)
+ return list
+}
+
+// sortingName returns the name to use when sorting d into place.
+//
+func sortingName(d *ast.GenDecl) string {
+ if len(d.Specs) == 1 {
+ if s, ok := d.Specs[0].(*ast.ValueSpec); ok {
+ return s.Names[0].Name
+ }
+ }
+ return ""
+}
+
+func sortedValues(m []*Value, tok token.Token) []*Value {
+ list := make([]*Value, len(m)) // big enough in any case
+ i := 0
+ for _, val := range m {
+ if val.Decl.Tok == tok {
+ list[i] = val
+ i++
+ }
+ }
+ list = list[0:i]
+
+ sortBy(
+ func(i, j int) bool {
+ if ni, nj := sortingName(list[i].Decl), sortingName(list[j].Decl); ni != nj {
+ return ni < nj
+ }
+ return list[i].order < list[j].order
+ },
+ func(i, j int) { list[i], list[j] = list[j], list[i] },
+ len(list),
+ )
+
+ return list
+}
+
+func sortedTypes(m map[string]*namedType, allMethods bool) []*Type {
+ list := make([]*Type, len(m))
+ i := 0
+ for _, t := range m {
+ list[i] = &Type{
+ Doc: t.doc,
+ Name: t.name,
+ Decl: t.decl,
+ Consts: sortedValues(t.values, token.CONST),
+ Vars: sortedValues(t.values, token.VAR),
+ Funcs: sortedFuncs(t.funcs, true),
+ Methods: sortedFuncs(t.methods, allMethods),
+ }
+ i++
+ }
+
+ sortBy(
+ func(i, j int) bool { return list[i].Name < list[j].Name },
+ func(i, j int) { list[i], list[j] = list[j], list[i] },
+ len(list),
+ )
+
+ return list
+}
+
+func removeStar(s string) string {
+ if len(s) > 0 && s[0] == '*' {
+ return s[1:]
+ }
+ return s
+}
+
+func sortedFuncs(m methodSet, allMethods bool) []*Func {
+ list := make([]*Func, len(m))
+ i := 0
+ for _, m := range m {
+ // determine which methods to include
+ switch {
+ case m.Decl == nil:
+ // exclude conflict entry
+ case allMethods, m.Level == 0, !ast.IsExported(removeStar(m.Orig)):
+ // forced inclusion, method not embedded, or method
+ // embedded but original receiver type not exported
+ list[i] = m
+ i++
+ }
+ }
+ list = list[0:i]
+ sortBy(
+ func(i, j int) bool { return list[i].Name < list[j].Name },
+ func(i, j int) { list[i], list[j] = list[j], list[i] },
+ len(list),
+ )
+ return list
+}
+
+// noteBodies returns a list of note body strings given a list of notes.
+// This is only used to populate the deprecated Package.Bugs field.
+//
+func noteBodies(notes []*Note) []string {
+ var list []string
+ for _, n := range notes {
+ list = append(list, n.Body)
+ }
+ return list
+}
+
+// ----------------------------------------------------------------------------
+// Predeclared identifiers
+
+var predeclaredTypes = map[string]bool{
+ "bool": true,
+ "byte": true,
+ "complex64": true,
+ "complex128": true,
+ "error": true,
+ "float32": true,
+ "float64": true,
+ "int": true,
+ "int8": true,
+ "int16": true,
+ "int32": true,
+ "int64": true,
+ "rune": true,
+ "string": true,
+ "uint": true,
+ "uint8": true,
+ "uint16": true,
+ "uint32": true,
+ "uint64": true,
+ "uintptr": true,
+}
+
+var predeclaredFuncs = map[string]bool{
+ "append": true,
+ "cap": true,
+ "close": true,
+ "complex": true,
+ "copy": true,
+ "delete": true,
+ "imag": true,
+ "len": true,
+ "make": true,
+ "new": true,
+ "panic": true,
+ "print": true,
+ "println": true,
+ "real": true,
+ "recover": true,
+}
+
+var predeclaredConstants = map[string]bool{
+ "false": true,
+ "iota": true,
+ "nil": true,
+ "true": true,
+}
diff --git a/src/go/doc/synopsis.go b/src/go/doc/synopsis.go
new file mode 100644
index 000000000..c90080b7c
--- /dev/null
+++ b/src/go/doc/synopsis.go
@@ -0,0 +1,82 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import (
+ "strings"
+ "unicode"
+)
+
+// firstSentenceLen returns the length of the first sentence in s.
+// The sentence ends after the first period followed by space and
+// not preceded by exactly one uppercase letter.
+//
+func firstSentenceLen(s string) int {
+ var ppp, pp, p rune
+ for i, q := range s {
+ if q == '\n' || q == '\r' || q == '\t' {
+ q = ' '
+ }
+ if q == ' ' && p == '.' && (!unicode.IsUpper(pp) || unicode.IsUpper(ppp)) {
+ return i
+ }
+		if p == '。' || p == '．' { // an ideographic or fullwidth full stop always ends the sentence
+ return i
+ }
+ ppp, pp, p = pp, p, q
+ }
+ return len(s)
+}
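+
+// For example (values mirrored in synopsis_test.go):
+//
+//	firstSentenceLen("foo.bar") == 7                                    // no space after the '.'
+//	firstSentenceLen("Package poems by T.S.Eliot. To rhyme...") == 27   // "t." ends the sentence
+//	firstSentenceLen("Package poems by T. S. Eliot. To rhyme...") == 29 // "T. ", "S. " do not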
+
+const (
+	keepNL = 1 << iota // flag for clean: pass newlines through instead of blanking them
+)
+
+// clean replaces each sequence of space, \n, \r, or \t characters
+// with a single space and removes any trailing and leading spaces.
+// If the keepNL flag is set, newline characters are passed through
+// instead of being changed to spaces.
+func clean(s string, flags int) string {
+ var b []byte
+ p := byte(' ')
+ for i := 0; i < len(s); i++ {
+ q := s[i]
+		if (flags&keepNL) == 0 && q == '\n' || q == '\r' || q == '\t' { // '&&' binds tighter: \r and \t are always blanked
+ q = ' '
+ }
+ if q != ' ' || p != ' ' {
+ b = append(b, q)
+ p = q
+ }
+ }
+ // remove trailing blank, if any
+ if n := len(b); n > 0 && p == ' ' {
+ b = b[0 : n-1]
+ }
+ return string(b)
+}
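+
+// For illustration (editor's sketch):
+//
+//	clean(" a\t b\r\n", 0)  // == "a b"
+//	clean("a\nb", keepNL)   // == "a\nb" (newline preserved)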
+
+// Synopsis returns a cleaned version of the first sentence in s.
+// That sentence ends after the first period followed by space and
+// not preceded by exactly one uppercase letter. The result string
+// has no \n, \r, or \t characters and uses only single spaces between
+// words. If s starts with any of the IllegalPrefixes, the result
+// is the empty string.
+//
+func Synopsis(s string) string {
+ s = clean(s[0:firstSentenceLen(s)], 0)
+ for _, prefix := range IllegalPrefixes {
+ if strings.HasPrefix(strings.ToLower(s), prefix) {
+ return ""
+ }
+ }
+ return s
+}
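+
+// Example use (editor's sketch):
+//
+//	s := Synopsis("Package doc extracts source code documentation.\nMore detail.")
+//	// s == "Package doc extracts source code documentation."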
+
+// IllegalPrefixes is the list of lowercase prefixes that mark a comment
+// as boilerplate (copyright, license, authorship) rather than a package
+// synopsis.
+var IllegalPrefixes = []string{
+ "copyright",
+ "all rights",
+ "author",
+}
diff --git a/src/go/doc/synopsis_test.go b/src/go/doc/synopsis_test.go
new file mode 100644
index 000000000..59b253cb8
--- /dev/null
+++ b/src/go/doc/synopsis_test.go
@@ -0,0 +1,51 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package doc
+
+import "testing"
+
+var tests = []struct {
+ txt string
+ fsl int
+ syn string
+}{
+ {"", 0, ""},
+ {"foo", 3, "foo"},
+ {"foo.", 4, "foo."},
+ {"foo.bar", 7, "foo.bar"},
+ {" foo. ", 6, "foo."},
+ {" foo\t bar.\n", 12, "foo bar."},
+ {" foo\t bar.\n", 12, "foo bar."},
+ {"a b\n\nc\r\rd\t\t", 12, "a b c d"},
+ {"a b\n\nc\r\rd\t\t . BLA", 15, "a b c d ."},
+ {"Package poems by T.S.Eliot. To rhyme...", 27, "Package poems by T.S.Eliot."},
+ {"Package poems by T. S. Eliot. To rhyme...", 29, "Package poems by T. S. Eliot."},
+ {"foo implements the foo ABI. The foo ABI is...", 27, "foo implements the foo ABI."},
+ {"Package\nfoo. ..", 12, "Package foo."},
+ {"P . Q.", 3, "P ."},
+ {"P. Q. ", 8, "P. Q."},
+ {"Package Καλημέρα κόσμε.", 36, "Package Καλημέρα κόσμε."},
+ {"Package こんにちは 世界\n", 31, "Package こんにちは 世界"},
+ {"Package こんにちは。世界", 26, "Package こんにちは。"},
+ {"Package 안녕.世界", 17, "Package 안녕."},
+ {"Package foo does bar.", 21, "Package foo does bar."},
+ {"Copyright 2012 Google, Inc. Package foo does bar.", 27, ""},
+ {"All Rights reserved. Package foo does bar.", 20, ""},
+ {"All rights reserved. Package foo does bar.", 20, ""},
+ {"Authors: foo@bar.com. Package foo does bar.", 21, ""},
+}
+
+func TestSynopsis(t *testing.T) {
+ for _, e := range tests {
+ fsl := firstSentenceLen(e.txt)
+ if fsl != e.fsl {
+ t.Errorf("got fsl = %d; want %d for %q\n", fsl, e.fsl, e.txt)
+ }
+ syn := Synopsis(e.txt)
+ if syn != e.syn {
+ t.Errorf("got syn = %q; want %q for %q\n", syn, e.syn, e.txt)
+ }
+ }
+}
diff --git a/src/go/doc/testdata/a.0.golden b/src/go/doc/testdata/a.0.golden
new file mode 100644
index 000000000..7e680b80b
--- /dev/null
+++ b/src/go/doc/testdata/a.0.golden
@@ -0,0 +1,52 @@
+// comment 0 comment 1
+PACKAGE a
+
+IMPORTPATH
+ testdata/a
+
+FILENAMES
+ testdata/a0.go
+ testdata/a1.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+ bug0
+
+ bug1
+
+
+BUGS
+BUG(uid) bug0
+
+BUG(uid) bug1
+
+
+NOTES
+NOTE(uid)
+
+NOTE(foo) 1 of 4 - this is the first line of note 1
+ - note 1 continues on this 2nd line
+ - note 1 continues on this 3rd line
+
+NOTE(foo) 2 of 4
+
+NOTE(bar) 3 of 4
+
+NOTE(bar) 4 of 4
+ - this is the last line of note 4
+
+NOTE(bam) This note which contains a (parenthesized) subphrase
+ must appear in its entirety.
+
+NOTE(xxx) The ':' after the marker and uid is optional.
+
+
+SECBUGS
+SECBUG(uid) sec hole 0
+ need to fix asap
+
+
+TODOS
+TODO(uid) todo0
+
+TODO(uid) todo1
+
diff --git a/src/go/doc/testdata/a.1.golden b/src/go/doc/testdata/a.1.golden
new file mode 100644
index 000000000..7e680b80b
--- /dev/null
+++ b/src/go/doc/testdata/a.1.golden
@@ -0,0 +1,52 @@
+// comment 0 comment 1
+PACKAGE a
+
+IMPORTPATH
+ testdata/a
+
+FILENAMES
+ testdata/a0.go
+ testdata/a1.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+ bug0
+
+ bug1
+
+
+BUGS
+BUG(uid) bug0
+
+BUG(uid) bug1
+
+
+NOTES
+NOTE(uid)
+
+NOTE(foo) 1 of 4 - this is the first line of note 1
+ - note 1 continues on this 2nd line
+ - note 1 continues on this 3rd line
+
+NOTE(foo) 2 of 4
+
+NOTE(bar) 3 of 4
+
+NOTE(bar) 4 of 4
+ - this is the last line of note 4
+
+NOTE(bam) This note which contains a (parenthesized) subphrase
+ must appear in its entirety.
+
+NOTE(xxx) The ':' after the marker and uid is optional.
+
+
+SECBUGS
+SECBUG(uid) sec hole 0
+ need to fix asap
+
+
+TODOS
+TODO(uid) todo0
+
+TODO(uid) todo1
+
diff --git a/src/go/doc/testdata/a.2.golden b/src/go/doc/testdata/a.2.golden
new file mode 100644
index 000000000..7e680b80b
--- /dev/null
+++ b/src/go/doc/testdata/a.2.golden
@@ -0,0 +1,52 @@
+// comment 0 comment 1
+PACKAGE a
+
+IMPORTPATH
+ testdata/a
+
+FILENAMES
+ testdata/a0.go
+ testdata/a1.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+ bug0
+
+ bug1
+
+
+BUGS
+BUG(uid) bug0
+
+BUG(uid) bug1
+
+
+NOTES
+NOTE(uid)
+
+NOTE(foo) 1 of 4 - this is the first line of note 1
+ - note 1 continues on this 2nd line
+ - note 1 continues on this 3rd line
+
+NOTE(foo) 2 of 4
+
+NOTE(bar) 3 of 4
+
+NOTE(bar) 4 of 4
+ - this is the last line of note 4
+
+NOTE(bam) This note which contains a (parenthesized) subphrase
+ must appear in its entirety.
+
+NOTE(xxx) The ':' after the marker and uid is optional.
+
+
+SECBUGS
+SECBUG(uid) sec hole 0
+ need to fix asap
+
+
+TODOS
+TODO(uid) todo0
+
+TODO(uid) todo1
+
diff --git a/src/go/doc/testdata/a0.go b/src/go/doc/testdata/a0.go
new file mode 100644
index 000000000..2420c8a48
--- /dev/null
+++ b/src/go/doc/testdata/a0.go
@@ -0,0 +1,40 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// comment 0
+package a
+
+//BUG(uid): bug0
+
+//TODO(uid): todo0
+
+// A note with some spaces after it should be ignored (watch out for
+// emacs modes that remove trailing whitespace).
+//NOTE(uid):
+
+// SECBUG(uid): sec hole 0
+// need to fix asap
+
+// Multiple notes may be in the same comment group and should be
+// recognized individually. Notes may start in the middle of a
+// comment group as long as they start at the beginning of an
+// individual comment.
+//
+// NOTE(foo): 1 of 4 - this is the first line of note 1
+// - note 1 continues on this 2nd line
+// - note 1 continues on this 3rd line
+// NOTE(foo): 2 of 4
+// NOTE(bar): 3 of 4
+/* NOTE(bar): 4 of 4 */
+// - this is the last line of note 4
+//
+//
+
+// NOTE(bam): This note which contains a (parenthesized) subphrase
+// must appear in its entirety.
+
+// NOTE(xxx) The ':' after the marker and uid is optional.
+
+// NOTE(): NO uid - should not show up.
+// NOTE() NO uid - should not show up.
diff --git a/src/go/doc/testdata/a1.go b/src/go/doc/testdata/a1.go
new file mode 100644
index 000000000..9fad1e09b
--- /dev/null
+++ b/src/go/doc/testdata/a1.go
@@ -0,0 +1,12 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// comment 1
+package a
+
+//BUG(uid): bug1
+
+//TODO(uid): todo1
+
+//TODO(): ignored
diff --git a/src/go/doc/testdata/b.0.golden b/src/go/doc/testdata/b.0.golden
new file mode 100644
index 000000000..9d93392ea
--- /dev/null
+++ b/src/go/doc/testdata/b.0.golden
@@ -0,0 +1,71 @@
+//
+PACKAGE b
+
+IMPORTPATH
+ testdata/b
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/b.go
+
+CONSTANTS
+ //
+ const (
+ C1 notExported = iota
+ C2
+
+ C4
+ C5
+ )
+
+ //
+ const C notExported = 0
+
+ //
+ const Pi = 3.14 // Pi
+
+
+VARIABLES
+ //
+ var (
+ U1, U2, U4, U5 notExported
+
+ U7 notExported = 7
+ )
+
+ //
+ var MaxInt int // MaxInt
+
+ //
+ var V notExported
+
+ //
+ var V1, V2, V4, V5 notExported
+
+
+FUNCTIONS
+ //
+ func F(x int) int
+
+ //
+ func F1() notExported
+
+ // Always under the package functions list.
+ func NotAFactory() int
+
+ // Associated with uint type if AllDecls is set.
+ func UintFactory() uint
+
+
+TYPES
+ //
+ type T struct{} // T
+
+ //
+ var V T // v
+
+ //
+ func (x *T) M()
+
diff --git a/src/go/doc/testdata/b.1.golden b/src/go/doc/testdata/b.1.golden
new file mode 100644
index 000000000..66c47b5c2
--- /dev/null
+++ b/src/go/doc/testdata/b.1.golden
@@ -0,0 +1,83 @@
+//
+PACKAGE b
+
+IMPORTPATH
+ testdata/b
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/b.go
+
+CONSTANTS
+ //
+ const Pi = 3.14 // Pi
+
+
+VARIABLES
+ //
+ var MaxInt int // MaxInt
+
+
+FUNCTIONS
+ //
+ func F(x int) int
+
+ // Always under the package functions list.
+ func NotAFactory() int
+
+
+TYPES
+ //
+ type T struct{} // T
+
+ //
+ var V T // v
+
+ //
+ func (x *T) M()
+
+ //
+ type notExported int
+
+ //
+ const (
+ C1 notExported = iota
+ C2
+ c3
+ C4
+ C5
+ )
+
+ //
+ const C notExported = 0
+
+ //
+ var (
+ U1, U2, u3, U4, U5 notExported
+ u6 notExported
+ U7 notExported = 7
+ )
+
+ //
+ var V notExported
+
+ //
+ var V1, V2, v3, V4, V5 notExported
+
+ //
+ func F1() notExported
+
+ //
+ func f2() notExported
+
+ // Should only appear if AllDecls is set.
+ type uint struct{} // overrides a predeclared type uint
+
+ // Associated with uint type if AllDecls is set.
+ func UintFactory() uint
+
+ // Associated with uint type if AllDecls is set.
+ func uintFactory() uint
+
diff --git a/src/go/doc/testdata/b.2.golden b/src/go/doc/testdata/b.2.golden
new file mode 100644
index 000000000..9d93392ea
--- /dev/null
+++ b/src/go/doc/testdata/b.2.golden
@@ -0,0 +1,71 @@
+//
+PACKAGE b
+
+IMPORTPATH
+ testdata/b
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/b.go
+
+CONSTANTS
+ //
+ const (
+ C1 notExported = iota
+ C2
+
+ C4
+ C5
+ )
+
+ //
+ const C notExported = 0
+
+ //
+ const Pi = 3.14 // Pi
+
+
+VARIABLES
+ //
+ var (
+ U1, U2, U4, U5 notExported
+
+ U7 notExported = 7
+ )
+
+ //
+ var MaxInt int // MaxInt
+
+ //
+ var V notExported
+
+ //
+ var V1, V2, V4, V5 notExported
+
+
+FUNCTIONS
+ //
+ func F(x int) int
+
+ //
+ func F1() notExported
+
+ // Always under the package functions list.
+ func NotAFactory() int
+
+ // Associated with uint type if AllDecls is set.
+ func UintFactory() uint
+
+
+TYPES
+ //
+ type T struct{} // T
+
+ //
+ var V T // v
+
+ //
+ func (x *T) M()
+
diff --git a/src/go/doc/testdata/b.go b/src/go/doc/testdata/b.go
new file mode 100644
index 000000000..e50663b3d
--- /dev/null
+++ b/src/go/doc/testdata/b.go
@@ -0,0 +1,58 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+import "a"
+
+// ----------------------------------------------------------------------------
+// Basic declarations
+
+const Pi = 3.14 // Pi
+var MaxInt int // MaxInt
+type T struct{} // T
+var V T // v
+func F(x int) int {} // F
+func (x *T) M() {} // M
+
+// Corner cases: association with (presumed) predeclared types
+
+// Always under the package functions list.
+func NotAFactory() int {}
+
+// Associated with uint type if AllDecls is set.
+func UintFactory() uint {}
+
+// Associated with uint type if AllDecls is set.
+func uintFactory() uint {}
+
+// Should only appear if AllDecls is set.
+type uint struct{} // overrides a predeclared type uint
+
+// ----------------------------------------------------------------------------
+// Exported declarations associated with non-exported types must always be shown.
+
+type notExported int
+
+const C notExported = 0
+
+const (
+ C1 notExported = iota
+ C2
+ c3
+ C4
+ C5
+)
+
+var V notExported
+var V1, V2, v3, V4, V5 notExported
+
+var (
+ U1, U2, u3, U4, U5 notExported
+ u6 notExported
+ U7 notExported = 7
+)
+
+func F1() notExported {}
+func f2() notExported {}
diff --git a/src/go/doc/testdata/benchmark.go b/src/go/doc/testdata/benchmark.go
new file mode 100644
index 000000000..905e49644
--- /dev/null
+++ b/src/go/doc/testdata/benchmark.go
@@ -0,0 +1,293 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "runtime"
+ "time"
+)
+
+var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
+var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
+
+// An internal type but exported because it is cross-package; part of the implementation
+// of go test.
+type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+}
+
+// B is a type passed to Benchmark functions to manage benchmark
+// timing and to specify the number of iterations to run.
+type B struct {
+ common
+ N int
+ benchmark InternalBenchmark
+ bytes int64
+ timerOn bool
+ result BenchmarkResult
+}
+
+// StartTimer starts timing a test. This function is called automatically
+// before a benchmark starts, but it can also be used to resume timing after
+// a call to StopTimer.
+func (b *B) StartTimer() {
+ if !b.timerOn {
+ b.start = time.Now()
+ b.timerOn = true
+ }
+}
+
+// StopTimer stops timing a test. This can be used to pause the timer
+// while performing complex initialization that you don't
+// want to measure.
+func (b *B) StopTimer() {
+ if b.timerOn {
+ b.duration += time.Now().Sub(b.start)
+ b.timerOn = false
+ }
+}
+
+// ResetTimer sets the elapsed benchmark time to zero.
+// It does not affect whether the timer is running.
+func (b *B) ResetTimer() {
+ if b.timerOn {
+ b.start = time.Now()
+ }
+ b.duration = 0
+}
+
+// SetBytes records the number of bytes processed in a single operation.
+// If this is called, the benchmark will report ns/op and MB/s.
+func (b *B) SetBytes(n int64) { b.bytes = n }
+
+func (b *B) nsPerOp() int64 {
+ if b.N <= 0 {
+ return 0
+ }
+ return b.duration.Nanoseconds() / int64(b.N)
+}
+
+// runN runs a single benchmark for the specified number of iterations.
+func (b *B) runN(n int) {
+ // Try to get a comparable environment for each run
+ // by clearing garbage from previous runs.
+ runtime.GC()
+ b.N = n
+ b.ResetTimer()
+ b.StartTimer()
+ b.benchmark.F(b)
+ b.StopTimer()
+}
+
+func min(x, y int) int {
+ if x > y {
+ return y
+ }
+ return x
+}
+
+func max(x, y int) int {
+ if x < y {
+ return y
+ }
+ return x
+}
+
+// roundDown10 rounds a number down to the nearest power of 10.
+func roundDown10(n int) int {
+ var tens = 0
+ // tens = floor(log_10(n))
+	for n >= 10 {
+ n = n / 10
+ tens++
+ }
+ // result = 10^tens
+ result := 1
+ for i := 0; i < tens; i++ {
+ result *= 10
+ }
+ return result
+}
+
+// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
+func roundUp(n int) int {
+ base := roundDown10(n)
+ if n < (2 * base) {
+ return 2 * base
+ }
+ if n < (5 * base) {
+ return 5 * base
+ }
+ return 10 * base
+}
+
+// run times the benchmark function in a separate goroutine.
+func (b *B) run() BenchmarkResult {
+ go b.launch()
+ <-b.signal
+ return b.result
+}
+
+// launch launches the benchmark function. It gradually increases the number
+// of benchmark iterations until the benchmark runs for a second in order
+// to get a reasonable measurement. It prints timing information in this form
+// testing.BenchmarkHello 100000 19 ns/op
+// launch is run by the run function as a separate goroutine.
+func (b *B) launch() {
+ // Run the benchmark for a single iteration in case it's expensive.
+ n := 1
+
+ // Signal that we're done whether we return normally
+ // or by FailNow's runtime.Goexit.
+ defer func() {
+ b.signal <- b
+ }()
+
+ b.runN(n)
+ // Run the benchmark for at least the specified amount of time.
+ d := *benchTime
+ for !b.failed && b.duration < d && n < 1e9 {
+ last := n
+ // Predict iterations/sec.
+ if b.nsPerOp() == 0 {
+ n = 1e9
+ } else {
+ n = int(d.Nanoseconds() / b.nsPerOp())
+ }
+ // Run more iterations than we think we'll need for a second (1.5x).
+ // Don't grow too fast in case we had timing errors previously.
+ // Be sure to run at least one more than last time.
+ n = max(min(n+n/2, 100*last), last+1)
+ // Round up to something easy to read.
+ n = roundUp(n)
+ b.runN(n)
+ }
+ b.result = BenchmarkResult{b.N, b.duration, b.bytes}
+}
+
+// The results of a benchmark run.
+type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+}
+
+func (r BenchmarkResult) NsPerOp() int64 {
+ if r.N <= 0 {
+ return 0
+ }
+ return r.T.Nanoseconds() / int64(r.N)
+}
+
+func (r BenchmarkResult) mbPerSec() float64 {
+ if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
+ return 0
+ }
+ return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
+}
+
+func (r BenchmarkResult) String() string {
+ mbs := r.mbPerSec()
+ mb := ""
+ if mbs != 0 {
+ mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
+ }
+ nsop := r.NsPerOp()
+ ns := fmt.Sprintf("%10d ns/op", nsop)
+ if r.N > 0 && nsop < 100 {
+ // The format specifiers here make sure that
+ // the ones digits line up for all three possible formats.
+ if nsop < 10 {
+ ns = fmt.Sprintf("%13.2f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
+ } else {
+ ns = fmt.Sprintf("%12.1f ns/op", float64(r.T.Nanoseconds())/float64(r.N))
+ }
+ }
+ return fmt.Sprintf("%8d\t%s%s", r.N, ns, mb)
+}
+
+// An internal function but exported because it is cross-package; part of the implementation
+// of go test.
+func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
+ // If no flag was specified, don't run benchmarks.
+ if len(*matchBenchmarks) == 0 {
+ return
+ }
+ for _, Benchmark := range benchmarks {
+ matched, err := matchString(*matchBenchmarks, Benchmark.Name)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.bench: %s\n", err)
+ os.Exit(1)
+ }
+ if !matched {
+ continue
+ }
+ for _, procs := range cpuList {
+ runtime.GOMAXPROCS(procs)
+ b := &B{
+ common: common{
+ signal: make(chan interface{}),
+ },
+ benchmark: Benchmark,
+ }
+ benchName := Benchmark.Name
+ if procs != 1 {
+ benchName = fmt.Sprintf("%s-%d", Benchmark.Name, procs)
+ }
+ fmt.Printf("%s\t", benchName)
+ r := b.run()
+ if b.failed {
+ // The output could be very long here, but probably isn't.
+ // We print it all, regardless, because we don't want to trim the reason
+ // the benchmark failed.
+ fmt.Printf("--- FAIL: %s\n%s", benchName, b.output)
+ continue
+ }
+ fmt.Printf("%v\n", r)
+ // Unlike with tests, we ignore the -chatty flag and always print output for
+ // benchmarks since the output generation time will skew the results.
+ if len(b.output) > 0 {
+ b.trimOutput()
+ fmt.Printf("--- BENCH: %s\n%s", benchName, b.output)
+ }
+ if p := runtime.GOMAXPROCS(-1); p != procs {
+ fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
+ }
+ }
+ }
+}
+
+// trimOutput shortens the output from a benchmark, which can be very long.
+func (b *B) trimOutput() {
+ // The output is likely to appear multiple times because the benchmark
+ // is run multiple times, but at least it will be seen. This is not a big deal
+ // because benchmarks rarely print, but just in case, we trim it if it's too long.
+ const maxNewlines = 10
+ for nlCount, j := 0, 0; j < len(b.output); j++ {
+ if b.output[j] == '\n' {
+ nlCount++
+ if nlCount >= maxNewlines {
+ b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
+ break
+ }
+ }
+ }
+}
+
+// Benchmark benchmarks a single function. Useful for creating
+// custom benchmarks that do not use go test.
+func Benchmark(f func(b *B)) BenchmarkResult {
+ b := &B{
+ common: common{
+ signal: make(chan interface{}),
+ },
+ benchmark: InternalBenchmark{"", f},
+ }
+ return b.run()
+}
diff --git a/src/go/doc/testdata/bugpara.0.golden b/src/go/doc/testdata/bugpara.0.golden
new file mode 100644
index 000000000..580485950
--- /dev/null
+++ b/src/go/doc/testdata/bugpara.0.golden
@@ -0,0 +1,20 @@
+//
+PACKAGE bugpara
+
+IMPORTPATH
+ testdata/bugpara
+
+FILENAMES
+ testdata/bugpara.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+ Sometimes bugs have multiple paragraphs.
+
+ Like this one.
+
+
+BUGS
+BUG(rsc) Sometimes bugs have multiple paragraphs.
+
+ Like this one.
+
diff --git a/src/go/doc/testdata/bugpara.1.golden b/src/go/doc/testdata/bugpara.1.golden
new file mode 100644
index 000000000..580485950
--- /dev/null
+++ b/src/go/doc/testdata/bugpara.1.golden
@@ -0,0 +1,20 @@
+//
+PACKAGE bugpara
+
+IMPORTPATH
+ testdata/bugpara
+
+FILENAMES
+ testdata/bugpara.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+ Sometimes bugs have multiple paragraphs.
+
+ Like this one.
+
+
+BUGS
+BUG(rsc) Sometimes bugs have multiple paragraphs.
+
+ Like this one.
+
diff --git a/src/go/doc/testdata/bugpara.2.golden b/src/go/doc/testdata/bugpara.2.golden
new file mode 100644
index 000000000..580485950
--- /dev/null
+++ b/src/go/doc/testdata/bugpara.2.golden
@@ -0,0 +1,20 @@
+//
+PACKAGE bugpara
+
+IMPORTPATH
+ testdata/bugpara
+
+FILENAMES
+ testdata/bugpara.go
+
+BUGS .Bugs is now deprecated, please use .Notes instead
+ Sometimes bugs have multiple paragraphs.
+
+ Like this one.
+
+
+BUGS
+BUG(rsc) Sometimes bugs have multiple paragraphs.
+
+ Like this one.
+
diff --git a/src/go/doc/testdata/bugpara.go b/src/go/doc/testdata/bugpara.go
new file mode 100644
index 000000000..f5345a797
--- /dev/null
+++ b/src/go/doc/testdata/bugpara.go
@@ -0,0 +1,5 @@
+package bugpara
+
+// BUG(rsc): Sometimes bugs have multiple paragraphs.
+//
+// Like this one.
diff --git a/src/go/doc/testdata/c.0.golden b/src/go/doc/testdata/c.0.golden
new file mode 100644
index 000000000..e21959b19
--- /dev/null
+++ b/src/go/doc/testdata/c.0.golden
@@ -0,0 +1,48 @@
+//
+PACKAGE c
+
+IMPORTPATH
+ testdata/c
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/c.go
+
+TYPES
+ // A (should see this)
+ type A struct{}
+
+ // B (should see this)
+ type B struct{}
+
+ // C (should see this)
+ type C struct{}
+
+ // D (should see this)
+ type D struct{}
+
+ // E1 (should see this)
+ type E1 struct{}
+
+ // E (should see this for E2 and E3)
+ type E2 struct{}
+
+ // E (should see this for E2 and E3)
+ type E3 struct{}
+
+ // E4 (should see this)
+ type E4 struct{}
+
+ //
+ type T1 struct{}
+
+ //
+ func (t1 *T1) M()
+
+ // T2 must not show methods of local T1
+ type T2 struct {
+ a.T1 // not the same as locally declared T1
+ }
+
diff --git a/src/go/doc/testdata/c.1.golden b/src/go/doc/testdata/c.1.golden
new file mode 100644
index 000000000..e21959b19
--- /dev/null
+++ b/src/go/doc/testdata/c.1.golden
@@ -0,0 +1,48 @@
+//
+PACKAGE c
+
+IMPORTPATH
+ testdata/c
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/c.go
+
+TYPES
+ // A (should see this)
+ type A struct{}
+
+ // B (should see this)
+ type B struct{}
+
+ // C (should see this)
+ type C struct{}
+
+ // D (should see this)
+ type D struct{}
+
+ // E1 (should see this)
+ type E1 struct{}
+
+ // E (should see this for E2 and E3)
+ type E2 struct{}
+
+ // E (should see this for E2 and E3)
+ type E3 struct{}
+
+ // E4 (should see this)
+ type E4 struct{}
+
+ //
+ type T1 struct{}
+
+ //
+ func (t1 *T1) M()
+
+ // T2 must not show methods of local T1
+ type T2 struct {
+ a.T1 // not the same as locally declared T1
+ }
+
diff --git a/src/go/doc/testdata/c.2.golden b/src/go/doc/testdata/c.2.golden
new file mode 100644
index 000000000..e21959b19
--- /dev/null
+++ b/src/go/doc/testdata/c.2.golden
@@ -0,0 +1,48 @@
+//
+PACKAGE c
+
+IMPORTPATH
+ testdata/c
+
+IMPORTS
+ a
+
+FILENAMES
+ testdata/c.go
+
+TYPES
+ // A (should see this)
+ type A struct{}
+
+ // B (should see this)
+ type B struct{}
+
+ // C (should see this)
+ type C struct{}
+
+ // D (should see this)
+ type D struct{}
+
+ // E1 (should see this)
+ type E1 struct{}
+
+ // E (should see this for E2 and E3)
+ type E2 struct{}
+
+ // E (should see this for E2 and E3)
+ type E3 struct{}
+
+ // E4 (should see this)
+ type E4 struct{}
+
+ //
+ type T1 struct{}
+
+ //
+ func (t1 *T1) M()
+
+ // T2 must not show methods of local T1
+ type T2 struct {
+ a.T1 // not the same as locally declared T1
+ }
+
diff --git a/src/go/doc/testdata/c.go b/src/go/doc/testdata/c.go
new file mode 100644
index 000000000..e0f39196d
--- /dev/null
+++ b/src/go/doc/testdata/c.go
@@ -0,0 +1,62 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package c
+
+import "a"
+
+// ----------------------------------------------------------------------------
+// Test that empty declarations don't cause problems
+
+const ()
+
+type ()
+
+var ()
+
+// ----------------------------------------------------------------------------
+// Test that types with documentation on both the Decl and the Spec node
+// are handled correctly.
+
+// A (should see this)
+type A struct{}
+
+// B (should see this)
+type (
+ B struct{}
+)
+
+type (
+ // C (should see this)
+ C struct{}
+)
+
+// D (should not see this)
+type (
+ // D (should see this)
+ D struct{}
+)
+
+// E (should see this for E2 and E3)
+type (
+ // E1 (should see this)
+ E1 struct{}
+ E2 struct{}
+ E3 struct{}
+ // E4 (should see this)
+ E4 struct{}
+)
+
+// ----------------------------------------------------------------------------
+// Test that local and imported types are different when
+// handling anonymous fields.
+
+type T1 struct{}
+
+func (t1 *T1) M() {}
+
+// T2 must not show methods of local T1
+type T2 struct {
+ a.T1 // not the same as locally declared T1
+}
diff --git a/src/go/doc/testdata/d.0.golden b/src/go/doc/testdata/d.0.golden
new file mode 100644
index 000000000..c00519953
--- /dev/null
+++ b/src/go/doc/testdata/d.0.golden
@@ -0,0 +1,104 @@
+//
+PACKAGE d
+
+IMPORTPATH
+ testdata/d
+
+FILENAMES
+ testdata/d1.go
+ testdata/d2.go
+
+CONSTANTS
+ // CBx constants should appear before CAx constants.
+ const (
+ CB2 = iota // before CB1
+ CB1 // before CB0
+ CB0 // at end
+ )
+
+ // CAx constants should appear after CBx constants.
+ const (
+ CA2 = iota // before CA1
+ CA1 // before CA0
+ CA0 // at end
+ )
+
+ // C0 should be first.
+ const C0 = 0
+
+ // C1 should be second.
+ const C1 = 1
+
+ // C2 should be third.
+ const C2 = 2
+
+ //
+ const (
+ // Single const declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Cungrouped = 0
+ )
+
+
+VARIABLES
+ // VBx variables should appear before VAx variables.
+ var (
+ VB2 int // before VB1
+ VB1 int // before VB0
+ VB0 int // at end
+ )
+
+ // VAx variables should appear after VBx variables.
+ var (
+ VA2 int // before VA1
+ VA1 int // before VA0
+ VA0 int // at end
+ )
+
+ // V0 should be first.
+ var V0 uintptr
+
+ // V1 should be second.
+ var V1 uint
+
+ // V2 should be third.
+ var V2 int
+
+ //
+ var (
+ // Single var declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Vungrouped = 0
+ )
+
+
+FUNCTIONS
+ // F0 should be first.
+ func F0()
+
+ // F1 should be second.
+ func F1()
+
+ // F2 should be third.
+ func F2()
+
+
+TYPES
+ // T0 should be first.
+ type T0 struct{}
+
+ // T1 should be second.
+ type T1 struct{}
+
+ // T2 should be third.
+ type T2 struct{}
+
+ // TG0 should be first.
+ type TG0 struct{}
+
+ // TG1 should be second.
+ type TG1 struct{}
+
+ // TG2 should be third.
+ type TG2 struct{}
+
diff --git a/src/go/doc/testdata/d.1.golden b/src/go/doc/testdata/d.1.golden
new file mode 100644
index 000000000..c00519953
--- /dev/null
+++ b/src/go/doc/testdata/d.1.golden
@@ -0,0 +1,104 @@
+//
+PACKAGE d
+
+IMPORTPATH
+ testdata/d
+
+FILENAMES
+ testdata/d1.go
+ testdata/d2.go
+
+CONSTANTS
+ // CBx constants should appear before CAx constants.
+ const (
+ CB2 = iota // before CB1
+ CB1 // before CB0
+ CB0 // at end
+ )
+
+ // CAx constants should appear after CBx constants.
+ const (
+ CA2 = iota // before CA1
+ CA1 // before CA0
+ CA0 // at end
+ )
+
+ // C0 should be first.
+ const C0 = 0
+
+ // C1 should be second.
+ const C1 = 1
+
+ // C2 should be third.
+ const C2 = 2
+
+ //
+ const (
+ // Single const declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Cungrouped = 0
+ )
+
+
+VARIABLES
+ // VBx variables should appear before VAx variables.
+ var (
+ VB2 int // before VB1
+ VB1 int // before VB0
+ VB0 int // at end
+ )
+
+ // VAx variables should appear after VBx variables.
+ var (
+ VA2 int // before VA1
+ VA1 int // before VA0
+ VA0 int // at end
+ )
+
+ // V0 should be first.
+ var V0 uintptr
+
+ // V1 should be second.
+ var V1 uint
+
+ // V2 should be third.
+ var V2 int
+
+ //
+ var (
+ // Single var declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Vungrouped = 0
+ )
+
+
+FUNCTIONS
+ // F0 should be first.
+ func F0()
+
+ // F1 should be second.
+ func F1()
+
+ // F2 should be third.
+ func F2()
+
+
+TYPES
+ // T0 should be first.
+ type T0 struct{}
+
+ // T1 should be second.
+ type T1 struct{}
+
+ // T2 should be third.
+ type T2 struct{}
+
+ // TG0 should be first.
+ type TG0 struct{}
+
+ // TG1 should be second.
+ type TG1 struct{}
+
+ // TG2 should be third.
+ type TG2 struct{}
+
diff --git a/src/go/doc/testdata/d.2.golden b/src/go/doc/testdata/d.2.golden
new file mode 100644
index 000000000..c00519953
--- /dev/null
+++ b/src/go/doc/testdata/d.2.golden
@@ -0,0 +1,104 @@
+//
+PACKAGE d
+
+IMPORTPATH
+ testdata/d
+
+FILENAMES
+ testdata/d1.go
+ testdata/d2.go
+
+CONSTANTS
+ // CBx constants should appear before CAx constants.
+ const (
+ CB2 = iota // before CB1
+ CB1 // before CB0
+ CB0 // at end
+ )
+
+ // CAx constants should appear after CBx constants.
+ const (
+ CA2 = iota // before CA1
+ CA1 // before CA0
+ CA0 // at end
+ )
+
+ // C0 should be first.
+ const C0 = 0
+
+ // C1 should be second.
+ const C1 = 1
+
+ // C2 should be third.
+ const C2 = 2
+
+ //
+ const (
+ // Single const declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Cungrouped = 0
+ )
+
+
+VARIABLES
+ // VBx variables should appear before VAx variables.
+ var (
+ VB2 int // before VB1
+ VB1 int // before VB0
+ VB0 int // at end
+ )
+
+ // VAx variables should appear after VBx variables.
+ var (
+ VA2 int // before VA1
+ VA1 int // before VA0
+ VA0 int // at end
+ )
+
+ // V0 should be first.
+ var V0 uintptr
+
+ // V1 should be second.
+ var V1 uint
+
+ // V2 should be third.
+ var V2 int
+
+ //
+ var (
+ // Single var declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Vungrouped = 0
+ )
+
+
+FUNCTIONS
+ // F0 should be first.
+ func F0()
+
+ // F1 should be second.
+ func F1()
+
+ // F2 should be third.
+ func F2()
+
+
+TYPES
+ // T0 should be first.
+ type T0 struct{}
+
+ // T1 should be second.
+ type T1 struct{}
+
+ // T2 should be third.
+ type T2 struct{}
+
+ // TG0 should be first.
+ type TG0 struct{}
+
+ // TG1 should be second.
+ type TG1 struct{}
+
+ // TG2 should be third.
+ type TG2 struct{}
+
diff --git a/src/go/doc/testdata/d1.go b/src/go/doc/testdata/d1.go
new file mode 100644
index 000000000..ebd694195
--- /dev/null
+++ b/src/go/doc/testdata/d1.go
@@ -0,0 +1,57 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test cases for sort order of declarations.
+
+package d
+
+// C2 should be third.
+const C2 = 2
+
+// V2 should be third.
+var V2 int
+
+// CBx constants should appear before CAx constants.
+const (
+ CB2 = iota // before CB1
+ CB1 // before CB0
+ CB0 // at end
+)
+
+// VBx variables should appear before VAx variables.
+var (
+ VB2 int // before VB1
+ VB1 int // before VB0
+ VB0 int // at end
+)
+
+const (
+ // Single const declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Cungrouped = 0
+)
+
+var (
+ // Single var declarations inside ()'s are considered ungrouped
+ // and show up in sorted order.
+ Vungrouped = 0
+)
+
+// T2 should be third.
+type T2 struct{}
+
+// Grouped types are sorted nevertheless.
+type (
+ // TG2 should be third.
+ TG2 struct{}
+
+ // TG1 should be second.
+ TG1 struct{}
+
+ // TG0 should be first.
+ TG0 struct{}
+)
+
+// F2 should be third.
+func F2() {}
diff --git a/src/go/doc/testdata/d2.go b/src/go/doc/testdata/d2.go
new file mode 100644
index 000000000..2f56f4fa4
--- /dev/null
+++ b/src/go/doc/testdata/d2.go
@@ -0,0 +1,45 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test cases for sort order of declarations.
+
+package d
+
+// C1 should be second.
+const C1 = 1
+
+// C0 should be first.
+const C0 = 0
+
+// V1 should be second.
+var V1 uint
+
+// V0 should be first.
+var V0 uintptr
+
+// CAx constants should appear after CBx constants.
+const (
+ CA2 = iota // before CA1
+ CA1 // before CA0
+ CA0 // at end
+)
+
+// VAx variables should appear after VBx variables.
+var (
+ VA2 int // before VA1
+ VA1 int // before VA0
+ VA0 int // at end
+)
+
+// T1 should be second.
+type T1 struct{}
+
+// T0 should be first.
+type T0 struct{}
+
+// F1 should be second.
+func F1() {}
+
+// F0 should be first.
+func F0() {}
diff --git a/src/go/doc/testdata/e.0.golden b/src/go/doc/testdata/e.0.golden
new file mode 100644
index 000000000..6987e5867
--- /dev/null
+++ b/src/go/doc/testdata/e.0.golden
@@ -0,0 +1,109 @@
+// The package e is a go/doc test for embedded methods.
+PACKAGE e
+
+IMPORTPATH
+ testdata/e
+
+FILENAMES
+ testdata/e.go
+
+TYPES
+ // T1 has no embedded (level 1) M method due to conflict.
+ type T1 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T2 has only M as top-level method.
+ type T2 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T2.M should appear as method of T2.
+ func (T2) M()
+
+ // T3 has only M as top-level method.
+ type T3 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T3.M should appear as method of T3.
+ func (T3) M()
+
+ //
+ type T4 struct{}
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T4) M()
+
+ //
+ type T5 struct {
+ T4
+ }
+
+ //
+ type U1 struct {
+ *U1
+ }
+
+ // U1.M should appear as method of U1.
+ func (*U1) M()
+
+ //
+ type U2 struct {
+ *U3
+ }
+
+ // U2.M should appear as method of U2 and as method of U3 only if ...
+ func (*U2) M()
+
+ //
+ type U3 struct {
+ *U2
+ }
+
+ // U3.N should appear as method of U3 and as method of U2 only if ...
+ func (*U3) N()
+
+ //
+ type U4 struct {
+ // contains filtered or unexported fields
+ }
+
+ // U4.M should appear as method of U4.
+ func (*U4) M()
+
+ //
+ type V1 struct {
+ *V2
+ *V5
+ }
+
+ //
+ type V2 struct {
+ *V3
+ }
+
+ //
+ type V3 struct {
+ *V4
+ }
+
+ //
+ type V4 struct {
+ *V5
+ }
+
+ // V4.M should appear as method of V2 and V3 if AllMethods is set.
+ func (*V4) M()
+
+ //
+ type V5 struct {
+ *V6
+ }
+
+ //
+ type V6 struct{}
+
+ // V6.M should appear as method of V1 and V5 if AllMethods is set.
+ func (*V6) M()
+
diff --git a/src/go/doc/testdata/e.1.golden b/src/go/doc/testdata/e.1.golden
new file mode 100644
index 000000000..cbe22e0bf
--- /dev/null
+++ b/src/go/doc/testdata/e.1.golden
@@ -0,0 +1,144 @@
+// The package e is a go/doc test for embedded methods.
+PACKAGE e
+
+IMPORTPATH
+ testdata/e
+
+FILENAMES
+ testdata/e.go
+
+TYPES
+ // T1 has no embedded (level 1) M method due to conflict.
+ type T1 struct {
+ t1
+ t2
+ }
+
+ // T2 has only M as top-level method.
+ type T2 struct {
+ t1
+ }
+
+ // T2.M should appear as method of T2.
+ func (T2) M()
+
+ // T3 has only M as top-level method.
+ type T3 struct {
+ t1e
+ t2e
+ }
+
+ // T3.M should appear as method of T3.
+ func (T3) M()
+
+ //
+ type T4 struct{}
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T4) M()
+
+ //
+ type T5 struct {
+ T4
+ }
+
+ //
+ type U1 struct {
+ *U1
+ }
+
+ // U1.M should appear as method of U1.
+ func (*U1) M()
+
+ //
+ type U2 struct {
+ *U3
+ }
+
+ // U2.M should appear as method of U2 and as method of U3 only if ...
+ func (*U2) M()
+
+ //
+ type U3 struct {
+ *U2
+ }
+
+ // U3.N should appear as method of U3 and as method of U2 only if ...
+ func (*U3) N()
+
+ //
+ type U4 struct {
+ *u5
+ }
+
+ // U4.M should appear as method of U4.
+ func (*U4) M()
+
+ //
+ type V1 struct {
+ *V2
+ *V5
+ }
+
+ //
+ type V2 struct {
+ *V3
+ }
+
+ //
+ type V3 struct {
+ *V4
+ }
+
+ //
+ type V4 struct {
+ *V5
+ }
+
+ // V4.M should appear as method of V2 and V3 if AllMethods is set.
+ func (*V4) M()
+
+ //
+ type V5 struct {
+ *V6
+ }
+
+ //
+ type V6 struct{}
+
+ // V6.M should appear as method of V1 and V5 if AllMethods is set.
+ func (*V6) M()
+
+ //
+ type t1 struct{}
+
+ // t1.M should not appear as method in a Tx type.
+ func (t1) M()
+
+ //
+ type t1e struct {
+ t1
+ }
+
+ // t1.M should not appear as method in a Tx type.
+ func (t1e) M()
+
+ //
+ type t2 struct{}
+
+ // t2.M should not appear as method in a Tx type.
+ func (t2) M()
+
+ //
+ type t2e struct {
+ t2
+ }
+
+ // t2.M should not appear as method in a Tx type.
+ func (t2e) M()
+
+ //
+ type u5 struct {
+ *U4
+ }
+
diff --git a/src/go/doc/testdata/e.2.golden b/src/go/doc/testdata/e.2.golden
new file mode 100644
index 000000000..e7b05e80f
--- /dev/null
+++ b/src/go/doc/testdata/e.2.golden
@@ -0,0 +1,130 @@
+// The package e is a go/doc test for embedded methods.
+PACKAGE e
+
+IMPORTPATH
+ testdata/e
+
+FILENAMES
+ testdata/e.go
+
+TYPES
+ // T1 has no embedded (level 1) M method due to conflict.
+ type T1 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T2 has only M as top-level method.
+ type T2 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T2.M should appear as method of T2.
+ func (T2) M()
+
+ // T3 has only M as top-level method.
+ type T3 struct {
+ // contains filtered or unexported fields
+ }
+
+ // T3.M should appear as method of T3.
+ func (T3) M()
+
+ //
+ type T4 struct{}
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T4) M()
+
+ //
+ type T5 struct {
+ T4
+ }
+
+ // T4.M should appear as method of T5 only if AllMethods is set.
+ func (*T5) M()
+
+ //
+ type U1 struct {
+ *U1
+ }
+
+ // U1.M should appear as method of U1.
+ func (*U1) M()
+
+ //
+ type U2 struct {
+ *U3
+ }
+
+ // U2.M should appear as method of U2 and as method of U3 only if ...
+ func (*U2) M()
+
+ // U3.N should appear as method of U3 and as method of U2 only if ...
+ func (U2) N()
+
+ //
+ type U3 struct {
+ *U2
+ }
+
+ // U2.M should appear as method of U2 and as method of U3 only if ...
+ func (U3) M()
+
+ // U3.N should appear as method of U3 and as method of U2 only if ...
+ func (*U3) N()
+
+ //
+ type U4 struct {
+ // contains filtered or unexported fields
+ }
+
+ // U4.M should appear as method of U4.
+ func (*U4) M()
+
+ //
+ type V1 struct {
+ *V2
+ *V5
+ }
+
+ // V6.M should appear as method of V1 and V5 if AllMethods is set.
+ func (V1) M()
+
+ //
+ type V2 struct {
+ *V3
+ }
+
+ // V4.M should appear as method of V2 and V3 if AllMethods is set.
+ func (V2) M()
+
+ //
+ type V3 struct {
+ *V4
+ }
+
+ // V4.M should appear as method of V2 and V3 if AllMethods is set.
+ func (V3) M()
+
+ //
+ type V4 struct {
+ *V5
+ }
+
+ // V4.M should appear as method of V2 and V3 if AllMethods is set.
+ func (*V4) M()
+
+ //
+ type V5 struct {
+ *V6
+ }
+
+ // V6.M should appear as method of V1 and V5 if AllMethods is set.
+ func (V5) M()
+
+ //
+ type V6 struct{}
+
+ // V6.M should appear as method of V1 and V5 if AllMethods is set.
+ func (*V6) M()
+
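
The three golden variants of each testdata package correspond to go/doc's rendering modes: .0 is the default view (exported declarations only), .1 adds doc.AllDecls, and .2 adds doc.AllMethods. A minimal sketch of a driver that would produce the three views; the driver itself is an assumption for illustration, the actual harness lives in doc_test.go, which is not part of this hunk:

	package main

	import (
		"fmt"
		"go/doc"
		"go/parser"
		"go/token"
	)

	func docFor(mode doc.Mode) *doc.Package {
		fset := token.NewFileSet()
		// Parse every file under testdata; the map is keyed by package name.
		pkgs, err := parser.ParseDir(fset, "testdata", nil, parser.ParseComments)
		if err != nil {
			panic(err)
		}
		// doc.New takes ownership of the AST, so re-parse for each mode.
		return doc.New(pkgs["e"], "testdata/e", mode)
	}

	func main() {
		// The .0, .1, and .2 golden files correspond to these three modes.
		for i, mode := range []doc.Mode{0, doc.AllDecls, doc.AllMethods} {
			fmt.Printf("e.%d.golden: %d types\n", i, len(docFor(mode).Types))
		}
	}
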
diff --git a/src/go/doc/testdata/e.go b/src/go/doc/testdata/e.go
new file mode 100644
index 000000000..ec432e3e5
--- /dev/null
+++ b/src/go/doc/testdata/e.go
@@ -0,0 +1,147 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The package e is a go/doc test for embedded methods.
+package e
+
+// ----------------------------------------------------------------------------
+// Conflicting methods M must not show up.
+
+type t1 struct{}
+
+// t1.M should not appear as method in a Tx type.
+func (t1) M() {}
+
+type t2 struct{}
+
+// t2.M should not appear as method in a Tx type.
+func (t2) M() {}
+
+// T1 has no embedded (level 1) M method due to conflict.
+type T1 struct {
+ t1
+ t2
+}
+
+// ----------------------------------------------------------------------------
+// Higher-level method M wins over lower-level method M.
+
+// T2 has only M as top-level method.
+type T2 struct {
+ t1
+}
+
+// T2.M should appear as method of T2.
+func (T2) M() {}
+
+// ----------------------------------------------------------------------------
+// Higher-level method M wins over lower-level conflicting methods M.
+
+type t1e struct {
+ t1
+}
+
+type t2e struct {
+ t2
+}
+
+// T3 has only M as top-level method.
+type T3 struct {
+ t1e
+ t2e
+}
+
+// T3.M should appear as method of T3.
+func (T3) M() {}
+
+// ----------------------------------------------------------------------------
+// Don't show conflicting methods M embedded via an exported and non-exported
+// type.
+
+// T4 has no embedded (level 1) M method due to conflict.
+type T4 struct {
+ t2
+ T2
+}
+
+// ----------------------------------------------------------------------------
+// Don't show embedded methods of exported anonymous fields unless AllMethods
+// is set.
+
+type T4 struct{}
+
+// T4.M should appear as method of T5 only if AllMethods is set.
+func (*T4) M() {}
+
+type T5 struct {
+ T4
+}
+
+// ----------------------------------------------------------------------------
+// Recursive type declarations must not lead to endless recursion.
+
+type U1 struct {
+ *U1
+}
+
+// U1.M should appear as method of U1.
+func (*U1) M() {}
+
+type U2 struct {
+ *U3
+}
+
+// U2.M should appear as method of U2 and as method of U3 only if AllMethods is set.
+func (*U2) M() {}
+
+type U3 struct {
+ *U2
+}
+
+// U3.N should appear as method of U3 and as method of U2 only if AllMethods is set.
+func (*U3) N() {}
+
+type U4 struct {
+ *u5
+}
+
+// U4.M should appear as method of U4.
+func (*U4) M() {}
+
+type u5 struct {
+ *U4
+}
+
+// ----------------------------------------------------------------------------
+// A higher-level embedded type (and its methods) wins over the same type (and
+// its methods) embedded at a lower level.
+
+type V1 struct {
+ *V2
+ *V5
+}
+
+type V2 struct {
+ *V3
+}
+
+type V3 struct {
+ *V4
+}
+
+type V4 struct {
+ *V5
+}
+
+type V5 struct {
+ *V6
+}
+
+type V6 struct{}
+
+// V4.M should appear as method of V2 and V3 if AllMethods is set.
+func (*V4) M() {}
+
+// V6.M should appear as method of V1 and V5 if AllMethods is set.
+func (*V6) M() {}
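
Outside the fixture, the embedding rules e.go exercises look like this in ordinary Go: a method declared at a shallower depth shadows a promoted one, while two promoted candidates at the same depth conflict and neither is selectable. A standalone sketch:

	package main

	import "fmt"

	type a struct{}

	func (a) M() string { return "a.M" }

	type b struct{}

	func (b) M() string { return "b.M" }

	// Like T1 above: a.M and b.M sit at the same depth, so Both has no
	// promoted M; selecting Both{}.M would be a compile-time ambiguity.
	type Both struct {
		a
		b
	}

	// Like T2 above: the shallower Outer.M shadows the promoted a.M.
	type Outer struct{ a }

	func (Outer) M() string { return "Outer.M" }

	func main() {
		fmt.Println(Outer{}.M()) // prints "Outer.M"
		_ = Both{}               // declaring and building Both is fine; only the selector is ambiguous
	}
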
diff --git a/src/go/doc/testdata/error1.0.golden b/src/go/doc/testdata/error1.0.golden
new file mode 100644
index 000000000..6c6fe5d49
--- /dev/null
+++ b/src/go/doc/testdata/error1.0.golden
@@ -0,0 +1,30 @@
+//
+PACKAGE error1
+
+IMPORTPATH
+ testdata/error1
+
+FILENAMES
+ testdata/error1.go
+
+TYPES
+ //
+ type I0 interface {
+ // When embedded, the predeclared error interface
+ // must remain visible in interface types.
+ error
+ }
+
+ //
+ type S0 struct {
+ // contains filtered or unexported fields
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // error should be visible
+ error
+ }
+ }
+
diff --git a/src/go/doc/testdata/error1.1.golden b/src/go/doc/testdata/error1.1.golden
new file mode 100644
index 000000000..a8dc2e71d
--- /dev/null
+++ b/src/go/doc/testdata/error1.1.golden
@@ -0,0 +1,32 @@
+//
+PACKAGE error1
+
+IMPORTPATH
+ testdata/error1
+
+FILENAMES
+ testdata/error1.go
+
+TYPES
+ //
+ type I0 interface {
+ // When embedded, the predeclared error interface
+ // must remain visible in interface types.
+ error
+ }
+
+ //
+ type S0 struct {
+ // In struct types, an embedded error must only be visible
+ // if AllDecls is set.
+ error
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // error should be visible
+ error
+ }
+ }
+
diff --git a/src/go/doc/testdata/error1.2.golden b/src/go/doc/testdata/error1.2.golden
new file mode 100644
index 000000000..6c6fe5d49
--- /dev/null
+++ b/src/go/doc/testdata/error1.2.golden
@@ -0,0 +1,30 @@
+//
+PACKAGE error1
+
+IMPORTPATH
+ testdata/error1
+
+FILENAMES
+ testdata/error1.go
+
+TYPES
+ //
+ type I0 interface {
+ // When embedded, the predeclared error interface
+ // must remain visible in interface types.
+ error
+ }
+
+ //
+ type S0 struct {
+ // contains filtered or unexported fields
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // error should be visible
+ error
+ }
+ }
+
diff --git a/src/go/doc/testdata/error1.go b/src/go/doc/testdata/error1.go
new file mode 100644
index 000000000..3c777a780
--- /dev/null
+++ b/src/go/doc/testdata/error1.go
@@ -0,0 +1,24 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package error1
+
+type I0 interface {
+ // When embedded, the predeclared error interface
+ // must remain visible in interface types.
+ error
+}
+
+type T0 struct {
+ ExportedField interface {
+ // error should be visible
+ error
+ }
+}
+
+type S0 struct {
+ // In struct types, an embedded error must only be visible
+ // if AllDecls is set.
+ error
+}
diff --git a/src/go/doc/testdata/error2.0.golden b/src/go/doc/testdata/error2.0.golden
new file mode 100644
index 000000000..dedfe412a
--- /dev/null
+++ b/src/go/doc/testdata/error2.0.golden
@@ -0,0 +1,27 @@
+//
+PACKAGE error2
+
+IMPORTPATH
+ testdata/error2
+
+FILENAMES
+ testdata/error2.go
+
+TYPES
+ //
+ type I0 interface {
+ // contains filtered or unexported methods
+ }
+
+ //
+ type S0 struct {
+ // contains filtered or unexported fields
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // contains filtered or unexported methods
+ }
+ }
+
diff --git a/src/go/doc/testdata/error2.1.golden b/src/go/doc/testdata/error2.1.golden
new file mode 100644
index 000000000..dbcc1b03e
--- /dev/null
+++ b/src/go/doc/testdata/error2.1.golden
@@ -0,0 +1,37 @@
+//
+PACKAGE error2
+
+IMPORTPATH
+ testdata/error2
+
+FILENAMES
+ testdata/error2.go
+
+TYPES
+ //
+ type I0 interface {
+ // When embedded, the locally-declared error interface
+ // is only visible if all declarations are shown.
+ error
+ }
+
+ //
+ type S0 struct {
+ // In struct types, an embedded error must only be visible
+ // if AllDecls is set.
+ error
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // error should not be visible
+ error
+ }
+ }
+
+ // This error declaration shadows the predeclared error type.
+ type error interface {
+ Error() string
+ }
+
diff --git a/src/go/doc/testdata/error2.2.golden b/src/go/doc/testdata/error2.2.golden
new file mode 100644
index 000000000..dedfe412a
--- /dev/null
+++ b/src/go/doc/testdata/error2.2.golden
@@ -0,0 +1,27 @@
+//
+PACKAGE error2
+
+IMPORTPATH
+ testdata/error2
+
+FILENAMES
+ testdata/error2.go
+
+TYPES
+ //
+ type I0 interface {
+ // contains filtered or unexported methods
+ }
+
+ //
+ type S0 struct {
+ // contains filtered or unexported fields
+ }
+
+ //
+ type T0 struct {
+ ExportedField interface {
+ // contains filtered or unexported methods
+ }
+ }
+
diff --git a/src/go/doc/testdata/error2.go b/src/go/doc/testdata/error2.go
new file mode 100644
index 000000000..6ee96c245
--- /dev/null
+++ b/src/go/doc/testdata/error2.go
@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package error2
+
+type I0 interface {
+ // When embedded, the locally-declared error interface
+ // is only visible if all declarations are shown.
+ error
+}
+
+type T0 struct {
+ ExportedField interface {
+ // error should not be visible
+ error
+ }
+}
+
+type S0 struct {
+ // In struct types, an embedded error must only be visible
+ // if AllDecls is set.
+ error
+}
+
+// This error declaration shadows the predeclared error type.
+type error interface {
+ Error() string
+}
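
The reason the error2 goldens differ only in the AllDecls view: the shadowing type error is lower-case and therefore unexported, so the default and AllMethods modes filter it, and every embedded reference to it collapses into the "contains filtered or unexported" placeholder. Restated minimally:

	package shadow

	// Lower-case, hence unexported: go/doc drops this declaration unless
	// AllDecls is set, and the embedded use below collapses into
	// "contains filtered or unexported methods".
	type error interface {
		Error() string
	}

	type I interface {
		error // the local error above, not the predeclared one
	}
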
diff --git a/src/go/doc/testdata/example.go b/src/go/doc/testdata/example.go
new file mode 100644
index 000000000..fdeda137e
--- /dev/null
+++ b/src/go/doc/testdata/example.go
@@ -0,0 +1,81 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+)
+
+type InternalExample struct {
+ Name string
+ F func()
+ Output string
+}
+
+func RunExamples(examples []InternalExample) (ok bool) {
+ ok = true
+
+ var eg InternalExample
+
+ stdout, stderr := os.Stdout, os.Stderr
+ defer func() {
+ os.Stdout, os.Stderr = stdout, stderr
+ if e := recover(); e != nil {
+ fmt.Printf("--- FAIL: %s\npanic: %v\n", eg.Name, e)
+ os.Exit(1)
+ }
+ }()
+
+ for _, eg = range examples {
+ if *chatty {
+ fmt.Printf("=== RUN: %s\n", eg.Name)
+ }
+
+ // capture stdout and stderr
+ r, w, err := os.Pipe()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ os.Stdout, os.Stderr = w, w
+ outC := make(chan string)
+ go func() {
+ buf := new(bytes.Buffer)
+ _, err := io.Copy(buf, r)
+ if err != nil {
+ fmt.Fprintf(stderr, "testing: copying pipe: %v\n", err)
+ os.Exit(1)
+ }
+ outC <- buf.String()
+ }()
+
+ // run example
+ t0 := time.Now()
+ eg.F()
+ dt := time.Now().Sub(t0)
+
+ // close pipe, restore stdout/stderr, get output
+ w.Close()
+ os.Stdout, os.Stderr = stdout, stderr
+ out := <-outC
+
+ // report any errors
+ tstr := fmt.Sprintf("(%.2f seconds)", dt.Seconds())
+ if g, e := strings.TrimSpace(out), strings.TrimSpace(eg.Output); g != e {
+ fmt.Printf("--- FAIL: %s %s\ngot:\n%s\nwant:\n%s\n",
+ eg.Name, tstr, g, e)
+ ok = false
+ } else if *chatty {
+ fmt.Printf("--- PASS: %s %s\n", eg.Name, tstr)
+ }
+ }
+
+ return
+}
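
The InternalExample records consumed by RunExamples are synthesized by the go test machinery from example functions in _test.go files; the function's captured stdout, trimmed of surrounding space, must match the text after the Output: marker. For instance:

	package hello_test

	import "fmt"

	// The go test machinery turns this into an InternalExample named
	// "Hello" whose Output field is "hello, world".
	func ExampleHello() {
		fmt.Println("hello, world")
		// Output: hello, world
	}
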
diff --git a/src/go/doc/testdata/f.0.golden b/src/go/doc/testdata/f.0.golden
new file mode 100644
index 000000000..817590186
--- /dev/null
+++ b/src/go/doc/testdata/f.0.golden
@@ -0,0 +1,13 @@
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+ testdata/f
+
+FILENAMES
+ testdata/f.go
+
+FUNCTIONS
+ // Exported must always be visible. Was issue 2824.
+ func Exported() private
+
diff --git a/src/go/doc/testdata/f.1.golden b/src/go/doc/testdata/f.1.golden
new file mode 100644
index 000000000..ba68e884c
--- /dev/null
+++ b/src/go/doc/testdata/f.1.golden
@@ -0,0 +1,16 @@
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+ testdata/f
+
+FILENAMES
+ testdata/f.go
+
+TYPES
+ //
+ type private struct{}
+
+ // Exported must always be visible. Was issue 2824.
+ func Exported() private
+
diff --git a/src/go/doc/testdata/f.2.golden b/src/go/doc/testdata/f.2.golden
new file mode 100644
index 000000000..817590186
--- /dev/null
+++ b/src/go/doc/testdata/f.2.golden
@@ -0,0 +1,13 @@
+// The package f is a go/doc test for functions and factory ...
+PACKAGE f
+
+IMPORTPATH
+ testdata/f
+
+FILENAMES
+ testdata/f.go
+
+FUNCTIONS
+ // Exported must always be visible. Was issue 2824.
+ func Exported() private
+
diff --git a/src/go/doc/testdata/f.go b/src/go/doc/testdata/f.go
new file mode 100644
index 000000000..7e9add907
--- /dev/null
+++ b/src/go/doc/testdata/f.go
@@ -0,0 +1,14 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The package f is a go/doc test for functions and factory methods.
+package f
+
+// ----------------------------------------------------------------------------
+// Factory functions for non-exported types must not get lost.
+
+type private struct{}
+
+// Exported must always be visible. Was issue 2824.
+func Exported() private { return private{} }
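
Restating the rule f.go pins down: go/doc treats a function as a factory for its result type when that type is declared in the same package, so the function is listed under the type when the type is shown (the .1 golden) and kept in the top-level FUNCTIONS section when the type is filtered (the .0 and .2 goldens), rather than being dropped. For example:

	package q

	type buffer struct{ data []byte }

	// NewBuffer's result type is declared in this package, so go/doc files
	// it under buffer when unexported declarations are shown, and keeps it
	// at the top level when buffer is filtered out.
	func NewBuffer() *buffer { return &buffer{} }
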
diff --git a/src/go/doc/testdata/template.txt b/src/go/doc/testdata/template.txt
new file mode 100644
index 000000000..1b0738261
--- /dev/null
+++ b/src/go/doc/testdata/template.txt
@@ -0,0 +1,68 @@
+{{synopsis .Doc}}
+PACKAGE {{.Name}}
+
+IMPORTPATH
+ {{.ImportPath}}
+
+{{with .Imports}}IMPORTS
+{{range .}} {{.}}
+{{end}}
+{{end}}{{/*
+
+*/}}FILENAMES
+{{range .Filenames}} {{.}}
+{{end}}{{/*
+
+*/}}{{with .Consts}}
+CONSTANTS
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Vars}}
+VARIABLES
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Funcs}}
+FUNCTIONS
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{/*
+
+*/}}{{with .Types}}
+TYPES
+{{range .}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{range .Consts}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Vars}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Funcs}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{/*
+
+*/}}{{range .Methods}} {{synopsis .Doc}}
+ {{node .Decl $.FSet}}
+
+{{end}}{{end}}{{end}}{{/*
+
+*/}}{{with .Bugs}}
+BUGS .Bugs is now deprecated, please use .Notes instead
+{{range .}}{{indent "\t" .}}
+{{end}}{{end}}{{with .Notes}}{{range $marker, $content := .}}
+{{$marker}}S
+{{range $content}}{{$marker}}({{.UID}}){{indent "\t" .Body}}
+{{end}}{{end}}{{end}} \ No newline at end of file
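
A hedged sketch of how this template might be executed; the helper implementations and the bundle type are assumptions here (the real versions live in doc_test.go, which is not part of this hunk):

	package doctest

	import (
		"bytes"
		"go/doc"
		"go/printer"
		"go/token"
		"io"
		"strings"
		"text/template"
	)

	// bundle supplies what the template dereferences: the fields of a
	// *doc.Package plus the FSet consumed by the "node" helper.
	type bundle struct {
		*doc.Package
		FSet *token.FileSet
	}

	func render(w io.Writer, pkg *doc.Package, fset *token.FileSet) error {
		funcs := template.FuncMap{
			"synopsis": doc.Synopsis, // first sentence of a doc comment
			"node": func(n interface{}, fset *token.FileSet) (string, error) {
				var buf bytes.Buffer
				err := printer.Fprint(&buf, fset, n)
				return buf.String(), err
			},
			"indent": func(prefix, s string) string {
				return prefix + strings.Replace(s, "\n", "\n"+prefix, -1)
			},
		}
		t, err := template.New("template.txt").Funcs(funcs).ParseFiles("testdata/template.txt")
		if err != nil {
			return err
		}
		return t.Execute(w, &bundle{pkg, fset})
	}
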
diff --git a/src/go/doc/testdata/testing.0.golden b/src/go/doc/testdata/testing.0.golden
new file mode 100644
index 000000000..f8348f1ac
--- /dev/null
+++ b/src/go/doc/testdata/testing.0.golden
@@ -0,0 +1,156 @@
+// Package testing provides support for automated testing of Go ...
+PACKAGE testing
+
+IMPORTPATH
+ testdata/testing
+
+IMPORTS
+ bytes
+ flag
+ fmt
+ io
+ os
+ runtime
+ runtime/pprof
+ strconv
+ strings
+ time
+
+FILENAMES
+ testdata/benchmark.go
+ testdata/example.go
+ testdata/testing.go
+
+FUNCTIONS
+ // An internal function but exported because it is cross-package; ...
+ func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
+
+ // An internal function but exported because it is cross-package; ...
+ func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
+
+ //
+ func RunExamples(examples []InternalExample) (ok bool)
+
+ //
+ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
+
+ // Short reports whether the -test.short flag is set.
+ func Short() bool
+
+
+TYPES
+ // B is a type passed to Benchmark functions to manage benchmark ...
+ type B struct {
+ N int
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *B) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *B) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *B) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *B) FailNow()
+
+ // Failed reports whether the function has failed.
+ func (c *B) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *B) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *B) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *B) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *B) Logf(format string, args ...interface{})
+
+ // ResetTimer sets the elapsed benchmark time to zero. It does not ...
+ func (b *B) ResetTimer()
+
+ // SetBytes records the number of bytes processed in a single ...
+ func (b *B) SetBytes(n int64)
+
+ // StartTimer starts timing a test. This function is called ...
+ func (b *B) StartTimer()
+
+ // StopTimer stops timing a test. This can be used to pause the ...
+ func (b *B) StopTimer()
+
+ // The results of a benchmark run.
+ type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+ }
+
+ // Benchmark benchmarks a single function. Useful for creating ...
+ func Benchmark(f func(b *B)) BenchmarkResult
+
+ //
+ func (r BenchmarkResult) NsPerOp() int64
+
+ //
+ func (r BenchmarkResult) String() string
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+ }
+
+ //
+ type InternalExample struct {
+ Name string
+ F func()
+ Output string
+ }
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalTest struct {
+ Name string
+ F func(*T)
+ }
+
+ // T is a type passed to Test functions to manage test state and ...
+ type T struct {
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *T) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *T) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *T) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *T) FailNow()
+
+ // Failed reports whether the function has failed.
+ func (c *T) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *T) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *T) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *T) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *T) Logf(format string, args ...interface{})
+
+ // Parallel signals that this test is to be run in parallel with ...
+ func (t *T) Parallel()
+
diff --git a/src/go/doc/testdata/testing.1.golden b/src/go/doc/testdata/testing.1.golden
new file mode 100644
index 000000000..282bb1015
--- /dev/null
+++ b/src/go/doc/testdata/testing.1.golden
@@ -0,0 +1,298 @@
+// Package testing provides support for automated testing of Go ...
+PACKAGE testing
+
+IMPORTPATH
+ testdata/testing
+
+IMPORTS
+ bytes
+ flag
+ fmt
+ io
+ os
+ runtime
+ runtime/pprof
+ strconv
+ strings
+ time
+
+FILENAMES
+ testdata/benchmark.go
+ testdata/example.go
+ testdata/testing.go
+
+VARIABLES
+ //
+ var (
+ // The short flag requests that tests run more quickly, but its functionality
+ // is provided by test writers themselves. The testing package is just its
+ // home. The all.bash installation script sets it to make installation more
+ // efficient, but by default the flag is off so a plain "go test" will do a
+ // full test of the package.
+ short = flag.Bool("test.short", false, "run smaller test suite to save time")
+
+ // Report as tests are run; default is silent for success.
+ chatty = flag.Bool("test.v", false, "verbose: print additional output")
+ match = flag.String("test.run", "", "regular expression to select tests to run")
+ memProfile = flag.String("test.memprofile", "", "write a memory profile to the named file after execution")
+ memProfileRate = flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate")
+ cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
+ timeout = flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests")
+ cpuListStr = flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test")
+ parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism")
+
+ cpuList []int
+ )
+
+ //
+ var benchTime = flag.Duration("test.benchtime", 1*time.Second, "approximate run time for each benchmark")
+
+ //
+ var matchBenchmarks = flag.String("test.bench", "", "regular expression to select benchmarks to run")
+
+ //
+ var timer *time.Timer
+
+
+FUNCTIONS
+ // An internal function but exported because it is cross-package; ...
+ func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
+
+ // An internal function but exported because it is cross-package; ...
+ func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
+
+ //
+ func RunExamples(examples []InternalExample) (ok bool)
+
+ //
+ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
+
+ // Short reports whether the -test.short flag is set.
+ func Short() bool
+
+ // after runs after all testing.
+ func after()
+
+ // alarm is called if the timeout expires.
+ func alarm()
+
+ // before runs before all testing.
+ func before()
+
+ // decorate inserts the final newline if needed and indentation ...
+ func decorate(s string, addFileLine bool) string
+
+ //
+ func max(x, y int) int
+
+ //
+ func min(x, y int) int
+
+ //
+ func parseCpuList()
+
+ // roundDown10 rounds a number down to the nearest power of 10.
+ func roundDown10(n int) int
+
+ // roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
+ func roundUp(n int) int
+
+ // startAlarm starts an alarm if requested.
+ func startAlarm()
+
+ // stopAlarm turns off the alarm.
+ func stopAlarm()
+
+ //
+ func tRunner(t *T, test *InternalTest)
+
+
+TYPES
+ // B is a type passed to Benchmark functions to manage benchmark ...
+ type B struct {
+ common
+ N int
+ benchmark InternalBenchmark
+ bytes int64
+ timerOn bool
+ result BenchmarkResult
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *B) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *B) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *B) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *B) FailNow()
+
+ // Failed reports whether the function has failed.
+ func (c *B) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *B) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *B) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *B) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *B) Logf(format string, args ...interface{})
+
+ // ResetTimer sets the elapsed benchmark time to zero. It does not ...
+ func (b *B) ResetTimer()
+
+ // SetBytes records the number of bytes processed in a single ...
+ func (b *B) SetBytes(n int64)
+
+ // StartTimer starts timing a test. This function is called ...
+ func (b *B) StartTimer()
+
+ // StopTimer stops timing a test. This can be used to pause the ...
+ func (b *B) StopTimer()
+
+ // launch launches the benchmark function. It gradually increases ...
+ func (b *B) launch()
+
+ // log generates the output. It's always at the same stack depth.
+ func (c *B) log(s string)
+
+ //
+ func (b *B) nsPerOp() int64
+
+ // run times the benchmark function in a separate goroutine.
+ func (b *B) run() BenchmarkResult
+
+ // runN runs a single benchmark for the specified number of ...
+ func (b *B) runN(n int)
+
+ // trimOutput shortens the output from a benchmark, which can be ...
+ func (b *B) trimOutput()
+
+ // The results of a benchmark run.
+ type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+ }
+
+ // Benchmark benchmarks a single function. Useful for creating ...
+ func Benchmark(f func(b *B)) BenchmarkResult
+
+ //
+ func (r BenchmarkResult) NsPerOp() int64
+
+ //
+ func (r BenchmarkResult) String() string
+
+ //
+ func (r BenchmarkResult) mbPerSec() float64
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+ }
+
+ //
+ type InternalExample struct {
+ Name string
+ F func()
+ Output string
+ }
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalTest struct {
+ Name string
+ F func(*T)
+ }
+
+ // T is a type passed to Test functions to manage test state and ...
+ type T struct {
+ common
+ name string // Name of test.
+ startParallel chan bool // Parallel tests will wait on this.
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *T) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *T) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *T) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *T) FailNow()
+
+ // Failed reports whether the function has failed.
+ func (c *T) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *T) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *T) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *T) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *T) Logf(format string, args ...interface{})
+
+ // Parallel signals that this test is to be run in parallel with ...
+ func (t *T) Parallel()
+
+ // log generates the output. It's always at the same stack depth.
+ func (c *T) log(s string)
+
+ //
+ func (t *T) report()
+
+ // common holds the elements common between T and B and captures ...
+ type common struct {
+ output []byte // Output generated by test or benchmark.
+ failed bool // Test or benchmark has failed.
+ start time.Time // Time test or benchmark started
+ duration time.Duration
+ self interface{} // To be sent on signal channel when done.
+ signal chan interface{} // Output for serial tests.
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *common) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *common) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *common) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *common) FailNow()
+
+ // Failed reports whether the function has failed.
+ func (c *common) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *common) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *common) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *common) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *common) Logf(format string, args ...interface{})
+
+ // log generates the output. It's always at the same stack depth.
+ func (c *common) log(s string)
+
diff --git a/src/go/doc/testdata/testing.2.golden b/src/go/doc/testdata/testing.2.golden
new file mode 100644
index 000000000..f8348f1ac
--- /dev/null
+++ b/src/go/doc/testdata/testing.2.golden
@@ -0,0 +1,156 @@
+// Package testing provides support for automated testing of Go ...
+PACKAGE testing
+
+IMPORTPATH
+ testdata/testing
+
+IMPORTS
+ bytes
+ flag
+ fmt
+ io
+ os
+ runtime
+ runtime/pprof
+ strconv
+ strings
+ time
+
+FILENAMES
+ testdata/benchmark.go
+ testdata/example.go
+ testdata/testing.go
+
+FUNCTIONS
+ // An internal function but exported because it is cross-package; ...
+ func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)
+
+ // An internal function but exported because it is cross-package; ...
+ func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark)
+
+ //
+ func RunExamples(examples []InternalExample) (ok bool)
+
+ //
+ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool)
+
+ // Short reports whether the -test.short flag is set.
+ func Short() bool
+
+
+TYPES
+ // B is a type passed to Benchmark functions to manage benchmark ...
+ type B struct {
+ N int
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *B) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *B) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *B) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *B) FailNow()
+
+ // Failed reports whether the function has failed.
+ func (c *B) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *B) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *B) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *B) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *B) Logf(format string, args ...interface{})
+
+ // ResetTimer sets the elapsed benchmark time to zero. It does not ...
+ func (b *B) ResetTimer()
+
+ // SetBytes records the number of bytes processed in a single ...
+ func (b *B) SetBytes(n int64)
+
+ // StartTimer starts timing a test. This function is called ...
+ func (b *B) StartTimer()
+
+ // StopTimer stops timing a test. This can be used to pause the ...
+ func (b *B) StopTimer()
+
+ // The results of a benchmark run.
+ type BenchmarkResult struct {
+ N int // The number of iterations.
+ T time.Duration // The total time taken.
+ Bytes int64 // Bytes processed in one iteration.
+ }
+
+ // Benchmark benchmarks a single function. Useful for creating ...
+ func Benchmark(f func(b *B)) BenchmarkResult
+
+ //
+ func (r BenchmarkResult) NsPerOp() int64
+
+ //
+ func (r BenchmarkResult) String() string
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalBenchmark struct {
+ Name string
+ F func(b *B)
+ }
+
+ //
+ type InternalExample struct {
+ Name string
+ F func()
+ Output string
+ }
+
+ // An internal type but exported because it is cross-package; part ...
+ type InternalTest struct {
+ Name string
+ F func(*T)
+ }
+
+ // T is a type passed to Test functions to manage test state and ...
+ type T struct {
+ // contains filtered or unexported fields
+ }
+
+ // Error is equivalent to Log() followed by Fail().
+ func (c *T) Error(args ...interface{})
+
+ // Errorf is equivalent to Logf() followed by Fail().
+ func (c *T) Errorf(format string, args ...interface{})
+
+ // Fail marks the function as having failed but continues ...
+ func (c *T) Fail()
+
+ // FailNow marks the function as having failed and stops its ...
+ func (c *T) FailNow()
+
+ // Failed reports whether the function has failed.
+ func (c *T) Failed() bool
+
+ // Fatal is equivalent to Log() followed by FailNow().
+ func (c *T) Fatal(args ...interface{})
+
+ // Fatalf is equivalent to Logf() followed by FailNow().
+ func (c *T) Fatalf(format string, args ...interface{})
+
+ // Log formats its arguments using default formatting, analogous ...
+ func (c *T) Log(args ...interface{})
+
+ // Logf formats its arguments according to the format, analogous ...
+ func (c *T) Logf(format string, args ...interface{})
+
+ // Parallel signals that this test is to be run in parallel with ...
+ func (t *T) Parallel()
+
diff --git a/src/go/doc/testdata/testing.go b/src/go/doc/testdata/testing.go
new file mode 100644
index 000000000..93ed494c3
--- /dev/null
+++ b/src/go/doc/testdata/testing.go
@@ -0,0 +1,404 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testing provides support for automated testing of Go packages.
+// It is intended to be used in concert with the ``go test'' utility, which automates
+// execution of any function of the form
+// func TestXxx(*testing.T)
+// where Xxx can be any alphanumeric string (but the first letter must not be in
+// [a-z]) and serves to identify the test routine.
+// These TestXxx routines should be declared within the package they are testing.
+//
+// Functions of the form
+// func BenchmarkXxx(*testing.B)
+// are considered benchmarks, and are executed by go test when the -test.bench
+// flag is provided.
+//
+// A sample benchmark function looks like this:
+// func BenchmarkHello(b *testing.B) {
+// for i := 0; i < b.N; i++ {
+// fmt.Sprintf("hello")
+// }
+// }
+// The benchmark package will vary b.N until the benchmark function lasts
+// long enough to be timed reliably. The output
+// testing.BenchmarkHello 10000000 282 ns/op
+// means that the loop ran 10000000 times at a speed of 282 ns per loop.
+//
+// If a benchmark needs some expensive setup before running, the timer
+// may be stopped:
+// func BenchmarkBigLen(b *testing.B) {
+// b.StopTimer()
+// big := NewBig()
+// b.StartTimer()
+// for i := 0; i < b.N; i++ {
+// big.Len()
+// }
+// }
+package testing
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ // The short flag requests that tests run more quickly, but its functionality
+ // is provided by test writers themselves. The testing package is just its
+ // home. The all.bash installation script sets it to make installation more
+ // efficient, but by default the flag is off so a plain "go test" will do a
+ // full test of the package.
+ short = flag.Bool("test.short", false, "run smaller test suite to save time")
+
+ // Report as tests are run; default is silent for success.
+ chatty = flag.Bool("test.v", false, "verbose: print additional output")
+ match = flag.String("test.run", "", "regular expression to select tests to run")
+ memProfile = flag.String("test.memprofile", "", "write a memory profile to the named file after execution")
+ memProfileRate = flag.Int("test.memprofilerate", 0, "if >=0, sets runtime.MemProfileRate")
+ cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
+ timeout = flag.Duration("test.timeout", 0, "if positive, sets an aggregate time limit for all tests")
+ cpuListStr = flag.String("test.cpu", "", "comma-separated list of number of CPUs to use for each test")
+ parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "maximum test parallelism")
+
+ cpuList []int
+)
+
+// common holds the elements common between T and B and
+// captures common methods such as Errorf.
+type common struct {
+ output []byte // Output generated by test or benchmark.
+ failed bool // Test or benchmark has failed.
+ start time.Time // Time test or benchmark started
+ duration time.Duration
+ self interface{} // To be sent on signal channel when done.
+ signal chan interface{} // Output for serial tests.
+}
+
+// Short reports whether the -test.short flag is set.
+func Short() bool {
+ return *short
+}
+
+// decorate inserts the final newline if needed and indentation tabs for formatting.
+// If addFileLine is true, it also prefixes the string with the file and line of the call site.
+func decorate(s string, addFileLine bool) string {
+ if addFileLine {
+ _, file, line, ok := runtime.Caller(3) // decorate + log + public function.
+ if ok {
+ // Truncate file name at last file name separator.
+ if index := strings.LastIndex(file, "/"); index >= 0 {
+ file = file[index+1:]
+ } else if index = strings.LastIndex(file, "\\"); index >= 0 {
+ file = file[index+1:]
+ }
+ } else {
+ file = "???"
+ line = 1
+ }
+ s = fmt.Sprintf("%s:%d: %s", file, line, s)
+ }
+ s = "\t" + s // Every line is indented at least one tab.
+ n := len(s)
+ if n > 0 && s[n-1] != '\n' {
+ s += "\n"
+ n++
+ }
+ for i := 0; i < n-1; i++ { // -1 to avoid final newline
+ if s[i] == '\n' {
+ // Second and subsequent lines are indented an extra tab.
+ return s[0:i+1] + "\t" + decorate(s[i+1:n], false)
+ }
+ }
+ return s
+}
+
+// T is a type passed to Test functions to manage test state and support formatted test logs.
+// Logs are accumulated during execution and dumped to standard error when done.
+type T struct {
+ common
+ name string // Name of test.
+ startParallel chan bool // Parallel tests will wait on this.
+}
+
+// Fail marks the function as having failed but continues execution.
+func (c *common) Fail() { c.failed = true }
+
+// Failed reports whether the function has failed.
+func (c *common) Failed() bool { return c.failed }
+
+// FailNow marks the function as having failed and stops its execution.
+// Execution will continue at the next Test.
+func (c *common) FailNow() {
+ c.Fail()
+
+ // Calling runtime.Goexit will exit the goroutine, which
+ // will run the deferred functions in this goroutine,
+ // which will eventually run the deferred lines in tRunner,
+ // which will signal to the test loop that this test is done.
+ //
+ // A previous version of this code said:
+ //
+ // c.duration = ...
+ // c.signal <- c.self
+ // runtime.Goexit()
+ //
+ // This previous version duplicated code (those lines are in
+ // tRunner no matter what), but worse the goroutine teardown
+ // implicit in runtime.Goexit was not guaranteed to complete
+ // before the test exited. If a test deferred an important cleanup
+ // function (like removing temporary files), there was no guarantee
+ // it would run on a test failure. Because we send on c.signal during
+ // a top-of-stack deferred function now, we know that the send
+ // only happens after any other stacked defers have completed.
+ runtime.Goexit()
+}
+
+// log generates the output. It's always at the same stack depth.
+func (c *common) log(s string) {
+ c.output = append(c.output, decorate(s, true)...)
+}
+
+// Log formats its arguments using default formatting, analogous to Println(),
+// and records the text in the error log.
+func (c *common) Log(args ...interface{}) { c.log(fmt.Sprintln(args...)) }
+
+// Logf formats its arguments according to the format, analogous to Printf(),
+// and records the text in the error log.
+func (c *common) Logf(format string, args ...interface{}) { c.log(fmt.Sprintf(format, args...)) }
+
+// Error is equivalent to Log() followed by Fail().
+func (c *common) Error(args ...interface{}) {
+ c.log(fmt.Sprintln(args...))
+ c.Fail()
+}
+
+// Errorf is equivalent to Logf() followed by Fail().
+func (c *common) Errorf(format string, args ...interface{}) {
+ c.log(fmt.Sprintf(format, args...))
+ c.Fail()
+}
+
+// Fatal is equivalent to Log() followed by FailNow().
+func (c *common) Fatal(args ...interface{}) {
+ c.log(fmt.Sprintln(args...))
+ c.FailNow()
+}
+
+// Fatalf is equivalent to Logf() followed by FailNow().
+func (c *common) Fatalf(format string, args ...interface{}) {
+ c.log(fmt.Sprintf(format, args...))
+ c.FailNow()
+}
+
+// Parallel signals that this test is to be run in parallel with (and only with)
+// other parallel tests in this CPU group.
+func (t *T) Parallel() {
+ t.signal <- (*T)(nil) // Release main testing loop
+ <-t.startParallel // Wait for serial tests to finish
+}
+
+// An internal type but exported because it is cross-package; part of the implementation
+// of go test.
+type InternalTest struct {
+ Name string
+ F func(*T)
+}
+
+func tRunner(t *T, test *InternalTest) {
+ t.start = time.Now()
+
+ // When this goroutine is done, either because test.F(t)
+ // returned normally or because a test failure triggered
+ // a call to runtime.Goexit, record the duration and send
+ // a signal saying that the test is done.
+ defer func() {
+ t.duration = time.Now().Sub(t.start)
+ t.signal <- t
+ }()
+
+ test.F(t)
+}
+
+// An internal function but exported because it is cross-package; part of the implementation
+// of go test.
+func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {
+ flag.Parse()
+ parseCpuList()
+
+ before()
+ startAlarm()
+ testOk := RunTests(matchString, tests)
+ exampleOk := RunExamples(examples)
+ if !testOk || !exampleOk {
+ fmt.Println("FAIL")
+ os.Exit(1)
+ }
+ fmt.Println("PASS")
+ stopAlarm()
+ RunBenchmarks(matchString, benchmarks)
+ after()
+}
+
+func (t *T) report() {
+ tstr := fmt.Sprintf("(%.2f seconds)", t.duration.Seconds())
+ format := "--- %s: %s %s\n%s"
+ if t.failed {
+ fmt.Printf(format, "FAIL", t.name, tstr, t.output)
+ } else if *chatty {
+ fmt.Printf(format, "PASS", t.name, tstr, t.output)
+ }
+}
+
+func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {
+ ok = true
+ if len(tests) == 0 {
+ fmt.Fprintln(os.Stderr, "testing: warning: no tests to run")
+ return
+ }
+ for _, procs := range cpuList {
+ runtime.GOMAXPROCS(procs)
+ // We build a new channel tree for each run of the loop.
+ // collector merges in one channel all the upstream signals from parallel tests.
+ // If all tests pump to the same channel, a bug can occur where a test
+ // kicks off a goroutine that Fails, yet the test still delivers a completion signal,
+ // which skews the counting.
+ var collector = make(chan interface{})
+
+ numParallel := 0
+ startParallel := make(chan bool)
+
+ for i := 0; i < len(tests); i++ {
+ matched, err := matchString(*match, tests[i].Name)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: invalid regexp for -test.run: %s\n", err)
+ os.Exit(1)
+ }
+ if !matched {
+ continue
+ }
+ testName := tests[i].Name
+ if procs != 1 {
+ testName = fmt.Sprintf("%s-%d", tests[i].Name, procs)
+ }
+ t := &T{
+ common: common{
+ signal: make(chan interface{}),
+ },
+ name: testName,
+ startParallel: startParallel,
+ }
+ t.self = t
+ if *chatty {
+ fmt.Printf("=== RUN %s\n", t.name)
+ }
+ go tRunner(t, &tests[i])
+ out := (<-t.signal).(*T)
+ if out == nil { // Parallel run.
+ go func() {
+ collector <- <-t.signal
+ }()
+ numParallel++
+ continue
+ }
+ t.report()
+ ok = ok && !out.failed
+ }
+
+ running := 0
+ for numParallel+running > 0 {
+ if running < *parallel && numParallel > 0 {
+ startParallel <- true
+ running++
+ numParallel--
+ continue
+ }
+ t := (<-collector).(*T)
+ t.report()
+ ok = ok && !t.failed
+ running--
+ }
+ }
+ return
+}
+
+// before runs before all testing.
+func before() {
+ if *memProfileRate > 0 {
+ runtime.MemProfileRate = *memProfileRate
+ }
+ if *cpuProfile != "" {
+ f, err := os.Create(*cpuProfile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s", err)
+ return
+ }
+ if err := pprof.StartCPUProfile(f); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s", err)
+ f.Close()
+ return
+ }
+ // Could save f so after can call f.Close; not worth the effort.
+ }
+
+}
+
+// after runs after all testing.
+func after() {
+ if *cpuProfile != "" {
+ pprof.StopCPUProfile() // flushes profile to disk
+ }
+ if *memProfile != "" {
+ f, err := os.Create(*memProfile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "testing: %s", err)
+ return
+ }
+ if err = pprof.WriteHeapProfile(f); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: can't write %s: %s", *memProfile, err)
+ }
+ f.Close()
+ }
+}
+
+var timer *time.Timer
+
+// startAlarm starts an alarm if requested.
+func startAlarm() {
+ if *timeout > 0 {
+ timer = time.AfterFunc(*timeout, alarm)
+ }
+}
+
+// stopAlarm turns off the alarm.
+func stopAlarm() {
+ if *timeout > 0 {
+ timer.Stop()
+ }
+}
+
+// alarm is called if the timeout expires.
+func alarm() {
+ panic("test timed out")
+}
+
+func parseCpuList() {
+ if len(*cpuListStr) == 0 {
+ cpuList = append(cpuList, runtime.GOMAXPROCS(-1))
+ } else {
+ for _, val := range strings.Split(*cpuListStr, ",") {
+ cpu, err := strconv.Atoi(val)
+ if err != nil || cpu <= 0 {
+ fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu", val)
+ os.Exit(1)
+ }
+ cpuList = append(cpuList, cpu)
+ }
+ }
+}
diff --git a/src/go/format/format.go b/src/go/format/format.go
new file mode 100644
index 000000000..3d00a645d
--- /dev/null
+++ b/src/go/format/format.go
@@ -0,0 +1,199 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package format implements standard formatting of Go source.
+package format
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io"
+ "strings"
+)
+
+var config = printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 8}
+
+// Node formats node in canonical gofmt style and writes the result to dst.
+//
+// The node type must be *ast.File, *printer.CommentedNode, []ast.Decl,
+// []ast.Stmt, or assignment-compatible to ast.Expr, ast.Decl, ast.Spec,
+// or ast.Stmt. Node does not modify node. Imports are not sorted for
+// nodes representing partial source files (i.e., if the node is not an
+// *ast.File or a *printer.CommentedNode not wrapping an *ast.File).
+//
+// The function may return early (before the entire result is written)
+// and return a formatting error, for instance due to an incorrect AST.
+//
+func Node(dst io.Writer, fset *token.FileSet, node interface{}) error {
+ // Determine if we have a complete source file (file != nil).
+ var file *ast.File
+ var cnode *printer.CommentedNode
+ switch n := node.(type) {
+ case *ast.File:
+ file = n
+ case *printer.CommentedNode:
+ if f, ok := n.Node.(*ast.File); ok {
+ file = f
+ cnode = n
+ }
+ }
+
+ // Sort imports if necessary.
+ if file != nil && hasUnsortedImports(file) {
+ // Make a copy of the AST because ast.SortImports is destructive.
+ // TODO(gri) Do this more efficiently.
+ var buf bytes.Buffer
+ err := config.Fprint(&buf, fset, file)
+ if err != nil {
+ return err
+ }
+ file, err = parser.ParseFile(fset, "", buf.Bytes(), parser.ParseComments)
+ if err != nil {
+ // We should never get here. If we do, provide good diagnostic.
+ return fmt.Errorf("format.Node internal error (%s)", err)
+ }
+ ast.SortImports(fset, file)
+
+ // Use new file with sorted imports.
+ node = file
+ if cnode != nil {
+ node = &printer.CommentedNode{Node: file, Comments: cnode.Comments}
+ }
+ }
+
+ return config.Fprint(dst, fset, node)
+}
+
+// Source formats src in canonical gofmt style and returns the result
+// or an (I/O or syntax) error. src is expected to be a syntactically
+// correct Go source file, or a list of Go declarations or statements.
+//
+// If src is a partial source file, the leading and trailing space of src
+// is applied to the result (such that it has the same leading and trailing
+// space as src), and the result is indented by the same amount as the first
+// line of src containing code. Imports are not sorted for partial source files.
+//
+func Source(src []byte) ([]byte, error) {
+ fset := token.NewFileSet()
+ node, err := parse(fset, src)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ if file, ok := node.(*ast.File); ok {
+ // Complete source file.
+ ast.SortImports(fset, file)
+ err := config.Fprint(&buf, fset, file)
+ if err != nil {
+ return nil, err
+ }
+
+ } else {
+ // Partial source file.
+ // Determine and prepend leading space.
+ i, j := 0, 0
+ for j < len(src) && isSpace(src[j]) {
+ if src[j] == '\n' {
+ i = j + 1 // index of last line in leading space
+ }
+ j++
+ }
+ buf.Write(src[:i])
+
+ // Determine indentation of first code line.
+ // Spaces are ignored unless there are no tabs,
+ // in which case spaces count as one tab.
+ indent := 0
+ hasSpace := false
+ for _, b := range src[i:j] {
+ switch b {
+ case ' ':
+ hasSpace = true
+ case '\t':
+ indent++
+ }
+ }
+ if indent == 0 && hasSpace {
+ indent = 1
+ }
+
+ // Format the source.
+ cfg := config
+ cfg.Indent = indent
+ err := cfg.Fprint(&buf, fset, node)
+ if err != nil {
+ return nil, err
+ }
+
+ // Determine and append trailing space.
+ i = len(src)
+ for i > 0 && isSpace(src[i-1]) {
+ i--
+ }
+ buf.Write(src[i:])
+ }
+
+ return buf.Bytes(), nil
+}
+
+func hasUnsortedImports(file *ast.File) bool {
+ for _, d := range file.Decls {
+ d, ok := d.(*ast.GenDecl)
+ if !ok || d.Tok != token.IMPORT {
+ // Not an import declaration, so we're done.
+ // Imports are always first.
+ return false
+ }
+ if d.Lparen.IsValid() {
+ // For now assume all grouped imports are unsorted.
+ // TODO(gri) Should check if they are sorted already.
+ return true
+ }
+ // Ungrouped imports are sorted by default.
+ }
+ return false
+}
+
+func isSpace(b byte) bool {
+ return b == ' ' || b == '\t' || b == '\n' || b == '\r'
+}
+
+func parse(fset *token.FileSet, src []byte) (interface{}, error) {
+ // Try as a complete source file.
+ file, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err == nil {
+ return file, nil
+ }
+ // If the source is missing a package clause, try as a source fragment; otherwise fail.
+ if !strings.Contains(err.Error(), "expected 'package'") {
+ return nil, err
+ }
+
+ // Try as a declaration list by prepending a package clause in front of src.
+ // Use ';' not '\n' to keep line numbers intact.
+ psrc := append([]byte("package p;"), src...)
+ file, err = parser.ParseFile(fset, "", psrc, parser.ParseComments)
+ if err == nil {
+ return file.Decls, nil
+ }
+ // If the source is missing a declaration, try as a statement list; otherwise fail.
+ if !strings.Contains(err.Error(), "expected declaration") {
+ return nil, err
+ }
+
+ // Try as statement list by wrapping a function around src.
+ fsrc := append(append([]byte("package p; func _() {"), src...), '}')
+ file, err = parser.ParseFile(fset, "", fsrc, parser.ParseComments)
+ if err == nil {
+ return file.Decls[0].(*ast.FuncDecl).Body.List, nil
+ }
+
+ // Failed, and out of options.
+ return nil, err
+}
diff --git a/src/go/format/format_test.go b/src/go/format/format_test.go
new file mode 100644
index 000000000..93f099247
--- /dev/null
+++ b/src/go/format/format_test.go
@@ -0,0 +1,124 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package format
+
+import (
+ "bytes"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "strings"
+ "testing"
+)
+
+const testfile = "format_test.go"
+
+func diff(t *testing.T, dst, src []byte) {
+ line := 1
+ offs := 0 // line offset
+ for i := 0; i < len(dst) && i < len(src); i++ {
+ d := dst[i]
+ s := src[i]
+ if d != s {
+ t.Errorf("dst:%d: %s\n", line, dst[offs:i+1])
+ t.Errorf("src:%d: %s\n", line, src[offs:i+1])
+ return
+ }
+ if s == '\n' {
+ line++
+ offs = i + 1
+ }
+ }
+ if len(dst) != len(src) {
+ t.Errorf("len(dst) = %d, len(src) = %d\nsrc = %q", len(dst), len(src), src)
+ }
+}
+
+func TestNode(t *testing.T) {
+ src, err := ioutil.ReadFile(testfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, testfile, src, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+
+ if err = Node(&buf, fset, file); err != nil {
+ t.Fatal("Node failed:", err)
+ }
+
+ diff(t, buf.Bytes(), src)
+}
+
+func TestSource(t *testing.T) {
+ src, err := ioutil.ReadFile(testfile)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := Source(src)
+ if err != nil {
+ t.Fatal("Source failed:", err)
+ }
+
+ diff(t, res, src)
+}
+
+// Test cases that are expected to fail are marked by the prefix "ERROR".
+var tests = []string{
+ // declaration lists
+ `import "go/format"`,
+ "var x int",
+ "var x int\n\ntype T struct{}",
+
+ // statement lists
+ "x := 0",
+ "f(a, b, c)\nvar x int = f(1, 2, 3)",
+
+ // indentation, leading and trailing space
+ "\tx := 0\n\tgo f()",
+ "\tx := 0\n\tgo f()\n\n\n",
+ "\n\t\t\n\n\tx := 0\n\tgo f()\n\n\n",
+ "\n\t\t\n\n\t\t\tx := 0\n\t\t\tgo f()\n\n\n",
+ "\n\t\t\n\n\t\t\tx := 0\n\t\t\tconst s = `\nfoo\n`\n\n\n", // no indentation inside raw strings
+
+ // erroneous programs
+ "ERROR1 + 2 +",
+ "ERRORx := 0",
+}
+
+func String(s string) (string, error) {
+ res, err := Source([]byte(s))
+ if err != nil {
+ return "", err
+ }
+ return string(res), nil
+}
+
+func TestPartial(t *testing.T) {
+ for _, src := range tests {
+ if strings.HasPrefix(src, "ERROR") {
+ // test expected to fail
+ src = src[5:] // remove ERROR prefix
+ res, err := String(src)
+ if err == nil && res == src {
+ t.Errorf("formatting succeeded but was expected to fail:\n%q", src)
+ }
+ } else {
+ // test expected to succeed
+ res, err := String(src)
+ if err != nil {
+ t.Errorf("formatting failed (%s):\n%q", err, src)
+ } else if res != src {
+ t.Errorf("formatting incorrect:\nsource: %q\nresult: %q", src, res)
+ }
+ }
+ }
+}
diff --git a/src/go/parser/error_test.go b/src/go/parser/error_test.go
new file mode 100644
index 000000000..48fb53e5b
--- /dev/null
+++ b/src/go/parser/error_test.go
@@ -0,0 +1,182 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a parser test harness. The files in the testdata
+// directory are parsed and the errors reported are compared against the
+// error messages expected in the test files. The test files must end in
+// .src rather than .go so that they are not disturbed by gofmt runs.
+//
+// Expected errors are indicated in the test files by putting a comment
+// of the form /* ERROR "rx" */ immediately following an offending token.
+// The harness will verify that an error matching the regular expression
+// rx is reported at that source position.
+//
+// For instance, the following test file indicates that a "not declared"
+// error should be reported for the undeclared variable x:
+//
+// package p
+// func f() {
+// _ = x /* ERROR "not declared" */ + 1
+// }
+
+package parser
+
+import (
+ "go/scanner"
+ "go/token"
+ "io/ioutil"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+const testdata = "testdata"
+
+var fsetErrs = token.NewFileSet()
+
+// getFile assumes that each filename occurs at most once
+func getFile(filename string) (file *token.File) {
+ fsetErrs.Iterate(func(f *token.File) bool {
+ if f.Name() == filename {
+ if file != nil {
+ panic(filename + " used multiple times")
+ }
+ file = f
+ }
+ return true
+ })
+ return file
+}
+
+func getPos(filename string, offset int) token.Pos {
+ if f := getFile(filename); f != nil {
+ return f.Pos(offset)
+ }
+ return token.NoPos
+}
+
+// ERROR comments must be of the form /* ERROR "rx" */ and rx is
+// a regular expression that matches the expected error message.
+// The special form /* ERROR HERE "rx" */ must be used for error
+// messages that appear immediately after a token, rather than at
+// a token's position.
+//
+var errRx = regexp.MustCompile(`^/\* *ERROR *(HERE)? *"([^"]*)" *\*/$`)
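+
+// For instance, in
+//
+//	x /* ERROR "rx1" */ y /* ERROR HERE "rx2" */
+//
+// rx1 is matched against an error reported at the position of the
+// token x, while rx2 is matched against an error reported immediately
+// after the token y.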
+
+// expectedErrors collects the regular expressions of ERROR comments found
+// in files and returns them as a map of error positions to error messages.
+//
+func expectedErrors(t *testing.T, filename string, src []byte) map[token.Pos]string {
+ errors := make(map[token.Pos]string)
+
+ var s scanner.Scanner
+ // file was parsed already - do not add it again to the file
+	// set, otherwise the position information returned here will
+ // not match the position information collected by the parser
+ s.Init(getFile(filename), src, nil, scanner.ScanComments)
+ var prev token.Pos // position of last non-comment, non-semicolon token
+ var here token.Pos // position immediately after the token at position prev
+
+ for {
+ pos, tok, lit := s.Scan()
+ switch tok {
+ case token.EOF:
+ return errors
+ case token.COMMENT:
+ s := errRx.FindStringSubmatch(lit)
+ if len(s) == 3 {
+ pos := prev
+ if s[1] == "HERE" {
+ pos = here
+ }
+ errors[pos] = string(s[2])
+ }
+ default:
+ prev = pos
+ var l int // token length
+ if tok.IsLiteral() {
+ l = len(lit)
+ } else {
+ l = len(tok.String())
+ }
+ here = prev + token.Pos(l)
+ }
+ }
+}
+
+// compareErrors compares the map of expected error messages with the list
+// of found errors and reports discrepancies.
+//
+func compareErrors(t *testing.T, expected map[token.Pos]string, found scanner.ErrorList) {
+ for _, error := range found {
+ // error.Pos is a token.Position, but we want
+ // a token.Pos so we can do a map lookup
+ pos := getPos(error.Pos.Filename, error.Pos.Offset)
+ if msg, found := expected[pos]; found {
+ // we expect a message at pos; check if it matches
+ rx, err := regexp.Compile(msg)
+ if err != nil {
+ t.Errorf("%s: %v", error.Pos, err)
+ continue
+ }
+ if match := rx.MatchString(error.Msg); !match {
+ t.Errorf("%s: %q does not match %q", error.Pos, error.Msg, msg)
+ continue
+ }
+ // we have a match - eliminate this error
+ delete(expected, pos)
+ } else {
+ // To keep in mind when analyzing failed test output:
+ // If the same error position occurs multiple times in errors,
+ // this message will be triggered (because the first error at
+ // the position removes this position from the expected errors).
+ t.Errorf("%s: unexpected error: %s", error.Pos, error.Msg)
+ }
+ }
+
+ // there should be no expected errors left
+ if len(expected) > 0 {
+ t.Errorf("%d errors not reported:", len(expected))
+ for pos, msg := range expected {
+ t.Errorf("%s: %s\n", fsetErrs.Position(pos), msg)
+ }
+ }
+}
+
+func checkErrors(t *testing.T, filename string, input interface{}) {
+ src, err := readSource(filename, input)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ _, err = ParseFile(fsetErrs, filename, src, DeclarationErrors|AllErrors)
+ found, ok := err.(scanner.ErrorList)
+ if err != nil && !ok {
+ t.Error(err)
+ return
+ }
+ found.RemoveMultiples()
+
+ // we are expecting the following errors
+	// (collect these after parsing the file so that it is registered in the file set)
+ expected := expectedErrors(t, filename, src)
+
+ // verify errors returned by the parser
+ compareErrors(t, expected, found)
+}
+
+func TestErrors(t *testing.T) {
+ list, err := ioutil.ReadDir(testdata)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, fi := range list {
+ name := fi.Name()
+ if !fi.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".src") {
+ checkErrors(t, filepath.Join(testdata, name), nil)
+ }
+ }
+}
diff --git a/src/go/parser/example_test.go b/src/go/parser/example_test.go
new file mode 100644
index 000000000..3c58e63a9
--- /dev/null
+++ b/src/go/parser/example_test.go
@@ -0,0 +1,34 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parser_test
+
+import (
+ "fmt"
+ "go/parser"
+ "go/token"
+)
+
+func ExampleParseFile() {
+ fset := token.NewFileSet() // positions are relative to fset
+
+ // Parse the file containing this very example
+ // but stop after processing the imports.
+ f, err := parser.ParseFile(fset, "example_test.go", nil, parser.ImportsOnly)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ // Print the imports from the file's AST.
+ for _, s := range f.Imports {
+ fmt.Println(s.Path.Value)
+ }
+
+ // output:
+ //
+ // "fmt"
+ // "go/parser"
+ // "go/token"
+}
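+
+// ExampleParseExpr is a minimal companion sketch for the
+// expression-only entry point; the printed type is the concrete AST
+// node produced for a binary expression.
+func ExampleParseExpr() {
+	x, err := parser.ParseExpr("a + 1")
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Printf("%T\n", x)
+
+	// output:
+	// *ast.BinaryExpr
+}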
diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go
new file mode 100644
index 000000000..49103058b
--- /dev/null
+++ b/src/go/parser/interface.go
@@ -0,0 +1,198 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the exported entry points for invoking the parser.
+
+package parser
+
+import (
+ "bytes"
+ "errors"
+ "go/ast"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// If src != nil, readSource converts src to a []byte if possible;
+// otherwise it returns an error. If src == nil, readSource returns
+// the result of reading the file specified by filename.
+//
+func readSource(filename string, src interface{}) ([]byte, error) {
+ if src != nil {
+ switch s := src.(type) {
+ case string:
+ return []byte(s), nil
+ case []byte:
+ return s, nil
+ case *bytes.Buffer:
+ // is io.Reader, but src is already available in []byte form
+ if s != nil {
+ return s.Bytes(), nil
+ }
+ case io.Reader:
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, s); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+ }
+ return nil, errors.New("invalid source")
+ }
+ return ioutil.ReadFile(filename)
+}
+
+// A Mode value is a set of flags (or 0).
+// They control the amount of source code parsed and other optional
+// parser functionality.
+//
+type Mode uint
+
+const (
+ PackageClauseOnly Mode = 1 << iota // stop parsing after package clause
+ ImportsOnly // stop parsing after import declarations
+ ParseComments // parse comments and add them to AST
+ Trace // print a trace of parsed productions
+ DeclarationErrors // report declaration errors
+ SpuriousErrors // same as AllErrors, for backward-compatibility
+ AllErrors = SpuriousErrors // report all errors (not just the first 10 on different lines)
+)
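+
+// Mode flags combine with bitwise OR. For example, a caller that wants
+// comments attached to the AST and every error reported might pass:
+//
+//	parser.ParseComments | parser.AllErrors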
+
+// ParseFile parses the source code of a single Go source file and returns
+// the corresponding ast.File node. The source code may be provided via
+// the filename of the source file, or via the src parameter.
+//
+// If src != nil, ParseFile parses the source from src and the filename is
+// only used when recording position information. The type of the argument
+// for the src parameter must be string, []byte, or io.Reader.
+// If src == nil, ParseFile parses the file specified by filename.
+//
+// The mode parameter controls the amount of source text parsed and other
+// optional parser functionality. Position information is recorded in the
+// file set fset.
+//
+// If the source couldn't be read, the returned AST is nil and the error
+// indicates the specific failure. If the source was read but syntax
+// errors were found, the result is a partial AST (with ast.Bad* nodes
+// representing the fragments of erroneous source code). Multiple errors
+// are returned via a scanner.ErrorList which is sorted by file position.
+//
+func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode) (f *ast.File, err error) {
+ // get source
+ text, err := readSource(filename, src)
+ if err != nil {
+ return nil, err
+ }
+
+ var p parser
+ defer func() {
+ if e := recover(); e != nil {
+ _ = e.(bailout) // re-panics if it's not a bailout
+ }
+
+ // set result values
+ if f == nil {
+			// source is not a valid Go source file - satisfy
+			// the ParseFile API and return a valid (but empty)
+			// *ast.File
+ f = &ast.File{
+ Name: new(ast.Ident),
+ Scope: ast.NewScope(nil),
+ }
+ }
+
+ p.errors.Sort()
+ err = p.errors.Err()
+ }()
+
+ // parse source
+ p.init(fset, filename, text, mode)
+ f = p.parseFile()
+
+ return
+}
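+
+// parseFileUsage is a hypothetical sketch (name and input are
+// illustrative only) of the error contract above: for syntactically
+// invalid input the returned *ast.File is still non-nil and err holds
+// the sorted error list.
+func parseFileUsage() {
+	fset := token.NewFileSet()
+	f, err := ParseFile(fset, "broken.go", "package p\nfunc (", 0)
+	_ = f   // non-nil partial AST (may contain ast.Bad* nodes)
+	_ = err // non-nil; a scanner.ErrorList describing the syntax errors
+}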
+
+// ParseDir calls ParseFile for all files with names ending in ".go" in the
+// directory specified by path and returns a map of package name -> package
+// AST with all the packages found.
+//
+// If filter != nil, only the files with os.FileInfo entries passing through
+// the filter (and ending in ".go") are considered. The mode bits are passed
+// to ParseFile unchanged. Position information is recorded in fset.
+//
+// If the directory couldn't be read, a nil map and the respective error are
+// returned. If a parse error occurred, a non-nil but incomplete map and the
+// first error encountered are returned.
+//
+func ParseDir(fset *token.FileSet, path string, filter func(os.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error) {
+ fd, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer fd.Close()
+
+ list, err := fd.Readdir(-1)
+ if err != nil {
+ return nil, err
+ }
+
+ pkgs = make(map[string]*ast.Package)
+ for _, d := range list {
+ if strings.HasSuffix(d.Name(), ".go") && (filter == nil || filter(d)) {
+ filename := filepath.Join(path, d.Name())
+ if src, err := ParseFile(fset, filename, nil, mode); err == nil {
+ name := src.Name.Name
+ pkg, found := pkgs[name]
+ if !found {
+ pkg = &ast.Package{
+ Name: name,
+ Files: make(map[string]*ast.File),
+ }
+ pkgs[name] = pkg
+ }
+ pkg.Files[filename] = src
+ } else if first == nil {
+ first = err
+ }
+ }
+ }
+
+ return
+}
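+
+// parseDirUsage is a hypothetical sketch (name and filter are
+// illustrative only) showing a typical ParseDir filter that skips
+// _test.go files while keeping comments.
+func parseDirUsage() {
+	fset := token.NewFileSet()
+	notTest := func(fi os.FileInfo) bool {
+		return !strings.HasSuffix(fi.Name(), "_test.go")
+	}
+	pkgs, err := ParseDir(fset, ".", notTest, ParseComments)
+	_, _ = pkgs, err // pkgs maps package name -> *ast.Package
+}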
+
+// ParseExpr is a convenience function for obtaining the AST of an expression x.
+// The position information recorded in the AST is undefined. The filename used
+// in error messages is the empty string.
+//
+func ParseExpr(x string) (ast.Expr, error) {
+ var p parser
+ p.init(token.NewFileSet(), "", []byte(x), 0)
+
+ // Set up pkg-level scopes to avoid nil-pointer errors.
+ // This is not needed for a correct expression x as the
+ // parser will be ok with a nil topScope, but be cautious
+ // in case of an erroneous x.
+ p.openScope()
+ p.pkgScope = p.topScope
+ e := p.parseRhsOrType()
+ p.closeScope()
+ assert(p.topScope == nil, "unbalanced scopes")
+
+ // If a semicolon was inserted, consume it;
+	// report an error if there are more tokens.
+ if p.tok == token.SEMICOLON && p.lit == "\n" {
+ p.next()
+ }
+ p.expect(token.EOF)
+
+ if p.errors.Len() > 0 {
+ p.errors.Sort()
+ return nil, p.errors.Err()
+ }
+
+ return e, nil
+}
diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
new file mode 100644
index 000000000..9c62076f2
--- /dev/null
+++ b/src/go/parser/parser.go
@@ -0,0 +1,2460 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parser implements a parser for Go source files. Input may be
+// provided in a variety of forms (see the various Parse* functions); the
+// output is an abstract syntax tree (AST) representing the Go source. The
+// parser is invoked through one of the Parse* functions.
+//
+package parser
+
+import (
+ "fmt"
+ "go/ast"
+ "go/scanner"
+ "go/token"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+// The parser structure holds the parser's internal state.
+type parser struct {
+ file *token.File
+ errors scanner.ErrorList
+ scanner scanner.Scanner
+
+ // Tracing/debugging
+ mode Mode // parsing mode
+ trace bool // == (mode & Trace != 0)
+ indent int // indentation used for tracing output
+
+ // Comments
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ // Next token
+ pos token.Pos // token position
+ tok token.Token // one token look-ahead
+ lit string // token literal
+
+ // Error recovery
+ // (used to limit the number of calls to syncXXX functions
+ // w/o making scanning progress - avoids potential endless
+ // loops across multiple parser functions during error recovery)
+ syncPos token.Pos // last synchronization position
+ syncCnt int // number of calls to syncXXX without progress
+
+ // Non-syntactic parser control
+ exprLev int // < 0: in control clause, >= 0: in expression
+ inRhs bool // if set, the parser is parsing a rhs expression
+
+ // Ordinary identifier scopes
+ pkgScope *ast.Scope // pkgScope.Outer == nil
+ topScope *ast.Scope // top-most scope; may be pkgScope
+ unresolved []*ast.Ident // unresolved identifiers
+ imports []*ast.ImportSpec // list of imports
+
+ // Label scopes
+ // (maintained by open/close LabelScope)
+ labelScope *ast.Scope // label scope for current function
+ targetStack [][]*ast.Ident // stack of unresolved labels
+}
+
+func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
+ p.file = fset.AddFile(filename, -1, len(src))
+ var m scanner.Mode
+ if mode&ParseComments != 0 {
+ m = scanner.ScanComments
+ }
+ eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
+ p.scanner.Init(p.file, src, eh, m)
+
+ p.mode = mode
+ p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
+
+ p.next()
+}
+
+// ----------------------------------------------------------------------------
+// Scoping support
+
+func (p *parser) openScope() {
+ p.topScope = ast.NewScope(p.topScope)
+}
+
+func (p *parser) closeScope() {
+ p.topScope = p.topScope.Outer
+}
+
+func (p *parser) openLabelScope() {
+ p.labelScope = ast.NewScope(p.labelScope)
+ p.targetStack = append(p.targetStack, nil)
+}
+
+func (p *parser) closeLabelScope() {
+ // resolve labels
+ n := len(p.targetStack) - 1
+ scope := p.labelScope
+ for _, ident := range p.targetStack[n] {
+ ident.Obj = scope.Lookup(ident.Name)
+ if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
+ p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
+ }
+ }
+ // pop label scope
+ p.targetStack = p.targetStack[0:n]
+ p.labelScope = p.labelScope.Outer
+}
+
+func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
+ for _, ident := range idents {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ obj := ast.NewObj(kind, ident.Name)
+ // remember the corresponding declaration for redeclaration
+ // errors and global variable resolution/typechecking phase
+ obj.Decl = decl
+ obj.Data = data
+ ident.Obj = obj
+ if ident.Name != "_" {
+ if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
+ prevDecl := ""
+ if pos := alt.Pos(); pos.IsValid() {
+ prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
+ }
+ p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
+ }
+ }
+ }
+}
+
+func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
+ // Go spec: A short variable declaration may redeclare variables
+ // provided they were originally declared in the same block with
+ // the same type, and at least one of the non-blank variables is new.
+ n := 0 // number of new variables
+ for _, x := range list {
+ if ident, isIdent := x.(*ast.Ident); isIdent {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ obj := ast.NewObj(ast.Var, ident.Name)
+ // remember corresponding assignment for other tools
+ obj.Decl = decl
+ ident.Obj = obj
+ if ident.Name != "_" {
+ if alt := p.topScope.Insert(obj); alt != nil {
+ ident.Obj = alt // redeclaration
+ } else {
+ n++ // new declaration
+ }
+ }
+ } else {
+ p.errorExpected(x.Pos(), "identifier on left side of :=")
+ }
+ }
+ if n == 0 && p.mode&DeclarationErrors != 0 {
+ p.error(list[0].Pos(), "no new variables on left side of :=")
+ }
+}
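+
+// For example, in "x, err := f()" where err was already declared in the
+// same block, shortVarDecl reuses err's object (a redeclaration) and
+// counts only x as new; the statement is legal because n > 0.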
+
+// The unresolved object is a sentinel to mark identifiers that have been added
+// to the list of unresolved identifiers. The sentinel is only used for verifying
+// internal consistency.
+var unresolved = new(ast.Object)
+
+// If x is an identifier, tryResolve attempts to resolve x by looking up
+// the object it denotes. If no object is found and collectUnresolved is
+// set, x is marked as unresolved and collected in the list of unresolved
+// identifiers.
+//
+func (p *parser) tryResolve(x ast.Expr, collectUnresolved bool) {
+ // nothing to do if x is not an identifier or the blank identifier
+ ident, _ := x.(*ast.Ident)
+ if ident == nil {
+ return
+ }
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name == "_" {
+ return
+ }
+ // try to resolve the identifier
+ for s := p.topScope; s != nil; s = s.Outer {
+ if obj := s.Lookup(ident.Name); obj != nil {
+ ident.Obj = obj
+ return
+ }
+ }
+ // all local scopes are known, so any unresolved identifier
+ // must be found either in the file scope, package scope
+ // (perhaps in another file), or universe scope --- collect
+ // them so that they can be resolved later
+ if collectUnresolved {
+ ident.Obj = unresolved
+ p.unresolved = append(p.unresolved, ident)
+ }
+}
+
+func (p *parser) resolve(x ast.Expr) {
+ p.tryResolve(x, true)
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *parser) printTrace(a ...interface{}) {
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ pos := p.file.Position(p.pos)
+ fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *parser, msg string) *parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *parser) {
+ p.indent--
+ p.printTrace(")")
+}
+
+// Advance to the next token.
+func (p *parser) next0() {
+ // Because of one-token look-ahead, print the previous token
+ // when tracing as it provides a more readable output. The
+ // very first token (!p.pos.IsValid()) is not initialized
+	// (it is token.ILLEGAL), so don't print it.
+ if p.trace && p.pos.IsValid() {
+ s := p.tok.String()
+ switch {
+ case p.tok.IsLiteral():
+ p.printTrace(s, p.lit)
+ case p.tok.IsOperator(), p.tok.IsKeyword():
+ p.printTrace("\"" + s + "\"")
+ default:
+ p.printTrace(s)
+ }
+ }
+
+ p.pos, p.tok, p.lit = p.scanner.Scan()
+}
+
+// Consume a comment and return it and the line on which it ends.
+func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
+ // /*-style comments may end on a different line than where they start.
+ // Scan the comment for '\n' chars and adjust endline accordingly.
+ endline = p.file.Line(p.pos)
+ if p.lit[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.lit); i++ {
+ if p.lit[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{Slash: p.pos, Text: p.lit}
+ p.next0()
+
+ return
+}
+
+// Consume a group of adjacent comments, add it to the parser's
+// comments list, and return it together with the line at which
+// the last comment in the group ends. A non-comment token or n
+// empty lines terminate a comment group.
+//
+func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.file.Line(p.pos)
+ for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ // add comment group to the comments list
+ comments = &ast.CommentGroup{List: list}
+ p.comments = append(p.comments, comments)
+
+ return
+}
+
+// Advance to the next non-comment token. In the process, collect
+// any comment groups encountered, and remember the last lead and
+// line comments.
+//
+// A lead comment is a comment group that starts and ends in a
+// line without any other tokens and that is followed by a non-comment
+// token on the line immediately after the comment group.
+//
+// A line comment is a comment group that follows a non-comment
+// token on the same line, and that has no tokens after it on the line
+// where it ends.
+//
+// Lead and line comments may be considered documentation that is
+// stored in the AST.
+//
+func (p *parser) next() {
+ p.leadComment = nil
+ p.lineComment = nil
+ prev := p.pos
+ p.next0()
+
+ if p.tok == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ if p.file.Line(p.pos) == p.file.Line(prev) {
+ // The comment is on same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup(0)
+ if p.file.Line(p.pos) != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ p.lineComment = comment
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok == token.COMMENT {
+ comment, endline = p.consumeCommentGroup(1)
+ }
+
+ if endline+1 == p.file.Line(p.pos) {
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ p.leadComment = comment
+ }
+ }
+}
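+
+// For example, in
+//
+//	// add reports the sum.
+//	func add(x, y int) int { return x + y } // TODO: overflow
+//
+// the group above the declaration becomes its lead comment and the
+// trailing group on the same line becomes its line comment.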
+
+// A bailout panic is raised to indicate early termination.
+type bailout struct{}
+
+func (p *parser) error(pos token.Pos, msg string) {
+ epos := p.file.Position(pos)
+
+ // If AllErrors is not set, discard errors reported on the same line
+ // as the last recorded error and stop parsing if there are more than
+ // 10 errors.
+ if p.mode&AllErrors == 0 {
+ n := len(p.errors)
+ if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
+ return // discard - likely a spurious error
+ }
+ if n > 10 {
+ panic(bailout{})
+ }
+ }
+
+ p.errors.Add(epos, msg)
+}
+
+func (p *parser) errorExpected(pos token.Pos, msg string) {
+ msg = "expected " + msg
+ if pos == p.pos {
+ // the error happened at the current position;
+ // make the error message more specific
+ if p.tok == token.SEMICOLON && p.lit == "\n" {
+ msg += ", found newline"
+ } else {
+ msg += ", found '" + p.tok.String() + "'"
+ if p.tok.IsLiteral() {
+ msg += " " + p.lit
+ }
+ }
+ }
+ p.error(pos, msg)
+}
+
+func (p *parser) expect(tok token.Token) token.Pos {
+ pos := p.pos
+ if p.tok != tok {
+ p.errorExpected(pos, "'"+tok.String()+"'")
+ }
+ p.next() // make progress
+ return pos
+}
+
+// expectClosing is like expect but provides a better error message
+// for the common case of a missing comma before a newline.
+//
+func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
+ if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
+ p.error(p.pos, "missing ',' before newline in "+context)
+ p.next()
+ }
+ return p.expect(tok)
+}
+
+func (p *parser) expectSemi() {
+ // semicolon is optional before a closing ')' or '}'
+ if p.tok != token.RPAREN && p.tok != token.RBRACE {
+ if p.tok == token.SEMICOLON {
+ p.next()
+ } else {
+ p.errorExpected(p.pos, "';'")
+ syncStmt(p)
+ }
+ }
+}
+
+func (p *parser) atComma(context string) bool {
+ if p.tok == token.COMMA {
+ return true
+ }
+ if p.tok == token.SEMICOLON && p.lit == "\n" {
+ p.error(p.pos, "missing ',' before newline in "+context)
+ return true // "insert" the comma and continue
+ }
+ return false
+}
+
+func assert(cond bool, msg string) {
+ if !cond {
+ panic("go/parser internal error: " + msg)
+ }
+}
+
+// syncStmt advances to the next statement.
+// Used for synchronization after an error.
+//
+func syncStmt(p *parser) {
+ for {
+ switch p.tok {
+ case token.BREAK, token.CONST, token.CONTINUE, token.DEFER,
+ token.FALLTHROUGH, token.FOR, token.GO, token.GOTO,
+ token.IF, token.RETURN, token.SELECT, token.SWITCH,
+ token.TYPE, token.VAR:
+ // Return only if parser made some progress since last
+ // sync or if it has not reached 10 sync calls without
+ // progress. Otherwise consume at least one token to
+ // avoid an endless parser loop (it is possible that
+ // both parseOperand and parseStmt call syncStmt and
+ // correctly do not advance, thus the need for the
+ // invocation limit p.syncCnt).
+ if p.pos == p.syncPos && p.syncCnt < 10 {
+ p.syncCnt++
+ return
+ }
+ if p.pos > p.syncPos {
+ p.syncPos = p.pos
+ p.syncCnt = 0
+ return
+ }
+ // Reaching here indicates a parser bug, likely an
+ // incorrect token list in this function, but it only
+ // leads to skipping of possibly correct code if a
+ // previous error is present, and thus is preferred
+ // over a non-terminating parse.
+ case token.EOF:
+ return
+ }
+ p.next()
+ }
+}
+
+// syncDecl advances to the next declaration.
+// Used for synchronization after an error.
+//
+func syncDecl(p *parser) {
+ for {
+ switch p.tok {
+ case token.CONST, token.TYPE, token.VAR:
+ // see comments in syncStmt
+ if p.pos == p.syncPos && p.syncCnt < 10 {
+ p.syncCnt++
+ return
+ }
+ if p.pos > p.syncPos {
+ p.syncPos = p.pos
+ p.syncCnt = 0
+ return
+ }
+ case token.EOF:
+ return
+ }
+ p.next()
+ }
+}
+
+// safePos returns a valid file position for a given position: If pos
+// is valid to begin with, safePos returns pos. If pos is out-of-range,
+// safePos returns the EOF position.
+//
+// This is a hack to work around "artificial" end positions in the AST which
+// are computed by adding 1 to (presumably valid) token positions. If the
+// token positions are invalid due to parse errors, the resulting end position
+// may be past the file's EOF position, which would lead to panics if used
+// later on.
+//
+func (p *parser) safePos(pos token.Pos) (res token.Pos) {
+ defer func() {
+ if recover() != nil {
+ res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
+ }
+ }()
+ _ = p.file.Offset(pos) // trigger a panic if position is out-of-range
+ return pos
+}
+
+// ----------------------------------------------------------------------------
+// Identifiers
+
+func (p *parser) parseIdent() *ast.Ident {
+ pos := p.pos
+ name := "_"
+ if p.tok == token.IDENT {
+ name = p.lit
+ p.next()
+ } else {
+ p.expect(token.IDENT) // use expect() error handling
+ }
+ return &ast.Ident{NamePos: pos, Name: name}
+}
+
+func (p *parser) parseIdentList() (list []*ast.Ident) {
+ if p.trace {
+ defer un(trace(p, "IdentList"))
+ }
+
+ list = append(list, p.parseIdent())
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.parseIdent())
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Common productions
+
+// If lhs is set, result list elements which are identifiers are not resolved.
+func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "ExpressionList"))
+ }
+
+ list = append(list, p.checkExpr(p.parseExpr(lhs)))
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.checkExpr(p.parseExpr(lhs)))
+ }
+
+ return
+}
+
+func (p *parser) parseLhsList() []ast.Expr {
+ old := p.inRhs
+ p.inRhs = false
+ list := p.parseExprList(true)
+ switch p.tok {
+ case token.DEFINE:
+ // lhs of a short variable declaration
+ // but doesn't enter scope until later:
+ // caller must call p.shortVarDecl(p.makeIdentList(list))
+ // at appropriate time.
+ case token.COLON:
+ // lhs of a label declaration or a communication clause of a select
+ // statement (parseLhsList is not called when parsing the case clause
+ // of a switch statement):
+ // - labels are declared by the caller of parseLhsList
+ // - for communication clauses, if there is a stand-alone identifier
+ // followed by a colon, we have a syntax error; there is no need
+ // to resolve the identifier in that case
+ default:
+ // identifiers must be declared elsewhere
+ for _, x := range list {
+ p.resolve(x)
+ }
+ }
+ p.inRhs = old
+ return list
+}
+
+func (p *parser) parseRhsList() []ast.Expr {
+ old := p.inRhs
+ p.inRhs = true
+ list := p.parseExprList(false)
+ p.inRhs = old
+ return list
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+func (p *parser) parseType() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Type"))
+ }
+
+ typ := p.tryType()
+
+ if typ == nil {
+ pos := p.pos
+ p.errorExpected(pos, "type")
+ p.next() // make progress
+ return &ast.BadExpr{From: pos, To: p.pos}
+ }
+
+ return typ
+}
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) parseTypeName() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "TypeName"))
+ }
+
+ ident := p.parseIdent()
+ // don't resolve ident yet - it may be a parameter or field name
+
+ if p.tok == token.PERIOD {
+ // ident is a package name
+ p.next()
+ p.resolve(ident)
+ sel := p.parseIdent()
+ return &ast.SelectorExpr{X: ident, Sel: sel}
+ }
+
+ return ident
+}
+
+func (p *parser) parseArrayType() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "ArrayType"))
+ }
+
+ lbrack := p.expect(token.LBRACK)
+ var len ast.Expr
+ // always permit ellipsis for more fault-tolerant parsing
+ if p.tok == token.ELLIPSIS {
+ len = &ast.Ellipsis{Ellipsis: p.pos}
+ p.next()
+ } else if p.tok != token.RBRACK {
+ len = p.parseRhs()
+ }
+ p.expect(token.RBRACK)
+ elt := p.parseType()
+
+ return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
+}
+
+func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
+ idents := make([]*ast.Ident, len(list))
+ for i, x := range list {
+ ident, isIdent := x.(*ast.Ident)
+ if !isIdent {
+ if _, isBad := x.(*ast.BadExpr); !isBad {
+ // only report error if it's a new one
+ p.errorExpected(x.Pos(), "identifier")
+ }
+ ident = &ast.Ident{NamePos: x.Pos(), Name: "_"}
+ }
+ idents[i] = ident
+ }
+ return idents
+}
+
+func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
+ if p.trace {
+ defer un(trace(p, "FieldDecl"))
+ }
+
+ doc := p.leadComment
+
+ // FieldDecl
+ list, typ := p.parseVarList(false)
+
+ // Tag
+ var tag *ast.BasicLit
+ if p.tok == token.STRING {
+ tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
+ p.next()
+ }
+
+ // analyze case
+ var idents []*ast.Ident
+ if typ != nil {
+ // IdentifierList Type
+ idents = p.makeIdentList(list)
+ } else {
+ // ["*"] TypeName (AnonymousField)
+ typ = list[0] // we always have at least one element
+ if n := len(list); n > 1 || !isTypeName(deref(typ)) {
+ pos := typ.Pos()
+ p.errorExpected(pos, "anonymous field")
+ typ = &ast.BadExpr{From: pos, To: p.safePos(list[n-1].End())}
+ }
+ }
+
+ p.expectSemi() // call before accessing p.linecomment
+
+ field := &ast.Field{Doc: doc, Names: idents, Type: typ, Tag: tag, Comment: p.lineComment}
+ p.declare(field, nil, scope, ast.Var, idents...)
+ p.resolve(typ)
+
+ return field
+}
+
+func (p *parser) parseStructType() *ast.StructType {
+ if p.trace {
+ defer un(trace(p, "StructType"))
+ }
+
+ pos := p.expect(token.STRUCT)
+ lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // struct scope
+ var list []*ast.Field
+ for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
+ // a field declaration cannot start with a '(' but we accept
+ // it here for more robust parsing and better error messages
+ // (parseFieldDecl will check and complain if necessary)
+ list = append(list, p.parseFieldDecl(scope))
+ }
+ rbrace := p.expect(token.RBRACE)
+
+ return &ast.StructType{
+ Struct: pos,
+ Fields: &ast.FieldList{
+ Opening: lbrace,
+ List: list,
+ Closing: rbrace,
+ },
+ }
+}
+
+func (p *parser) parsePointerType() *ast.StarExpr {
+ if p.trace {
+ defer un(trace(p, "PointerType"))
+ }
+
+ star := p.expect(token.MUL)
+ base := p.parseType()
+
+ return &ast.StarExpr{Star: star, X: base}
+}
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) tryVarType(isParam bool) ast.Expr {
+ if isParam && p.tok == token.ELLIPSIS {
+ pos := p.pos
+ p.next()
+ typ := p.tryIdentOrType() // don't use parseType so we can provide better error message
+ if typ != nil {
+ p.resolve(typ)
+ } else {
+ p.error(pos, "'...' parameter is missing type")
+ typ = &ast.BadExpr{From: pos, To: p.pos}
+ }
+ return &ast.Ellipsis{Ellipsis: pos, Elt: typ}
+ }
+ return p.tryIdentOrType()
+}
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) parseVarType(isParam bool) ast.Expr {
+ typ := p.tryVarType(isParam)
+ if typ == nil {
+ pos := p.pos
+ p.errorExpected(pos, "type")
+ p.next() // make progress
+ typ = &ast.BadExpr{From: pos, To: p.pos}
+ }
+ return typ
+}
+
+// If any of the results are identifiers, they are not resolved.
+func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "VarList"))
+ }
+
+ // a list of identifiers looks like a list of type names
+ //
+ // parse/tryVarType accepts any type (including parenthesized
+ // ones) even though the syntax does not permit them here: we
+ // accept them all for more robust parsing and complain later
+ for typ := p.parseVarType(isParam); typ != nil; {
+ list = append(list, typ)
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ typ = p.tryVarType(isParam) // maybe nil as in: func f(int,) {}
+ }
+
+ // if we had a list of identifiers, it must be followed by a type
+ typ = p.tryVarType(isParam)
+
+ return
+}
+
+func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
+ if p.trace {
+ defer un(trace(p, "ParameterList"))
+ }
+
+ // ParameterDecl
+ list, typ := p.parseVarList(ellipsisOk)
+
+ // analyze case
+ if typ != nil {
+ // IdentifierList Type
+ idents := p.makeIdentList(list)
+ field := &ast.Field{Names: idents, Type: typ}
+ params = append(params, field)
+ // Go spec: The scope of an identifier denoting a function
+ // parameter or result variable is the function body.
+ p.declare(field, nil, scope, ast.Var, idents...)
+ p.resolve(typ)
+ if !p.atComma("parameter list") {
+ return
+ }
+ p.next()
+ for p.tok != token.RPAREN && p.tok != token.EOF {
+ idents := p.parseIdentList()
+ typ := p.parseVarType(ellipsisOk)
+ field := &ast.Field{Names: idents, Type: typ}
+ params = append(params, field)
+ // Go spec: The scope of an identifier denoting a function
+ // parameter or result variable is the function body.
+ p.declare(field, nil, scope, ast.Var, idents...)
+ p.resolve(typ)
+ if !p.atComma("parameter list") {
+ break
+ }
+ p.next()
+ }
+ return
+ }
+
+ // Type { "," Type } (anonymous parameters)
+ params = make([]*ast.Field, len(list))
+ for i, typ := range list {
+ p.resolve(typ)
+ params[i] = &ast.Field{Type: typ}
+ }
+ return
+}
+
+func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
+ if p.trace {
+ defer un(trace(p, "Parameters"))
+ }
+
+ var params []*ast.Field
+ lparen := p.expect(token.LPAREN)
+ if p.tok != token.RPAREN {
+ params = p.parseParameterList(scope, ellipsisOk)
+ }
+ rparen := p.expect(token.RPAREN)
+
+ return &ast.FieldList{Opening: lparen, List: params, Closing: rparen}
+}
+
+func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
+ if p.trace {
+ defer un(trace(p, "Result"))
+ }
+
+ if p.tok == token.LPAREN {
+ return p.parseParameters(scope, false)
+ }
+
+ typ := p.tryType()
+ if typ != nil {
+ list := make([]*ast.Field, 1)
+ list[0] = &ast.Field{Type: typ}
+ return &ast.FieldList{List: list}
+ }
+
+ return nil
+}
+
+func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
+ if p.trace {
+ defer un(trace(p, "Signature"))
+ }
+
+ params = p.parseParameters(scope, true)
+ results = p.parseResult(scope)
+
+ return
+}
+
+func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
+ if p.trace {
+ defer un(trace(p, "FuncType"))
+ }
+
+ pos := p.expect(token.FUNC)
+ scope := ast.NewScope(p.topScope) // function scope
+ params, results := p.parseSignature(scope)
+
+ return &ast.FuncType{Func: pos, Params: params, Results: results}, scope
+}
+
+func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
+ if p.trace {
+ defer un(trace(p, "MethodSpec"))
+ }
+
+ doc := p.leadComment
+ var idents []*ast.Ident
+ var typ ast.Expr
+ x := p.parseTypeName()
+ if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
+ // method
+ idents = []*ast.Ident{ident}
+ scope := ast.NewScope(nil) // method scope
+ params, results := p.parseSignature(scope)
+ typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
+ } else {
+ // embedded interface
+ typ = x
+ p.resolve(typ)
+ }
+ p.expectSemi() // call before accessing p.linecomment
+
+ spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
+ p.declare(spec, nil, scope, ast.Fun, idents...)
+
+ return spec
+}
+
+func (p *parser) parseInterfaceType() *ast.InterfaceType {
+ if p.trace {
+ defer un(trace(p, "InterfaceType"))
+ }
+
+ pos := p.expect(token.INTERFACE)
+ lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // interface scope
+ var list []*ast.Field
+ for p.tok == token.IDENT {
+ list = append(list, p.parseMethodSpec(scope))
+ }
+ rbrace := p.expect(token.RBRACE)
+
+ return &ast.InterfaceType{
+ Interface: pos,
+ Methods: &ast.FieldList{
+ Opening: lbrace,
+ List: list,
+ Closing: rbrace,
+ },
+ }
+}
+
+func (p *parser) parseMapType() *ast.MapType {
+ if p.trace {
+ defer un(trace(p, "MapType"))
+ }
+
+ pos := p.expect(token.MAP)
+ p.expect(token.LBRACK)
+ key := p.parseType()
+ p.expect(token.RBRACK)
+ value := p.parseType()
+
+ return &ast.MapType{Map: pos, Key: key, Value: value}
+}
+
+func (p *parser) parseChanType() *ast.ChanType {
+ if p.trace {
+ defer un(trace(p, "ChanType"))
+ }
+
+ pos := p.pos
+ dir := ast.SEND | ast.RECV
+ var arrow token.Pos
+ if p.tok == token.CHAN {
+ p.next()
+ if p.tok == token.ARROW {
+ arrow = p.pos
+ p.next()
+ dir = ast.SEND
+ }
+ } else {
+ arrow = p.expect(token.ARROW)
+ p.expect(token.CHAN)
+ dir = ast.RECV
+ }
+ value := p.parseType()
+
+ return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
+}
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) tryIdentOrType() ast.Expr {
+ switch p.tok {
+ case token.IDENT:
+ return p.parseTypeName()
+ case token.LBRACK:
+ return p.parseArrayType()
+ case token.STRUCT:
+ return p.parseStructType()
+ case token.MUL:
+ return p.parsePointerType()
+ case token.FUNC:
+ typ, _ := p.parseFuncType()
+ return typ
+ case token.INTERFACE:
+ return p.parseInterfaceType()
+ case token.MAP:
+ return p.parseMapType()
+ case token.CHAN, token.ARROW:
+ return p.parseChanType()
+ case token.LPAREN:
+ lparen := p.pos
+ p.next()
+ typ := p.parseType()
+ rparen := p.expect(token.RPAREN)
+ return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
+ }
+
+ // no type found
+ return nil
+}
+
+func (p *parser) tryType() ast.Expr {
+ typ := p.tryIdentOrType()
+ if typ != nil {
+ p.resolve(typ)
+ }
+ return typ
+}
+
+// ----------------------------------------------------------------------------
+// Blocks
+
+func (p *parser) parseStmtList() (list []ast.Stmt) {
+ if p.trace {
+ defer un(trace(p, "StatementList"))
+ }
+
+ for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
+ list = append(list, p.parseStmt())
+ }
+
+ return
+}
+
+func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
+ if p.trace {
+ defer un(trace(p, "Body"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ p.topScope = scope // open function scope
+ p.openLabelScope()
+ list := p.parseStmtList()
+ p.closeLabelScope()
+ p.closeScope()
+ rbrace := p.expect(token.RBRACE)
+
+ return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
+}
+
+func (p *parser) parseBlockStmt() *ast.BlockStmt {
+ if p.trace {
+ defer un(trace(p, "BlockStmt"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ p.openScope()
+ list := p.parseStmtList()
+ p.closeScope()
+ rbrace := p.expect(token.RBRACE)
+
+ return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func (p *parser) parseFuncTypeOrLit() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "FuncTypeOrLit"))
+ }
+
+ typ, scope := p.parseFuncType()
+ if p.tok != token.LBRACE {
+ // function type only
+ return typ
+ }
+
+ p.exprLev++
+ body := p.parseBody(scope)
+ p.exprLev--
+
+ return &ast.FuncLit{Type: typ, Body: body}
+}
+
+// parseOperand may return an expression or a raw type (incl. array
+// types of the form [...]T). Callers must verify the result.
+// If lhs is set and the result is an identifier, it is not resolved.
+//
+func (p *parser) parseOperand(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Operand"))
+ }
+
+ switch p.tok {
+ case token.IDENT:
+ x := p.parseIdent()
+ if !lhs {
+ p.resolve(x)
+ }
+ return x
+
+ case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
+ x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
+ p.next()
+ return x
+
+ case token.LPAREN:
+ lparen := p.pos
+ p.next()
+ p.exprLev++
+ x := p.parseRhsOrType() // types may be parenthesized: (some type)
+ p.exprLev--
+ rparen := p.expect(token.RPAREN)
+ return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
+
+ case token.FUNC:
+ return p.parseFuncTypeOrLit()
+ }
+
+ if typ := p.tryIdentOrType(); typ != nil {
+ // could be type for composite literal or conversion
+ _, isIdent := typ.(*ast.Ident)
+ assert(!isIdent, "type cannot be identifier")
+ return typ
+ }
+
+ // we have an error
+ pos := p.pos
+ p.errorExpected(pos, "operand")
+ syncStmt(p)
+ return &ast.BadExpr{From: pos, To: p.pos}
+}
+
+func (p *parser) parseSelector(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Selector"))
+ }
+
+ sel := p.parseIdent()
+
+ return &ast.SelectorExpr{X: x, Sel: sel}
+}
+
+func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "TypeAssertion"))
+ }
+
+ lparen := p.expect(token.LPAREN)
+ var typ ast.Expr
+ if p.tok == token.TYPE {
+ // type switch: typ == nil
+ p.next()
+ } else {
+ typ = p.parseType()
+ }
+ rparen := p.expect(token.RPAREN)
+
+ return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: lparen, Rparen: rparen}
+}
+
+func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "IndexOrSlice"))
+ }
+
+ const N = 3 // change the 3 to 2 to disable 3-index slices
+ lbrack := p.expect(token.LBRACK)
+ p.exprLev++
+ var index [N]ast.Expr
+ var colons [N - 1]token.Pos
+ if p.tok != token.COLON {
+ index[0] = p.parseRhs()
+ }
+ ncolons := 0
+ for p.tok == token.COLON && ncolons < len(colons) {
+ colons[ncolons] = p.pos
+ ncolons++
+ p.next()
+ if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
+ index[ncolons] = p.parseRhs()
+ }
+ }
+ p.exprLev--
+ rbrack := p.expect(token.RBRACK)
+
+ if ncolons > 0 {
+ // slice expression
+ slice3 := false
+ if ncolons == 2 {
+ slice3 = true
+ // Check presence of 2nd and 3rd index here rather than during type-checking
+ // to prevent erroneous programs from passing through gofmt (was issue 7305).
+ if index[1] == nil {
+ p.error(colons[0], "2nd index required in 3-index slice")
+ index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
+ }
+ if index[2] == nil {
+ p.error(colons[1], "3rd index required in 3-index slice")
+ index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
+ }
+ }
+ return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
+ }
+
+ return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
+}
+
+func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
+ if p.trace {
+ defer un(trace(p, "CallOrConversion"))
+ }
+
+ lparen := p.expect(token.LPAREN)
+ p.exprLev++
+ var list []ast.Expr
+ var ellipsis token.Pos
+ for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
+ list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
+ if p.tok == token.ELLIPSIS {
+ ellipsis = p.pos
+ p.next()
+ }
+ if !p.atComma("argument list") {
+ break
+ }
+ p.next()
+ }
+ p.exprLev--
+ rparen := p.expectClosing(token.RPAREN, "argument list")
+
+ return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
+}
+
+func (p *parser) parseElement(keyOk bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Element"))
+ }
+
+ if p.tok == token.LBRACE {
+ return p.parseLiteralValue(nil)
+ }
+
+ // Because the parser doesn't know the composite literal type, it cannot
+ // know if a key that's an identifier is a struct field name or a name
+ // denoting a value. The former is not resolved by the parser or the
+ // resolver.
+ //
+ // Instead, _try_ to resolve such a key if possible. If it resolves,
+	// it a) has been resolved correctly, or b) has been resolved
+	// incorrectly because the key is a struct field with a name
+	// matching another identifier.
+ // In the former case we are done, and in the latter case we don't
+ // care because the type checker will do a separate field lookup.
+ //
+ // If the key does not resolve, it a) must be defined at the top
+ // level in another file of the same package, the universe scope, or be
+ // undeclared; or b) it is a struct field. In the former case, the type
+ // checker can do a top-level lookup, and in the latter case it will do
+ // a separate field lookup.
+ x := p.checkExpr(p.parseExpr(keyOk))
+ if keyOk {
+ if p.tok == token.COLON {
+ colon := p.pos
+ p.next()
+ // Try to resolve the key but don't collect it
+ // as unresolved identifier if it fails so that
+ // we don't get (possibly false) errors about
+ // undeclared names.
+ p.tryResolve(x, false)
+ return &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseElement(false)}
+ }
+ p.resolve(x) // not a key
+ }
+
+ return x
+}
+
+func (p *parser) parseElementList() (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "ElementList"))
+ }
+
+ for p.tok != token.RBRACE && p.tok != token.EOF {
+ list = append(list, p.parseElement(true))
+ if !p.atComma("composite literal") {
+ break
+ }
+ p.next()
+ }
+
+ return
+}
+
+func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "LiteralValue"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ var elts []ast.Expr
+ p.exprLev++
+ if p.tok != token.RBRACE {
+ elts = p.parseElementList()
+ }
+ p.exprLev--
+ rbrace := p.expectClosing(token.RBRACE, "composite literal")
+ return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
+}
+
+// checkExpr checks that x is an expression (and not a type).
+func (p *parser) checkExpr(x ast.Expr) ast.Expr {
+ switch unparen(x).(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.BasicLit:
+ case *ast.FuncLit:
+ case *ast.CompositeLit:
+ case *ast.ParenExpr:
+ panic("unreachable")
+ case *ast.SelectorExpr:
+ case *ast.IndexExpr:
+ case *ast.SliceExpr:
+ case *ast.TypeAssertExpr:
+ // If t.Type == nil we have a type assertion of the form
+ // y.(type), which is only allowed in type switch expressions.
+		// It's hard to exclude those except when we are inside a
+		// type switch; instead, be lenient and check this in the
+		// type checker.
+ case *ast.CallExpr:
+ case *ast.StarExpr:
+ case *ast.UnaryExpr:
+ case *ast.BinaryExpr:
+ default:
+ // all other nodes are not proper expressions
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
+ }
+ return x
+}
+
+// isTypeName returns true iff x is a (qualified) TypeName.
+func isTypeName(x ast.Expr) bool {
+ switch t := x.(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.SelectorExpr:
+ _, isIdent := t.X.(*ast.Ident)
+ return isIdent
+ default:
+ return false // all other nodes are not type names
+ }
+ return true
+}
+
+// isLiteralType returns true iff x is a legal composite literal type.
+func isLiteralType(x ast.Expr) bool {
+ switch t := x.(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.SelectorExpr:
+ _, isIdent := t.X.(*ast.Ident)
+ return isIdent
+ case *ast.ArrayType:
+ case *ast.StructType:
+ case *ast.MapType:
+ default:
+ return false // all other nodes are not legal composite literal types
+ }
+ return true
+}
+
+// If x is of the form *T, deref returns T, otherwise it returns x.
+func deref(x ast.Expr) ast.Expr {
+ if p, isPtr := x.(*ast.StarExpr); isPtr {
+ x = p.X
+ }
+ return x
+}
+
+// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
+func unparen(x ast.Expr) ast.Expr {
+ if p, isParen := x.(*ast.ParenExpr); isParen {
+ x = unparen(p.X)
+ }
+ return x
+}
+
+// checkExprOrType checks that x is an expression or a type
+// (and not a raw type such as [...]T).
+//
+func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
+ switch t := unparen(x).(type) {
+ case *ast.ParenExpr:
+ panic("unreachable")
+ case *ast.UnaryExpr:
+ case *ast.ArrayType:
+ if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
+ p.error(len.Pos(), "expected array length, found '...'")
+ x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
+ }
+ }
+
+ // all other nodes are expressions or types
+ return x
+}
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "PrimaryExpr"))
+ }
+
+ x := p.parseOperand(lhs)
+L:
+ for {
+ switch p.tok {
+ case token.PERIOD:
+ p.next()
+ if lhs {
+ p.resolve(x)
+ }
+ switch p.tok {
+ case token.IDENT:
+ x = p.parseSelector(p.checkExprOrType(x))
+ case token.LPAREN:
+ x = p.parseTypeAssertion(p.checkExpr(x))
+ default:
+ pos := p.pos
+ p.errorExpected(pos, "selector or type assertion")
+ p.next() // make progress
+ x = &ast.BadExpr{From: pos, To: p.pos}
+ }
+ case token.LBRACK:
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseIndexOrSlice(p.checkExpr(x))
+ case token.LPAREN:
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseCallOrConversion(p.checkExprOrType(x))
+ case token.LBRACE:
+ if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseLiteralValue(x)
+ } else {
+ break L
+ }
+ default:
+ break L
+ }
+ lhs = false // no need to try to resolve again
+ }
+
+ return x
+}
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "UnaryExpr"))
+ }
+
+ switch p.tok {
+ case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
+ pos, op := p.pos, p.tok
+ p.next()
+ x := p.parseUnaryExpr(false)
+ return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}
+
+ case token.ARROW:
+ // channel type or receive expression
+ arrow := p.pos
+ p.next()
+
+ // If the next token is token.CHAN we still don't know if it
+ // is a channel type or a receive operation - we only know
+ // once we have found the end of the unary expression. There
+ // are two cases:
+ //
+ // <- type => (<-type) must be channel type
+ // <- expr => <-(expr) is a receive from an expression
+ //
+ // In the first case, the arrow must be re-associated with
+ // the channel type parsed already:
+ //
+ // <- (chan type) => (<-chan type)
+ // <- (chan<- type) => (<-chan (<-type))
+
+ x := p.parseUnaryExpr(false)
+
+ // determine which case we have
+ if typ, ok := x.(*ast.ChanType); ok {
+ // (<-type)
+
+ // re-associate position info and <-
+ dir := ast.SEND
+ for ok && dir == ast.SEND {
+ if typ.Dir == ast.RECV {
+ // error: (<-type) is (<-(<-chan T))
+ p.errorExpected(typ.Arrow, "'chan'")
+ }
+ arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
+ dir, typ.Dir = typ.Dir, ast.RECV
+ typ, ok = typ.Value.(*ast.ChanType)
+ }
+ if dir == ast.SEND {
+ p.errorExpected(arrow, "channel type")
+ }
+
+ return x
+ }
+
+ // <-(expr)
+ return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: p.checkExpr(x)}
+
+ case token.MUL:
+ // pointer type or unary "*" expression
+ pos := p.pos
+ p.next()
+ x := p.parseUnaryExpr(false)
+ return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
+ }
+
+ return p.parsePrimaryExpr(lhs)
+}
+
+func (p *parser) tokPrec() (token.Token, int) {
+ tok := p.tok
+ if p.inRhs && tok == token.ASSIGN {
+ tok = token.EQL
+ }
+ return tok, tok.Precedence()
+}
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "BinaryExpr"))
+ }
+
+ x := p.parseUnaryExpr(lhs)
+ for _, prec := p.tokPrec(); prec >= prec1; prec-- {
+ for {
+ op, oprec := p.tokPrec()
+ if oprec != prec {
+ break
+ }
+ pos := p.expect(op)
+ if lhs {
+ p.resolve(x)
+ lhs = false
+ }
+ y := p.parseBinaryExpr(false, prec+1)
+ x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
+ }
+ }
+
+ return x
+}
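+
+// For example, "a + b*c - d" parses as ((a + (b*c)) - d): the loop
+// above consumes the precedence-4 operators + and - left to right,
+// while the recursive call at prec+1 binds the precedence-5 * first.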
+
+// If lhs is set and the result is an identifier, it is not resolved.
+// The result may be a type or even a raw type ([...]int). Callers must
+// check the result (using checkExpr or checkExprOrType), depending on
+// context.
+func (p *parser) parseExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Expression"))
+ }
+
+ return p.parseBinaryExpr(lhs, token.LowestPrec+1)
+}
+
+func (p *parser) parseRhs() ast.Expr {
+ old := p.inRhs
+ p.inRhs = true
+ x := p.checkExpr(p.parseExpr(false))
+ p.inRhs = old
+ return x
+}
+
+func (p *parser) parseRhsOrType() ast.Expr {
+ old := p.inRhs
+ p.inRhs = true
+ x := p.checkExprOrType(p.parseExpr(false))
+ p.inRhs = old
+ return x
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+// Parsing modes for parseSimpleStmt.
+const (
+ basic = iota
+ labelOk
+ rangeOk
+)
+
+// parseSimpleStmt returns true as 2nd result if it parsed the assignment
+// of a range clause (with mode == rangeOk). The returned statement is an
+// assignment with a right-hand side that is a single unary expression of
+// the form "range x". No guarantees are given for the left-hand side.
+func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
+ if p.trace {
+ defer un(trace(p, "SimpleStmt"))
+ }
+
+ x := p.parseLhsList()
+
+ switch p.tok {
+ case
+ token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
+ token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
+ token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
+ token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
+ // assignment statement, possibly part of a range clause
+ pos, tok := p.pos, p.tok
+ p.next()
+ var y []ast.Expr
+ isRange := false
+ if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
+ pos := p.pos
+ p.next()
+ y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
+ isRange = true
+ } else {
+ y = p.parseRhsList()
+ }
+ as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
+ if tok == token.DEFINE {
+ p.shortVarDecl(as, x)
+ }
+ return as, isRange
+ }
+
+ if len(x) > 1 {
+ p.errorExpected(x[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+
+ switch p.tok {
+ case token.COLON:
+ // labeled statement
+ colon := p.pos
+ p.next()
+ if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
+ // Go spec: The scope of a label is the body of the function
+ // in which it is declared and excludes the body of any nested
+ // function.
+ stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
+ p.declare(stmt, nil, p.labelScope, ast.Lbl, label)
+ return stmt, false
+ }
+ // The label declaration typically starts at x[0].Pos(), but the label
+ // declaration may be erroneous due to a token after that position (and
+ // before the ':'). If SpuriousErrors is not set, the (only) error re-
+ // ported for the line is the illegal label error instead of the token
+ // before the ':' that caused the problem. Thus, use the (latest) colon
+ // position for error reporting.
+ p.error(colon, "illegal label declaration")
+ return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
+
+ case token.ARROW:
+ // send statement
+ arrow := p.pos
+ p.next()
+ y := p.parseRhs()
+ return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
+
+ case token.INC, token.DEC:
+ // increment or decrement
+ s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
+ p.next()
+ return s, false
+ }
+
+ // expression
+ return &ast.ExprStmt{X: x[0]}, false
+}
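+
+// For example, "for i, v := range s {" reaches parseSimpleStmt with
+// mode == rangeOk; the clause "i, v := range s" is returned as an
+// *ast.AssignStmt whose Rhs is the single unary range expression, and
+// the second result is true.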
+
+func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
+ x := p.parseRhsOrType() // could be a conversion: (some type)(x)
+ if call, isCall := x.(*ast.CallExpr); isCall {
+ return call
+ }
+ if _, isBad := x.(*ast.BadExpr); !isBad {
+ // only report error if it's a new one
+ p.error(p.safePos(x.End()), fmt.Sprintf("function must be invoked in %s statement", callType))
+ }
+ return nil
+}
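+
+// For example, "go f" (a function value rather than a call) is rejected
+// here with "function must be invoked in go statement"; see the
+// corresponding error cases in short_test.go.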
+
+func (p *parser) parseGoStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "GoStmt"))
+ }
+
+ pos := p.expect(token.GO)
+ call := p.parseCallExpr("go")
+ p.expectSemi()
+ if call == nil {
+ return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
+ }
+
+ return &ast.GoStmt{Go: pos, Call: call}
+}
+
+func (p *parser) parseDeferStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "DeferStmt"))
+ }
+
+ pos := p.expect(token.DEFER)
+ call := p.parseCallExpr("defer")
+ p.expectSemi()
+ if call == nil {
+ return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
+ }
+
+ return &ast.DeferStmt{Defer: pos, Call: call}
+}
+
+func (p *parser) parseReturnStmt() *ast.ReturnStmt {
+ if p.trace {
+ defer un(trace(p, "ReturnStmt"))
+ }
+
+ pos := p.pos
+ p.expect(token.RETURN)
+ var x []ast.Expr
+ if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
+ x = p.parseRhsList()
+ }
+ p.expectSemi()
+
+ return &ast.ReturnStmt{Return: pos, Results: x}
+}
+
+func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
+ if p.trace {
+ defer un(trace(p, "BranchStmt"))
+ }
+
+ pos := p.expect(tok)
+ var label *ast.Ident
+ if tok != token.FALLTHROUGH && p.tok == token.IDENT {
+ label = p.parseIdent()
+ // add to list of unresolved targets
+ n := len(p.targetStack) - 1
+ p.targetStack[n] = append(p.targetStack[n], label)
+ }
+ p.expectSemi()
+
+ return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
+}
+
+func (p *parser) makeExpr(s ast.Stmt, kind string) ast.Expr {
+ if s == nil {
+ return nil
+ }
+ if es, isExpr := s.(*ast.ExprStmt); isExpr {
+ return p.checkExpr(es.X)
+ }
+ p.error(s.Pos(), fmt.Sprintf("expected %s, found simple statement (missing parentheses around composite literal?)", kind))
+ return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
+}
+
+func (p *parser) parseIfStmt() *ast.IfStmt {
+ if p.trace {
+ defer un(trace(p, "IfStmt"))
+ }
+
+ pos := p.expect(token.IF)
+ p.openScope()
+ defer p.closeScope()
+
+ var s ast.Stmt
+ var x ast.Expr
+ {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok == token.SEMICOLON {
+ p.next()
+ x = p.parseRhs()
+ } else {
+ s, _ = p.parseSimpleStmt(basic)
+ if p.tok == token.SEMICOLON {
+ p.next()
+ x = p.parseRhs()
+ } else {
+ x = p.makeExpr(s, "boolean expression")
+ s = nil
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ body := p.parseBlockStmt()
+ var else_ ast.Stmt
+ if p.tok == token.ELSE {
+ p.next()
+ else_ = p.parseStmt()
+ } else {
+ p.expectSemi()
+ }
+
+ return &ast.IfStmt{If: pos, Init: s, Cond: x, Body: body, Else: else_}
+}
+
+func (p *parser) parseTypeList() (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "TypeList"))
+ }
+
+ list = append(list, p.parseType())
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.parseType())
+ }
+
+ return
+}
+
+func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
+ if p.trace {
+ defer un(trace(p, "CaseClause"))
+ }
+
+ pos := p.pos
+ var list []ast.Expr
+ if p.tok == token.CASE {
+ p.next()
+ if typeSwitch {
+ list = p.parseTypeList()
+ } else {
+ list = p.parseRhsList()
+ }
+ } else {
+ p.expect(token.DEFAULT)
+ }
+
+ colon := p.expect(token.COLON)
+ p.openScope()
+ body := p.parseStmtList()
+ p.closeScope()
+
+ return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
+}
+
+func isTypeSwitchAssert(x ast.Expr) bool {
+ a, ok := x.(*ast.TypeAssertExpr)
+ return ok && a.Type == nil
+}
+
+func isTypeSwitchGuard(s ast.Stmt) bool {
+ switch t := s.(type) {
+ case *ast.ExprStmt:
+ // x.(nil)
+ return isTypeSwitchAssert(t.X)
+ case *ast.AssignStmt:
+ // v := x.(nil)
+ return len(t.Lhs) == 1 && t.Tok == token.DEFINE && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0])
+ }
+ return false
+}
+
+func (p *parser) parseSwitchStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "SwitchStmt"))
+ }
+
+ pos := p.expect(token.SWITCH)
+ p.openScope()
+ defer p.closeScope()
+
+ var s1, s2 ast.Stmt
+ if p.tok != token.LBRACE {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok != token.SEMICOLON {
+ s2, _ = p.parseSimpleStmt(basic)
+ }
+ if p.tok == token.SEMICOLON {
+ p.next()
+ s1 = s2
+ s2 = nil
+ if p.tok != token.LBRACE {
+ // A TypeSwitchGuard may declare a variable in addition
+ // to the variable declared in the initial SimpleStmt.
+ // Introduce extra scope to avoid redeclaration errors:
+ //
+ // switch t := 0; t := x.(T) { ... }
+ //
+ // (this code is not valid Go because the first t
+ // cannot be accessed and thus is never used, the extra
+ // scope is needed for the correct error message).
+ //
+ // If we don't have a type switch, s2 must be an expression.
+ // Having the extra nested but empty scope won't affect it.
+ p.openScope()
+ defer p.closeScope()
+ s2, _ = p.parseSimpleStmt(basic)
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ typeSwitch := isTypeSwitchGuard(s2)
+ lbrace := p.expect(token.LBRACE)
+ var list []ast.Stmt
+ for p.tok == token.CASE || p.tok == token.DEFAULT {
+ list = append(list, p.parseCaseClause(typeSwitch))
+ }
+ rbrace := p.expect(token.RBRACE)
+ p.expectSemi()
+ body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
+
+ if typeSwitch {
+ return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
+ }
+
+ return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
+}
+
+func (p *parser) parseCommClause() *ast.CommClause {
+ if p.trace {
+ defer un(trace(p, "CommClause"))
+ }
+
+ p.openScope()
+ pos := p.pos
+ var comm ast.Stmt
+ if p.tok == token.CASE {
+ p.next()
+ lhs := p.parseLhsList()
+ if p.tok == token.ARROW {
+ // SendStmt
+ if len(lhs) > 1 {
+ p.errorExpected(lhs[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+ arrow := p.pos
+ p.next()
+ rhs := p.parseRhs()
+ comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
+ } else {
+ // RecvStmt
+ if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
+ // RecvStmt with assignment
+ if len(lhs) > 2 {
+ p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
+ // continue with first two expressions
+ lhs = lhs[0:2]
+ }
+ pos := p.pos
+ p.next()
+ rhs := p.parseRhs()
+ as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
+ if tok == token.DEFINE {
+ p.shortVarDecl(as, lhs)
+ }
+ comm = as
+ } else {
+ // lhs must be single receive operation
+ if len(lhs) > 1 {
+ p.errorExpected(lhs[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+ comm = &ast.ExprStmt{X: lhs[0]}
+ }
+ }
+ } else {
+ p.expect(token.DEFAULT)
+ }
+
+ colon := p.expect(token.COLON)
+ body := p.parseStmtList()
+ p.closeScope()
+
+ return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
+}
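+
+// For example, "case c <- v:" produces a SendStmt comm statement,
+// "case x, ok := <-c:" an AssignStmt with two left-hand sides, and
+// "case <-c:" a plain ExprStmt wrapping the receive operation.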
+
+func (p *parser) parseSelectStmt() *ast.SelectStmt {
+ if p.trace {
+ defer un(trace(p, "SelectStmt"))
+ }
+
+ pos := p.expect(token.SELECT)
+ lbrace := p.expect(token.LBRACE)
+ var list []ast.Stmt
+ for p.tok == token.CASE || p.tok == token.DEFAULT {
+ list = append(list, p.parseCommClause())
+ }
+ rbrace := p.expect(token.RBRACE)
+ p.expectSemi()
+ body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
+
+ return &ast.SelectStmt{Select: pos, Body: body}
+}
+
+func (p *parser) parseForStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "ForStmt"))
+ }
+
+ pos := p.expect(token.FOR)
+ p.openScope()
+ defer p.closeScope()
+
+ var s1, s2, s3 ast.Stmt
+ var isRange bool
+ if p.tok != token.LBRACE {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok != token.SEMICOLON {
+ if p.tok == token.RANGE {
+ // "for range x" (nil lhs in assignment)
+ pos := p.pos
+ p.next()
+ y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
+ s2 = &ast.AssignStmt{Rhs: y}
+ isRange = true
+ } else {
+ s2, isRange = p.parseSimpleStmt(rangeOk)
+ }
+ }
+ if !isRange && p.tok == token.SEMICOLON {
+ p.next()
+ s1 = s2
+ s2 = nil
+ if p.tok != token.SEMICOLON {
+ s2, _ = p.parseSimpleStmt(basic)
+ }
+ p.expectSemi()
+ if p.tok != token.LBRACE {
+ s3, _ = p.parseSimpleStmt(basic)
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ body := p.parseBlockStmt()
+ p.expectSemi()
+
+ if isRange {
+ as := s2.(*ast.AssignStmt)
+ // check lhs
+ var key, value ast.Expr
+ switch len(as.Lhs) {
+ case 0:
+ // nothing to do
+ case 1:
+ key = as.Lhs[0]
+ case 2:
+ key, value = as.Lhs[0], as.Lhs[1]
+ default:
+ p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
+ return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
+ }
+ // parseSimpleStmt returned a right-hand side that
+ // is a single unary expression of the form "range x"
+ x := as.Rhs[0].(*ast.UnaryExpr).X
+ return &ast.RangeStmt{
+ For: pos,
+ Key: key,
+ Value: value,
+ TokPos: as.TokPos,
+ Tok: as.Tok,
+ X: x,
+ Body: body,
+ }
+ }
+
+ // regular for statement
+ return &ast.ForStmt{
+ For: pos,
+ Init: s1,
+ Cond: p.makeExpr(s2, "boolean or range expression"),
+ Post: s3,
+ Body: body,
+ }
+}
+
+func (p *parser) parseStmt() (s ast.Stmt) {
+ if p.trace {
+ defer un(trace(p, "Statement"))
+ }
+
+ switch p.tok {
+ case token.CONST, token.TYPE, token.VAR:
+ s = &ast.DeclStmt{Decl: p.parseDecl(syncStmt)}
+ case
+ // tokens that may start an expression
+ token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
+ token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types
+ token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
+ s, _ = p.parseSimpleStmt(labelOk)
+ // because of the required look-ahead, labeled statements are
+ // parsed by parseSimpleStmt - don't expect a semicolon after
+ // them
+ if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
+ p.expectSemi()
+ }
+ case token.GO:
+ s = p.parseGoStmt()
+ case token.DEFER:
+ s = p.parseDeferStmt()
+ case token.RETURN:
+ s = p.parseReturnStmt()
+ case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
+ s = p.parseBranchStmt(p.tok)
+ case token.LBRACE:
+ s = p.parseBlockStmt()
+ p.expectSemi()
+ case token.IF:
+ s = p.parseIfStmt()
+ case token.SWITCH:
+ s = p.parseSwitchStmt()
+ case token.SELECT:
+ s = p.parseSelectStmt()
+ case token.FOR:
+ s = p.parseForStmt()
+ case token.SEMICOLON:
+ s = &ast.EmptyStmt{Semicolon: p.pos}
+ p.next()
+ case token.RBRACE:
+ // a semicolon may be omitted before a closing "}"
+ s = &ast.EmptyStmt{Semicolon: p.pos}
+ default:
+ // no statement found
+ pos := p.pos
+ p.errorExpected(pos, "statement")
+ syncStmt(p)
+ s = &ast.BadStmt{From: pos, To: p.pos}
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+type parseSpecFunction func(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec
+
+func isValidImport(lit string) bool {
+ const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+ s, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
+ for _, r := range s {
+ if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+ return false
+ }
+ }
+ return s != ""
+}
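+
+// For example, `"a/b"` is a valid import path literal while `""` and
+// `"a b"` are not: the empty path is rejected explicitly and the blank
+// is caught by the unicode.IsSpace check (see also the imports table in
+// parser_test.go).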
+
+func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "ImportSpec"))
+ }
+
+ var ident *ast.Ident
+ switch p.tok {
+ case token.PERIOD:
+ ident = &ast.Ident{NamePos: p.pos, Name: "."}
+ p.next()
+ case token.IDENT:
+ ident = p.parseIdent()
+ }
+
+ pos := p.pos
+ var path string
+ if p.tok == token.STRING {
+ path = p.lit
+ if !isValidImport(path) {
+ p.error(pos, "invalid import path: "+path)
+ }
+ p.next()
+ } else {
+ p.expect(token.STRING) // use expect() error handling
+ }
+ p.expectSemi() // call before accessing p.linecomment
+
+ // collect imports
+ spec := &ast.ImportSpec{
+ Doc: doc,
+ Name: ident,
+ Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
+ Comment: p.lineComment,
+ }
+ p.imports = append(p.imports, spec)
+
+ return spec
+}
+
+func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, keyword.String()+"Spec"))
+ }
+
+ idents := p.parseIdentList()
+ typ := p.tryType()
+ var values []ast.Expr
+ // always permit optional initialization for more tolerant parsing
+ if p.tok == token.ASSIGN {
+ p.next()
+ values = p.parseRhsList()
+ }
+ p.expectSemi() // call before accessing p.linecomment
+
+ // Go spec: The scope of a constant or variable identifier declared inside
+ // a function begins at the end of the ConstSpec or VarSpec and ends at
+ // the end of the innermost containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.ValueSpec{
+ Doc: doc,
+ Names: idents,
+ Type: typ,
+ Values: values,
+ Comment: p.lineComment,
+ }
+ kind := ast.Con
+ if keyword == token.VAR {
+ kind = ast.Var
+ }
+ p.declare(spec, iota, p.topScope, kind, idents...)
+
+ return spec
+}
+
+func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "TypeSpec"))
+ }
+
+ ident := p.parseIdent()
+
+ // Go spec: The scope of a type identifier declared inside a function begins
+ // at the identifier in the TypeSpec and ends at the end of the innermost
+ // containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.TypeSpec{Doc: doc, Name: ident}
+ p.declare(spec, nil, p.topScope, ast.Typ, ident)
+
+ spec.Type = p.parseType()
+ p.expectSemi() // call before accessing p.linecomment
+ spec.Comment = p.lineComment
+
+ return spec
+}
+
+func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
+ if p.trace {
+ defer un(trace(p, "GenDecl("+keyword.String()+")"))
+ }
+
+ doc := p.leadComment
+ pos := p.expect(keyword)
+ var lparen, rparen token.Pos
+ var list []ast.Spec
+ if p.tok == token.LPAREN {
+ lparen = p.pos
+ p.next()
+ for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
+ list = append(list, f(p.leadComment, keyword, iota))
+ }
+ rparen = p.expect(token.RPAREN)
+ p.expectSemi()
+ } else {
+ list = append(list, f(nil, keyword, 0))
+ }
+
+ return &ast.GenDecl{
+ Doc: doc,
+ TokPos: pos,
+ Tok: keyword,
+ Lparen: lparen,
+ Specs: list,
+ Rparen: rparen,
+ }
+}
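+
+// For example, "const ( a = iota; b )" yields a single *ast.GenDecl with
+// two *ast.ValueSpecs; the loop above passes each spec its index as the
+// iota value (0 for a, 1 for b).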
+
+func (p *parser) parseFuncDecl() *ast.FuncDecl {
+ if p.trace {
+ defer un(trace(p, "FunctionDecl"))
+ }
+
+ doc := p.leadComment
+ pos := p.expect(token.FUNC)
+ scope := ast.NewScope(p.topScope) // function scope
+
+ var recv *ast.FieldList
+ if p.tok == token.LPAREN {
+ recv = p.parseParameters(scope, false)
+ }
+
+ ident := p.parseIdent()
+
+ params, results := p.parseSignature(scope)
+
+ var body *ast.BlockStmt
+ if p.tok == token.LBRACE {
+ body = p.parseBody(scope)
+ }
+ p.expectSemi()
+
+ decl := &ast.FuncDecl{
+ Doc: doc,
+ Recv: recv,
+ Name: ident,
+ Type: &ast.FuncType{
+ Func: pos,
+ Params: params,
+ Results: results,
+ },
+ Body: body,
+ }
+ if recv == nil {
+ // Go spec: The scope of an identifier denoting a constant, type,
+ // variable, or function (but not method) declared at top level
+ // (outside any function) is the package block.
+ //
+ // init() functions cannot be referred to and there may
+ // be more than one - don't put them in the pkgScope
+ if ident.Name != "init" {
+ p.declare(decl, nil, p.pkgScope, ast.Fun, ident)
+ }
+ }
+
+ return decl
+}
+
+func (p *parser) parseDecl(sync func(*parser)) ast.Decl {
+ if p.trace {
+ defer un(trace(p, "Declaration"))
+ }
+
+ var f parseSpecFunction
+ switch p.tok {
+ case token.CONST, token.VAR:
+ f = p.parseValueSpec
+
+ case token.TYPE:
+ f = p.parseTypeSpec
+
+ case token.FUNC:
+ return p.parseFuncDecl()
+
+ default:
+ pos := p.pos
+ p.errorExpected(pos, "declaration")
+ sync(p)
+ return &ast.BadDecl{From: pos, To: p.pos}
+ }
+
+ return p.parseGenDecl(p.tok, f)
+}
+
+// ----------------------------------------------------------------------------
+// Source files
+
+func (p *parser) parseFile() *ast.File {
+ if p.trace {
+ defer un(trace(p, "File"))
+ }
+
+ // Don't bother parsing the rest if we had errors scanning the first token.
+ // Likely not a Go source file at all.
+ if p.errors.Len() != 0 {
+ return nil
+ }
+
+ // package clause
+ doc := p.leadComment
+ pos := p.expect(token.PACKAGE)
+ // Go spec: The package clause is not a declaration;
+ // the package name does not appear in any scope.
+ ident := p.parseIdent()
+ if ident.Name == "_" && p.mode&DeclarationErrors != 0 {
+ p.error(p.pos, "invalid package name _")
+ }
+ p.expectSemi()
+
+ // Don't bother parsing the rest if we had errors parsing the package clause.
+ // Likely not a Go source file at all.
+ if p.errors.Len() != 0 {
+ return nil
+ }
+
+ p.openScope()
+ p.pkgScope = p.topScope
+ var decls []ast.Decl
+ if p.mode&PackageClauseOnly == 0 {
+ // import decls
+ for p.tok == token.IMPORT {
+ decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec))
+ }
+
+ if p.mode&ImportsOnly == 0 {
+ // rest of package body
+ for p.tok != token.EOF {
+ decls = append(decls, p.parseDecl(syncDecl))
+ }
+ }
+ }
+ p.closeScope()
+ assert(p.topScope == nil, "unbalanced scopes")
+ assert(p.labelScope == nil, "unbalanced label scopes")
+
+ // resolve global identifiers within the same file
+ i := 0
+ for _, ident := range p.unresolved {
+ // i <= index of current ident, so writing p.unresolved[i] below is safe
+ assert(ident.Obj == unresolved, "object already resolved")
+ ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
+ if ident.Obj == nil {
+ p.unresolved[i] = ident
+ i++
+ }
+ }
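+
+ // For example, a package-level type T used before its declaration is
+ // resolved here, while predeclared names such as int and identifiers
+ // from other packages find no pkgScope entry and remain in
+ // File.Unresolved (cf. TestUnresolved in parser_test.go).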
+
+ return &ast.File{
+ Doc: doc,
+ Package: pos,
+ Name: ident,
+ Decls: decls,
+ Scope: p.pkgScope,
+ Imports: p.imports,
+ Unresolved: p.unresolved[0:i],
+ Comments: p.comments,
+ }
+}
diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go
new file mode 100644
index 000000000..85065fd18
--- /dev/null
+++ b/src/go/parser/parser_test.go
@@ -0,0 +1,449 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parser
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "os"
+ "strings"
+ "testing"
+)
+
+var fset = token.NewFileSet()
+
+var validFiles = []string{
+ "parser.go",
+ "parser_test.go",
+ "error_test.go",
+ "short_test.go",
+}
+
+func TestParse(t *testing.T) {
+ for _, filename := range validFiles {
+ _, err := ParseFile(fset, filename, nil, DeclarationErrors)
+ if err != nil {
+ t.Fatalf("ParseFile(%s): %v", filename, err)
+ }
+ }
+}
+
+func nameFilter(filename string) bool {
+ switch filename {
+ case "parser.go", "interface.go", "parser_test.go":
+ return true
+ case "parser.go.orig":
+ return true // permit but should be ignored by ParseDir
+ }
+ return false
+}
+
+func dirFilter(f os.FileInfo) bool { return nameFilter(f.Name()) }
+
+func TestParseDir(t *testing.T) {
+ path := "."
+ pkgs, err := ParseDir(fset, path, dirFilter, 0)
+ if err != nil {
+ t.Fatalf("ParseDir(%s): %v", path, err)
+ }
+ if n := len(pkgs); n != 1 {
+ t.Errorf("got %d packages; want 1", n)
+ }
+ pkg := pkgs["parser"]
+ if pkg == nil {
+ t.Errorf(`package "parser" not found`)
+ return
+ }
+ if n := len(pkg.Files); n != 3 {
+ t.Errorf("got %d package files; want 3", n)
+ }
+ for filename := range pkg.Files {
+ if !nameFilter(filename) {
+ t.Errorf("unexpected package file: %s", filename)
+ }
+ }
+}
+
+func TestParseExpr(t *testing.T) {
+ // just kicking the tires:
+ // a valid arithmetic expression
+ src := "a + b"
+ x, err := ParseExpr(src)
+ if err != nil {
+ t.Errorf("ParseExpr(%q): %v", src, err)
+ }
+ // sanity check
+ if _, ok := x.(*ast.BinaryExpr); !ok {
+ t.Errorf("ParseExpr(%q): got %T, want *ast.BinaryExpr", src, x)
+ }
+
+ // a valid type expression
+ src = "struct{x *int}"
+ x, err = ParseExpr(src)
+ if err != nil {
+ t.Errorf("ParseExpr(%q): %v", src, err)
+ }
+ // sanity check
+ if _, ok := x.(*ast.StructType); !ok {
+ t.Errorf("ParseExpr(%q): got %T, want *ast.StructType", src, x)
+ }
+
+ // an invalid expression
+ src = "a + *"
+ if _, err := ParseExpr(src); err == nil {
+ t.Errorf("ParseExpr(%q): got no error", src)
+ }
+
+ // a valid expression followed by extra tokens is invalid
+ src = "a[i] := x"
+ if _, err := ParseExpr(src); err == nil {
+ t.Errorf("ParseExpr(%q): got no error", src)
+ }
+
+ // a semicolon is not permitted unless automatically inserted
+ src = "a + b\n"
+ if _, err := ParseExpr(src); err != nil {
+ t.Errorf("ParseExpr(%q): got error %s", src, err)
+ }
+ src = "a + b;"
+ if _, err := ParseExpr(src); err == nil {
+ t.Errorf("ParseExpr(%q): got no error", src)
+ }
+
+ // various other stuff following a valid expression
+ const validExpr = "a + b"
+ const anything = "dh3*#D)#_"
+ for _, c := range "!)]};," {
+ src := validExpr + string(c) + anything
+ if _, err := ParseExpr(src); err == nil {
+ t.Errorf("ParseExpr(%q): got no error", src)
+ }
+ }
+
+ // ParseExpr must not crash
+ for _, src := range valids {
+ ParseExpr(src)
+ }
+}
+
+func TestColonEqualsScope(t *testing.T) {
+ f, err := ParseFile(fset, "", `package p; func f() { x, y, z := x, y, z }`, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // RHS refers to undefined globals; LHS does not.
+ as := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.AssignStmt)
+ for _, v := range as.Rhs {
+ id := v.(*ast.Ident)
+ if id.Obj != nil {
+ t.Errorf("rhs %s has Obj, should not", id.Name)
+ }
+ }
+ for _, v := range as.Lhs {
+ id := v.(*ast.Ident)
+ if id.Obj == nil {
+ t.Errorf("lhs %s does not have Obj, should", id.Name)
+ }
+ }
+}
+
+func TestVarScope(t *testing.T) {
+ f, err := ParseFile(fset, "", `package p; func f() { var x, y, z = x, y, z }`, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // RHS refers to undefined globals; LHS does not.
+ as := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.DeclStmt).Decl.(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
+ for _, v := range as.Values {
+ id := v.(*ast.Ident)
+ if id.Obj != nil {
+ t.Errorf("rhs %s has Obj, should not", id.Name)
+ }
+ }
+ for _, id := range as.Names {
+ if id.Obj == nil {
+ t.Errorf("lhs %s does not have Obj, should", id.Name)
+ }
+ }
+}
+
+func TestObjects(t *testing.T) {
+ const src = `
+package p
+import fmt "fmt"
+const pi = 3.14
+type T struct{}
+var x int
+func f() { L: }
+`
+
+ f, err := ParseFile(fset, "", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ objects := map[string]ast.ObjKind{
+ "p": ast.Bad, // not in a scope
+ "fmt": ast.Bad, // not resolved yet
+ "pi": ast.Con,
+ "T": ast.Typ,
+ "x": ast.Var,
+ "int": ast.Bad, // not resolved yet
+ "f": ast.Fun,
+ "L": ast.Lbl,
+ }
+
+ ast.Inspect(f, func(n ast.Node) bool {
+ if ident, ok := n.(*ast.Ident); ok {
+ obj := ident.Obj
+ if obj == nil {
+ if objects[ident.Name] != ast.Bad {
+ t.Errorf("no object for %s", ident.Name)
+ }
+ return true
+ }
+ if obj.Name != ident.Name {
+ t.Errorf("names don't match: obj.Name = %s, ident.Name = %s", obj.Name, ident.Name)
+ }
+ kind := objects[ident.Name]
+ if obj.Kind != kind {
+ t.Errorf("%s: obj.Kind = %s; want %s", ident.Name, obj.Kind, kind)
+ }
+ }
+ return true
+ })
+}
+
+func TestUnresolved(t *testing.T) {
+ f, err := ParseFile(fset, "", `
+package p
+//
+func f1a(int)
+func f2a(byte, int, float)
+func f3a(a, b int, c float)
+func f4a(...complex)
+func f5a(a s1a, b ...complex)
+//
+func f1b(*int)
+func f2b([]byte, (int), *float)
+func f3b(a, b *int, c []float)
+func f4b(...*complex)
+func f5b(a s1a, b ...[]complex)
+//
+type s1a struct { int }
+type s2a struct { byte; int; s1a }
+type s3a struct { a, b int; c float }
+//
+type s1b struct { *int }
+type s2b struct { byte; int; *float }
+type s3b struct { a, b *s3b; c []float }
+`, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := "int " + // f1a
+ "byte int float " + // f2a
+ "int float " + // f3a
+ "complex " + // f4a
+ "complex " + // f5a
+ //
+ "int " + // f1b
+ "byte int float " + // f2b
+ "int float " + // f3b
+ "complex " + // f4b
+ "complex " + // f5b
+ //
+ "int " + // s1a
+ "byte int " + // s2a
+ "int float " + // s3a
+ //
+ "int " + // s1a
+ "byte int float " + // s2a
+ "float " // s3a
+
+ // collect unresolved identifiers
+ var buf bytes.Buffer
+ for _, u := range f.Unresolved {
+ buf.WriteString(u.Name)
+ buf.WriteByte(' ')
+ }
+ got := buf.String()
+
+ if got != want {
+ t.Errorf("\ngot: %s\nwant: %s", got, want)
+ }
+}
+
+var imports = map[string]bool{
+ `"a"`: true,
+ "`a`": true,
+ `"a/b"`: true,
+ `"a.b"`: true,
+ `"m\x61th"`: true,
+ `"greek/αβ"`: true,
+ `""`: false,
+
+ // Each of these pairs tests both `` vs "" strings
+ // and also use of invalid characters spelled out as
+ // escape sequences and written directly.
+ // For example `"\x00"` tests import "\x00"
+ // while "`\x00`" tests import `<actual-NUL-byte>`.
+ `"\x00"`: false,
+ "`\x00`": false,
+ `"\x7f"`: false,
+ "`\x7f`": false,
+ `"a!"`: false,
+ "`a!`": false,
+ `"a b"`: false,
+ "`a b`": false,
+ `"a\\b"`: false,
+ "`a\\b`": false,
+ "\"`a`\"": false,
+ "`\"a\"`": false,
+ `"\x80\x80"`: false,
+ "`\x80\x80`": false,
+ `"\xFFFD"`: false,
+ "`\xFFFD`": false,
+}
+
+func TestImports(t *testing.T) {
+ for path, isValid := range imports {
+ src := fmt.Sprintf("package p; import %s", path)
+ _, err := ParseFile(fset, "", src, 0)
+ switch {
+ case err != nil && isValid:
+ t.Errorf("ParseFile(%s): got %v; expected no error", src, err)
+ case err == nil && !isValid:
+ t.Errorf("ParseFile(%s): got no error; expected one", src)
+ }
+ }
+}
+
+func TestCommentGroups(t *testing.T) {
+ f, err := ParseFile(fset, "", `
+package p /* 1a */ /* 1b */ /* 1c */ // 1d
+/* 2a
+*/
+// 2b
+const pi = 3.1415
+/* 3a */ // 3b
+/* 3c */ const e = 2.7182
+
+// Example from issue 3139
+func ExampleCount() {
+ fmt.Println(strings.Count("cheese", "e"))
+ fmt.Println(strings.Count("five", "")) // before & after each rune
+ // Output:
+ // 3
+ // 5
+}
+`, ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := [][]string{
+ {"/* 1a */", "/* 1b */", "/* 1c */", "// 1d"},
+ {"/* 2a\n*/", "// 2b"},
+ {"/* 3a */", "// 3b", "/* 3c */"},
+ {"// Example from issue 3139"},
+ {"// before & after each rune"},
+ {"// Output:", "// 3", "// 5"},
+ }
+ if len(f.Comments) != len(expected) {
+ t.Fatalf("got %d comment groups; expected %d", len(f.Comments), len(expected))
+ }
+ for i, exp := range expected {
+ got := f.Comments[i].List
+ if len(got) != len(exp) {
+ t.Errorf("got %d comments in group %d; expected %d", len(got), i, len(exp))
+ continue
+ }
+ for j, exp := range exp {
+ got := got[j].Text
+ if got != exp {
+ t.Errorf("got %q in group %d; expected %q", got, i, exp)
+ }
+ }
+ }
+}
+
+func getField(file *ast.File, fieldname string) *ast.Field {
+ parts := strings.Split(fieldname, ".")
+ for _, d := range file.Decls {
+ if d, ok := d.(*ast.GenDecl); ok && d.Tok == token.TYPE {
+ for _, s := range d.Specs {
+ if s, ok := s.(*ast.TypeSpec); ok && s.Name.Name == parts[0] {
+ if s, ok := s.Type.(*ast.StructType); ok {
+ for _, f := range s.Fields.List {
+ for _, name := range f.Names {
+ if name.Name == parts[1] {
+ return f
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Don't use ast.CommentGroup.Text() - we want to see exact comment text.
+func commentText(c *ast.CommentGroup) string {
+ var buf bytes.Buffer
+ if c != nil {
+ for _, c := range c.List {
+ buf.WriteString(c.Text)
+ }
+ }
+ return buf.String()
+}
+
+func checkFieldComments(t *testing.T, file *ast.File, fieldname, lead, line string) {
+ f := getField(file, fieldname)
+ if f == nil {
+ t.Fatalf("field not found: %s", fieldname)
+ }
+ if got := commentText(f.Doc); got != lead {
+ t.Errorf("got lead comment %q; expected %q", got, lead)
+ }
+ if got := commentText(f.Comment); got != line {
+ t.Errorf("got line comment %q; expected %q", got, line)
+ }
+}
+
+func TestLeadAndLineComments(t *testing.T) {
+ f, err := ParseFile(fset, "", `
+package p
+type T struct {
+ /* F1 lead comment */
+ //
+ F1 int /* F1 */ // line comment
+ // F2 lead
+ // comment
+ F2 int // F2 line comment
+ // f3 lead comment
+ f3 int // f3 line comment
+}
+`, ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+ checkFieldComments(t, f, "T.F1", "/* F1 lead comment *///", "/* F1 */// line comment")
+ checkFieldComments(t, f, "T.F2", "// F2 lead// comment", "// F2 line comment")
+ checkFieldComments(t, f, "T.f3", "// f3 lead comment", "// f3 line comment")
+ ast.FileExports(f)
+ checkFieldComments(t, f, "T.F1", "/* F1 lead comment *///", "/* F1 */// line comment")
+ checkFieldComments(t, f, "T.F2", "// F2 lead// comment", "// F2 line comment")
+ if getField(f, "T.f3") != nil {
+ t.Error("not expected to find T.f3")
+ }
+}
diff --git a/src/go/parser/performance_test.go b/src/go/parser/performance_test.go
new file mode 100644
index 000000000..f2732c0e2
--- /dev/null
+++ b/src/go/parser/performance_test.go
@@ -0,0 +1,30 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package parser
+
+import (
+ "go/token"
+ "io/ioutil"
+ "testing"
+)
+
+var src = readFile("parser.go")
+
+func readFile(filename string) []byte {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ panic(err)
+ }
+ return data
+}
+
+func BenchmarkParse(b *testing.B) {
+ b.SetBytes(int64(len(src)))
+ for i := 0; i < b.N; i++ {
+ if _, err := ParseFile(token.NewFileSet(), "", src, ParseComments); err != nil {
+ b.Fatalf("benchmark failed due to parse error: %s", err)
+ }
+ }
+}
diff --git a/src/go/parser/short_test.go b/src/go/parser/short_test.go
new file mode 100644
index 000000000..f861086dd
--- /dev/null
+++ b/src/go/parser/short_test.go
@@ -0,0 +1,103 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for short valid and invalid programs.
+
+package parser
+
+import "testing"
+
+var valids = []string{
+ "package p\n",
+ `package p;`,
+ `package p; import "fmt"; func f() { fmt.Println("Hello, World!") };`,
+ `package p; func f() { if f(T{}) {} };`,
+ `package p; func f() { _ = <-chan int(nil) };`,
+ `package p; func f() { _ = (<-chan int)(nil) };`,
+ `package p; func f() { _ = (<-chan <-chan int)(nil) };`,
+ `package p; func f() { _ = <-chan <-chan <-chan <-chan <-int(nil) };`,
+ `package p; func f(func() func() func());`,
+ `package p; func f(...T);`,
+ `package p; func f(float, ...int);`,
+ `package p; func f(x int, a ...int) { f(0, a...); f(1, a...,) };`,
+ `package p; func f(int,) {};`,
+ `package p; func f(...int,) {};`,
+ `package p; func f(x ...int,) {};`,
+ `package p; type T []int; var a []bool; func f() { if a[T{42}[0]] {} };`,
+ `package p; type T []int; func g(int) bool { return true }; func f() { if g(T{42}[0]) {} };`,
+ `package p; type T []int; func f() { for _ = range []int{T{42}[0]} {} };`,
+ `package p; var a = T{{1, 2}, {3, 4}}`,
+ `package p; func f() { select { case <- c: case c <- d: case c <- <- d: case <-c <- d: } };`,
+ `package p; func f() { select { case x := (<-c): } };`,
+ `package p; func f() { if ; true {} };`,
+ `package p; func f() { switch ; {} };`,
+ `package p; func f() { for _ = range "foo" + "bar" {} };`,
+ `package p; func f() { var s []int; g(s[:], s[i:], s[:j], s[i:j], s[i:j:k], s[:j:k]) };`,
+ `package p; var ( _ = (struct {*T}).m; _ = (interface {T}).m )`,
+ `package p; func ((T),) m() {}`,
+ `package p; func ((*T),) m() {}`,
+ `package p; func (*(T),) m() {}`,
+ `package p; func _(x []int) { for range x {} }`,
+}
+
+func TestValid(t *testing.T) {
+ for _, src := range valids {
+ checkErrors(t, src, src)
+ }
+}
+
+var invalids = []string{
+ `foo /* ERROR "expected 'package'" */ !`,
+ `package p; func f() { if { /* ERROR "expected operand" */ } };`,
+ `package p; func f() { if ; { /* ERROR "expected operand" */ } };`,
+ `package p; func f() { if f(); { /* ERROR "expected operand" */ } };`,
+ `package p; func f() { if _ /* ERROR "expected boolean expression" */ = range x; true {} };`,
+ `package p; func f() { switch _ /* ERROR "expected switch expression" */ = range x; true {} };`,
+ `package p; func f() { for _ = range x ; /* ERROR "expected '{'" */ ; {} };`,
+ `package p; func f() { for ; ; _ = range /* ERROR "expected operand" */ x {} };`,
+ `package p; func f() { for ; _ /* ERROR "expected boolean or range expression" */ = range x ; {} };`,
+ `package p; func f() { switch t /* ERROR "expected switch expression" */ = t.(type) {} };`,
+ `package p; func f() { switch t /* ERROR "expected switch expression" */ , t = t.(type) {} };`,
+ `package p; func f() { switch t /* ERROR "expected switch expression" */ = t.(type), t {} };`,
+ `package p; var a = [ /* ERROR "expected expression" */ 1]int;`,
+ `package p; var a = [ /* ERROR "expected expression" */ ...]int;`,
+ `package p; var a = struct /* ERROR "expected expression" */ {}`,
+ `package p; var a = func /* ERROR "expected expression" */ ();`,
+ `package p; var a = interface /* ERROR "expected expression" */ {}`,
+ `package p; var a = [ /* ERROR "expected expression" */ ]int`,
+ `package p; var a = map /* ERROR "expected expression" */ [int]int`,
+ `package p; var a = chan /* ERROR "expected expression" */ int;`,
+ `package p; var a = []int{[ /* ERROR "expected expression" */ ]int};`,
+ `package p; var a = ( /* ERROR "expected expression" */ []int);`,
+ `package p; var a = a[[ /* ERROR "expected expression" */ ]int:[]int];`,
+ `package p; var a = <- /* ERROR "expected expression" */ chan int;`,
+ `package p; func f() { select { case _ <- chan /* ERROR "expected expression" */ int: } };`,
+ `package p; func f() { _ = (<-<- /* ERROR "expected 'chan'" */ chan int)(nil) };`,
+ `package p; func f() { _ = (<-chan<-chan<-chan<-chan<-chan<- /* ERROR "expected channel type" */ int)(nil) };`,
+ `package p; func f() { var t []int; t /* ERROR "expected identifier on left side of :=" */ [0] := 0 };`,
+ `package p; func f() { if x := g(); x = /* ERROR "expected '=='" */ 0 {}};`,
+ `package p; func f() { _ = x = /* ERROR "expected '=='" */ 0 {}};`,
+ `package p; func f() { _ = 1 == func()int { var x bool; x = x = /* ERROR "expected '=='" */ true; return x }() };`,
+ `package p; func f() { var s []int; _ = s[] /* ERROR "expected operand" */ };`,
+ `package p; func f() { var s []int; _ = s[i:j: /* ERROR "3rd index required" */ ] };`,
+ `package p; func f() { var s []int; _ = s[i: /* ERROR "2nd index required" */ :k] };`,
+ `package p; func f() { var s []int; _ = s[i: /* ERROR "2nd index required" */ :] };`,
+ `package p; func f() { var s []int; _ = s[: /* ERROR "2nd index required" */ :] };`,
+ `package p; func f() { var s []int; _ = s[: /* ERROR "2nd index required" */ ::] };`,
+ `package p; func f() { var s []int; _ = s[i:j:k: /* ERROR "expected ']'" */ l] };`,
+ `package p; func f() { for x /* ERROR "boolean or range expression" */ = []string {} }`,
+ `package p; func f() { for x /* ERROR "boolean or range expression" */ := []string {} }`,
+ `package p; func f() { for i /* ERROR "boolean or range expression" */ , x = []string {} }`,
+ `package p; func f() { for i /* ERROR "boolean or range expression" */ , x := []string {} }`,
+ `package p; func f() { go f /* ERROR HERE "function must be invoked" */ }`,
+ `package p; func f() { defer func() {} /* ERROR HERE "function must be invoked" */ }`,
+ `package p; func f() { go func() { func() { f(x func /* ERROR "expected '\)'" */ (){}) } } }`,
+ `package p; func f() (a b string /* ERROR "expected '\)'" */ , ok bool) // issue 8656`,
+}
+
+func TestInvalid(t *testing.T) {
+ for _, src := range invalids {
+ checkErrors(t, src, src)
+ }
+}
diff --git a/src/go/parser/testdata/commas.src b/src/go/parser/testdata/commas.src
new file mode 100644
index 000000000..af6e70645
--- /dev/null
+++ b/src/go/parser/testdata/commas.src
@@ -0,0 +1,19 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for error messages/parser synchronization
+// after missing commas.
+
+package p
+
+var _ = []int{
+ 0 /* ERROR "missing ','" */
+}
+
+var _ = []int{
+ 0,
+ 1,
+ 2,
+ 3 /* ERROR "missing ','" */
+}
diff --git a/src/go/parser/testdata/issue3106.src b/src/go/parser/testdata/issue3106.src
new file mode 100644
index 000000000..82796c8ce
--- /dev/null
+++ b/src/go/parser/testdata/issue3106.src
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 3106: Better synchronization of
+// parser after certain syntax errors.
+
+package main
+
+func f() {
+ var m Mutex
+ c := MakeCond(&m)
+ percent := 0
+ const step = 10
+ for i := 0; i < 5; i++ {
+ go func() {
+ for {
+ // Emulates some useful work.
+ time.Sleep(1e8)
+ m.Lock()
+ defer
+ if /* ERROR "expected operand, found 'if'" */ percent == 100 {
+ m.Unlock()
+ break
+ }
+ percent++
+ if percent % step == 0 {
+ //c.Signal()
+ }
+ m.Unlock()
+ }
+ }()
+ }
+ for {
+ m.Lock()
+ if percent == 0 || percent % step != 0 {
+ c.Wait()
+ }
+ fmt.Print(",")
+ if percent == 100 {
+ m.Unlock()
+ break
+ }
+ m.Unlock()
+ }
+}
diff --git a/src/go/printer/example_test.go b/src/go/printer/example_test.go
new file mode 100644
index 000000000..e570040ba
--- /dev/null
+++ b/src/go/printer/example_test.go
@@ -0,0 +1,67 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package printer_test
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "strings"
+ "testing"
+)
+
+// Dummy test function so that godoc does not use the entire file as example.
+func Test(*testing.T) {}
+
+func parseFunc(filename, functionname string) (fun *ast.FuncDecl, fset *token.FileSet) {
+ fset = token.NewFileSet()
+ if file, err := parser.ParseFile(fset, filename, nil, 0); err == nil {
+ for _, d := range file.Decls {
+ if f, ok := d.(*ast.FuncDecl); ok && f.Name.Name == functionname {
+ fun = f
+ return
+ }
+ }
+ }
+ panic("function not found")
+}
+
+func ExampleFprint() {
+ // Parse source file and extract the AST without comments for
+ // this function, with position information referring to the
+ // file set fset.
+ funcAST, fset := parseFunc("example_test.go", "ExampleFprint")
+
+ // Print the function body into buffer buf.
+ // The file set is provided to the printer so that it knows
+ // about the original source formatting and can add additional
+ // line breaks where they were present in the source.
+ var buf bytes.Buffer
+ printer.Fprint(&buf, fset, funcAST.Body)
+
+ // Remove braces {} enclosing the function body, unindent,
+ // and trim leading and trailing white space.
+ s := buf.String()
+ s = s[1 : len(s)-1]
+ s = strings.TrimSpace(strings.Replace(s, "\n\t", "\n", -1))
+
+ // Print the cleaned-up body text to stdout.
+ fmt.Println(s)
+
+ // output:
+ // funcAST, fset := parseFunc("example_test.go", "ExampleFprint")
+ //
+ // var buf bytes.Buffer
+ // printer.Fprint(&buf, fset, funcAST.Body)
+ //
+ // s := buf.String()
+ // s = s[1 : len(s)-1]
+ // s = strings.TrimSpace(strings.Replace(s, "\n\t", "\n", -1))
+ //
+ // fmt.Println(s)
+}
diff --git a/src/go/printer/nodes.go b/src/go/printer/nodes.go
new file mode 100644
index 000000000..6e26f9a63
--- /dev/null
+++ b/src/go/printer/nodes.go
@@ -0,0 +1,1602 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of AST nodes; specifically
+// expressions, statements, declarations, and files. It uses
+// the print functionality implemented in printer.go.
+
+package printer
+
+import (
+ "bytes"
+ "go/ast"
+ "go/token"
+ "unicode/utf8"
+)
+
+// Formatting issues:
+// - better comment formatting for /*-style comments at the end of a line (e.g. a declaration)
+// when the comment spans multiple lines; if such a comment is just two lines, formatting is
+// not idempotent
+// - formatting of expression lists
+// - should use blank instead of tab to separate one-line function bodies from
+// the function header unless there is a group of consecutive one-liners
+
+// ----------------------------------------------------------------------------
+// Common AST nodes.
+
+// Print as many newlines as necessary (but at least min newlines) to get to
+// the current line. ws is printed before the first line break. If newSection
+// is set, the first line break is printed as formfeed. Returns true if any
+// line break was printed; returns false otherwise.
+//
+// TODO(gri): linebreak may add too many lines if the next statement at "line"
+// is preceded by comments because the computation of n assumes
+// the current position before the comment and the target position
+// after the comment. Thus, after interspersing such comments, the
+// space taken up by them is not considered to reduce the number of
+// linebreaks. At the moment there is no easy way to know about
+// future (not yet interspersed) comments in this function.
+//
+func (p *printer) linebreak(line, min int, ws whiteSpace, newSection bool) (printedBreak bool) {
+ n := nlimit(line - p.pos.Line)
+ if n < min {
+ n = min
+ }
+ if n > 0 {
+ p.print(ws)
+ if newSection {
+ p.print(formfeed)
+ n--
+ }
+ for ; n > 0; n-- {
+ p.print(newline)
+ }
+ printedBreak = true
+ }
+ return
+}
+
+// setComment sets g as the next comment if g != nil and if node comments
+// are enabled - this mode is used when printing source code fragments such
+// as exports only. It assumes that there is no pending comment in p.comments
+// and at most one pending comment in the p.comment cache.
+func (p *printer) setComment(g *ast.CommentGroup) {
+ if g == nil || !p.useNodeComments {
+ return
+ }
+ if p.comments == nil {
+ // initialize p.comments lazily
+ p.comments = make([]*ast.CommentGroup, 1)
+ } else if p.cindex < len(p.comments) {
+ // for some reason there are pending comments; this
+ // should never happen - handle gracefully and flush
+ // all comments up to g, ignore anything after that
+ p.flush(p.posFor(g.List[0].Pos()), token.ILLEGAL)
+ p.comments = p.comments[0:1]
+ // in debug mode, report error
+ p.internalError("setComment found pending comments")
+ }
+ p.comments[0] = g
+ p.cindex = 0
+ // don't overwrite any pending comment in the p.comment cache
+ // (there may be a pending comment when a line comment is
+ // immediately followed by a lead comment with no other
+ // tokens between)
+ if p.commentOffset == infinity {
+ p.nextComment() // get comment ready for use
+ }
+}
+
+type exprListMode uint
+
+const (
+ commaTerm exprListMode = 1 << iota // list is optionally terminated by a comma
+ noIndent // no extra indentation in multi-line lists
+)
+
+// If indent is set, a multi-line identifier list is indented after the
+// first linebreak encountered.
+func (p *printer) identList(list []*ast.Ident, indent bool) {
+ // convert into an expression list so we can re-use exprList formatting
+ xlist := make([]ast.Expr, len(list))
+ for i, x := range list {
+ xlist[i] = x
+ }
+ var mode exprListMode
+ if !indent {
+ mode = noIndent
+ }
+ p.exprList(token.NoPos, xlist, 1, mode, token.NoPos)
+}
+
+// Print a list of expressions. If the list spans multiple
+// source lines, the original line breaks are respected between
+// expressions.
+//
+// TODO(gri) Consider rewriting this to be independent of []ast.Expr
+// so that we can use the algorithm for any kind of list
+// (e.g., pass list via a channel over which to range).
+func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, next0 token.Pos) {
+ if len(list) == 0 {
+ return
+ }
+
+ prev := p.posFor(prev0)
+ next := p.posFor(next0)
+ line := p.lineFor(list[0].Pos())
+ endLine := p.lineFor(list[len(list)-1].End())
+
+ if prev.IsValid() && prev.Line == line && line == endLine {
+ // all list entries on a single line
+ for i, x := range list {
+ if i > 0 {
+ // use position of expression following the comma as
+ // comma position for correct comment placement
+ p.print(x.Pos(), token.COMMA, blank)
+ }
+ p.expr0(x, depth)
+ }
+ return
+ }
+
+ // list entries span multiple lines;
+ // use source code positions to guide line breaks
+
+ // don't add extra indentation if noIndent is set;
+ // i.e., pretend that the first line is already indented
+ ws := ignore
+ if mode&noIndent == 0 {
+ ws = indent
+ }
+
+ // the first linebreak is always a formfeed since this section must not
+ // depend on any previous formatting
+ prevBreak := -1 // index of last expression that was followed by a linebreak
+ if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) {
+ ws = ignore
+ prevBreak = 0
+ }
+
+ // initialize expression/key size: a zero value indicates expr/key doesn't fit on a single line
+ size := 0
+
+ // print all list elements
+ for i, x := range list {
+ prevLine := line
+ line = p.lineFor(x.Pos())
+
+ // determine if the next linebreak, if any, needs to use formfeed:
+ // in general, use the entire node size to make the decision; for
+ // key:value expressions, use the key size
+ // TODO(gri) for a better result, should probably incorporate both
+ // the key and the node size into the decision process
+ useFF := true
+
+ // determine element size: all bets are off if we don't have
+ // position information for the previous and next token (likely
+ // generated code - simply ignore the size in this case by setting
+ // it to 0)
+ prevSize := size
+ const infinity = 1e6 // larger than any source line
+ size = p.nodeSize(x, infinity)
+ pair, isPair := x.(*ast.KeyValueExpr)
+ if size <= infinity && prev.IsValid() && next.IsValid() {
+ // x fits on a single line
+ if isPair {
+ size = p.nodeSize(pair.Key, infinity) // size <= infinity
+ }
+ } else {
+ // size too large or we don't have good layout information
+ size = 0
+ }
+
+ // if the previous line and the current line had single-
+ // line-expressions and the key sizes are small or the
+ // ratio between the key sizes does not exceed a
+ // threshold, align columns and do not use formfeed
+ if prevSize > 0 && size > 0 {
+ const smallSize = 20
+ if prevSize <= smallSize && size <= smallSize {
+ useFF = false
+ } else {
+ const r = 4 // threshold
+ ratio := float64(size) / float64(prevSize)
+ useFF = ratio <= 1.0/r || r <= ratio
+ }
+ }
+
+ if i > 0 {
+ needsLinebreak := prevLine < line && prevLine > 0 && line > 0
+ // use position of expression following the comma as
+ // comma position for correct comment placement, but
+ // only if the expression is on the same line
+ if !needsLinebreak {
+ p.print(x.Pos())
+ }
+ p.print(token.COMMA)
+ needsBlank := true
+ if needsLinebreak {
+ // lines are broken using newlines so comments remain aligned
+ // unless forceFF is set or there are multiple expressions on
+ // the same line in which case formfeed is used
+ if p.linebreak(line, 0, ws, useFF || prevBreak+1 < i) {
+ ws = ignore
+ prevBreak = i
+ needsBlank = false // we got a line break instead
+ }
+ }
+ if needsBlank {
+ p.print(blank)
+ }
+ }
+
+ if isPair && size > 0 && len(list) > 1 {
+ // we have a key:value expression that fits onto one line and
+ // is in a list with more than one entry: use a column for the
+ // key such that consecutive entries can align if possible
+ p.expr(pair.Key)
+ p.print(pair.Colon, token.COLON, vtab)
+ p.expr(pair.Value)
+ } else {
+ p.expr0(x, depth)
+ }
+ }
+
+ if mode&commaTerm != 0 && next.IsValid() && p.pos.Line < next.Line {
+ // print a terminating comma if the next token is on a new line
+ p.print(token.COMMA)
+ if ws == ignore && mode&noIndent == 0 {
+ // unindent if we indented
+ p.print(unindent)
+ }
+ p.print(formfeed) // terminating comma needs a line break to look good
+ return
+ }
+
+ if ws == ignore && mode&noIndent == 0 {
+ // unindent if we indented
+ p.print(unindent)
+ }
+}
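+
+// For example, a list printed in commaTerm mode receives a terminating
+// comma whenever the token following the list sits on a later line, so a
+// multi-line list keeps a trailing comma while a one-line list does not.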
+
+func (p *printer) parameters(fields *ast.FieldList) {
+ p.print(fields.Opening, token.LPAREN)
+ if len(fields.List) > 0 {
+ prevLine := p.lineFor(fields.Opening)
+ ws := indent
+ for i, par := range fields.List {
+ // determine par begin and end line (may be different
+ // if there are multiple parameter names for this par
+ // or the type is on a separate line)
+ var parLineBeg int
+ if len(par.Names) > 0 {
+ parLineBeg = p.lineFor(par.Names[0].Pos())
+ } else {
+ parLineBeg = p.lineFor(par.Type.Pos())
+ }
+ var parLineEnd = p.lineFor(par.Type.End())
+ // separating "," if needed
+ needsLinebreak := 0 < prevLine && prevLine < parLineBeg
+ if i > 0 {
+ // use position of parameter following the comma as
+ // comma position for correct comma placement, but
+ // only if the next parameter is on the same line
+ if !needsLinebreak {
+ p.print(par.Pos())
+ }
+ p.print(token.COMMA)
+ }
+ // separator if needed (linebreak or blank)
+ if needsLinebreak && p.linebreak(parLineBeg, 0, ws, true) {
+ // break line if the opening "(" or previous parameter ended on a different line
+ ws = ignore
+ } else if i > 0 {
+ p.print(blank)
+ }
+ // parameter names
+ if len(par.Names) > 0 {
+ // Very subtle: If we indented before (ws == ignore), identList
+ // won't indent again. If we didn't (ws == indent), identList will
+ // indent if the identList spans multiple lines, and it will outdent
+ // again at the end (and still ws == indent). Thus, a subsequent indent
+ // by a linebreak call after a type, or in the next multi-line identList
+ // will do the right thing.
+ p.identList(par.Names, ws == indent)
+ p.print(blank)
+ }
+ // parameter type
+ p.expr(stripParensAlways(par.Type))
+ prevLine = parLineEnd
+ }
+ // if the closing ")" is on a separate line from the last parameter,
+ // print an additional "," and line break
+ if closing := p.lineFor(fields.Closing); 0 < prevLine && prevLine < closing {
+ p.print(token.COMMA)
+ p.linebreak(closing, 0, ignore, true)
+ }
+ // unindent if we indented
+ if ws == ignore {
+ p.print(unindent)
+ }
+ }
+ p.print(fields.Closing, token.RPAREN)
+}
+
+func (p *printer) signature(params, result *ast.FieldList) {
+ if params != nil {
+ p.parameters(params)
+ } else {
+ p.print(token.LPAREN, token.RPAREN)
+ }
+ n := result.NumFields()
+ if n > 0 {
+ // result != nil
+ p.print(blank)
+ if n == 1 && result.List[0].Names == nil {
+ // single anonymous result; no ()'s
+ p.expr(stripParensAlways(result.List[0].Type))
+ return
+ }
+ p.parameters(result)
+ }
+}
+
+func identListSize(list []*ast.Ident, maxSize int) (size int) {
+ for i, x := range list {
+ if i > 0 {
+ size += len(", ")
+ }
+ size += utf8.RuneCountInString(x.Name)
+ if size >= maxSize {
+ break
+ }
+ }
+ return
+}
+
+func (p *printer) isOneLineFieldList(list []*ast.Field) bool {
+ if len(list) != 1 {
+ return false // allow only one field
+ }
+ f := list[0]
+ if f.Tag != nil || f.Comment != nil {
+ return false // don't allow tags or comments
+ }
+ // only name(s) and type
+ const maxSize = 30 // adjust as appropriate, this is an approximate value
+ namesSize := identListSize(f.Names, maxSize)
+ if namesSize > 0 {
+ namesSize = 1 // blank between names and types
+ }
+ typeSize := p.nodeSize(f.Type, maxSize)
+ return namesSize+typeSize <= maxSize
+}
+
+func (p *printer) setLineComment(text string) {
+ p.setComment(&ast.CommentGroup{List: []*ast.Comment{{Slash: token.NoPos, Text: text}}})
+}
+
+func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) {
+ lbrace := fields.Opening
+ list := fields.List
+ rbrace := fields.Closing
+ hasComments := isIncomplete || p.commentBefore(p.posFor(rbrace))
+ srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.lineFor(lbrace) == p.lineFor(rbrace)
+
+ if !hasComments && srcIsOneLine {
+ // possibly a one-line struct/interface
+ if len(list) == 0 {
+ // no blank between keyword and {} in this case
+ p.print(lbrace, token.LBRACE, rbrace, token.RBRACE)
+ return
+ } else if isStruct && p.isOneLineFieldList(list) { // for now ignore interfaces
+ // small enough - print on one line
+ // (don't use identList and ignore source line breaks)
+ p.print(lbrace, token.LBRACE, blank)
+ f := list[0]
+ for i, x := range f.Names {
+ if i > 0 {
+ // no comments so no need for comma position
+ p.print(token.COMMA, blank)
+ }
+ p.expr(x)
+ }
+ if len(f.Names) > 0 {
+ p.print(blank)
+ }
+ p.expr(f.Type)
+ p.print(blank, rbrace, token.RBRACE)
+ return
+ }
+ }
+ // hasComments || !srcIsOneLine
+
+ p.print(blank, lbrace, token.LBRACE, indent)
+ if hasComments || len(list) > 0 {
+ p.print(formfeed)
+ }
+
+ if isStruct {
+
+ sep := vtab
+ if len(list) == 1 {
+ sep = blank
+ }
+ var line int
+ for i, f := range list {
+ if i > 0 {
+ p.linebreak(p.lineFor(f.Pos()), 1, ignore, p.linesFrom(line) > 0)
+ }
+ extraTabs := 0
+ p.setComment(f.Doc)
+ p.recordLine(&line)
+ if len(f.Names) > 0 {
+ // named fields
+ p.identList(f.Names, false)
+ p.print(sep)
+ p.expr(f.Type)
+ extraTabs = 1
+ } else {
+ // anonymous field
+ p.expr(f.Type)
+ extraTabs = 2
+ }
+ if f.Tag != nil {
+ if len(f.Names) > 0 && sep == vtab {
+ p.print(sep)
+ }
+ p.print(sep)
+ p.expr(f.Tag)
+ extraTabs = 0
+ }
+ if f.Comment != nil {
+ for ; extraTabs > 0; extraTabs-- {
+ p.print(sep)
+ }
+ p.setComment(f.Comment)
+ }
+ }
+ if isIncomplete {
+ if len(list) > 0 {
+ p.print(formfeed)
+ }
+ p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment
+ p.setLineComment("// contains filtered or unexported fields")
+ }
+
+ } else { // interface
+
+ var line int
+ for i, f := range list {
+ if i > 0 {
+ p.linebreak(p.lineFor(f.Pos()), 1, ignore, p.linesFrom(line) > 0)
+ }
+ p.setComment(f.Doc)
+ p.recordLine(&line)
+ if ftyp, isFtyp := f.Type.(*ast.FuncType); isFtyp {
+ // method
+ p.expr(f.Names[0])
+ p.signature(ftyp.Params, ftyp.Results)
+ } else {
+ // embedded interface
+ p.expr(f.Type)
+ }
+ p.setComment(f.Comment)
+ }
+ if isIncomplete {
+ if len(list) > 0 {
+ p.print(formfeed)
+ }
+ p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment
+ p.setLineComment("// contains filtered or unexported methods")
+ }
+
+ }
+ p.print(unindent, formfeed, rbrace, token.RBRACE)
+}
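+
+// For example, "struct{}" is printed with no blank between keyword and
+// braces, a single small field as in "struct{ x, y int }" stays on one
+// line, and structs with tags, comments, or more than one field are
+// spread across multiple lines.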
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) {
+ switch e.Op.Precedence() {
+ case 4:
+ has4 = true
+ case 5:
+ has5 = true
+ }
+
+ switch l := e.X.(type) {
+ case *ast.BinaryExpr:
+ if l.Op.Precedence() < e.Op.Precedence() {
+ // parens will be inserted.
+ // pretend this is an *ast.ParenExpr and do nothing.
+ break
+ }
+ h4, h5, mp := walkBinary(l)
+ has4 = has4 || h4
+ has5 = has5 || h5
+ if maxProblem < mp {
+ maxProblem = mp
+ }
+ }
+
+ switch r := e.Y.(type) {
+ case *ast.BinaryExpr:
+ if r.Op.Precedence() <= e.Op.Precedence() {
+ // parens will be inserted.
+ // pretend this is an *ast.ParenExpr and do nothing.
+ break
+ }
+ h4, h5, mp := walkBinary(r)
+ has4 = has4 || h4
+ has5 = has5 || h5
+ if maxProblem < mp {
+ maxProblem = mp
+ }
+
+ case *ast.StarExpr:
+ if e.Op == token.QUO { // `*/`
+ maxProblem = 5
+ }
+
+ case *ast.UnaryExpr:
+ switch e.Op.String() + r.Op.String() {
+ case "/*", "&&", "&^":
+ maxProblem = 5
+ case "++", "--":
+ if maxProblem < 4 {
+ maxProblem = 4
+ }
+ }
+ }
+ return
+}
+
+func cutoff(e *ast.BinaryExpr, depth int) int {
+ has4, has5, maxProblem := walkBinary(e)
+ if maxProblem > 0 {
+ return maxProblem + 1
+ }
+ if has4 && has5 {
+ if depth == 1 {
+ return 5
+ }
+ return 4
+ }
+ if depth == 1 {
+ return 6
+ }
+ return 4
+}
+
+func diffPrec(expr ast.Expr, prec int) int {
+ x, ok := expr.(*ast.BinaryExpr)
+ if !ok || prec != x.Op.Precedence() {
+ return 1
+ }
+ return 0
+}
+
+func reduceDepth(depth int) int {
+ depth--
+ if depth < 1 {
+ depth = 1
+ }
+ return depth
+}
+
+// Format the binary expression: decide the cutoff and then format.
+// Let's call depth == 1 Normal mode, and depth > 1 Compact mode.
+// (Algorithm suggestion by Russ Cox.)
+//
+// The precedences are:
+// 5 * / % << >> & &^
+// 4 + - | ^
+// 3 == != < <= > >=
+// 2 &&
+// 1 ||
+//
+// The only decision is whether there will be spaces around levels 4 and 5.
+// There are never spaces at level 6 (unary), and always spaces at levels 3 and below.
+//
+// To choose the cutoff, look at the whole expression but excluding primary
+// expressions (function calls, parenthesized exprs), and apply these rules:
+//
+// 1) If there is a binary operator with a right side unary operand
+// that would clash without a space, the cutoff must be (in order):
+//
+// /* 6
+// && 6
+// &^ 6
+// ++ 5
+// -- 5
+//
+// (Comparison operators always have spaces around them.)
+//
+// 2) If there is a mix of level 5 and level 4 operators, then the cutoff
+// is 5 (use spaces to distinguish precedence) in Normal mode
+// and 4 (never use spaces) in Compact mode.
+//
+// 3) If there are no level 4 operators or no level 5 operators, then the
+// cutoff is 6 (always use spaces) in Normal mode
+// and 4 (never use spaces) in Compact mode.
+//
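+// For example (illustrative), in Normal mode these rules yield:
+//
+//	a + b*c    // mixed levels 4 and 5: cutoff 5, level-5 operators unspaced
+//	a + b - c  // level 4 only: cutoff 6, all operators spaced
+//	a * b / c  // level 5 only: cutoff 6, all operators spaced
+//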
+func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) {
+ prec := x.Op.Precedence()
+ if prec < prec1 {
+ // parenthesis needed
+ // Note: The parser inserts an ast.ParenExpr node; thus this case
+ // can only occur if the AST is created in a different way.
+ p.print(token.LPAREN)
+ p.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth
+ p.print(token.RPAREN)
+ return
+ }
+
+ printBlank := prec < cutoff
+
+ ws := indent
+ p.expr1(x.X, prec, depth+diffPrec(x.X, prec))
+ if printBlank {
+ p.print(blank)
+ }
+ xline := p.pos.Line // before the operator (it may be on the next line!)
+ yline := p.lineFor(x.Y.Pos())
+ p.print(x.OpPos, x.Op)
+ if xline != yline && xline > 0 && yline > 0 {
+ // at least one line break, but respect an extra empty line
+ // in the source
+ if p.linebreak(yline, 1, ws, true) {
+ ws = ignore
+ printBlank = false // no blank after line break
+ }
+ }
+ if printBlank {
+ p.print(blank)
+ }
+ p.expr1(x.Y, prec+1, depth+1)
+ if ws == ignore {
+ p.print(unindent)
+ }
+}
+
+func isBinary(expr ast.Expr) bool {
+ _, ok := expr.(*ast.BinaryExpr)
+ return ok
+}
+
+func (p *printer) expr1(expr ast.Expr, prec1, depth int) {
+ p.print(expr.Pos())
+
+ switch x := expr.(type) {
+ case *ast.BadExpr:
+ p.print("BadExpr")
+
+ case *ast.Ident:
+ p.print(x)
+
+ case *ast.BinaryExpr:
+ if depth < 1 {
+ p.internalError("depth < 1:", depth)
+ depth = 1
+ }
+ p.binaryExpr(x, prec1, cutoff(x, depth), depth)
+
+ case *ast.KeyValueExpr:
+ p.expr(x.Key)
+ p.print(x.Colon, token.COLON, blank)
+ p.expr(x.Value)
+
+ case *ast.StarExpr:
+ const prec = token.UnaryPrec
+ if prec < prec1 {
+ // parenthesis needed
+ p.print(token.LPAREN)
+ p.print(token.MUL)
+ p.expr(x.X)
+ p.print(token.RPAREN)
+ } else {
+ // no parenthesis needed
+ p.print(token.MUL)
+ p.expr(x.X)
+ }
+
+ case *ast.UnaryExpr:
+ const prec = token.UnaryPrec
+ if prec < prec1 {
+ // parenthesis needed
+ p.print(token.LPAREN)
+ p.expr(x)
+ p.print(token.RPAREN)
+ } else {
+ // no parenthesis needed
+ p.print(x.Op)
+ if x.Op == token.RANGE {
+ // TODO(gri) Remove this code if it cannot be reached.
+ p.print(blank)
+ }
+ p.expr1(x.X, prec, depth)
+ }
+
+ case *ast.BasicLit:
+ p.print(x)
+
+ case *ast.FuncLit:
+ p.expr(x.Type)
+ p.adjBlock(p.distanceFrom(x.Type.Pos()), blank, x.Body)
+
+ case *ast.ParenExpr:
+ if _, hasParens := x.X.(*ast.ParenExpr); hasParens {
+ // don't print parentheses around an already parenthesized expression
+ // TODO(gri) consider making this more general and incorporate precedence levels
+ p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth
+ } else {
+ p.print(token.LPAREN)
+ p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth
+ p.print(x.Rparen, token.RPAREN)
+ }
+
+ case *ast.SelectorExpr:
+ p.expr1(x.X, token.HighestPrec, depth)
+ p.print(token.PERIOD)
+ if line := p.lineFor(x.Sel.Pos()); p.pos.IsValid() && p.pos.Line < line {
+ p.print(indent, newline, x.Sel.Pos(), x.Sel, unindent)
+ } else {
+ p.print(x.Sel.Pos(), x.Sel)
+ }
+
+ case *ast.TypeAssertExpr:
+ p.expr1(x.X, token.HighestPrec, depth)
+ p.print(token.PERIOD, x.Lparen, token.LPAREN)
+ if x.Type != nil {
+ p.expr(x.Type)
+ } else {
+ p.print(token.TYPE)
+ }
+ p.print(x.Rparen, token.RPAREN)
+
+ case *ast.IndexExpr:
+ // TODO(gri): should treat [] like parentheses and undo one level of depth
+ p.expr1(x.X, token.HighestPrec, 1)
+ p.print(x.Lbrack, token.LBRACK)
+ p.expr0(x.Index, depth+1)
+ p.print(x.Rbrack, token.RBRACK)
+
+ case *ast.SliceExpr:
+ // TODO(gri): should treat [] like parentheses and undo one level of depth
+ p.expr1(x.X, token.HighestPrec, 1)
+ p.print(x.Lbrack, token.LBRACK)
+ indices := []ast.Expr{x.Low, x.High}
+ if x.Max != nil {
+ indices = append(indices, x.Max)
+ }
+ for i, y := range indices {
+ if i > 0 {
+ // blanks around ":" if both sides exist and either side is a binary expression
+ // TODO(gri) once we have committed a variant of a[i:j:k] we may want to fine-
+ // tune the formatting here
+ x := indices[i-1]
+ if depth <= 1 && x != nil && y != nil && (isBinary(x) || isBinary(y)) {
+ p.print(blank, token.COLON, blank)
+ } else {
+ p.print(token.COLON)
+ }
+ }
+ if y != nil {
+ p.expr0(y, depth+1)
+ }
+ }
+ p.print(x.Rbrack, token.RBRACK)
+
+ case *ast.CallExpr:
+ if len(x.Args) > 1 {
+ depth++
+ }
+ if _, ok := x.Fun.(*ast.FuncType); ok {
+ // conversions to literal function types require parentheses around the type
+ p.print(token.LPAREN)
+ p.expr1(x.Fun, token.HighestPrec, depth)
+ p.print(token.RPAREN)
+ } else {
+ p.expr1(x.Fun, token.HighestPrec, depth)
+ }
+ p.print(x.Lparen, token.LPAREN)
+ if x.Ellipsis.IsValid() {
+ p.exprList(x.Lparen, x.Args, depth, 0, x.Ellipsis)
+ p.print(x.Ellipsis, token.ELLIPSIS)
+ if x.Rparen.IsValid() && p.lineFor(x.Ellipsis) < p.lineFor(x.Rparen) {
+ p.print(token.COMMA, formfeed)
+ }
+ } else {
+ p.exprList(x.Lparen, x.Args, depth, commaTerm, x.Rparen)
+ }
+ p.print(x.Rparen, token.RPAREN)
+
+ case *ast.CompositeLit:
+ // composite literal elements that are composite literals themselves may have the type omitted
+ if x.Type != nil {
+ p.expr1(x.Type, token.HighestPrec, depth)
+ }
+ p.print(x.Lbrace, token.LBRACE)
+ p.exprList(x.Lbrace, x.Elts, 1, commaTerm, x.Rbrace)
+ // do not insert extra line break following a /*-style comment
+ // before the closing '}' as it might break the code if there
+ // is no trailing ','
+ mode := noExtraLinebreak
+ // do not insert extra blank following a /*-style comment
+ // before the closing '}' unless the literal is empty
+ if len(x.Elts) > 0 {
+ mode |= noExtraBlank
+ }
+ p.print(mode, x.Rbrace, token.RBRACE, mode)
+
+ case *ast.Ellipsis:
+ p.print(token.ELLIPSIS)
+ if x.Elt != nil {
+ p.expr(x.Elt)
+ }
+
+ case *ast.ArrayType:
+ p.print(token.LBRACK)
+ if x.Len != nil {
+ p.expr(x.Len)
+ }
+ p.print(token.RBRACK)
+ p.expr(x.Elt)
+
+ case *ast.StructType:
+ p.print(token.STRUCT)
+ p.fieldList(x.Fields, true, x.Incomplete)
+
+ case *ast.FuncType:
+ p.print(token.FUNC)
+ p.signature(x.Params, x.Results)
+
+ case *ast.InterfaceType:
+ p.print(token.INTERFACE)
+ p.fieldList(x.Methods, false, x.Incomplete)
+
+ case *ast.MapType:
+ p.print(token.MAP, token.LBRACK)
+ p.expr(x.Key)
+ p.print(token.RBRACK)
+ p.expr(x.Value)
+
+ case *ast.ChanType:
+ switch x.Dir {
+ case ast.SEND | ast.RECV:
+ p.print(token.CHAN)
+ case ast.RECV:
+ p.print(token.ARROW, token.CHAN) // x.Arrow and x.Pos() are the same
+ case ast.SEND:
+ p.print(token.CHAN, x.Arrow, token.ARROW)
+ }
+ p.print(blank)
+ p.expr(x.Value)
+
+ default:
+ panic("unreachable")
+ }
+
+ return
+}
+
+func (p *printer) expr0(x ast.Expr, depth int) {
+ p.expr1(x, token.LowestPrec, depth)
+}
+
+func (p *printer) expr(x ast.Expr) {
+ const depth = 1
+ p.expr1(x, token.LowestPrec, depth)
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+// Print the statement list indented, but without a newline after the last statement.
+// Extra line breaks between statements in the source are respected but at most one
+// empty line is printed between statements.
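+// For example (illustrative), three consecutive blank lines between two
+// statements in the source come out as a single blank line.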
+func (p *printer) stmtList(list []ast.Stmt, nindent int, nextIsRBrace bool) {
+ if nindent > 0 {
+ p.print(indent)
+ }
+ var line int
+ i := 0
+ for _, s := range list {
+ // ignore empty statements (was issue 3466)
+ if _, isEmpty := s.(*ast.EmptyStmt); !isEmpty {
+ // nindent == 0 only for lists of switch/select case clauses;
+ // in those cases each clause is a new section
+ if len(p.output) > 0 {
+ // only print line break if we are not at the beginning of the output
+ // (i.e., we are not printing only a partial program)
+ p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || nindent == 0 || p.linesFrom(line) > 0)
+ }
+ p.recordLine(&line)
+ p.stmt(s, nextIsRBrace && i == len(list)-1)
+ // labeled statements put labels on a separate line, but here
+ // we only care about the start line of the actual statement
+ // without label - correct line for each label
+ for t := s; ; {
+ lt, _ := t.(*ast.LabeledStmt)
+ if lt == nil {
+ break
+ }
+ line++
+ t = lt.Stmt
+ }
+ i++
+ }
+ }
+ if nindent > 0 {
+ p.print(unindent)
+ }
+}
+
+// block prints an *ast.BlockStmt; it always spans at least two lines.
+func (p *printer) block(b *ast.BlockStmt, nindent int) {
+ p.print(b.Lbrace, token.LBRACE)
+ p.stmtList(b.List, nindent, true)
+ p.linebreak(p.lineFor(b.Rbrace), 1, ignore, true)
+ p.print(b.Rbrace, token.RBRACE)
+}
+
+func isTypeName(x ast.Expr) bool {
+ switch t := x.(type) {
+ case *ast.Ident:
+ return true
+ case *ast.SelectorExpr:
+ return isTypeName(t.X)
+ }
+ return false
+}
+
+func stripParens(x ast.Expr) ast.Expr {
+ if px, strip := x.(*ast.ParenExpr); strip {
+ // parentheses must not be stripped if there are any
+ // unparenthesized composite literals starting with
+ // a type name
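+ // (for example, stripping the parentheses in
+ // "if (x == T{1, 2}) {" would let the parser take the
+ // literal's '{' as the start of the if body - illustrative)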
+ ast.Inspect(px.X, func(node ast.Node) bool {
+ switch x := node.(type) {
+ case *ast.ParenExpr:
+ // parentheses protect enclosed composite literals
+ return false
+ case *ast.CompositeLit:
+ if isTypeName(x.Type) {
+ strip = false // do not strip parentheses
+ }
+ return false
+ }
+ // in all other cases, keep inspecting
+ return true
+ })
+ if strip {
+ return stripParens(px.X)
+ }
+ }
+ return x
+}
+
+func stripParensAlways(x ast.Expr) ast.Expr {
+ if x, ok := x.(*ast.ParenExpr); ok {
+ return stripParensAlways(x.X)
+ }
+ return x
+}
+
+func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, post ast.Stmt) {
+ p.print(blank)
+ needsBlank := false
+ if init == nil && post == nil {
+ // no semicolons required
+ if expr != nil {
+ p.expr(stripParens(expr))
+ needsBlank = true
+ }
+ } else {
+ // all semicolons required
+ // (they are not separators, print them explicitly)
+ if init != nil {
+ p.stmt(init, false)
+ }
+ p.print(token.SEMICOLON, blank)
+ if expr != nil {
+ p.expr(stripParens(expr))
+ needsBlank = true
+ }
+ if isForStmt {
+ p.print(token.SEMICOLON, blank)
+ needsBlank = false
+ if post != nil {
+ p.stmt(post, false)
+ needsBlank = true
+ }
+ }
+ }
+ if needsBlank {
+ p.print(blank)
+ }
+}
+
+// indentList reports whether an expression list would look better if it
+// were indented wholesale (starting with the very first element, rather
+// than starting at the first line break).
+//
+func (p *printer) indentList(list []ast.Expr) bool {
+ // Heuristic: indentList returns true if there is more than one multi-
+ // line element in the list, or if any element does not start on the
+ // same line as the previous one ends.
+ if len(list) >= 2 {
+ var b = p.lineFor(list[0].Pos())
+ var e = p.lineFor(list[len(list)-1].End())
+ if 0 < b && b < e {
+ // list spans multiple lines
+ n := 0 // multi-line element count
+ line := b
+ for _, x := range list {
+ xb := p.lineFor(x.Pos())
+ xe := p.lineFor(x.End())
+ if line < xb {
+ // x is not starting on the same
+ // line as the previous one ended
+ return true
+ }
+ if xb < xe {
+ // x is a multi-line element
+ n++
+ }
+ line = xe
+ }
+ return n > 1
+ }
+ }
+ return false
+}
+
+func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool) {
+ p.print(stmt.Pos())
+
+ switch s := stmt.(type) {
+ case *ast.BadStmt:
+ p.print("BadStmt")
+
+ case *ast.DeclStmt:
+ p.decl(s.Decl)
+
+ case *ast.EmptyStmt:
+ // nothing to do
+
+ case *ast.LabeledStmt:
+ // a "correcting" unindent immediately following a line break
+ // is applied before the line break if there is no comment
+ // between (see writeWhitespace)
+ p.print(unindent)
+ p.expr(s.Label)
+ p.print(s.Colon, token.COLON, indent)
+ if e, isEmpty := s.Stmt.(*ast.EmptyStmt); isEmpty {
+ if !nextIsRBrace {
+ p.print(newline, e.Pos(), token.SEMICOLON)
+ break
+ }
+ } else {
+ p.linebreak(p.lineFor(s.Stmt.Pos()), 1, ignore, true)
+ }
+ p.stmt(s.Stmt, nextIsRBrace)
+
+ case *ast.ExprStmt:
+ const depth = 1
+ p.expr0(s.X, depth)
+
+ case *ast.SendStmt:
+ const depth = 1
+ p.expr0(s.Chan, depth)
+ p.print(blank, s.Arrow, token.ARROW, blank)
+ p.expr0(s.Value, depth)
+
+ case *ast.IncDecStmt:
+ const depth = 1
+ p.expr0(s.X, depth+1)
+ p.print(s.TokPos, s.Tok)
+
+ case *ast.AssignStmt:
+ var depth = 1
+ if len(s.Lhs) > 1 && len(s.Rhs) > 1 {
+ depth++
+ }
+ p.exprList(s.Pos(), s.Lhs, depth, 0, s.TokPos)
+ p.print(blank, s.TokPos, s.Tok, blank)
+ p.exprList(s.TokPos, s.Rhs, depth, 0, token.NoPos)
+
+ case *ast.GoStmt:
+ p.print(token.GO, blank)
+ p.expr(s.Call)
+
+ case *ast.DeferStmt:
+ p.print(token.DEFER, blank)
+ p.expr(s.Call)
+
+ case *ast.ReturnStmt:
+ p.print(token.RETURN)
+ if s.Results != nil {
+ p.print(blank)
+ // Use indentList heuristic to make corner cases look
+ // better (issue 1207). A more systematic approach would
+ // always indent, but this would cause significant
+ // reformatting of the code base and not necessarily
+ // lead to more nicely formatted code in general.
+ if p.indentList(s.Results) {
+ p.print(indent)
+ p.exprList(s.Pos(), s.Results, 1, noIndent, token.NoPos)
+ p.print(unindent)
+ } else {
+ p.exprList(s.Pos(), s.Results, 1, 0, token.NoPos)
+ }
+ }
+
+ case *ast.BranchStmt:
+ p.print(s.Tok)
+ if s.Label != nil {
+ p.print(blank)
+ p.expr(s.Label)
+ }
+
+ case *ast.BlockStmt:
+ p.block(s, 1)
+
+ case *ast.IfStmt:
+ p.print(token.IF)
+ p.controlClause(false, s.Init, s.Cond, nil)
+ p.block(s.Body, 1)
+ if s.Else != nil {
+ p.print(blank, token.ELSE, blank)
+ switch s.Else.(type) {
+ case *ast.BlockStmt, *ast.IfStmt:
+ p.stmt(s.Else, nextIsRBrace)
+ default:
+ p.print(token.LBRACE, indent, formfeed)
+ p.stmt(s.Else, true)
+ p.print(unindent, formfeed, token.RBRACE)
+ }
+ }
+
+ case *ast.CaseClause:
+ if s.List != nil {
+ p.print(token.CASE, blank)
+ p.exprList(s.Pos(), s.List, 1, 0, s.Colon)
+ } else {
+ p.print(token.DEFAULT)
+ }
+ p.print(s.Colon, token.COLON)
+ p.stmtList(s.Body, 1, nextIsRBrace)
+
+ case *ast.SwitchStmt:
+ p.print(token.SWITCH)
+ p.controlClause(false, s.Init, s.Tag, nil)
+ p.block(s.Body, 0)
+
+ case *ast.TypeSwitchStmt:
+ p.print(token.SWITCH)
+ if s.Init != nil {
+ p.print(blank)
+ p.stmt(s.Init, false)
+ p.print(token.SEMICOLON)
+ }
+ p.print(blank)
+ p.stmt(s.Assign, false)
+ p.print(blank)
+ p.block(s.Body, 0)
+
+ case *ast.CommClause:
+ if s.Comm != nil {
+ p.print(token.CASE, blank)
+ p.stmt(s.Comm, false)
+ } else {
+ p.print(token.DEFAULT)
+ }
+ p.print(s.Colon, token.COLON)
+ p.stmtList(s.Body, 1, nextIsRBrace)
+
+ case *ast.SelectStmt:
+ p.print(token.SELECT, blank)
+ body := s.Body
+ if len(body.List) == 0 && !p.commentBefore(p.posFor(body.Rbrace)) {
+ // print empty select statement w/o comments on one line
+ p.print(body.Lbrace, token.LBRACE, body.Rbrace, token.RBRACE)
+ } else {
+ p.block(body, 0)
+ }
+
+ case *ast.ForStmt:
+ p.print(token.FOR)
+ p.controlClause(true, s.Init, s.Cond, s.Post)
+ p.block(s.Body, 1)
+
+ case *ast.RangeStmt:
+ p.print(token.FOR, blank)
+ if s.Key != nil {
+ p.expr(s.Key)
+ if s.Value != nil {
+ // use position of value following the comma as
+ // comma position for correct comment placement
+ p.print(s.Value.Pos(), token.COMMA, blank)
+ p.expr(s.Value)
+ }
+ p.print(blank, s.TokPos, s.Tok, blank)
+ }
+ p.print(token.RANGE, blank)
+ p.expr(stripParens(s.X))
+ p.print(blank)
+ p.block(s.Body, 1)
+
+ default:
+ panic("unreachable")
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// The keepTypeColumn function determines if the type column of a series of
+// consecutive const or var declarations must be kept, or if initialization
+// values (V) can be placed in the type column (T) instead. The i'th entry
+// in the result slice is true if the type column in spec[i] must be kept.
+//
+// For example, the declaration:
+//
+//	const (
+//		foobar int = 42 // comment
+//		x          = 7  // comment
+//		foo
+//		bar = 991
+//	)
+//
+// leads to the type/values matrix below. A run of value columns (V) can
+// be moved into the type column if there is no type for any of the values
+// in that column (we only move entire columns so that they align properly).
+//
+//	matrix        formatted     result
+//	              matrix
+//	T  V    ->    T  V    ->    true      there is a T and so the type
+//	-  V          -  V          true      column must be kept
+//	-  -          -  -          false
+//	-  V          V  -          false     V is moved into T column
+//
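+// For the declaration above, the result is [true, true, false, false]
+// (illustrative): the run {foobar, x} contains a typed spec and keeps
+// its type column, foo has no value, and the run {bar} has no type.
+//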
+func keepTypeColumn(specs []ast.Spec) []bool {
+ m := make([]bool, len(specs))
+
+ populate := func(i, j int, keepType bool) {
+ if keepType {
+ for ; i < j; i++ {
+ m[i] = true
+ }
+ }
+ }
+
+ i0 := -1 // if i0 >= 0 we are in a run and i0 is the start of the run
+ var keepType bool
+ for i, s := range specs {
+ t := s.(*ast.ValueSpec)
+ if t.Values != nil {
+ if i0 < 0 {
+ // start of a run of ValueSpecs with non-nil Values
+ i0 = i
+ keepType = false
+ }
+ } else {
+ if i0 >= 0 {
+ // end of a run
+ populate(i0, i, keepType)
+ i0 = -1
+ }
+ }
+ if t.Type != nil {
+ keepType = true
+ }
+ }
+ if i0 >= 0 {
+ // end of a run
+ populate(i0, len(specs), keepType)
+ }
+
+ return m
+}
+
+func (p *printer) valueSpec(s *ast.ValueSpec, keepType bool) {
+ p.setComment(s.Doc)
+ p.identList(s.Names, false) // always present
+ extraTabs := 3
+ if s.Type != nil || keepType {
+ p.print(vtab)
+ extraTabs--
+ }
+ if s.Type != nil {
+ p.expr(s.Type)
+ }
+ if s.Values != nil {
+ p.print(vtab, token.ASSIGN, blank)
+ p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos)
+ extraTabs--
+ }
+ if s.Comment != nil {
+ for ; extraTabs > 0; extraTabs-- {
+ p.print(vtab)
+ }
+ p.setComment(s.Comment)
+ }
+}
+
+// The parameter n is the number of specs in the group. If doIndent is set,
+// multi-line identifier lists in the spec are indented when the first
+// linebreak is encountered.
+//
+func (p *printer) spec(spec ast.Spec, n int, doIndent bool) {
+ switch s := spec.(type) {
+ case *ast.ImportSpec:
+ p.setComment(s.Doc)
+ if s.Name != nil {
+ p.expr(s.Name)
+ p.print(blank)
+ }
+ p.expr(s.Path)
+ p.setComment(s.Comment)
+ p.print(s.EndPos)
+
+ case *ast.ValueSpec:
+ if n != 1 {
+ p.internalError("expected n = 1; got", n)
+ }
+ p.setComment(s.Doc)
+ p.identList(s.Names, doIndent) // always present
+ if s.Type != nil {
+ p.print(blank)
+ p.expr(s.Type)
+ }
+ if s.Values != nil {
+ p.print(blank, token.ASSIGN, blank)
+ p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos)
+ }
+ p.setComment(s.Comment)
+
+ case *ast.TypeSpec:
+ p.setComment(s.Doc)
+ p.expr(s.Name)
+ if n == 1 {
+ p.print(blank)
+ } else {
+ p.print(vtab)
+ }
+ p.expr(s.Type)
+ p.setComment(s.Comment)
+
+ default:
+ panic("unreachable")
+ }
+}
+
+func (p *printer) genDecl(d *ast.GenDecl) {
+ p.setComment(d.Doc)
+ p.print(d.Pos(), d.Tok, blank)
+
+ if d.Lparen.IsValid() {
+ // group of parenthesized declarations
+ p.print(d.Lparen, token.LPAREN)
+ if n := len(d.Specs); n > 0 {
+ p.print(indent, formfeed)
+ if n > 1 && (d.Tok == token.CONST || d.Tok == token.VAR) {
+ // two or more grouped const/var declarations:
+ // determine if the type column must be kept
+ keepType := keepTypeColumn(d.Specs)
+ var line int
+ for i, s := range d.Specs {
+ if i > 0 {
+ p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0)
+ }
+ p.recordLine(&line)
+ p.valueSpec(s.(*ast.ValueSpec), keepType[i])
+ }
+ } else {
+ var line int
+ for i, s := range d.Specs {
+ if i > 0 {
+ p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0)
+ }
+ p.recordLine(&line)
+ p.spec(s, n, false)
+ }
+ }
+ p.print(unindent, formfeed)
+ }
+ p.print(d.Rparen, token.RPAREN)
+
+ } else {
+ // single declaration
+ p.spec(d.Specs[0], 1, true)
+ }
+}
+
+// nodeSize determines the size of n in chars after formatting.
+// The result is <= maxSize if the node fits on one line with at
+// most maxSize chars and the formatted output doesn't contain
+// any control chars. Otherwise, the result is > maxSize.
+//
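+// For example (illustrative), a node that formats as "x + y" has size 5
+// for any maxSize >= 5, and size maxSize+1 otherwise.
+//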
+func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
+ // nodeSize invokes the printer, which may invoke nodeSize
+ // recursively. For deep composite literal nests, this can
+ // lead to an exponential algorithm. Remember previous
+ // results to prune the recursion (was issue 1628).
+ if size, found := p.nodeSizes[n]; found {
+ return size
+ }
+
+ size = maxSize + 1 // assume n doesn't fit
+ p.nodeSizes[n] = size
+
+ // nodeSize computation must be independent of particular
+ // style so that we always get the same decision; print
+ // in RawFormat
+ cfg := Config{Mode: RawFormat}
+ var buf bytes.Buffer
+ if err := cfg.fprint(&buf, p.fset, n, p.nodeSizes); err != nil {
+ return
+ }
+ if buf.Len() <= maxSize {
+ for _, ch := range buf.Bytes() {
+ if ch < ' ' {
+ return
+ }
+ }
+ size = buf.Len() // n fits
+ p.nodeSizes[n] = size
+ }
+ return
+}
+
+// bodySize is like nodeSize but it is specialized for *ast.BlockStmt's.
+func (p *printer) bodySize(b *ast.BlockStmt, maxSize int) int {
+ pos1 := b.Pos()
+ pos2 := b.Rbrace
+ if pos1.IsValid() && pos2.IsValid() && p.lineFor(pos1) != p.lineFor(pos2) {
+ // opening and closing brace are on different lines - don't make it a one-liner
+ return maxSize + 1
+ }
+ if len(b.List) > 5 {
+ // too many statements - don't make it a one-liner
+ return maxSize + 1
+ }
+ // otherwise, estimate body size
+ bodySize := p.commentSizeBefore(p.posFor(pos2))
+ for i, s := range b.List {
+ if bodySize > maxSize {
+ break // no need to continue
+ }
+ if i > 0 {
+ bodySize += 2 // space for a semicolon and blank
+ }
+ bodySize += p.nodeSize(s, maxSize)
+ }
+ return bodySize
+}
+
+// adjBlock prints an "adjacent" block (e.g., a for-loop or function body) following
+// a header (e.g., a for-loop control clause or function signature) of given headerSize.
+// If the header's and block's size are "small enough" and the block is "simple enough",
+// the block is printed on the current line, without line breaks, spaced from the header
+// by sep. Otherwise the block's opening "{" is printed on the current line, followed by
+// lines for the block's statements and its closing "}".
+//
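+// For example (illustrative), a short function written on one source line,
+// such as
+//
+//	func isZero(x int) bool { return x == 0 }
+//
+// is kept on a single line, while a body whose braces are on different
+// source lines is never collapsed into one.
+//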
+func (p *printer) adjBlock(headerSize int, sep whiteSpace, b *ast.BlockStmt) {
+ if b == nil {
+ return
+ }
+
+ const maxSize = 100
+ if headerSize+p.bodySize(b, maxSize) <= maxSize {
+ p.print(sep, b.Lbrace, token.LBRACE)
+ if len(b.List) > 0 {
+ p.print(blank)
+ for i, s := range b.List {
+ if i > 0 {
+ p.print(token.SEMICOLON, blank)
+ }
+ p.stmt(s, i == len(b.List)-1)
+ }
+ p.print(blank)
+ }
+ p.print(noExtraLinebreak, b.Rbrace, token.RBRACE, noExtraLinebreak)
+ return
+ }
+
+ if sep != ignore {
+ p.print(blank) // always use blank
+ }
+ p.block(b, 1)
+}
+
+// distanceFrom returns the column difference between from and p.pos (the current
+// estimated position) if both are on the same line; if they are on different lines
+// (or unknown) the result is infinity.
+func (p *printer) distanceFrom(from token.Pos) int {
+ if from.IsValid() && p.pos.IsValid() {
+ if f := p.posFor(from); f.Line == p.pos.Line {
+ return p.pos.Column - f.Column
+ }
+ }
+ return infinity
+}
+
+func (p *printer) funcDecl(d *ast.FuncDecl) {
+ p.setComment(d.Doc)
+ p.print(d.Pos(), token.FUNC, blank)
+ if d.Recv != nil {
+ p.parameters(d.Recv) // method: print receiver
+ p.print(blank)
+ }
+ p.expr(d.Name)
+ p.signature(d.Type.Params, d.Type.Results)
+ p.adjBlock(p.distanceFrom(d.Pos()), vtab, d.Body)
+}
+
+func (p *printer) decl(decl ast.Decl) {
+ switch d := decl.(type) {
+ case *ast.BadDecl:
+ p.print(d.Pos(), "BadDecl")
+ case *ast.GenDecl:
+ p.genDecl(d)
+ case *ast.FuncDecl:
+ p.funcDecl(d)
+ default:
+ panic("unreachable")
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Files
+
+func declToken(decl ast.Decl) (tok token.Token) {
+ tok = token.ILLEGAL
+ switch d := decl.(type) {
+ case *ast.GenDecl:
+ tok = d.Tok
+ case *ast.FuncDecl:
+ tok = token.FUNC
+ }
+ return
+}
+
+func (p *printer) declList(list []ast.Decl) {
+ tok := token.ILLEGAL
+ for _, d := range list {
+ prev := tok
+ tok = declToken(d)
+ // If the declaration token changed (e.g., from CONST to TYPE)
+ // or the next declaration has documentation associated with it,
+ // print an empty line between top-level declarations.
+ // (because p.linebreak is called with the position of d, which
+ // is past any documentation, the minimum requirement is satisfied
+ // even w/o the extra getDoc(d) nil-check - leave it in case the
+ // linebreak logic improves - there's already a TODO).
+ if len(p.output) > 0 {
+ // only print line break if we are not at the beginning of the output
+ // (i.e., we are not printing only a partial program)
+ min := 1
+ if prev != tok || getDoc(d) != nil {
+ min = 2
+ }
+ p.linebreak(p.lineFor(d.Pos()), min, ignore, false)
+ }
+ p.decl(d)
+ }
+}
+
+func (p *printer) file(src *ast.File) {
+ p.setComment(src.Doc)
+ p.print(src.Pos(), token.PACKAGE, blank)
+ p.expr(src.Name)
+ p.declList(src.Decls)
+ p.print(newline)
+}
diff --git a/src/go/printer/performance_test.go b/src/go/printer/performance_test.go
new file mode 100644
index 000000000..5b29affcb
--- /dev/null
+++ b/src/go/printer/performance_test.go
@@ -0,0 +1,58 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a simple printer performance benchmark:
+// go test -bench=BenchmarkPrint
+
+package printer
+
+import (
+ "bytes"
+ "go/ast"
+ "go/parser"
+ "io"
+ "io/ioutil"
+ "log"
+ "testing"
+)
+
+var testfile *ast.File
+
+func testprint(out io.Writer, file *ast.File) {
+ if err := (&Config{TabIndent | UseSpaces, 8, 0}).Fprint(out, fset, file); err != nil {
+ log.Fatalf("print error: %s", err)
+ }
+}
+
+// cannot initialize in init because (printer) Fprint launches goroutines.
+func initialize() {
+ const filename = "testdata/parser.go"
+
+ src, err := ioutil.ReadFile(filename)
+ if err != nil {
+ log.Fatalf("%s", err)
+ }
+
+ file, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
+ if err != nil {
+ log.Fatalf("%s", err)
+ }
+
+ var buf bytes.Buffer
+ testprint(&buf, file)
+ if !bytes.Equal(buf.Bytes(), src) {
+ log.Fatalf("print error: %s not idempotent", filename)
+ }
+
+ testfile = file
+}
+
+func BenchmarkPrint(b *testing.B) {
+ if testfile == nil {
+ initialize()
+ }
+ for i := 0; i < b.N; i++ {
+ testprint(ioutil.Discard, testfile)
+ }
+}
diff --git a/src/go/printer/printer.go b/src/go/printer/printer.go
new file mode 100644
index 000000000..280c697a0
--- /dev/null
+++ b/src/go/printer/printer.go
@@ -0,0 +1,1292 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package printer implements printing of AST nodes.
+package printer
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "text/tabwriter"
+ "unicode"
+)
+
+const (
+ maxNewlines = 2 // max. number of newlines between source text
+ debug = false // enable for debugging
+ infinity = 1 << 30
+)
+
+type whiteSpace byte
+
+const (
+ ignore = whiteSpace(0)
+ blank = whiteSpace(' ')
+ vtab = whiteSpace('\v')
+ newline = whiteSpace('\n')
+ formfeed = whiteSpace('\f')
+ indent = whiteSpace('>')
+ unindent = whiteSpace('<')
+)
+
+// A pmode value represents the current printer mode.
+type pmode int
+
+const (
+ noExtraBlank pmode = 1 << iota // disables extra blank after /*-style comment
+ noExtraLinebreak // disables extra line break after /*-style comment
+)
+
+type commentInfo struct {
+ cindex int // current comment index
+ comment *ast.CommentGroup // = printer.comments[cindex]; or nil
+ commentOffset int // = printer.posFor(printer.comments[cindex].List[0].Pos()).Offset; or infinity
+ commentNewline bool // true if the comment group contains newlines
+}
+
+type printer struct {
+ // Configuration (does not change after initialization)
+ Config
+ fset *token.FileSet
+
+ // Current state
+ output []byte // raw printer result
+ indent int // current indentation
+ mode pmode // current printer mode
+ impliedSemi bool // if set, a linebreak implies a semicolon
+ lastTok token.Token // last token printed (token.ILLEGAL if it's whitespace)
+ prevOpen token.Token // previous non-brace "open" token (, [, or token.ILLEGAL
+ wsbuf []whiteSpace // delayed white space
+
+ // Positions
+ // The out position differs from the pos position when the result
+ // formatting differs from the source formatting (in the amount of
+ // white space). If there's a difference and SourcePos is set in
+ // ConfigMode, //line comments are used in the output to restore
+ // original source positions for a reader.
+ pos token.Position // current position in AST (source) space
+ out token.Position // current position in output space
+ last token.Position // value of pos after calling writeString
+ linePtr *int // if set, record out.Line for the next token in *linePtr
+
+ // The list of all source comments, in order of appearance.
+ comments []*ast.CommentGroup // may be nil
+ useNodeComments bool // if not set, ignore lead and line comments of nodes
+
+ // Information about p.comments[p.cindex]; set up by nextComment.
+ commentInfo
+
+ // Cache of already computed node sizes.
+ nodeSizes map[ast.Node]int
+
+ // Cache of most recently computed line position.
+ cachedPos token.Pos
+ cachedLine int // line corresponding to cachedPos
+}
+
+func (p *printer) init(cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) {
+ p.Config = *cfg
+ p.fset = fset
+ p.pos = token.Position{Line: 1, Column: 1}
+ p.out = token.Position{Line: 1, Column: 1}
+ p.wsbuf = make([]whiteSpace, 0, 16) // whitespace sequences are short
+ p.nodeSizes = nodeSizes
+ p.cachedPos = -1
+}
+
+func (p *printer) internalError(msg ...interface{}) {
+ if debug {
+ fmt.Print(p.pos.String() + ": ")
+ fmt.Println(msg...)
+ panic("go/printer")
+ }
+}
+
+// commentsHaveNewline reports whether a list of comments belonging to
+// an *ast.CommentGroup contains newlines. Because the position information
+// may only be partially correct, we also have to read the comment text.
+func (p *printer) commentsHaveNewline(list []*ast.Comment) bool {
+ // len(list) > 0
+ line := p.lineFor(list[0].Pos())
+ for i, c := range list {
+ if i > 0 && p.lineFor(list[i].Pos()) != line {
+ // not all comments on the same line
+ return true
+ }
+ if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) {
+ return true
+ }
+ }
+ _ = line
+ return false
+}
+
+func (p *printer) nextComment() {
+ for p.cindex < len(p.comments) {
+ c := p.comments[p.cindex]
+ p.cindex++
+ if list := c.List; len(list) > 0 {
+ p.comment = c
+ p.commentOffset = p.posFor(list[0].Pos()).Offset
+ p.commentNewline = p.commentsHaveNewline(list)
+ return
+ }
+ // we should not reach here (correct ASTs don't have empty
+ // ast.CommentGroup nodes), but be conservative and try again
+ }
+ // no more comments
+ p.commentOffset = infinity
+}
+
+// commentBefore returns true iff the current comment group occurs
+// before the next position in the source code and printing it does
+// not introduce implicit semicolons.
+//
+func (p *printer) commentBefore(next token.Position) bool {
+ return p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline)
+}
+
+// commentSizeBefore returns the estimated size of the
+// comments on the same line before the next position.
+//
+func (p *printer) commentSizeBefore(next token.Position) int {
+ // save/restore current p.commentInfo (p.nextComment() modifies it)
+ defer func(info commentInfo) {
+ p.commentInfo = info
+ }(p.commentInfo)
+
+ size := 0
+ for p.commentBefore(next) {
+ for _, c := range p.comment.List {
+ size += len(c.Text)
+ }
+ p.nextComment()
+ }
+ return size
+}
+
+// recordLine records the output line number for the next non-whitespace
+// token in *linePtr. It is used to compute an accurate line number for a
+// formatted construct, independent of pending (not yet emitted) whitespace
+// or comments.
+//
+func (p *printer) recordLine(linePtr *int) {
+ p.linePtr = linePtr
+}
+
+// linesFrom returns the number of output lines between the current
+// output line and the line argument, ignoring any pending (not yet
+// emitted) whitespace or comments. It is used to compute an accurate
+// size (in number of lines) for a formatted construct.
+//
+func (p *printer) linesFrom(line int) int {
+ return p.out.Line - line
+}
+
+func (p *printer) posFor(pos token.Pos) token.Position {
+ // not used frequently enough to cache entire token.Position
+ return p.fset.Position(pos)
+}
+
+func (p *printer) lineFor(pos token.Pos) int {
+ if pos != p.cachedPos {
+ p.cachedPos = pos
+ p.cachedLine = p.fset.Position(pos).Line
+ }
+ return p.cachedLine
+}
+
+// atLineBegin emits a //line comment if necessary and prints indentation.
+func (p *printer) atLineBegin(pos token.Position) {
+ // write a //line comment if necessary
+ if p.Config.Mode&SourcePos != 0 && pos.IsValid() && (p.out.Line != pos.Line || p.out.Filename != pos.Filename) {
+ p.output = append(p.output, tabwriter.Escape) // protect '\n' in //line from tabwriter interpretation
+ p.output = append(p.output, fmt.Sprintf("//line %s:%d\n", pos.Filename, pos.Line)...)
+ p.output = append(p.output, tabwriter.Escape)
+ // p.out must match the //line comment
+ p.out.Filename = pos.Filename
+ p.out.Line = pos.Line
+ }
+
+ // write indentation
+ // use "hard" htabs - indentation columns
+ // must not be discarded by the tabwriter
+ n := p.Config.Indent + p.indent // include base indentation
+ for i := 0; i < n; i++ {
+ p.output = append(p.output, '\t')
+ }
+
+ // update positions
+ p.pos.Offset += n
+ p.pos.Column += n
+ p.out.Column += n
+}
+
+// writeByte writes ch n times to p.output and updates p.pos.
+func (p *printer) writeByte(ch byte, n int) {
+ if p.out.Column == 1 {
+ p.atLineBegin(p.pos)
+ }
+
+ for i := 0; i < n; i++ {
+ p.output = append(p.output, ch)
+ }
+
+ // update positions
+ p.pos.Offset += n
+ if ch == '\n' || ch == '\f' {
+ p.pos.Line += n
+ p.out.Line += n
+ p.pos.Column = 1
+ p.out.Column = 1
+ return
+ }
+ p.pos.Column += n
+ p.out.Column += n
+}
+
+// writeString writes the string s to p.output and updates p.pos, p.out,
+// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters
+// to protect s from being interpreted by the tabwriter.
+//
+// Note: writeString is only used to write Go tokens, literals, and
+// comments, all of which must be written literally. Thus, it is correct
+// to always set isLit = true. However, setting it explicitly only when
+// needed (i.e., when we don't know that s contains no tabs or line breaks)
+// avoids processing extra escape characters and reduces run time of the
+// printer benchmark by up to 10%.
+//
+func (p *printer) writeString(pos token.Position, s string, isLit bool) {
+ if p.out.Column == 1 {
+ p.atLineBegin(pos)
+ }
+
+ if pos.IsValid() {
+ // update p.pos (if pos is invalid, continue with existing p.pos)
+ // Note: Must do this after handling line beginnings because
+ // atLineBegin updates p.pos if there's indentation, but p.pos
+ // is the position of s.
+ p.pos = pos
+ }
+
+ if isLit {
+ // Protect s such that it passes through the tabwriter
+ // unchanged. Note that valid Go programs cannot contain
+ // tabwriter.Escape bytes since they do not appear in legal
+ // UTF-8 sequences.
+ p.output = append(p.output, tabwriter.Escape)
+ }
+
+ if debug {
+ p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos!
+ }
+ p.output = append(p.output, s...)
+
+ // update positions
+ nlines := 0
+ var li int // index of last newline; valid if nlines > 0
+ for i := 0; i < len(s); i++ {
+ // Go tokens cannot contain '\f' - no need to look for it
+ if s[i] == '\n' {
+ nlines++
+ li = i
+ }
+ }
+ p.pos.Offset += len(s)
+ if nlines > 0 {
+ p.pos.Line += nlines
+ p.out.Line += nlines
+ c := len(s) - li
+ p.pos.Column = c
+ p.out.Column = c
+ } else {
+ p.pos.Column += len(s)
+ p.out.Column += len(s)
+ }
+
+ if isLit {
+ p.output = append(p.output, tabwriter.Escape)
+ }
+
+ p.last = p.pos
+}
+
+// writeCommentPrefix writes the whitespace before a comment.
+// If there is any pending whitespace, it consumes as much of
+// it as is likely to help position the comment nicely.
+// pos is the comment position, next the position of the item
+// after all pending comments, prev is the previous comment in
+// a group of comments (or nil), and tok is the next token.
+//
+func (p *printer) writeCommentPrefix(pos, next token.Position, prev, comment *ast.Comment, tok token.Token) {
+ if len(p.output) == 0 {
+ // the comment is the first item to be printed - don't write any whitespace
+ return
+ }
+
+ if pos.IsValid() && pos.Filename != p.last.Filename {
+ // comment in a different file - separate with newlines
+ p.writeByte('\f', maxNewlines)
+ return
+ }
+
+ if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') {
+ // comment on the same line as last item:
+ // separate with at least one separator
+ hasSep := false
+ if prev == nil {
+ // first comment of a comment group
+ j := 0
+ for i, ch := range p.wsbuf {
+ switch ch {
+ case blank:
+ // ignore any blanks before a comment
+ p.wsbuf[i] = ignore
+ continue
+ case vtab:
+ // respect existing tabs - important
+ // for proper formatting of commented structs
+ hasSep = true
+ continue
+ case indent:
+ // apply pending indentation
+ continue
+ }
+ j = i
+ break
+ }
+ p.writeWhitespace(j)
+ }
+ // make sure there is at least one separator
+ if !hasSep {
+ sep := byte('\t')
+ if pos.Line == next.Line {
+ // next item is on the same line as the comment
+ // (which must be a /*-style comment): separate
+ // with a blank instead of a tab
+ sep = ' '
+ }
+ p.writeByte(sep, 1)
+ }
+
+ } else {
+ // comment on a different line:
+ // separate with at least one line break
+ droppedLinebreak := false
+ j := 0
+ for i, ch := range p.wsbuf {
+ switch ch {
+ case blank, vtab:
+ // ignore any horizontal whitespace before line breaks
+ p.wsbuf[i] = ignore
+ continue
+ case indent:
+ // apply pending indentation
+ continue
+ case unindent:
+ // if this is not the last unindent, apply it
+ // as it is (likely) belonging to the last
+ // construct (e.g., a multi-line expression list)
+ // and is not part of closing a block
+ if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent {
+ continue
+ }
+ // if the next token is not a closing }, apply the unindent
+ // if it appears that the comment is aligned with the
+ // token; otherwise assume the unindent is part of a
+ // closing block and stop (this scenario appears with
+ // comments before a case label where the comments
+ // apply to the next case instead of the current one)
+ if tok != token.RBRACE && pos.Column == next.Column {
+ continue
+ }
+ case newline, formfeed:
+ p.wsbuf[i] = ignore
+ droppedLinebreak = prev == nil // record only if first comment of a group
+ }
+ j = i
+ break
+ }
+ p.writeWhitespace(j)
+
+ // determine number of linebreaks before the comment
+ n := 0
+ if pos.IsValid() && p.last.IsValid() {
+ n = pos.Line - p.last.Line
+ if n < 0 { // should never happen
+ n = 0
+ }
+ }
+
+ // at the package scope level only (p.indent == 0),
+ // add an extra newline if we dropped one before:
+ // this preserves a blank line before documentation
+ // comments at the package scope level (issue 2570)
+ if p.indent == 0 && droppedLinebreak {
+ n++
+ }
+
+ // make sure there is at least one line break
+ // if the previous comment was a line comment
+ if n == 0 && prev != nil && prev.Text[1] == '/' {
+ n = 1
+ }
+
+ if n > 0 {
+ // use formfeeds to break columns before a comment;
+ // this is analogous to using formfeeds to separate
+ // individual lines of /*-style comments
+ p.writeByte('\f', nlimit(n))
+ }
+ }
+}
+
+// Returns true if s contains only white space
+// (only tabs and blanks can appear in the printer's context).
+//
+func isBlank(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] > ' ' {
+ return false
+ }
+ }
+ return true
+}
+
+// commonPrefix returns the common prefix of a and b.
+func commonPrefix(a, b string) string {
+ i := 0
+ for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') {
+ i++
+ }
+ return a[0:i]
+}
+
+// trimRight returns s with trailing whitespace removed.
+func trimRight(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// stripCommonPrefix removes a common prefix from /*-style comment lines (unless no
+// comment line is indented, all but the first line have some form of space prefix).
+// The prefix is computed using heuristics such that it is likely that the comment
+// contents are nicely laid out after re-printing each line using the printer's
+// current indentation.
+//
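+// For example (illustrative), in a comment of the form
+//
+//	/*
+//	 * text
+//	 */
+//
+// the computed prefix contains a '*', so the line-of-stars case applies
+// and the stars, including the one in the closing */, stay aligned.
+//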
+func stripCommonPrefix(lines []string) {
+ if len(lines) <= 1 {
+ return // at most one line - nothing to do
+ }
+ // len(lines) > 1
+
+ // The heuristic in this function tries to handle a few
+ // common patterns of /*-style comments: Comments where
+ // the opening /* and closing */ are aligned and the
+ // rest of the comment text is aligned and indented with
+ // blanks or tabs, cases with a vertical "line of stars"
+ // on the left, and cases where the closing */ is on the
+ // same line as the last comment text.
+
+ // Compute maximum common white prefix of all but the first,
+ // last, and blank lines, and replace blank lines with empty
+ // lines (the first line starts with /* and has no prefix).
+ // In case of two-line comments, consider the last line for
+ // the prefix computation since otherwise the prefix would
+ // be empty.
+ //
+ // Note that the first and last line are never empty (they
+ // contain the opening /* and closing */ respectively) and
+ // thus they can be ignored by the blank line check.
+ var prefix string
+ if len(lines) > 2 {
+ first := true
+ for i, line := range lines[1 : len(lines)-1] {
+ switch {
+ case isBlank(line):
+ lines[1+i] = "" // range starts with lines[1]
+ case first:
+ prefix = commonPrefix(line, line)
+ first = false
+ default:
+ prefix = commonPrefix(prefix, line)
+ }
+ }
+ } else { // len(lines) == 2, lines cannot be blank (contain /* and */)
+ line := lines[1]
+ prefix = commonPrefix(line, line)
+ }
+
+ /*
+ * Check for vertical "line of stars" and correct prefix accordingly.
+ */
+ lineOfStars := false
+ if i := strings.Index(prefix, "*"); i >= 0 {
+ // Line of stars present.
+ if i > 0 && prefix[i-1] == ' ' {
+ i-- // remove trailing blank from prefix so stars remain aligned
+ }
+ prefix = prefix[0:i]
+ lineOfStars = true
+ } else {
+ // No line of stars present.
+ // Determine the white space on the first line after the /*
+ // and before the beginning of the comment text, assume two
+ // blanks instead of the /* unless the first character after
+ // the /* is a tab. If the first comment line is empty but
+ // for the opening /*, assume up to 3 blanks or a tab. This
+ // whitespace may be found as suffix in the common prefix.
+ first := lines[0]
+ if isBlank(first[2:]) {
+ // no comment text on the first line:
+ // reduce prefix by up to 3 blanks or a tab
+ // if present - this keeps comment text indented
+ // relative to the /* and */'s if it was indented
+ // in the first place
+ i := len(prefix)
+ for n := 0; n < 3 && i > 0 && prefix[i-1] == ' '; n++ {
+ i--
+ }
+ if i == len(prefix) && i > 0 && prefix[i-1] == '\t' {
+ i--
+ }
+ prefix = prefix[0:i]
+ } else {
+ // comment text on the first line
+ suffix := make([]byte, len(first))
+ n := 2 // start after opening /*
+ for n < len(first) && first[n] <= ' ' {
+ suffix[n] = first[n]
+ n++
+ }
+ if n > 2 && suffix[2] == '\t' {
+ // assume the '\t' compensates for the /*
+ suffix = suffix[2:n]
+ } else {
+ // otherwise assume two blanks
+ suffix[0], suffix[1] = ' ', ' '
+ suffix = suffix[0:n]
+ }
+ // Shorten the computed common prefix by the length of
+ // suffix, if it is found as suffix of the prefix.
+ prefix = strings.TrimSuffix(prefix, string(suffix))
+ }
+ }
+
+ // Handle last line: If it only contains a closing */, align it
+ // with the opening /*, otherwise align the text with the other
+ // lines.
+ last := lines[len(lines)-1]
+ closing := "*/"
+ i := strings.Index(last, closing) // i >= 0 (closing is always present)
+ if isBlank(last[0:i]) {
+ // last line only contains closing */
+ if lineOfStars {
+ closing = " */" // add blank to align final star
+ }
+ lines[len(lines)-1] = prefix + closing
+ } else {
+ // last line contains more comment text - assume
+ // it is aligned like the other lines and include
+ // in prefix computation
+ prefix = commonPrefix(prefix, last)
+ }
+
+ // Remove the common prefix from all but the first and empty lines.
+ for i, line := range lines {
+ if i > 0 && line != "" {
+ lines[i] = line[len(prefix):]
+ }
+ }
+}
+
+func (p *printer) writeComment(comment *ast.Comment) {
+ text := comment.Text
+ pos := p.posFor(comment.Pos())
+
+ const linePrefix = "//line "
+ if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) {
+ // possibly a line directive
+ ldir := strings.TrimSpace(text[len(linePrefix):])
+ if i := strings.LastIndex(ldir, ":"); i >= 0 {
+ if line, err := strconv.Atoi(ldir[i+1:]); err == nil && line > 0 {
+ // The line directive we are about to print changed
+ // the Filename and Line number used for subsequent
+ // tokens. We have to update our AST-space position
+ // accordingly and suspend indentation temporarily.
+ indent := p.indent
+ p.indent = 0
+ defer func() {
+ p.pos.Filename = ldir[:i]
+ p.pos.Line = line
+ p.pos.Column = 1
+ p.indent = indent
+ }()
+ }
+ }
+ }
+
+ // shortcut common case of //-style comments
+ if text[1] == '/' {
+ p.writeString(pos, trimRight(text), true)
+ return
+ }
+
+ // for /*-style comments, print line by line and let the
+ // write function take care of the proper indentation
+ lines := strings.Split(text, "\n")
+
+ // The comment started in the first column but is going
+ // to be indented. For an idempotent result, add indentation
+ // to all lines such that they look like they were indented
+ // before - this will make sure the common prefix computation
+ // is the same independent of how many times formatting is
+ // applied (was issue 1835).
+ if pos.IsValid() && pos.Column == 1 && p.indent > 0 {
+ for i, line := range lines[1:] {
+ lines[1+i] = " " + line
+ }
+ }
+
+ stripCommonPrefix(lines)
+
+ // write comment lines, separated by formfeed,
+ // without a line break after the last line
+ for i, line := range lines {
+ if i > 0 {
+ p.writeByte('\f', 1)
+ pos = p.pos
+ }
+ if len(line) > 0 {
+ p.writeString(pos, trimRight(line), true)
+ }
+ }
+}
+
+// writeCommentSuffix writes a line break after a comment if indicated
+// and processes any leftover indentation information. If a line break
+// is needed, the kind of break (newline vs formfeed) depends on the
+// pending whitespace. The writeCommentSuffix result indicates if a
+// newline was written or if a formfeed was dropped from the whitespace
+// buffer.
+//
+func (p *printer) writeCommentSuffix(needsLinebreak bool) (wroteNewline, droppedFF bool) {
+ for i, ch := range p.wsbuf {
+ switch ch {
+ case blank, vtab:
+ // ignore trailing whitespace
+ p.wsbuf[i] = ignore
+ case indent, unindent:
+ // don't lose indentation information
+ case newline, formfeed:
+ // if we need a line break, keep exactly one
+ // but remember if we dropped any formfeeds
+ if needsLinebreak {
+ needsLinebreak = false
+ wroteNewline = true
+ } else {
+ if ch == formfeed {
+ droppedFF = true
+ }
+ p.wsbuf[i] = ignore
+ }
+ }
+ }
+ p.writeWhitespace(len(p.wsbuf))
+
+ // make sure we have a line break
+ if needsLinebreak {
+ p.writeByte('\n', 1)
+ wroteNewline = true
+ }
+
+ return
+}
+
+// intersperseComments consumes all comments that appear before the next token
+// tok and prints them together with the buffered whitespace (i.e., the whitespace
+// that needs to be written before the next token). A heuristic is used to mix
+// the comments and whitespace. The intersperseComments result indicates if a
+// newline was written or if a formfeed was dropped from the whitespace buffer.
+//
+func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
+ var last *ast.Comment
+ for p.commentBefore(next) {
+ for _, c := range p.comment.List {
+ p.writeCommentPrefix(p.posFor(c.Pos()), next, last, c, tok)
+ p.writeComment(c)
+ last = c
+ }
+ p.nextComment()
+ }
+
+ if last != nil {
+ // if the last comment is a /*-style comment and the next item
+ // follows on the same line but is not a comma, and not a "closing"
+ // token immediately following its corresponding "opening" token,
+ // add an extra blank for separation unless explicitly disabled
+ if p.mode&noExtraBlank == 0 &&
+ last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line &&
+ tok != token.COMMA &&
+ (tok != token.RPAREN || p.prevOpen == token.LPAREN) &&
+ (tok != token.RBRACK || p.prevOpen == token.LBRACK) {
+ p.writeByte(' ', 1)
+ }
+ // ensure that there is a line break after a //-style comment,
+ // before a closing '}' unless explicitly disabled, or at eof
+ needsLinebreak :=
+ last.Text[1] == '/' ||
+ tok == token.RBRACE && p.mode&noExtraLinebreak == 0 ||
+ tok == token.EOF
+ return p.writeCommentSuffix(needsLinebreak)
+ }
+
+ // no comment was written - we should never reach here since
+ // intersperseComments should not be called in that case
+ p.internalError("intersperseComments called without pending comments")
+ return
+}
+
+// writeWhitespace writes the first n whitespace entries.
+func (p *printer) writeWhitespace(n int) {
+ // write entries
+ for i := 0; i < n; i++ {
+ switch ch := p.wsbuf[i]; ch {
+ case ignore:
+ // ignore!
+ case indent:
+ p.indent++
+ case unindent:
+ p.indent--
+ if p.indent < 0 {
+ p.internalError("negative indentation:", p.indent)
+ p.indent = 0
+ }
+ case newline, formfeed:
+ // A line break immediately followed by a "correcting"
+ // unindent is swapped with the unindent - this permits
+ // proper label positioning. If a comment is between
+ // the line break and the label, the unindent is not
+ // part of the comment whitespace prefix and the comment
+ // will be positioned correctly indented.
+ if i+1 < n && p.wsbuf[i+1] == unindent {
+ // Use a formfeed to terminate the current section.
+ // Otherwise, a long label name on the next line leading
+ // to a wide column may increase the indentation column
+ // of lines before the label; effectively leading to wrong
+ // indentation.
+ p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed
+ i-- // do it again
+ continue
+ }
+ fallthrough
+ default:
+ p.writeByte(byte(ch), 1)
+ }
+ }
+
+ // shift remaining entries down
+ l := copy(p.wsbuf, p.wsbuf[n:])
+ p.wsbuf = p.wsbuf[:l]
+}
+
+// ----------------------------------------------------------------------------
+// Printing interface
+
+// nlimit limits n to maxNewlines.
+func nlimit(n int) int {
+ if n > maxNewlines {
+ n = maxNewlines
+ }
+ return n
+}
+
+func mayCombine(prev token.Token, next byte) (b bool) {
+ switch prev {
+ case token.INT:
+ b = next == '.' // 1.
+ case token.ADD:
+ b = next == '+' // ++
+ case token.SUB:
+ b = next == '-' // --
+ case token.QUO:
+ b = next == '*' // /*
+ case token.LSS:
+ b = next == '-' || next == '<' // <- or <<
+ case token.AND:
+ b = next == '&' || next == '^' // && or &^
+ }
+ return
+}
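+
+// For example (illustrative), printing token.SUB directly before a token
+// starting with '-' would paste the two into "--", which would rescan as
+// a decrement; print uses mayCombine to force a separating blank instead.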
+
+// print prints a list of "items" (roughly corresponding to syntactic
+// tokens, but also including whitespace and formatting information).
+// It is the only print function that should be called directly from
+// any of the AST printing functions in nodes.go.
+//
+// Whitespace is accumulated until a non-whitespace token appears. Any
+// comments that need to appear before that token are printed first,
+// taking into account the amount and structure of any pending white-
+// space for best comment placement. Then, any leftover whitespace is
+// printed, followed by the actual token.
+//
+func (p *printer) print(args ...interface{}) {
+ for _, arg := range args {
+ // information about the current arg
+ var data string
+ var isLit bool
+ var impliedSemi bool // value for p.impliedSemi after this arg
+
+ // record previous opening token, if any
+ switch p.lastTok {
+ case token.ILLEGAL:
+ // ignore (white space)
+ case token.LPAREN, token.LBRACK:
+ p.prevOpen = p.lastTok
+ default:
+ // other tokens followed any opening token
+ p.prevOpen = token.ILLEGAL
+ }
+
+ switch x := arg.(type) {
+ case pmode:
+ // toggle printer mode
+ p.mode ^= x
+ continue
+
+ case whiteSpace:
+ if x == ignore {
+ // don't add ignore's to the buffer; they
+ // may screw up "correcting" unindents (see
+ // LabeledStmt)
+ continue
+ }
+ i := len(p.wsbuf)
+ if i == cap(p.wsbuf) {
+ // Whitespace sequences are very short so this should
+ // never happen. Handle gracefully (but possibly with
+ // bad comment placement) if it does happen.
+ p.writeWhitespace(i)
+ i = 0
+ }
+ p.wsbuf = p.wsbuf[0 : i+1]
+ p.wsbuf[i] = x
+ if x == newline || x == formfeed {
+ // newlines affect the current state (p.impliedSemi)
+ // and not the state after printing arg (impliedSemi)
+ // because comments can be interspersed before the arg
+ // in this case
+ p.impliedSemi = false
+ }
+ p.lastTok = token.ILLEGAL
+ continue
+
+ case *ast.Ident:
+ data = x.Name
+ impliedSemi = true
+ p.lastTok = token.IDENT
+
+ case *ast.BasicLit:
+ data = x.Value
+ isLit = true
+ impliedSemi = true
+ p.lastTok = x.Kind
+
+ case token.Token:
+ s := x.String()
+ if mayCombine(p.lastTok, s[0]) {
+ // the previous and the current token must be
+ // separated by a blank otherwise they combine
+ // into a different incorrect token sequence
+ // (except for token.INT followed by a '.' this
+ // should never happen because it is taken care
+ // of via binary expression formatting)
+ if len(p.wsbuf) != 0 {
+ p.internalError("whitespace buffer not empty")
+ }
+ p.wsbuf = p.wsbuf[0:1]
+ p.wsbuf[0] = ' '
+ }
+ data = s
+ // some keywords followed by a newline imply a semicolon
+ switch x {
+ case token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN,
+ token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE:
+ impliedSemi = true
+ }
+ p.lastTok = x
+
+ case token.Pos:
+ if x.IsValid() {
+ p.pos = p.posFor(x) // accurate position of next item
+ }
+ continue
+
+ case string:
+ // incorrect AST - print error message
+ data = x
+ isLit = true
+ impliedSemi = true
+ p.lastTok = token.STRING
+
+ default:
+ fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg)
+ panic("go/printer type")
+ }
+ // data != ""
+
+ next := p.pos // estimated/accurate position of next item
+ wroteNewline, droppedFF := p.flush(next, p.lastTok)
+
+ // intersperse extra newlines if present in the source and
+ // if they don't cause extra semicolons (don't do this in
+ // flush as it will cause extra newlines at the end of a file)
+ if !p.impliedSemi {
+ n := nlimit(next.Line - p.pos.Line)
+ // don't exceed maxNewlines if we already wrote one
+ if wroteNewline && n == maxNewlines {
+ n = maxNewlines - 1
+ }
+ if n > 0 {
+ ch := byte('\n')
+ if droppedFF {
+ ch = '\f' // use formfeed since we dropped one before
+ }
+ p.writeByte(ch, n)
+ impliedSemi = false
+ }
+ }
+
+ // the next token starts now - record its line number if requested
+ if p.linePtr != nil {
+ *p.linePtr = p.out.Line
+ p.linePtr = nil
+ }
+
+ p.writeString(next, data, isLit)
+ p.impliedSemi = impliedSemi
+ }
+}
+
+// flush prints any pending comments and whitespace occurring textually
+// before the position of the next token tok. The flush result indicates
+// if a newline was written or if a formfeed was dropped from the whitespace
+// buffer.
+//
+func (p *printer) flush(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
+ if p.commentBefore(next) {
+ // if there are comments before the next item, intersperse them
+ wroteNewline, droppedFF = p.intersperseComments(next, tok)
+ } else {
+ // otherwise, write any leftover whitespace
+ p.writeWhitespace(len(p.wsbuf))
+ }
+ return
+}
+
+// getNode returns the ast.CommentGroup associated with n, if any.
+func getDoc(n ast.Node) *ast.CommentGroup {
+ switch n := n.(type) {
+ case *ast.Field:
+ return n.Doc
+ case *ast.ImportSpec:
+ return n.Doc
+ case *ast.ValueSpec:
+ return n.Doc
+ case *ast.TypeSpec:
+ return n.Doc
+ case *ast.GenDecl:
+ return n.Doc
+ case *ast.FuncDecl:
+ return n.Doc
+ case *ast.File:
+ return n.Doc
+ }
+ return nil
+}
+
+func (p *printer) printNode(node interface{}) error {
+ // unpack *CommentedNode, if any
+ var comments []*ast.CommentGroup
+ if cnode, ok := node.(*CommentedNode); ok {
+ node = cnode.Node
+ comments = cnode.Comments
+ }
+
+ if comments != nil {
+ // commented node - restrict comment list to relevant range
+ n, ok := node.(ast.Node)
+ if !ok {
+ goto unsupported
+ }
+ beg := n.Pos()
+ end := n.End()
+ // if the node has associated documentation,
+ // include that commentgroup in the range
+ // (the comment list is sorted in the order
+ // of the comment appearance in the source code)
+ if doc := getDoc(n); doc != nil {
+ beg = doc.Pos()
+ }
+ // token.Pos values are global offsets, we can
+ // compare them directly
+ i := 0
+ for i < len(comments) && comments[i].End() < beg {
+ i++
+ }
+ j := i
+ for j < len(comments) && comments[j].Pos() < end {
+ j++
+ }
+ if i < j {
+ p.comments = comments[i:j]
+ }
+ } else if n, ok := node.(*ast.File); ok {
+ // use ast.File comments, if any
+ p.comments = n.Comments
+ }
+
+ // if there are no comments, use node comments
+ p.useNodeComments = p.comments == nil
+
+ // get comments ready for use
+ p.nextComment()
+
+ // format node
+ switch n := node.(type) {
+ case ast.Expr:
+ p.expr(n)
+ case ast.Stmt:
+ // A labeled statement will un-indent to position the label.
+ // Set p.indent to 1 so we don't get indent "underflow".
+ if _, ok := n.(*ast.LabeledStmt); ok {
+ p.indent = 1
+ }
+ p.stmt(n, false)
+ case ast.Decl:
+ p.decl(n)
+ case ast.Spec:
+ p.spec(n, 1, false)
+ case []ast.Stmt:
+ // A labeled statement will un-indent to position the label.
+ // Set p.indent to 1 so we don't get indent "underflow".
+ for _, s := range n {
+ if _, ok := s.(*ast.LabeledStmt); ok {
+ p.indent = 1
+ }
+ }
+ p.stmtList(n, 0, false)
+ case []ast.Decl:
+ p.declList(n)
+ case *ast.File:
+ p.file(n)
+ default:
+ goto unsupported
+ }
+
+ return nil
+
+unsupported:
+ return fmt.Errorf("go/printer: unsupported node type %T", node)
+}
+
+// ----------------------------------------------------------------------------
+// Trimmer
+
+// A trimmer is an io.Writer filter for stripping tabwriter.Escape
+// characters, trailing blanks and tabs, and for converting formfeed
+// and vtab characters into newlines and htabs (in case no tabwriter
+// is used). Text bracketed by tabwriter.Escape characters is passed
+// through unchanged.
+//
+type trimmer struct {
+ output io.Writer
+ state int
+ space []byte
+}
+
+// trimmer is implemented as a state machine.
+// It can be in one of the following states:
+const (
+ inSpace = iota // inside space
+ inEscape // inside text bracketed by tabwriter.Escapes
+ inText // inside text
+)
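+
+// For illustration (editor's sketch, not part of the original source):
+// writing "foo \t\n" through a trimmer emits just "foo\n". The blank and
+// the tab are buffered in p.space while in the inSpace state and are
+// discarded when the newline arrives, so trailing whitespace never
+// reaches the underlying writer:
+//
+//	var buf bytes.Buffer
+//	t := &trimmer{output: &buf}
+//	t.Write([]byte("foo \t\n"))
+//	// buf.String() == "foo\n"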
+
+func (p *trimmer) resetSpace() {
+ p.state = inSpace
+ p.space = p.space[0:0]
+}
+
+// Design note: It is tempting to eliminate extra blanks occurring in
+// whitespace in this function as it could simplify some
+// of the blanks logic in the node printing functions.
+// However, this would mess up any formatting done by
+// the tabwriter.
+
+var aNewline = []byte("\n")
+
+func (p *trimmer) Write(data []byte) (n int, err error) {
+ // invariants:
+ // p.state == inSpace:
+ // p.space is unwritten
+ // p.state == inEscape, inText:
+ // data[m:n] is unwritten
+ m := 0
+ var b byte
+ for n, b = range data {
+ if b == '\v' {
+ b = '\t' // convert to htab
+ }
+ switch p.state {
+ case inSpace:
+ switch b {
+ case '\t', ' ':
+ p.space = append(p.space, b)
+ case '\n', '\f':
+ p.resetSpace() // discard trailing space
+ _, err = p.output.Write(aNewline)
+ case tabwriter.Escape:
+ _, err = p.output.Write(p.space)
+ p.state = inEscape
+ m = n + 1 // +1: skip tabwriter.Escape
+ default:
+ _, err = p.output.Write(p.space)
+ p.state = inText
+ m = n
+ }
+ case inEscape:
+ if b == tabwriter.Escape {
+ _, err = p.output.Write(data[m:n])
+ p.resetSpace()
+ }
+ case inText:
+ switch b {
+ case '\t', ' ':
+ _, err = p.output.Write(data[m:n])
+ p.resetSpace()
+ p.space = append(p.space, b)
+ case '\n', '\f':
+ _, err = p.output.Write(data[m:n])
+ p.resetSpace()
+ _, err = p.output.Write(aNewline)
+ case tabwriter.Escape:
+ _, err = p.output.Write(data[m:n])
+ p.state = inEscape
+ m = n + 1 // +1: skip tabwriter.Escape
+ }
+ default:
+ panic("unreachable")
+ }
+ if err != nil {
+ return
+ }
+ }
+ n = len(data)
+
+ switch p.state {
+ case inEscape, inText:
+ _, err = p.output.Write(data[m:n])
+ p.resetSpace()
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Public interface
+
+// A Mode value is a set of flags (or 0). They control printing.
+type Mode uint
+
+const (
+ RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored
+ TabIndent // use tabs for indentation independent of UseSpaces
+ UseSpaces // use spaces instead of tabs for alignment
+ SourcePos // emit //line comments to preserve original source positions
+)
+
+// A Config node controls the output of Fprint.
+type Config struct {
+ Mode Mode // default: 0
+ Tabwidth int // default: 8
+ Indent int // default: 0 (all code is indented at least by this much)
+}
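+
+// A typical use (editor's sketch, not part of the original source; assumes
+// the go/parser, log, and os packages):
+//
+//	fset := token.NewFileSet()
+//	f, err := parser.ParseFile(fset, "hello.go", src, parser.ParseComments)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	cfg := &Config{Mode: UseSpaces | TabIndent, Tabwidth: 4}
+//	err = cfg.Fprint(os.Stdout, fset, f)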
+
+// fprint implements Fprint and takes a nodeSizes map for setting up the printer state.
+func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (err error) {
+ // print node
+ var p printer
+ p.init(cfg, fset, nodeSizes)
+ if err = p.printNode(node); err != nil {
+ return
+ }
+ // print outstanding comments
+ p.impliedSemi = false // EOF acts like a newline
+ p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF)
+
+ // redirect output through a trimmer to eliminate trailing whitespace
+ // (Input to a tabwriter must be untrimmed since trailing tabs provide
+ // formatting information. The tabwriter could provide trimming
+ // functionality but no tabwriter is used when RawFormat is set.)
+ output = &trimmer{output: output}
+
+ // redirect output through a tabwriter if necessary
+ if cfg.Mode&RawFormat == 0 {
+ minwidth := cfg.Tabwidth
+
+ padchar := byte('\t')
+ if cfg.Mode&UseSpaces != 0 {
+ padchar = ' '
+ }
+
+ twmode := tabwriter.DiscardEmptyColumns
+ if cfg.Mode&TabIndent != 0 {
+ minwidth = 0
+ twmode |= tabwriter.TabIndent
+ }
+
+ output = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode)
+ }
+
+ // write printer result via tabwriter/trimmer to output
+ if _, err = output.Write(p.output); err != nil {
+ return
+ }
+
+ // flush tabwriter, if any
+ if tw, _ := output.(*tabwriter.Writer); tw != nil {
+ err = tw.Flush()
+ }
+
+ return
+}
+
+// A CommentedNode bundles an AST node and corresponding comments.
+// It may be provided as argument to any of the Fprint functions.
+//
+type CommentedNode struct {
+ Node interface{} // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt
+ Comments []*ast.CommentGroup
+}
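+
+// For example (editor's sketch, not part of the original source), to print
+// a single declaration decl of a parsed file f together with the comments
+// that fall within its source range:
+//
+//	err := Fprint(os.Stdout, fset, &CommentedNode{Node: decl, Comments: f.Comments})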
+
+// Fprint "pretty-prints" an AST node to output for a given configuration cfg.
+// Position information is interpreted relative to the file set fset.
+// The node type must be *ast.File, *CommentedNode, []ast.Decl, []ast.Stmt,
+// or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt.
+//
+func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
+ return cfg.fprint(output, fset, node, make(map[ast.Node]int))
+}
+
+// Fprint "pretty-prints" an AST node to output.
+// It calls Config.Fprint with default settings.
+//
+func Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
+ return (&Config{Tabwidth: 8}).Fprint(output, fset, node)
+}
diff --git a/src/go/printer/printer_test.go b/src/go/printer/printer_test.go
new file mode 100644
index 000000000..3b0570e5b
--- /dev/null
+++ b/src/go/printer/printer_test.go
@@ -0,0 +1,562 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package printer
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+ "time"
+)
+
+const (
+ dataDir = "testdata"
+ tabwidth = 8
+)
+
+var update = flag.Bool("update", false, "update golden files")
+
+var fset = token.NewFileSet()
+
+type checkMode uint
+
+const (
+ export checkMode = 1 << iota
+ rawFormat
+ idempotent
+)
+
+// format parses src, prints the corresponding AST, verifies that the
+// printed result is syntactically correct, and returns the formatted
+// source or an error if any step fails.
+func format(src []byte, mode checkMode) ([]byte, error) {
+ // parse src
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ return nil, fmt.Errorf("parse: %s\n%s", err, src)
+ }
+
+ // filter exports if necessary
+ if mode&export != 0 {
+ ast.FileExports(f) // ignore result
+ f.Comments = nil // don't print comments that are not in AST
+ }
+
+ // determine printer configuration
+ cfg := Config{Tabwidth: tabwidth}
+ if mode&rawFormat != 0 {
+ cfg.Mode |= RawFormat
+ }
+
+ // print AST
+ var buf bytes.Buffer
+ if err := cfg.Fprint(&buf, fset, f); err != nil {
+ return nil, fmt.Errorf("print: %s", err)
+ }
+
+ // make sure formatted output is syntactically correct
+ res := buf.Bytes()
+ if _, err := parser.ParseFile(fset, "", res, 0); err != nil {
+ return nil, fmt.Errorf("re-parse: %s\n%s", err, buf.Bytes())
+ }
+
+ return res, nil
+}
+
+// lineAt returns the line in text starting at offset offs.
+func lineAt(text []byte, offs int) []byte {
+ i := offs
+ for i < len(text) && text[i] != '\n' {
+ i++
+ }
+ return text[offs:i]
+}
+
+// diff compares a and b.
+func diff(aname, bname string, a, b []byte) error {
+ var buf bytes.Buffer // holding long error message
+
+ // compare lengths
+ if len(a) != len(b) {
+ fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b))
+ }
+
+ // compare contents
+ line := 1
+ offs := 0 // offset of the start of the current line
+ for i := 0; i < len(a) && i < len(b); i++ {
+ ch := a[i]
+ if ch != b[i] {
+ fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs))
+ fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs))
+ fmt.Fprintf(&buf, "\n\n")
+ break
+ }
+ if ch == '\n' {
+ line++
+ offs = i + 1
+ }
+ }
+
+ if buf.Len() > 0 {
+ return errors.New(buf.String())
+ }
+ return nil
+}
+
+func runcheck(t *testing.T, source, golden string, mode checkMode) {
+ src, err := ioutil.ReadFile(source)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ res, err := format(src, mode)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // update golden files if necessary
+ if *update {
+ if err := ioutil.WriteFile(golden, res, 0644); err != nil {
+ t.Error(err)
+ }
+ return
+ }
+
+ // get golden
+ gld, err := ioutil.ReadFile(golden)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // formatted source and golden must be the same
+ if err := diff(source, golden, res, gld); err != nil {
+ t.Error(err)
+ return
+ }
+
+ if mode&idempotent != 0 {
+ // formatting golden must be idempotent
+ // (This is very difficult to achieve in general and for now
+ // it is only checked for files explicitly marked as such.)
+ res, err = format(gld, mode)
+ if err := diff(golden, fmt.Sprintf("format(%s)", golden), gld, res); err != nil {
+ t.Errorf("golden is not idempotent: %s", err)
+ }
+ }
+}
+
+func check(t *testing.T, source, golden string, mode checkMode) {
+ // run the test
+ cc := make(chan int)
+ go func() {
+ runcheck(t, source, golden, mode)
+ cc <- 0
+ }()
+
+ // wait with timeout
+ select {
+ case <-time.After(10 * time.Second): // plenty of safety margin, even for very slow machines
+ // test running past time out
+ t.Errorf("%s: running too slowly", source)
+ case <-cc:
+ // test finished within allotted time margin
+ }
+}
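+
+// The select-with-timeout pattern above generalizes to bounding any
+// blocking call (editor's sketch, not part of the original tests;
+// slowCall is a hypothetical function):
+//
+//	done := make(chan struct{})
+//	go func() { defer close(done); slowCall() }()
+//	select {
+//	case <-time.After(10 * time.Second):
+//		// timed out
+//	case <-done:
+//		// finished in time
+//	}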
+
+type entry struct {
+ source, golden string
+ mode checkMode
+}
+
+// Use go test -update to create/update the respective golden files.
+var data = []entry{
+ {"empty.input", "empty.golden", idempotent},
+ {"comments.input", "comments.golden", 0},
+ {"comments.input", "comments.x", export},
+ {"comments2.input", "comments2.golden", idempotent},
+ {"linebreaks.input", "linebreaks.golden", idempotent},
+ {"expressions.input", "expressions.golden", idempotent},
+ {"expressions.input", "expressions.raw", rawFormat | idempotent},
+ {"declarations.input", "declarations.golden", 0},
+ {"statements.input", "statements.golden", 0},
+ {"slow.input", "slow.golden", idempotent},
+}
+
+func TestFiles(t *testing.T) {
+ for _, e := range data {
+ source := filepath.Join(dataDir, e.source)
+ golden := filepath.Join(dataDir, e.golden)
+ check(t, source, golden, e.mode)
+ // TODO(gri) check that golden is idempotent
+ //check(t, golden, golden, e.mode)
+ }
+}
+
+// TestLineComments, using a simple test case, checks that consecutive line
+// comments are properly terminated with a newline even if the AST position
+// information is incorrect.
+//
+func TestLineComments(t *testing.T) {
+ const src = `// comment 1
+ // comment 2
+ // comment 3
+ package main
+ `
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ var buf bytes.Buffer
+ fset = token.NewFileSet() // use the wrong file set
+ Fprint(&buf, fset, f)
+
+ nlines := 0
+ for _, ch := range buf.Bytes() {
+ if ch == '\n' {
+ nlines++
+ }
+ }
+
+ const expected = 3
+ if nlines < expected {
+ t.Errorf("got %d, expected %d\n", nlines, expected)
+ t.Errorf("result:\n%s", buf.Bytes())
+ }
+}
+
+// Verify that the printer can be invoked during initialization.
+func init() {
+ const name = "foobar"
+ var buf bytes.Buffer
+ if err := Fprint(&buf, fset, &ast.Ident{Name: name}); err != nil {
+ panic(err) // error in test
+ }
+ // in debug mode, the result contains additional information;
+ // ignore it
+ if s := buf.String(); !debug && s != name {
+ panic("got " + s + ", want " + name)
+ }
+}
+
+// Verify that the printer doesn't crash if the AST contains BadXXX nodes.
+func TestBadNodes(t *testing.T) {
+ const src = "package p\n("
+ const res = "package p\nBadDecl\n"
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err == nil {
+ t.Error("expected illegal program") // error in test
+ }
+ var buf bytes.Buffer
+ Fprint(&buf, fset, f)
+ if buf.String() != res {
+ t.Errorf("got %q, expected %q", buf.String(), res)
+ }
+}
+
+// testComment verifies that f can still be parsed after printing it
+// with its first comment replaced by comment, for every possible source
+// offset of that comment.
+func testComment(t *testing.T, f *ast.File, srclen int, comment *ast.Comment) {
+ f.Comments[0].List[0] = comment
+ var buf bytes.Buffer
+ for offs := 0; offs <= srclen; offs++ {
+ buf.Reset()
+ // Printing f should result in a correct program no
+ // matter what the (incorrect) comment position is.
+ if err := Fprint(&buf, fset, f); err != nil {
+ t.Error(err)
+ }
+ if _, err := parser.ParseFile(fset, "", buf.Bytes(), 0); err != nil {
+ t.Fatalf("incorrect program for pos = %d:\n%s", comment.Slash, buf.String())
+ }
+ // Position information is just an offset.
+ // Move comment one byte down in the source.
+ comment.Slash++
+ }
+}
+
+// Verify that the printer produces a correct program
+// even if the position information of comments introducing newlines
+// is incorrect.
+func TestBadComments(t *testing.T) {
+ const src = `
+// first comment - text and position changed by test
+package p
+import "fmt"
+const pi = 3.14 // rough circle
+var (
+ x, y, z int = 1, 2, 3
+ u, v float64
+)
+func fibo(n int) {
+ if n < 2 {
+ return n /* seed values */
+ }
+ return fibo(n-1) + fibo(n-2)
+}
+`
+
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ t.Error(err) // error in test
+ }
+
+ comment := f.Comments[0].List[0]
+ pos := comment.Pos()
+ if fset.Position(pos).Offset != 1 {
+ t.Error("expected offset 1") // error in test
+ }
+
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "//-style comment"})
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment */"})
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style \n comment */"})
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment \n\n\n */"})
+}
+
+type visitor chan *ast.Ident
+
+func (v visitor) Visit(n ast.Node) (w ast.Visitor) {
+ if ident, ok := n.(*ast.Ident); ok {
+ v <- ident
+ }
+ return v
+}
+
+// idents is an iterator that returns all idents in f via the result channel.
+func idents(f *ast.File) <-chan *ast.Ident {
+ v := make(visitor)
+ go func() {
+ ast.Walk(v, f)
+ close(v)
+ }()
+ return v
+}
+
+// identCount returns the number of identifiers found in f.
+func identCount(f *ast.File) int {
+ n := 0
+ for range idents(f) {
+ n++
+ }
+ return n
+}
+
+// Verify that the SourcePos mode emits correct //line comments
+// by testing that position information for matching identifiers
+// is maintained.
+func TestSourcePos(t *testing.T) {
+ const src = `
+package p
+import ( "go/printer"; "math" )
+const pi = 3.14; var x = 0
+type t struct{ x, y, z int; u, v, w float32 }
+func (t *t) foo(a, b, c int) int {
+ return a*t.x + b*t.y +
+ // two extra lines here
+ // ...
+ c*t.z
+}
+`
+
+ // parse original
+ f1, err := parser.ParseFile(fset, "src", src, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // pretty-print original
+ var buf bytes.Buffer
+ err = (&Config{Mode: UseSpaces | SourcePos, Tabwidth: 8}).Fprint(&buf, fset, f1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // parse pretty printed original
+ // (//line comments must be interpreted even w/o parser.ParseComments set)
+ f2, err := parser.ParseFile(fset, "", buf.Bytes(), 0)
+ if err != nil {
+ t.Fatalf("%s\n%s", err, buf.Bytes())
+ }
+
+ // At this point the position information of identifiers in f2 should
+ // match the position information of corresponding identifiers in f1.
+
+ // number of identifiers must be > 0 (test should run) and must match
+ n1 := identCount(f1)
+ n2 := identCount(f2)
+ if n1 == 0 {
+ t.Fatal("got no idents")
+ }
+ if n2 != n1 {
+ t.Errorf("got %d idents; want %d", n2, n1)
+ }
+
+ // verify that all identifiers have correct line information
+ i2range := idents(f2)
+ for i1 := range idents(f1) {
+ i2 := <-i2range
+
+ if i2.Name != i1.Name {
+ t.Errorf("got ident %s; want %s", i2.Name, i1.Name)
+ }
+
+ l1 := fset.Position(i1.Pos()).Line
+ l2 := fset.Position(i2.Pos()).Line
+ if l2 != l1 {
+ t.Errorf("got line %d; want %d for %s", l2, l1, i1.Name)
+ }
+ }
+
+ if t.Failed() {
+ t.Logf("\n%s", buf.Bytes())
+ }
+}
+
+var decls = []string{
+ `import "fmt"`,
+ "const pi = 3.1415\nconst e = 2.71828\n\nvar x = pi",
+ "func sum(x, y int) int\t{ return x + y }",
+}
+
+func TestDeclLists(t *testing.T) {
+ for _, src := range decls {
+ file, err := parser.ParseFile(fset, "", "package p;"+src, parser.ParseComments)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ var buf bytes.Buffer
+ err = Fprint(&buf, fset, file.Decls) // only print declarations
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ out := buf.String()
+ if out != src {
+ t.Errorf("\ngot : %q\nwant: %q\n", out, src)
+ }
+ }
+}
+
+var stmts = []string{
+ "i := 0",
+ "select {}\nvar a, b = 1, 2\nreturn a + b",
+ "go f()\ndefer func() {}()",
+}
+
+func TestStmtLists(t *testing.T) {
+ for _, src := range stmts {
+ file, err := parser.ParseFile(fset, "", "package p; func _() {"+src+"}", parser.ParseComments)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ var buf bytes.Buffer
+ err = Fprint(&buf, fset, file.Decls[0].(*ast.FuncDecl).Body.List) // only print statements
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ out := buf.String()
+ if out != src {
+ t.Errorf("\ngot : %q\nwant: %q\n", out, src)
+ }
+ }
+}
+
+func TestBaseIndent(t *testing.T) {
+ // The testfile must not contain multi-line raw strings since those
+ // are not indented (because their values must not change) and would
+ // make this test fail.
+ const filename = "printer.go"
+ src, err := ioutil.ReadFile(filename)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ file, err := parser.ParseFile(fset, filename, src, 0)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ var buf bytes.Buffer
+ for indent := 0; indent < 4; indent++ {
+ buf.Reset()
+ (&Config{Tabwidth: tabwidth, Indent: indent}).Fprint(&buf, fset, file)
+ // all code must be indented by at least 'indent' tabs
+ lines := bytes.Split(buf.Bytes(), []byte{'\n'})
+ for i, line := range lines {
+ if len(line) == 0 {
+ continue // empty lines don't have indentation
+ }
+ n := 0
+ for j, b := range line {
+ if b != '\t' {
+ // end of indentation
+ n = j
+ break
+ }
+ }
+ if n < indent {
+ t.Errorf("line %d: got only %d tabs; want at least %d: %q", i, n, indent, line)
+ }
+ }
+ }
+}
+
+// TestFuncType tests that an ast.FuncType with a nil Params field
+// can be printed (per go/ast specification). Test case for issue 3870.
+func TestFuncType(t *testing.T) {
+ src := &ast.File{
+ Name: &ast.Ident{Name: "p"},
+ Decls: []ast.Decl{
+ &ast.FuncDecl{
+ Name: &ast.Ident{Name: "f"},
+ Type: &ast.FuncType{},
+ },
+ },
+ }
+
+ var buf bytes.Buffer
+ if err := Fprint(&buf, fset, src); err != nil {
+ t.Fatal(err)
+ }
+ got := buf.String()
+
+ const want = `package p
+
+func f()
+`
+
+ if got != want {
+ t.Fatalf("got:\n%s\nwant:\n%s\n", got, want)
+ }
+}
+
+// TestX is a skeleton test that can be filled in for debugging one-off cases.
+// Do not remove.
+func TestX(t *testing.T) {
+ const src = `
+package p
+func _() {}
+`
+ _, err := format([]byte(src), 0)
+ if err != nil {
+ t.Error(err)
+ }
+}
diff --git a/src/go/printer/testdata/comments.golden b/src/go/printer/testdata/comments.golden
new file mode 100644
index 000000000..b1af7958a
--- /dev/null
+++ b/src/go/printer/testdata/comments.golden
@@ -0,0 +1,643 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+import "fmt" // fmt
+
+const c0 = 0 // zero
+const (
+ c1 = iota // c1
+ c2 // c2
+)
+
+// Alignment of comments in declarations>
+const (
+ _ T = iota // comment
+ _ // comment
+ _ // comment
+ _ = iota + 10
+ _ // comments
+
+ _ = 10 // comment
+ _ T = 20 // comment
+)
+
+const (
+ _____ = iota // foo
+ _ // bar
+ _ = 0 // bal
+ _ // bat
+)
+
+const (
+ _ T = iota // comment
+ _ // comment
+ _ // comment
+ _ = iota + 10
+ _ // comment
+ _ = 10
+ _ = 20 // comment
+ _ T = 0 // comment
+)
+
+// The SZ struct; it is empty.
+type SZ struct{}
+
+// The S0 struct; no field is exported.
+type S0 struct {
+ int
+ x, y, z int // 3 unexported fields
+}
+
+// The S1 struct; some fields are not exported.
+type S1 struct {
+ S0
+ A, B, C float // 3 exported fields
+ D, b, c int // 2 unexported fields
+}
+
+// The S2 struct; all fields are exported.
+type S2 struct {
+ S1
+ A, B, C float // 3 exported fields
+}
+
+// The IZ interface; it is empty.
+type SZ interface{}
+
+// The I0 interface; no method is exported.
+type I0 interface {
+ f(x int) int // unexported method
+}
+
+// The I1 interface; some methods are not exported.
+type I1 interface {
+ I0
+ F(x float) float // exported methods
+ g(x int) int // unexported method
+}
+
+// The I2 interface; all methods are exported.
+type I2 interface {
+ I0
+ F(x float) float // exported method
+ G(x float) float // exported method
+}
+
+// The S3 struct; all comments except for the last one must appear in the export.
+type S3 struct {
+ // lead comment for F1
+ F1 int // line comment for F1
+ // lead comment for F2
+ F2 int // line comment for F2
+ f3 int // f3 is not exported
+}
+
+// This comment group should be separated
+// with a newline from the next comment
+// group.
+
+// This comment should NOT be associated with the next declaration.
+
+var x int // x
+var ()
+
+// This comment SHOULD be associated with f0.
+func f0() {
+ const pi = 3.14 // pi
+ var s1 struct{} /* an empty struct */ /* foo */
+ // a struct constructor
+ // --------------------
+ var s2 struct{} = struct{}{}
+ x := pi
+}
+
+//
+// This comment should be associated with f1, with one blank line before the comment.
+//
+func f1() {
+ f0()
+ /* 1 */
+ // 2
+ /* 3 */
+ /* 4 */
+ f0()
+}
+
+func _() {
+ // this comment should be properly indented
+}
+
+func _(x int) int {
+ if x < 0 { // the tab printed before this comment's // must not affect the remaining lines
+ return -x // this statement should be properly indented
+ }
+ if x < 0 { /* the tab printed before this comment's /* must not affect the remaining lines */
+ return -x // this statement should be properly indented
+ }
+ return x
+}
+
+func typeswitch(x interface{}) {
+ switch v := x.(type) {
+ case bool, int, float:
+ case string:
+ default:
+ }
+
+ switch x.(type) {
+ }
+
+ switch v0, ok := x.(int); v := x.(type) {
+ }
+
+ switch v0, ok := x.(int); x.(type) {
+ case byte: // this comment should be on the same line as the keyword
+ // this comment should be normally indented
+ _ = 0
+ case bool, int, float:
+ // this comment should be indented
+ case string:
+ default:
+ // this comment should be indented
+ }
+ // this comment should not be indented
+}
+
+//
+// Indentation of comments after possibly indented multi-line constructs
+// (test cases for issue 3147).
+//
+
+func _() {
+ s := 1 +
+ 2
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+ // should be indented like s
+ _ = 0
+}
+
+// Test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // should be aligned with f()
+ f()
+}
+
+// Modified test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // may not be aligned with f() (source is not aligned)
+ f()
+}
+
+//
+// Test cases for alignment of lines in general comments.
+//
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line */
+}
+
+/*
+ * line
+ * of
+ * stars
+ */
+
+/* another line
+ * of
+ * stars */
+
+/* and another line
+ * of
+ * stars */
+
+/* a line of
+ * stars */
+
+/* and another line of
+ * stars */
+
+/* a line of stars
+ */
+
+/* and another line of
+ */
+
+/* a line of stars
+ */
+
+/* and another line of
+ */
+
+/*
+aligned in middle
+here
+ not here
+*/
+
+/*
+blank line in middle:
+
+with no leading spaces on blank line.
+*/
+
+/*
+ aligned in middle
+ here
+ not here
+*/
+
+/*
+ blank line in middle:
+
+ with no leading spaces on blank line.
+*/
+
+func _() {
+ /*
+ * line
+ * of
+ * stars
+ */
+
+ /*
+ aligned in middle
+ here
+ not here
+ */
+
+ /*
+ blank line in middle:
+
+ with no leading spaces on blank line.
+ */
+}
+
+// Some interesting interspersed comments.
+// See below for more common cases.
+func _( /* this */ x /* is */ /* an */ int) {
+}
+
+func _( /* no params - extra blank before and after comment */ ) {}
+func _(a, b int /* params - no extra blank after comment */) {}
+
+func _() { f( /* no args - extra blank before and after comment */ ) }
+func _() { f(a, b /* args - no extra blank after comment */) }
+
+func _() {
+ f( /* no args - extra blank before and after comment */ )
+ f(a, b /* args - no extra blank after comment */)
+}
+
+func ( /* comment1 */ T /* comment2 */) _() {}
+
+func _() { /* "short-ish one-line functions with comments are formatted as multi-line functions */ }
+func _() { x := 0; /* comment */ y = x /* comment */ }
+
+func _() {
+ _ = 0
+ /* closing curly brace should be on new line */
+}
+
+func _() {
+ _ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
+}
+
+// Test cases from issue 1542:
+// Comments must not be placed before commas and cause invalid programs.
+func _() {
+ var a = []int{1, 2 /*jasldf*/}
+ _ = a
+}
+
+func _() {
+ var a = []int{1, 2}/*jasldf
+ */
+
+ _ = a
+}
+
+func _() {
+ var a = []int{1, 2}// jasldf
+
+ _ = a
+}
+
+// Comments immediately adjacent to punctuation followed by a newline
+// remain after the punctuation (looks better and permits alignment of
+// comments).
+func _() {
+ _ = T{
+ 1, // comment after comma
+ 2, /* comment after comma */
+ 3, // comment after comma
+ }
+ _ = T{
+ 1, // comment after comma
+ 2, /* comment after comma */
+ 3, // comment after comma
+ }
+ _ = T{
+ /* comment before literal */ 1,
+ 2, /* comment before comma - ok to move after comma */
+ 3, /* comment before comma - ok to move after comma */
+ }
+
+ for i = 0; // comment after semicolon
+ i < 9; /* comment after semicolon */
+ i++ { // comment after opening curly brace
+ }
+
+ // TODO(gri) the last comment in this example should be aligned */
+ for i = 0; // comment after semicolon
+ i < 9; /* comment before semicolon - ok to move after semicolon */
+ i++ /* comment before opening curly brace */ {
+ }
+}
+
+// If there is no newline following punctuation, commas move before the punctuation.
+// This way, commas interspersed in lists stay with the respective expression.
+func f(x /* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
+ f(x /* comment */, y)
+ f(x, /* comment */
+ y)
+ f(
+ x, /* comment */
+ )
+}
+
+func g(
+ x int, /* comment */
+) {
+}
+
+type _ struct {
+ a, b /* comment */, c int
+}
+
+type _ struct {
+ a, b /* comment */, c int
+}
+
+func _() {
+ for a /* comment */, b := range x {
+ }
+}
+
+// Print line directives correctly.
+
+// The following is a legal line directive.
+//line foo:1
+func _() {
+ _ = 0
+ // The following is a legal line directive. It must not be indented:
+//line foo:2
+ _ = 1
+
+ // The following is not a legal line directive (it doesn't start in column 1):
+ //line foo:2
+ _ = 2
+
+ // The following is not a legal line directive (negative line number):
+ //line foo:-3
+ _ = 3
+}
+
+// Line comments with tabs
+func _() {
+ var finput *bufio.Reader // input file
+ var stderr *bufio.Writer
+ var ftable *bufio.Writer // y.go file
+ var foutput *bufio.Writer // y.output file
+
+ var oflag string // -o [y.go] - y.go file
+ var vflag string // -v [y.output] - y.output file
+ var lflag bool // -l - disable line directives
+}
+
+// Trailing white space in comments should be trimmed
+func _() {
+ // This comment has 4 blanks following that should be trimmed:
+ /* Each line of this comment has blanks or tabs following that should be trimmed:
+ line 2:
+ line 3:
+ */
+}
+
+/* This comment is the last entry in this file. It must be printed and should be followed by a newline */
diff --git a/src/go/printer/testdata/comments.input b/src/go/printer/testdata/comments.input
new file mode 100644
index 000000000..983e2b2c9
--- /dev/null
+++ b/src/go/printer/testdata/comments.input
@@ -0,0 +1,648 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+import "fmt" // fmt
+
+const c0 = 0 // zero
+const (
+ c1 = iota // c1
+ c2 // c2
+)
+
+// Alignment of comments in declarations>
+const (
+ _ T = iota // comment
+ _ // comment
+ _ // comment
+ _ = iota+10
+ _ // comments
+
+ _ = 10 // comment
+ _ T = 20 // comment
+)
+
+const (
+ _____ = iota // foo
+ _ // bar
+ _ = 0 // bal
+ _ // bat
+)
+
+const (
+ _ T = iota // comment
+ _ // comment
+ _ // comment
+ _ = iota + 10
+ _ // comment
+ _ = 10
+ _ = 20 // comment
+ _ T = 0 // comment
+)
+
+// The SZ struct; it is empty.
+type SZ struct {}
+
+// The S0 struct; no field is exported.
+type S0 struct {
+ int
+ x, y, z int // 3 unexported fields
+}
+
+// The S1 struct; some fields are not exported.
+type S1 struct {
+ S0
+ A, B, C float // 3 exported fields
+ D, b, c int // 2 unexported fields
+}
+
+// The S2 struct; all fields are exported.
+type S2 struct {
+ S1
+ A, B, C float // 3 exported fields
+}
+
+// The IZ interface; it is empty.
+type SZ interface {}
+
+// The I0 interface; no method is exported.
+type I0 interface {
+ f(x int) int // unexported method
+}
+
+// The I1 interface; some methods are not exported.
+type I1 interface {
+ I0
+ F(x float) float // exported methods
+ g(x int) int // unexported method
+}
+
+// The I2 interface; all methods are exported.
+type I2 interface {
+ I0
+ F(x float) float // exported method
+ G(x float) float // exported method
+}
+
+// The S3 struct; all comments except for the last one must appear in the export.
+type S3 struct {
+ // lead comment for F1
+ F1 int // line comment for F1
+ // lead comment for F2
+ F2 int // line comment for F2
+ f3 int // f3 is not exported
+}
+
+// This comment group should be separated
+// with a newline from the next comment
+// group.
+
+// This comment should NOT be associated with the next declaration.
+
+var x int // x
+var ()
+
+
+// This comment SHOULD be associated with f0.
+func f0() {
+ const pi = 3.14 // pi
+ var s1 struct {} /* an empty struct */ /* foo */
+ // a struct constructor
+ // --------------------
+ var s2 struct {} = struct {}{}
+ x := pi
+}
+//
+// This comment should be associated with f1, with one blank line before the comment.
+//
+func f1() {
+ f0()
+ /* 1 */
+ // 2
+ /* 3 */
+ /* 4 */
+ f0()
+}
+
+
+func _() {
+// this comment should be properly indented
+}
+
+
+func _(x int) int {
+ if x < 0 { // the tab printed before this comment's // must not affect the remaining lines
+ return -x // this statement should be properly indented
+ }
+ if x < 0 { /* the tab printed before this comment's /* must not affect the remaining lines */
+ return -x // this statement should be properly indented
+ }
+ return x
+}
+
+
+func typeswitch(x interface{}) {
+ switch v := x.(type) {
+ case bool, int, float:
+ case string:
+ default:
+ }
+
+ switch x.(type) {
+ }
+
+ switch v0, ok := x.(int); v := x.(type) {
+ }
+
+ switch v0, ok := x.(int); x.(type) {
+ case byte: // this comment should be on the same line as the keyword
+ // this comment should be normally indented
+ _ = 0
+ case bool, int, float:
+ // this comment should be indented
+ case string:
+ default:
+ // this comment should be indented
+ }
+ // this comment should not be indented
+}
+
+//
+// Indentation of comments after possibly indented multi-line constructs
+// (test cases for issue 3147).
+//
+
+func _() {
+ s := 1 +
+ 2
+// should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+// should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+ // should be indented like s
+ _ = 0
+}
+
+// Test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // should be aligned with f()
+ f()
+}
+
+// Modified test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // may not be aligned with f() (source is not aligned)
+ f()
+}
+
+//
+// Test cases for alignment of lines in general comments.
+//
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line */
+}
+
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line */
+}
+
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line */
+}
+
+/*
+ * line
+ * of
+ * stars
+ */
+
+/* another line
+ * of
+ * stars */
+
+/* and another line
+ * of
+ * stars */
+
+/* a line of
+ * stars */
+
+/* and another line of
+ * stars */
+
+/* a line of stars
+*/
+
+/* and another line of
+*/
+
+/* a line of stars
+ */
+
+/* and another line of
+ */
+
+/*
+aligned in middle
+here
+ not here
+*/
+
+/*
+blank line in middle:
+
+with no leading spaces on blank line.
+*/
+
+/*
+ aligned in middle
+ here
+ not here
+*/
+
+/*
+ blank line in middle:
+
+ with no leading spaces on blank line.
+*/
+
+func _() {
+ /*
+ * line
+ * of
+ * stars
+ */
+
+ /*
+ aligned in middle
+ here
+ not here
+ */
+
+ /*
+ blank line in middle:
+
+ with no leading spaces on blank line.
+*/
+}
+
+
+// Some interesting interspersed comments.
+// See below for more common cases.
+func _(/* this */x/* is *//* an */ int) {
+}
+
+func _(/* no params - extra blank before and after comment */) {}
+func _(a, b int /* params - no extra blank after comment */) {}
+
+func _() { f(/* no args - extra blank before and after comment */) }
+func _() { f(a, b /* args - no extra blank after comment */) }
+
+func _() {
+ f(/* no args - extra blank before and after comment */)
+ f(a, b /* args - no extra blank after comment */)
+}
+
+func (/* comment1 */ T /* comment2 */) _() {}
+
+func _() { /* "short-ish one-line functions with comments are formatted as multi-line functions */ }
+func _() { x := 0; /* comment */ y = x /* comment */ }
+
+func _() {
+ _ = 0
+ /* closing curly brace should be on new line */ }
+
+func _() {
+ _ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
+}
+
+// Test cases from issue 1542:
+// Comments must not be placed before commas and cause invalid programs.
+func _() {
+ var a = []int{1, 2, /*jasldf*/
+ }
+ _ = a
+}
+
+func _() {
+ var a = []int{1, 2, /*jasldf
+ */
+ }
+ _ = a
+}
+
+func _() {
+ var a = []int{1, 2, // jasldf
+ }
+ _ = a
+}
+
+// Comments immediately adjacent to punctuation followed by a newline
+// remain after the punctuation (looks better and permits alignment of
+// comments).
+func _() {
+ _ = T{
+ 1, // comment after comma
+ 2, /* comment after comma */
+ 3 , // comment after comma
+ }
+ _ = T{
+ 1 ,// comment after comma
+ 2 ,/* comment after comma */
+ 3,// comment after comma
+ }
+ _ = T{
+ /* comment before literal */1,
+ 2/* comment before comma - ok to move after comma */,
+ 3 /* comment before comma - ok to move after comma */ ,
+ }
+
+ for
+ i=0;// comment after semicolon
+ i<9;/* comment after semicolon */
+ i++{// comment after opening curly brace
+ }
+
+ // TODO(gri) the last comment in this example should be aligned */
+ for
+ i=0;// comment after semicolon
+ i<9/* comment before semicolon - ok to move after semicolon */;
+ i++ /* comment before opening curly brace */ {
+ }
+}
+
+// If there is no newline following punctuation, commas move before the punctuation.
+// This way, commas interspersed in lists stay with the respective expression.
+func f(x/* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
+ f(x /* comment */, y)
+ f(x /* comment */,
+ y)
+ f(
+ x /* comment */,
+ )
+}
+
+func g(
+ x int /* comment */,
+) {}
+
+type _ struct {
+ a, b /* comment */, c int
+}
+
+type _ struct { a, b /* comment */, c int }
+
+func _() {
+ for a /* comment */, b := range x {
+ }
+}
+
+// Print line directives correctly.
+
+// The following is a legal line directive.
+//line foo:1
+func _() {
+ _ = 0
+// The following is a legal line directive. It must not be indented:
+//line foo:2
+ _ = 1
+
+// The following is not a legal line directive (it doesn't start in column 1):
+ //line foo:2
+ _ = 2
+
+// The following is not a legal line directive (negative line number):
+//line foo:-3
+ _ = 3
+}
+
+// Line comments with tabs
+func _() {
+var finput *bufio.Reader // input file
+var stderr *bufio.Writer
+var ftable *bufio.Writer // y.go file
+var foutput *bufio.Writer // y.output file
+
+var oflag string // -o [y.go] - y.go file
+var vflag string // -v [y.output] - y.output file
+var lflag bool // -l - disable line directives
+}
+
+// Trailing white space in comments should be trimmed
+func _() {
+// This comment has 4 blanks following that should be trimmed:
+/* Each line of this comment has blanks or tabs following that should be trimmed:
+ line 2:
+ line 3:
+*/
+}
+
+/* This comment is the last entry in this file. It must be printed and should be followed by a newline */
diff --git a/src/go/printer/testdata/comments.x b/src/go/printer/testdata/comments.x
new file mode 100644
index 000000000..ae7729286
--- /dev/null
+++ b/src/go/printer/testdata/comments.x
@@ -0,0 +1,56 @@
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+// The SZ struct; it is empty.
+type SZ struct{}
+
+// The S0 struct; no field is exported.
+type S0 struct {
+ // contains filtered or unexported fields
+}
+
+// The S1 struct; some fields are not exported.
+type S1 struct {
+ S0
+ A, B, C float // 3 exported fields
+ D int // 2 unexported fields
+ // contains filtered or unexported fields
+}
+
+// The S2 struct; all fields are exported.
+type S2 struct {
+ S1
+ A, B, C float // 3 exported fields
+}
+
+// The IZ interface; it is empty.
+type SZ interface{}
+
+// The I0 interface; no method is exported.
+type I0 interface {
+ // contains filtered or unexported methods
+}
+
+// The I1 interface; some methods are not exported.
+type I1 interface {
+ I0
+ F(x float) float // exported methods
+ // contains filtered or unexported methods
+}
+
+// The I2 interface; all methods are exported.
+type I2 interface {
+ I0
+ F(x float) float // exported method
+ G(x float) float // exported method
+}
+
+// The S3 struct; all comments except for the last one must appear in the export.
+type S3 struct {
+ // lead comment for F1
+ F1 int // line comment for F1
+ // lead comment for F2
+ F2 int // line comment for F2
+ // contains filtered or unexported fields
+}
diff --git a/src/go/printer/testdata/comments2.golden b/src/go/printer/testdata/comments2.golden
new file mode 100644
index 000000000..7676a26c1
--- /dev/null
+++ b/src/go/printer/testdata/comments2.golden
@@ -0,0 +1,105 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+// Test cases for idempotent comment formatting (was issue 1835).
+/*
+c1a
+*/
+/*
+ c1b
+*/
+/* foo
+c1c
+*/
+/* foo
+ c1d
+*/
+/*
+c1e
+foo */
+/*
+ c1f
+ foo */
+
+func f() {
+ /*
+ c2a
+ */
+ /*
+ c2b
+ */
+ /* foo
+ c2c
+ */
+ /* foo
+ c2d
+ */
+ /*
+ c2e
+ foo */
+ /*
+ c2f
+ foo */
+}
+
+func g() {
+ /*
+ c3a
+ */
+ /*
+ c3b
+ */
+ /* foo
+ c3c
+ */
+ /* foo
+ c3d
+ */
+ /*
+ c3e
+ foo */
+ /*
+ c3f
+ foo */
+}
+
+// Test case taken literally from issue 1835.
+func main() {
+ /*
+ prints test 5 times
+ */
+ for i := 0; i < 5; i++ {
+ println("test")
+ }
+}
+
+func issue5623() {
+L:
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
+
+ _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+
+LLLLLLL:
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+
+LL:
+LLLLL:
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
+ _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
+
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+
+ // test case from issue
+label:
+ mask := uint64(1)<<c - 1 // Allocation mask
+ used := atomic.LoadUint64(&h.used) // Current allocations
+}
diff --git a/src/go/printer/testdata/comments2.input b/src/go/printer/testdata/comments2.input
new file mode 100644
index 000000000..4a055c827
--- /dev/null
+++ b/src/go/printer/testdata/comments2.input
@@ -0,0 +1,105 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+// Test cases for idempotent comment formatting (was issue 1835).
+/*
+c1a
+*/
+/*
+ c1b
+*/
+/* foo
+c1c
+*/
+/* foo
+ c1d
+*/
+/*
+c1e
+foo */
+/*
+ c1f
+ foo */
+
+func f() {
+/*
+c2a
+*/
+/*
+ c2b
+*/
+/* foo
+c2c
+*/
+/* foo
+ c2d
+*/
+/*
+c2e
+foo */
+/*
+ c2f
+ foo */
+}
+
+func g() {
+/*
+c3a
+*/
+/*
+ c3b
+*/
+/* foo
+c3c
+*/
+/* foo
+ c3d
+*/
+/*
+c3e
+foo */
+/*
+ c3f
+ foo */
+}
+
+// Test case taken literally from issue 1835.
+func main() {
+/*
+prints test 5 times
+*/
+ for i := 0; i < 5; i++ {
+ println("test")
+ }
+}
+
+func issue5623() {
+L:
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
+
+ _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+
+LLLLLLL:
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+
+LL:
+LLLLL:
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
+ _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
+
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+
+// test case from issue
+label:
+ mask := uint64(1)<<c - 1 // Allocation mask
+ used := atomic.LoadUint64(&h.used) // Current allocations
+}
diff --git a/src/go/printer/testdata/declarations.golden b/src/go/printer/testdata/declarations.golden
new file mode 100644
index 000000000..a27f21fc8
--- /dev/null
+++ b/src/go/printer/testdata/declarations.golden
@@ -0,0 +1,955 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import "io"
+
+import (
+ _ "io"
+)
+
+import _ "io"
+
+import (
+ "io"
+ "io"
+ "io"
+)
+
+import (
+ "io"
+ aLongRename "io"
+
+ b "io"
+)
+
+import (
+ "unrenamed"
+ renamed "renameMe"
+ . "io"
+ _ "io"
+ "io"
+ . "os"
+)
+
+// no newlines between consecutive single imports, but
+// respect extra line breaks in the source (at most one empty line)
+import _ "io"
+import _ "io"
+import _ "io"
+
+import _ "os"
+import _ "os"
+import _ "os"
+
+import _ "fmt"
+import _ "fmt"
+import _ "fmt"
+
+import "foo" // a comment
+import "bar" // a comment
+
+import (
+ _ "foo"
+ // a comment
+ "bar"
+ "foo" // a comment
+ "bar" // a comment
+)
+
+// comments + renames
+import (
+ "unrenamed" // a comment
+ renamed "renameMe"
+ . "io" /* a comment */
+ _ "io/ioutil" // a comment
+ "io" // testing alignment
+ . "os"
+ // a comment
+)
+
+// a case that caused problems in the past (comment placement)
+import (
+ . "fmt"
+ "io"
+ "malloc" // for the malloc count test only
+ "math"
+ "strings"
+ "testing"
+)
+
+// more import examples
+import (
+ "xxx"
+ "much_longer_name" // comment
+ "short_name" // comment
+)
+
+import (
+ _ "xxx"
+ "much_longer_name" // comment
+)
+
+import (
+ mymath "math"
+ "/foo/bar/long_package_path" // a comment
+)
+
+import (
+ "package_a" // comment
+ "package_b"
+ my_better_c "package_c" // comment
+ "package_d" // comment
+ my_e "package_e" // comment
+
+ "package_a" // comment
+ "package_bb"
+ "package_ccc" // comment
+ "package_dddd" // comment
+)
+
+// at least one empty line between declarations of different kind
+import _ "io"
+
+var _ int
+
+// at least one empty line between declarations of the same kind
+// if there is associated documentation (was issue 2570)
+type T1 struct{}
+
+// T2 comment
+type T2 struct {
+} // should be a two-line struct
+
+// T3 comment
+type T2 struct {
+} // should be a two-line struct
+
+// printing of constant literals
+const (
+ _ = "foobar"
+ _ = "a۰۱۸"
+ _ = "foo६४"
+ _ = "bar9876"
+ _ = 0
+ _ = 1
+ _ = 123456789012345678890
+ _ = 01234567
+ _ = 0xcafebabe
+ _ = 0.
+ _ = .0
+ _ = 3.14159265
+ _ = 1e0
+ _ = 1e+100
+ _ = 1e-100
+ _ = 2.71828e-1000
+ _ = 0i
+ _ = 1i
+ _ = 012345678901234567889i
+ _ = 123456789012345678890i
+ _ = 0.i
+ _ = .0i
+ _ = 3.14159265i
+ _ = 1e0i
+ _ = 1e+100i
+ _ = 1e-100i
+ _ = 2.71828e-1000i
+ _ = 'a'
+ _ = '\000'
+ _ = '\xFF'
+ _ = '\uff16'
+ _ = '\U0000ff16'
+ _ = `foobar`
+ _ = `foo
+---
+---
+bar`
+)
+
+func _() {
+ type _ int
+ type _ *int
+ type _ []int
+ type _ map[string]int
+ type _ chan int
+ type _ func() int
+
+ var _ int
+ var _ *int
+ var _ []int
+ var _ map[string]int
+ var _ chan int
+ var _ func() int
+
+ type _ struct{}
+ type _ *struct{}
+ type _ []struct{}
+ type _ map[string]struct{}
+ type _ chan struct{}
+ type _ func() struct{}
+
+ type _ interface{}
+ type _ *interface{}
+ type _ []interface{}
+ type _ map[string]interface{}
+ type _ chan interface{}
+ type _ func() interface{}
+
+ var _ struct{}
+ var _ *struct{}
+ var _ []struct{}
+ var _ map[string]struct{}
+ var _ chan struct{}
+ var _ func() struct{}
+
+ var _ interface{}
+ var _ *interface{}
+ var _ []interface{}
+ var _ map[string]interface{}
+ var _ chan interface{}
+ var _ func() interface{}
+}
+
+// don't lose blank lines in grouped declarations
+const (
+ _ int = 0
+ _ float = 1
+
+ _ string = "foo"
+
+ _ = iota
+ _
+
+ // a comment
+ _
+
+ _
+)
+
+type (
+ _ int
+ _ struct{}
+
+ _ interface{}
+
+ // a comment
+ _ map[string]int
+)
+
+var (
+ _ int = 0
+ _ float = 1
+
+ _ string = "foo"
+
+ _ bool
+
+ // a comment
+ _ bool
+)
+
+// don't lose blank lines in this struct
+type _ struct {
+ String struct {
+ Str, Len int
+ }
+ Slice struct {
+ Array, Len, Cap int
+ }
+ Eface struct {
+ Typ, Ptr int
+ }
+
+ UncommonType struct {
+ Name, PkgPath int
+ }
+ CommonType struct {
+ Size, Hash, Alg, Align, FieldAlign, String, UncommonType int
+ }
+ Type struct {
+ Typ, Ptr int
+ }
+ StructField struct {
+ Name, PkgPath, Typ, Tag, Offset int
+ }
+ StructType struct {
+ Fields int
+ }
+ PtrType struct {
+ Elem int
+ }
+ SliceType struct {
+ Elem int
+ }
+ ArrayType struct {
+ Elem, Len int
+ }
+
+ Stktop struct {
+ Stackguard, Stackbase, Gobuf int
+ }
+ Gobuf struct {
+ Sp, Pc, G int
+ }
+ G struct {
+ Stackbase, Sched, Status, Alllink int
+ }
+}
+
+// no blank lines in empty structs and interfaces, but leave 1- or 2-line layout alone
+type _ struct{}
+type _ struct {
+}
+
+type _ interface{}
+type _ interface {
+}
+
+// no tabs for single or ungrouped decls
+func _() {
+ const xxxxxx = 0
+ type x int
+ var xxx int
+ var yyyy float = 3.14
+ var zzzzz = "bar"
+
+ const (
+ xxxxxx = 0
+ )
+ type (
+ x int
+ )
+ var (
+ xxx int
+ )
+ var (
+ yyyy float = 3.14
+ )
+ var (
+ zzzzz = "bar"
+ )
+}
+
+// tabs for multiple or grouped decls
+func _() {
+ // no entry has a type
+ const (
+ zzzzzz = 1
+ z = 2
+ zzz = 3
+ )
+ // some entries have a type
+ const (
+ xxxxxx = 1
+ x = 2
+ xxx = 3
+ yyyyyyyy float = iota
+ yyyy = "bar"
+ yyy
+ yy = 2
+ )
+}
+
+func _() {
+ // no entry has a type
+ var (
+ zzzzzz = 1
+ z = 2
+ zzz = 3
+ )
+ // no entry has a value
+ var (
+ _ int
+ _ float
+ _ string
+
+ _ int // comment
+ _ float // comment
+ _ string // comment
+ )
+ // some entries have a type
+ var (
+ xxxxxx int
+ x float
+ xxx string
+ yyyyyyyy int = 1234
+ y float = 3.14
+ yyyy = "bar"
+ yyy string = "foo"
+ )
+ // mixed entries - all comments should be aligned
+ var (
+ a, b, c int
+ x = 10
+ d int // comment
+ y = 20 // comment
+ f, ff, fff, ffff int = 0, 1, 2, 3 // comment
+ )
+ // respect original line breaks
+ var _ = []T{
+ T{0x20, "Telugu"},
+ }
+ var _ = []T{
+ // respect original line breaks
+ T{0x20, "Telugu"},
+ }
+}
+
+// use the formatted output rather than the input to decide when to align
+// (was issue 4505)
+const (
+ short = 2 * (1 + 2)
+ aMuchLongerName = 3
+)
+
+var (
+ short = X{}
+ aMuchLongerName = X{}
+
+ x1 = X{} // foo
+ x2 = X{} // foo
+)
+
+func _() {
+ type (
+ xxxxxx int
+ x float
+ xxx string
+ xxxxx []x
+ xx struct{}
+ xxxxxxx struct {
+ _, _ int
+ _ float
+ }
+ xxxx chan<- string
+ )
+}
+
+// alignment of "=" in consecutive lines (extended example from issue 1414)
+const (
+ umax uint = ^uint(0) // maximum value for a uint
+ bpu = 1 << (5 + umax>>63) // bits per uint
+ foo
+ bar = -1
+)
+
+// typical enum
+const (
+ a MyType = iota
+ abcd
+ b
+ c
+ def
+)
+
+// excerpt from godoc.go
+var (
+ goroot = flag.String("goroot", runtime.GOROOT(), "Go root directory")
+ testDir = flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
+ pkgPath = flag.String("path", "", "additional package directories (colon-separated)")
+ filter = flag.String("filter", "", "filter file containing permitted package directory paths")
+ filterMin = flag.Int("filter_minutes", 0, "filter file update interval in minutes; disabled if <= 0")
+ filterDelay delayTime // actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
+)
+
+// formatting of structs
+type _ struct{}
+
+type _ struct { /* this comment should be visible */
+}
+
+type _ struct {
+ // this comment should be visible and properly indented
+}
+
+type _ struct { // this comment must not change indentation
+ f int
+ f, ff, fff, ffff int
+}
+
+type _ struct {
+ string
+}
+
+type _ struct {
+ string // comment
+}
+
+type _ struct {
+ string "tag"
+}
+
+type _ struct {
+ string "tag" // comment
+}
+
+type _ struct {
+ f int
+}
+
+type _ struct {
+ f int // comment
+}
+
+type _ struct {
+ f int "tag"
+}
+
+type _ struct {
+ f int "tag" // comment
+}
+
+type _ struct {
+ bool
+ a, b, c int
+ int "tag"
+ ES // comment
+ float "tag" // comment
+ f int // comment
+ f, ff, fff, ffff int // comment
+ g float "tag"
+ h float "tag" // comment
+}
+
+type _ struct {
+ a, b,
+ c, d int // this line should be indented
+ u, v, w, x float // this line should be indented
+ p, q,
+ r, s float // this line should be indented
+}
+
+// difficult cases
+type _ struct {
+ bool // comment
+ text []byte // comment
+}
+
+// formatting of interfaces
+type EI interface{}
+
+type _ interface {
+ EI
+}
+
+type _ interface {
+ f()
+ fffff()
+}
+
+type _ interface {
+ EI
+ f()
+ fffffg()
+}
+
+type _ interface { // this comment must not change indentation
+ EI // here's a comment
+ f() // no blank between identifier and ()
+ fffff() // no blank between identifier and ()
+ gggggggggggg(x, y, z int) // hurray
+}
+
+// formatting of variable declarations
+func _() {
+ type day struct {
+ n int
+ short, long string
+ }
+ var (
+ Sunday = day{0, "SUN", "Sunday"}
+ Monday = day{1, "MON", "Monday"}
+ Tuesday = day{2, "TUE", "Tuesday"}
+ Wednesday = day{3, "WED", "Wednesday"}
+ Thursday = day{4, "THU", "Thursday"}
+ Friday = day{5, "FRI", "Friday"}
+ Saturday = day{6, "SAT", "Saturday"}
+ )
+}
+
+// formatting of multi-line variable declarations
+var a1, b1, c1 int // all on one line
+
+var a2, b2,
+ c2 int // this line should be indented
+
+var (
+ a3, b3,
+ c3, d3 int // this line should be indented
+ a4, b4, c4 int // this line should be indented
+)
+
+// Test case from issue 3304: multi-line declarations must end
+// a formatting section and not influence indentation of the
+// next line.
+var (
+ minRefreshTimeSec = flag.Int64("min_refresh_time_sec", 604800,
+ "minimum time window between two refreshes for a given user.")
+ x = flag.Int64("refresh_user_rollout_percent", 100,
+ "temporary flag to ramp up the refresh user rpc")
+ aVeryLongVariableName = stats.GetVarInt("refresh-user-count")
+)
+
+func _() {
+ var privateKey2 = &Block{Type: "RSA PRIVATE KEY",
+ Headers: map[string]string{},
+ Bytes: []uint8{0x30, 0x82, 0x1, 0x3a, 0x2, 0x1, 0x0, 0x2,
+ 0x41, 0x0, 0xb2, 0x99, 0xf, 0x49, 0xc4, 0x7d, 0xfa, 0x8c,
+ 0xd4, 0x0, 0xae, 0x6a, 0x4d, 0x1b, 0x8a, 0x3b, 0x6a, 0x13,
+ 0x64, 0x2b, 0x23, 0xf2, 0x8b, 0x0, 0x3b, 0xfb, 0x97, 0x79,
+ },
+ }
+}
+
+func _() {
+ var Universe = Scope{
+ Names: map[string]*Ident{
+ // basic types
+ "bool": nil,
+ "byte": nil,
+ "int8": nil,
+ "int16": nil,
+ "int32": nil,
+ "int64": nil,
+ "uint8": nil,
+ "uint16": nil,
+ "uint32": nil,
+ "uint64": nil,
+ "float32": nil,
+ "float64": nil,
+ "string": nil,
+
+ // convenience types
+ "int": nil,
+ "uint": nil,
+ "uintptr": nil,
+ "float": nil,
+
+ // constants
+ "false": nil,
+ "true": nil,
+ "iota": nil,
+ "nil": nil,
+
+ // functions
+ "cap": nil,
+ "len": nil,
+ "new": nil,
+ "make": nil,
+ "panic": nil,
+ "panicln": nil,
+ "print": nil,
+ "println": nil,
+ },
+ }
+}
+
+// alignment of map composite entries
+var _ = map[int]int{
+ // small key sizes: always align even if size ratios are large
+ a: a,
+ abcdefghabcdefgh: a,
+ ab: a,
+ abc: a,
+ abcdefgabcdefg: a,
+ abcd: a,
+ abcde: a,
+ abcdef: a,
+
+ // mixed key sizes: align when key sizes change within accepted ratio
+ abcdefgh: a,
+ abcdefghabcdefg: a,
+ abcdefghij: a,
+ abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // outlier - do not align with previous line
+ abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // align with previous line
+
+ ab: a, // do not align with previous line
+ abcde: a, // align with previous line
+}
+
+// alignment of map composite entries: test cases from issue 3965
+// aligned
+var _ = T1{
+ a: x,
+ b: y,
+ cccccccccccccccccccc: z,
+}
+
+// not aligned
+var _ = T2{
+ a: x,
+ b: y,
+ ccccccccccccccccccccc: z,
+}
+
+// aligned
+var _ = T3{
+ aaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
+// not aligned
+var _ = T4{
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
+func _() {
+ var _ = T{
+ a, // must introduce trailing comma
+ }
+}
+
+// formatting of function results
+func _() func() {}
+func _() func(int) { return nil }
+func _() func(int) int { return nil }
+func _() func(int) func(int) func() { return nil }
+
+// formatting of consecutive single-line functions
+func _() {}
+func _() {}
+func _() {}
+
+func _() {} // an empty line before this function
+func _() {}
+func _() {}
+
+func _() { f(1, 2, 3) }
+func _(x int) int { y := x; return y + 1 }
+func _() int { type T struct{}; var x T; return x }
+
+// these must remain multi-line since they are multi-line in the source
+func _() {
+ f(1, 2, 3)
+}
+func _(x int) int {
+ y := x
+ return y + 1
+}
+func _() int {
+ type T struct{}
+ var x T
+ return x
+}
+
+// making function declarations safe for new semicolon rules
+func _() { /* single-line function because of "short-ish" comment */ }
+func _() { /* multi-line function because of "long-ish" comment - much more comment text is following here */ /* and more */
+}
+
+func _() {
+ /* multi-line func because block is on multiple lines */
+}
+
+// ellipsis parameters
+func _(...int)
+func _(...*int)
+func _(...[]int)
+func _(...struct{})
+func _(bool, ...interface{})
+func _(bool, ...func())
+func _(bool, ...func(...int))
+func _(bool, ...map[string]int)
+func _(bool, ...chan int)
+
+func _(b bool, x ...int)
+func _(b bool, x ...*int)
+func _(b bool, x ...[]int)
+func _(b bool, x ...struct{})
+func _(x ...interface{})
+func _(x ...func())
+func _(x ...func(...int))
+func _(x ...map[string]int)
+func _(x ...chan int)
+
+// these parameter lists must remain multi-line since they are multi-line in the source
+func _(bool,
+ int) {
+}
+func _(x bool,
+ y int) {
+}
+func _(x,
+ y bool) {
+}
+func _(bool, // comment
+ int) {
+}
+func _(x bool, // comment
+ y int) {
+}
+func _(x, // comment
+ y bool) {
+}
+func _(bool, // comment
+ // comment
+ int) {
+}
+func _(x bool, // comment
+ // comment
+ y int) {
+}
+func _(x, // comment
+ // comment
+ y bool) {
+}
+func _(bool,
+ // comment
+ int) {
+}
+func _(x bool,
+ // comment
+ y int) {
+}
+func _(x,
+ // comment
+ y bool) {
+}
+func _(x, // comment
+ y, // comment
+ z bool) {
+}
+func _(x, // comment
+ y, // comment
+ z bool) {
+}
+func _(x int, // comment
+ y float, // comment
+ z bool) {
+}
+
+// properly indent multi-line signatures
+func ManageStatus(in <-chan *Status, req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int) {
+}
+
+func MultiLineSignature0(
+ a, b, c int,
+) {
+}
+
+func MultiLineSignature1(
+ a, b, c int,
+ u, v, w float,
+) {
+}
+
+func MultiLineSignature2(
+ a, b,
+ c int,
+) {
+}
+
+func MultiLineSignature3(
+ a, b,
+ c int, u, v,
+ w float,
+ x ...int) {
+}
+
+func MultiLineSignature4(
+ a, b, c int,
+ u, v,
+ w float,
+ x ...int) {
+}
+
+func MultiLineSignature5(
+ a, b, c int,
+ u, v, w float,
+ p, q,
+ r string,
+ x ...int) {
+}
+
+// make sure it also works for methods in interfaces
+type _ interface {
+ MultiLineSignature0(
+ a, b, c int,
+ )
+
+ MultiLineSignature1(
+ a, b, c int,
+ u, v, w float,
+ )
+
+ MultiLineSignature2(
+ a, b,
+ c int,
+ )
+
+ MultiLineSignature3(
+ a, b,
+ c int, u, v,
+ w float,
+ x ...int)
+
+ MultiLineSignature4(
+ a, b, c int,
+ u, v,
+ w float,
+ x ...int)
+
+ MultiLineSignature5(
+ a, b, c int,
+ u, v, w float,
+ p, q,
+ r string,
+ x ...int)
+}
+
+// omit superfluous parentheses in parameter lists
+func _(int)
+func _(int)
+func _(x int)
+func _(x int)
+func _(x, y int)
+func _(x, y int)
+
+func _() int
+func _() int
+func _() int
+
+func _() (x int)
+func _() (x int)
+func _() (x int)
+
+// special cases: some channel types require parentheses
+func _(x chan (<-chan int))
+func _(x chan (<-chan int))
+func _(x chan (<-chan int))
+
+func _(x chan<- (chan int))
+func _(x chan<- (chan int))
+func _(x chan<- (chan int))
+
+// don't introduce comma after last parameter if the closing ) is on the same line
+// even if the parameter type itself is multi-line (test cases from issue 4533)
+func _(...interface{})
+func _(...interface {
+ m()
+ n()
+}) // no extra comma between } and )
+
+func (t *T) _(...interface{})
+func (t *T) _(...interface {
+ m()
+ n()
+}) // no extra comma between } and )
+
+func _(interface{})
+func _(interface {
+ m()
+}) // no extra comma between } and )
+
+func _(struct{})
+func _(struct {
+ x int
+ y int
+}) // no extra comma between } and )
diff --git a/src/go/printer/testdata/declarations.input b/src/go/printer/testdata/declarations.input
new file mode 100644
index 000000000..d9951d386
--- /dev/null
+++ b/src/go/printer/testdata/declarations.input
@@ -0,0 +1,967 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import "io"
+
+import (
+ _ "io"
+)
+
+import _ "io"
+
+import (
+ "io"
+ "io"
+ "io"
+)
+
+import (
+ "io"
+ aLongRename "io"
+
+ b "io"
+)
+
+import (
+ "unrenamed"
+ renamed "renameMe"
+ . "io"
+ _ "io"
+ "io"
+ . "os"
+)
+
+// no newlines between consecutive single imports, but
+// respect extra line breaks in the source (at most one empty line)
+import _ "io"
+import _ "io"
+import _ "io"
+
+import _ "os"
+import _ "os"
+import _ "os"
+
+
+import _ "fmt"
+import _ "fmt"
+import _ "fmt"
+
+import "foo" // a comment
+import "bar" // a comment
+
+import (
+ _ "foo"
+ // a comment
+ "bar"
+ "foo" // a comment
+ "bar" // a comment
+)
+
+// comments + renames
+import (
+ "unrenamed" // a comment
+ renamed "renameMe"
+ . "io" /* a comment */
+ _ "io/ioutil" // a comment
+ "io" // testing alignment
+ . "os"
+ // a comment
+)
+
+// a case that caused problems in the past (comment placement)
+import (
+ . "fmt"
+ "io"
+ "malloc" // for the malloc count test only
+ "math"
+ "strings"
+ "testing"
+)
+
+// more import examples
+import (
+ "xxx"
+ "much_longer_name" // comment
+ "short_name" // comment
+)
+
+import (
+ _ "xxx"
+ "much_longer_name" // comment
+)
+
+import (
+ mymath "math"
+ "/foo/bar/long_package_path" // a comment
+)
+
+import (
+ "package_a" // comment
+ "package_b"
+ my_better_c "package_c" // comment
+ "package_d" // comment
+ my_e "package_e" // comment
+
+ "package_a" // comment
+ "package_bb"
+ "package_ccc" // comment
+ "package_dddd" // comment
+)
+
+// at least one empty line between declarations of different kind
+import _ "io"
+var _ int
+
+// at least one empty line between declarations of the same kind
+// if there is associated documentation (was issue 2570)
+type T1 struct{}
+// T2 comment
+type T2 struct {
+} // should be a two-line struct
+
+
+// T3 comment
+type T2 struct {
+
+
+} // should be a two-line struct
+
+
+// printing of constant literals
+const (
+ _ = "foobar"
+ _ = "a۰۱۸"
+ _ = "foo६४"
+ _ = "bar9876"
+ _ = 0
+ _ = 1
+ _ = 123456789012345678890
+ _ = 01234567
+ _ = 0xcafebabe
+ _ = 0.
+ _ = .0
+ _ = 3.14159265
+ _ = 1e0
+ _ = 1e+100
+ _ = 1e-100
+ _ = 2.71828e-1000
+ _ = 0i
+ _ = 1i
+ _ = 012345678901234567889i
+ _ = 123456789012345678890i
+ _ = 0.i
+ _ = .0i
+ _ = 3.14159265i
+ _ = 1e0i
+ _ = 1e+100i
+ _ = 1e-100i
+ _ = 2.71828e-1000i
+ _ = 'a'
+ _ = '\000'
+ _ = '\xFF'
+ _ = '\uff16'
+ _ = '\U0000ff16'
+ _ = `foobar`
+ _ = `foo
+---
+---
+bar`
+)
+
+
+func _() {
+ type _ int
+ type _ *int
+ type _ []int
+ type _ map[string]int
+ type _ chan int
+ type _ func() int
+
+ var _ int
+ var _ *int
+ var _ []int
+ var _ map[string]int
+ var _ chan int
+ var _ func() int
+
+ type _ struct{}
+ type _ *struct{}
+ type _ []struct{}
+ type _ map[string]struct{}
+ type _ chan struct{}
+ type _ func() struct{}
+
+ type _ interface{}
+ type _ *interface{}
+ type _ []interface{}
+ type _ map[string]interface{}
+ type _ chan interface{}
+ type _ func() interface{}
+
+ var _ struct{}
+ var _ *struct{}
+ var _ []struct{}
+ var _ map[string]struct{}
+ var _ chan struct{}
+ var _ func() struct{}
+
+ var _ interface{}
+ var _ *interface{}
+ var _ []interface{}
+ var _ map[string]interface{}
+ var _ chan interface{}
+ var _ func() interface{}
+}
+
+
+// don't lose blank lines in grouped declarations
+const (
+ _ int = 0
+ _ float = 1
+
+ _ string = "foo"
+
+ _ = iota
+ _
+
+ // a comment
+ _
+
+ _
+)
+
+
+type (
+ _ int
+ _ struct {}
+
+ _ interface{}
+
+ // a comment
+ _ map[string]int
+)
+
+
+var (
+ _ int = 0
+ _ float = 1
+
+ _ string = "foo"
+
+ _ bool
+
+ // a comment
+ _ bool
+)
+
+
+// don't lose blank lines in this struct
+type _ struct {
+ String struct {
+ Str, Len int
+ }
+ Slice struct {
+ Array, Len, Cap int
+ }
+ Eface struct {
+ Typ, Ptr int
+ }
+
+ UncommonType struct {
+ Name, PkgPath int
+ }
+ CommonType struct {
+ Size, Hash, Alg, Align, FieldAlign, String, UncommonType int
+ }
+ Type struct {
+ Typ, Ptr int
+ }
+ StructField struct {
+ Name, PkgPath, Typ, Tag, Offset int
+ }
+ StructType struct {
+ Fields int
+ }
+ PtrType struct {
+ Elem int
+ }
+ SliceType struct {
+ Elem int
+ }
+ ArrayType struct {
+ Elem, Len int
+ }
+
+ Stktop struct {
+ Stackguard, Stackbase, Gobuf int
+ }
+ Gobuf struct {
+ Sp, Pc, G int
+ }
+ G struct {
+ Stackbase, Sched, Status, Alllink int
+ }
+}
+
+
+// no blank lines in empty structs and interfaces, but leave 1- or 2-line layout alone
+type _ struct{ }
+type _ struct {
+
+}
+
+type _ interface{ }
+type _ interface {
+
+}
+
+
+// no tabs for single or ungrouped decls
+func _() {
+ const xxxxxx = 0
+ type x int
+ var xxx int
+ var yyyy float = 3.14
+ var zzzzz = "bar"
+
+ const (
+ xxxxxx = 0
+ )
+ type (
+ x int
+ )
+ var (
+ xxx int
+ )
+ var (
+ yyyy float = 3.14
+ )
+ var (
+ zzzzz = "bar"
+ )
+}
+
+// tabs for multiple or grouped decls
+func _() {
+ // no entry has a type
+ const (
+ zzzzzz = 1
+ z = 2
+ zzz = 3
+ )
+ // some entries have a type
+ const (
+ xxxxxx = 1
+ x = 2
+ xxx = 3
+ yyyyyyyy float = iota
+ yyyy = "bar"
+ yyy
+ yy = 2
+ )
+}
+
+func _() {
+ // no entry has a type
+ var (
+ zzzzzz = 1
+ z = 2
+ zzz = 3
+ )
+ // no entry has a value
+ var (
+ _ int
+ _ float
+ _ string
+
+ _ int // comment
+ _ float // comment
+ _ string // comment
+ )
+ // some entries have a type
+ var (
+ xxxxxx int
+ x float
+ xxx string
+ yyyyyyyy int = 1234
+ y float = 3.14
+ yyyy = "bar"
+ yyy string = "foo"
+ )
+ // mixed entries - all comments should be aligned
+ var (
+ a, b, c int
+ x = 10
+ d int // comment
+ y = 20 // comment
+ f, ff, fff, ffff int = 0, 1, 2, 3 // comment
+ )
+ // respect original line breaks
+ var _ = []T {
+ T{0x20, "Telugu"},
+ }
+ var _ = []T {
+ // respect original line breaks
+ T{0x20, "Telugu"},
+ }
+}
+
+// use the formatted output rather than the input to decide when to align
+// (was issue 4505)
+const (
+ short = 2 * (
+ 1 + 2)
+ aMuchLongerName = 3
+)
+
+var (
+ short = X{
+ }
+ aMuchLongerName = X{}
+
+ x1 = X{} // foo
+ x2 = X{
+ } // foo
+)
+
+func _() {
+ type (
+ xxxxxx int
+ x float
+ xxx string
+ xxxxx []x
+ xx struct{}
+ xxxxxxx struct {
+ _, _ int
+ _ float
+ }
+ xxxx chan<- string
+ )
+}
+
+// alignment of "=" in consecutive lines (extended example from issue 1414)
+const (
+ umax uint = ^uint(0) // maximum value for a uint
+ bpu = 1 << (5 + umax>>63) // bits per uint
+ foo
+ bar = -1
+)
+
+// typical enum
+const (
+ a MyType = iota
+ abcd
+ b
+ c
+ def
+)
+
+// excerpt from godoc.go
+var (
+ goroot = flag.String("goroot", runtime.GOROOT(), "Go root directory")
+ testDir = flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
+ pkgPath = flag.String("path", "", "additional package directories (colon-separated)")
+ filter = flag.String("filter", "", "filter file containing permitted package directory paths")
+ filterMin = flag.Int("filter_minutes", 0, "filter file update interval in minutes; disabled if <= 0")
+ filterDelay delayTime // actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
+)
+
+
+// formatting of structs
+type _ struct{}
+
+type _ struct{ /* this comment should be visible */ }
+
+type _ struct{
+ // this comment should be visible and properly indented
+}
+
+type _ struct { // this comment must not change indentation
+ f int
+ f, ff, fff, ffff int
+}
+
+type _ struct {
+ string
+}
+
+type _ struct {
+ string // comment
+}
+
+type _ struct {
+ string "tag"
+}
+
+type _ struct {
+ string "tag" // comment
+}
+
+type _ struct {
+ f int
+}
+
+type _ struct {
+ f int // comment
+}
+
+type _ struct {
+ f int "tag"
+}
+
+type _ struct {
+ f int "tag" // comment
+}
+
+type _ struct {
+ bool
+ a, b, c int
+ int "tag"
+ ES // comment
+ float "tag" // comment
+ f int // comment
+ f, ff, fff, ffff int // comment
+ g float "tag"
+ h float "tag" // comment
+}
+
+type _ struct { a, b,
+c, d int // this line should be indented
+u, v, w, x float // this line should be indented
+p, q,
+r, s float // this line should be indented
+}
+
+
+// difficult cases
+type _ struct {
+ bool // comment
+ text []byte // comment
+}
+
+
+// formatting of interfaces
+type EI interface{}
+
+type _ interface {
+ EI
+}
+
+type _ interface {
+ f()
+ fffff()
+}
+
+type _ interface {
+ EI
+ f()
+ fffffg()
+}
+
+type _ interface { // this comment must not change indentation
+ EI // here's a comment
+ f() // no blank between identifier and ()
+ fffff() // no blank between identifier and ()
+ gggggggggggg(x, y, z int) () // hurray
+}
+
+
+// formatting of variable declarations
+func _() {
+ type day struct { n int; short, long string }
+ var (
+ Sunday = day{ 0, "SUN", "Sunday" }
+ Monday = day{ 1, "MON", "Monday" }
+ Tuesday = day{ 2, "TUE", "Tuesday" }
+ Wednesday = day{ 3, "WED", "Wednesday" }
+ Thursday = day{ 4, "THU", "Thursday" }
+ Friday = day{ 5, "FRI", "Friday" }
+ Saturday = day{ 6, "SAT", "Saturday" }
+ )
+}
+
+
+// formatting of multi-line variable declarations
+var a1, b1, c1 int // all on one line
+
+var a2, b2,
+c2 int // this line should be indented
+
+var (a3, b3,
+c3, d3 int // this line should be indented
+a4, b4, c4 int // this line should be indented
+)
+
+// Test case from issue 3304: multi-line declarations must end
+// a formatting section and not influence indentation of the
+// next line.
+var (
+ minRefreshTimeSec = flag.Int64("min_refresh_time_sec", 604800,
+ "minimum time window between two refreshes for a given user.")
+ x = flag.Int64("refresh_user_rollout_percent", 100,
+ "temporary flag to ramp up the refresh user rpc")
+ aVeryLongVariableName = stats.GetVarInt("refresh-user-count")
+)
+
+func _() {
+ var privateKey2 = &Block{Type: "RSA PRIVATE KEY",
+ Headers: map[string]string{},
+ Bytes: []uint8{0x30, 0x82, 0x1, 0x3a, 0x2, 0x1, 0x0, 0x2,
+ 0x41, 0x0, 0xb2, 0x99, 0xf, 0x49, 0xc4, 0x7d, 0xfa, 0x8c,
+ 0xd4, 0x0, 0xae, 0x6a, 0x4d, 0x1b, 0x8a, 0x3b, 0x6a, 0x13,
+ 0x64, 0x2b, 0x23, 0xf2, 0x8b, 0x0, 0x3b, 0xfb, 0x97, 0x79,
+ },
+ }
+}
+
+
+func _() {
+ var Universe = Scope {
+ Names: map[string]*Ident {
+ // basic types
+ "bool": nil,
+ "byte": nil,
+ "int8": nil,
+ "int16": nil,
+ "int32": nil,
+ "int64": nil,
+ "uint8": nil,
+ "uint16": nil,
+ "uint32": nil,
+ "uint64": nil,
+ "float32": nil,
+ "float64": nil,
+ "string": nil,
+
+ // convenience types
+ "int": nil,
+ "uint": nil,
+ "uintptr": nil,
+ "float": nil,
+
+ // constants
+ "false": nil,
+ "true": nil,
+ "iota": nil,
+ "nil": nil,
+
+ // functions
+ "cap": nil,
+ "len": nil,
+ "new": nil,
+ "make": nil,
+ "panic": nil,
+ "panicln": nil,
+ "print": nil,
+ "println": nil,
+ },
+ }
+}
+
+
+// alignment of map composite entries
+var _ = map[int]int{
+ // small key sizes: always align even if size ratios are large
+ a: a,
+ abcdefghabcdefgh: a,
+ ab: a,
+ abc: a,
+ abcdefgabcdefg: a,
+ abcd: a,
+ abcde: a,
+ abcdef: a,
+
+ // mixed key sizes: align when key sizes change within accepted ratio
+ abcdefgh: a,
+ abcdefghabcdefg: a,
+ abcdefghij: a,
+ abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // outlier - do not align with previous line
+ abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // align with previous line
+
+ ab: a, // do not align with previous line
+ abcde: a, // align with previous line
+}
+
+// alignment of map composite entries: test cases from issue 3965
+// aligned
+var _ = T1{
+ a: x,
+ b: y,
+ cccccccccccccccccccc: z,
+}
+
+// not aligned
+var _ = T2{
+ a: x,
+ b: y,
+ ccccccccccccccccccccc: z,
+}
+
+// aligned
+var _ = T3{
+ aaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
+// not aligned
+var _ = T4{
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
+
+func _() {
+ var _ = T{
+ a, // must introduce trailing comma
+ }
+}
+
+
+// formatting of function results
+func _() func() {}
+func _() func(int) { return nil }
+func _() func(int) int { return nil }
+func _() func(int) func(int) func() { return nil }
+
+
+// formatting of consecutive single-line functions
+func _() {}
+func _() {}
+func _() {}
+
+func _() {} // an empty line before this function
+func _() {}
+func _() {}
+
+func _() { f(1, 2, 3) }
+func _(x int) int { y := x; return y+1 }
+func _() int { type T struct{}; var x T; return x }
+
+// these must remain multi-line since they are multi-line in the source
+func _() {
+ f(1, 2, 3)
+}
+func _(x int) int {
+ y := x; return y+1
+}
+func _() int {
+ type T struct{}; var x T; return x
+}
+
+
+// making function declarations safe for new semicolon rules
+func _() { /* single-line function because of "short-ish" comment */ }
+func _() { /* multi-line function because of "long-ish" comment - much more comment text is following here */ /* and more */ }
+
+func _() {
+/* multi-line func because block is on multiple lines */ }
+
+
+// ellipsis parameters
+func _(...int)
+func _(...*int)
+func _(...[]int)
+func _(...struct{})
+func _(bool, ...interface{})
+func _(bool, ...func())
+func _(bool, ...func(...int))
+func _(bool, ...map[string]int)
+func _(bool, ...chan int)
+
+func _(b bool, x ...int)
+func _(b bool, x ...*int)
+func _(b bool, x ...[]int)
+func _(b bool, x ...struct{})
+func _(x ...interface{})
+func _(x ...func())
+func _(x ...func(...int))
+func _(x ...map[string]int)
+func _(x ...chan int)
+
+
+// these parameter lists must remain multi-line since they are multi-line in the source
+func _(bool,
+int) {
+}
+func _(x bool,
+y int) {
+}
+func _(x,
+y bool) {
+}
+func _(bool, // comment
+int) {
+}
+func _(x bool, // comment
+y int) {
+}
+func _(x, // comment
+y bool) {
+}
+func _(bool, // comment
+// comment
+int) {
+}
+func _(x bool, // comment
+// comment
+y int) {
+}
+func _(x, // comment
+// comment
+y bool) {
+}
+func _(bool,
+// comment
+int) {
+}
+func _(x bool,
+// comment
+y int) {
+}
+func _(x,
+// comment
+y bool) {
+}
+func _(x, // comment
+y,// comment
+z bool) {
+}
+func _(x, // comment
+ y,// comment
+ z bool) {
+}
+func _(x int, // comment
+ y float, // comment
+ z bool) {
+}
+
+
+// properly indent multi-line signatures
+func ManageStatus(in <-chan *Status, req <-chan Request,
+stat chan<- *TargetInfo,
+TargetHistorySize int) {
+}
+
+func MultiLineSignature0(
+a, b, c int,
+) {}
+
+func MultiLineSignature1(
+a, b, c int,
+u, v, w float,
+) {}
+
+func MultiLineSignature2(
+a, b,
+c int,
+) {}
+
+func MultiLineSignature3(
+a, b,
+c int, u, v,
+w float,
+ x ...int) {}
+
+func MultiLineSignature4(
+a, b, c int,
+u, v,
+w float,
+ x ...int) {}
+
+func MultiLineSignature5(
+a, b, c int,
+u, v, w float,
+p, q,
+r string,
+ x ...int) {}
+
+// make sure it also works for methods in interfaces
+type _ interface {
+MultiLineSignature0(
+a, b, c int,
+)
+
+MultiLineSignature1(
+a, b, c int,
+u, v, w float,
+)
+
+MultiLineSignature2(
+a, b,
+c int,
+)
+
+MultiLineSignature3(
+a, b,
+c int, u, v,
+w float,
+ x ...int)
+
+MultiLineSignature4(
+a, b, c int,
+u, v,
+w float,
+ x ...int)
+
+MultiLineSignature5(
+a, b, c int,
+u, v, w float,
+p, q,
+r string,
+ x ...int)
+}
+
+// omit superfluous parentheses in parameter lists
+func _((int))
+func _((((((int))))))
+func _(x (int))
+func _(x (((((int))))))
+func _(x, y (int))
+func _(x, y (((((int))))))
+
+func _() (int)
+func _() ((int))
+func _() ((((((int))))))
+
+func _() (x int)
+func _() (x (int))
+func _() (x (((((int))))))
+
+// special cases: some channel types require parentheses
+func _(x chan(<-chan int))
+func _(x (chan(<-chan int)))
+func _(x ((((chan(<-chan int))))))
+
+func _(x chan<-(chan int))
+func _(x (chan<-(chan int)))
+func _(x ((((chan<-(chan int))))))
+
+// don't introduce comma after last parameter if the closing ) is on the same line
+// even if the parameter type itself is multi-line (test cases from issue 4533)
+func _(...interface{})
+func _(...interface {
+ m()
+ n()
+}) // no extra comma between } and )
+
+func (t *T) _(...interface{})
+func (t *T) _(...interface {
+ m()
+ n()
+}) // no extra comma between } and )
+
+func _(interface{})
+func _(interface {
+ m()
+}) // no extra comma between } and )
+
+func _(struct{})
+func _(struct {
+ x int
+ y int
+}) // no extra comma between } and )
diff --git a/src/go/printer/testdata/empty.golden b/src/go/printer/testdata/empty.golden
new file mode 100644
index 000000000..a055f4758
--- /dev/null
+++ b/src/go/printer/testdata/empty.golden
@@ -0,0 +1,5 @@
+// a comment at the beginning of the file
+
+package empty
+
+// a comment at the end of the file
diff --git a/src/go/printer/testdata/empty.input b/src/go/printer/testdata/empty.input
new file mode 100644
index 000000000..a055f4758
--- /dev/null
+++ b/src/go/printer/testdata/empty.input
@@ -0,0 +1,5 @@
+// a comment at the beginning of the file
+
+package empty
+
+// a comment at the end of the file
diff --git a/src/go/printer/testdata/expressions.golden b/src/go/printer/testdata/expressions.golden
new file mode 100644
index 000000000..fbe8275b3
--- /dev/null
+++ b/src/go/printer/testdata/expressions.golden
@@ -0,0 +1,681 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expressions
+
+type T struct {
+ x, y, z int
+}
+
+var (
+ a, b, c, d, e int
+ under_bar int
+ longIdentifier1, longIdentifier2, longIdentifier3 int
+ t0, t1, t2 T
+ s string
+ p *int
+)
+
+func _() {
+ // no spaces around simple or parenthesized expressions
+ _ = (a + 0)
+ _ = a + b
+ _ = a + b + c
+ _ = a + b - c
+ _ = a - b - c
+ _ = a + (b * c)
+ _ = a + (b / c)
+ _ = a - (b % c)
+ _ = 1 + a
+ _ = a + 1
+ _ = a + b + 1
+ _ = s[a]
+ _ = s[a:]
+ _ = s[:b]
+ _ = s[1:2]
+ _ = s[a:b]
+ _ = s[0:len(s)]
+ _ = s[0] << 1
+ _ = (s[0] << 1) & 0xf
+ _ = s[0]<<2 | s[1]>>4
+ _ = "foo" + s
+ _ = s + "foo"
+ _ = 'a' + 'b'
+ _ = len(s) / 2
+ _ = len(t0.x) / a
+
+ // spaces around expressions of different precedence or expressions containing spaces
+ _ = a + -b
+ _ = a - ^b
+ _ = a / *p
+ _ = a + b*c
+ _ = 1 + b*c
+ _ = a + 2*c
+ _ = a + c*2
+ _ = 1 + 2*3
+ _ = s[1 : 2*3]
+ _ = s[a : b-c]
+ _ = s[0:]
+ _ = s[a+b]
+ _ = s[:b-c]
+ _ = s[a+b:]
+ _ = a[a<<b+1]
+ _ = a[a<<b+1:]
+ _ = s[a+b : len(s)]
+ _ = s[len(s):-a]
+ _ = s[a : len(s)+1]
+ _ = s[a:len(s)+1] + s
+
+ // spaces around operators with equal or lower precedence than comparisons
+ _ = a == b
+ _ = a != b
+ _ = a > b
+ _ = a >= b
+ _ = a < b
+ _ = a <= b
+ _ = a < b && c > d
+ _ = a < b || c > d
+
+ // spaces around "long" operands
+ _ = a + longIdentifier1
+ _ = longIdentifier1 + a
+ _ = longIdentifier1 + longIdentifier2*longIdentifier3
+ _ = s + "a longer string"
+
+ // some selected cases
+ _ = a + t0.x
+ _ = a + t0.x + t1.x*t2.x
+ _ = a + b + c + d + e + 2*3
+ _ = a + b + c + 2*3 + d + e
+ _ = (a + b + c) * 2
+ _ = a - b + c - d + (a + b + c) + d&e
+ _ = under_bar - 1
+ _ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666)
+ _ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
+
+ // the parser does not restrict expressions that may appear as statements
+ true
+ 42
+ "foo"
+ x
+ (x)
+ a + b
+ a + b + c
+ a + (b * c)
+ a + (b / c)
+ 1 + a
+ a + 1
+ s[a]
+ x << 1
+ (s[0] << 1) & 0xf
+ "foo" + s
+ x == y
+ x < y || z > 42
+}
+
+// slice expressions with cap
+func _() {
+ _ = x[a:b:c]
+ _ = x[a:b : c+d]
+ _ = x[a : b+d : c]
+ _ = x[a : b+d : c+d]
+ _ = x[a+d : b:c]
+ _ = x[a+d : b : c+d]
+ _ = x[a+d : b+d : c]
+ _ = x[a+d : b+d : c+d]
+
+ _ = x[:b:c]
+ _ = x[:b : c+d]
+ _ = x[:b+d : c]
+ _ = x[:b+d : c+d]
+}
+
+func _() {
+ _ = a + b
+ _ = a + b + c
+ _ = a + b*c
+ _ = a + (b * c)
+ _ = (a + b) * c
+ _ = a + (b * c * d)
+ _ = a + (b*c + d)
+
+ _ = 1 << x
+ _ = -1 << x
+ _ = 1<<x - 1
+ _ = -1<<x - 1
+
+ _ = f(a + b)
+ _ = f(a + b + c)
+ _ = f(a + b*c)
+ _ = f(a + (b * c))
+ _ = f(1<<x-1, 1<<x-2)
+
+ _ = 1<<d.logWindowSize - 1
+
+ buf = make(x, 2*cap(b.buf)+n)
+
+ dst[i*3+2] = dbuf[0] << 2
+ dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
+
+ b.buf = b.buf[0 : b.off+m+n]
+ b.buf = b.buf[0 : b.off+m*n]
+ f(b.buf[0 : b.off+m+n])
+
+ signed += ' ' * 8
+ tw.octal(header[148:155], chksum)
+
+ _ = x > 0 && i >= 0
+
+ x1, x0 := x>>w2, x&m2
+ z0 = t1<<w2 + t0
+ z1 = (t1 + t0>>w2) >> w2
+ q1, r1 := x1/d1, x1%d1
+ r1 = r1*b2 | x0>>w2
+ x1 = (x1 << z) | (x0 >> (uint(w) - z))
+ x1 = x1<<z | x0>>(uint(w)-z)
+
+ _ = buf[0 : len(buf)+1]
+ _ = buf[0 : n+1]
+
+ a, b = b, a
+ a = b + c
+ a = b*c + d
+ _ = a*b + c
+ _ = a - b - c
+ _ = a - (b - c)
+ _ = a - b*c
+ _ = a - (b * c)
+ _ = a * b / c
+ _ = a / *b
+ _ = x[a|^b]
+ _ = x[a / *b]
+ _ = a & ^b
+ _ = a + +b
+ _ = a - -b
+ _ = x[a*-b]
+ _ = x[a + +b]
+ _ = x ^ y ^ z
+ _ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+ _ = len(longVariableName) * 2
+
+ _ = token(matchType + xlength<<lengthShift + xoffset)
+}
+
+func f(x int, args ...int) {
+ f(0, args...)
+ f(1, args)
+ f(2, args[0])
+
+ // make sure syntactically legal code remains syntactically legal
+ f(3, 42 ...) // a blank must remain between 42 and ...
+ f(4, 42....)
+ f(5, 42....)
+ f(6, 42.0...)
+ f(7, 42.0...)
+ f(8, .42...)
+ f(9, .42...)
+ f(10, 42e0...)
+ f(11, 42e0...)
+
+ _ = 42 .x // a blank must remain between 42 and .x
+ _ = 42..x
+ _ = 42..x
+ _ = 42.0.x
+ _ = 42.0.x
+ _ = .42.x
+ _ = .42.x
+ _ = 42e0.x
+ _ = 42e0.x
+
+ // a blank must remain between the binary operator and the 2nd operand
+ _ = x / *y
+ _ = x < -1
+ _ = x < <-1
+ _ = x + +1
+ _ = x - -1
+ _ = x & &x
+ _ = x & ^x
+
+ _ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x)
+}
+
+func _() {
+ _ = T{}
+ _ = struct{}{}
+ _ = [10]T{}
+ _ = [...]T{}
+ _ = []T{}
+ _ = map[int]T{}
+}
+
+// one-line structs/interfaces in composite literals (up to a threshold)
+func _() {
+ _ = struct{}{}
+ _ = struct{ x int }{0}
+ _ = struct{ x, y, z int }{0, 1, 2}
+ _ = struct{ int }{0}
+ _ = struct{ s struct{ int } }{struct{ int }{0}}
+}
+
+func _() {
+ // do not modify literals
+ _ = "tab1 tab2 tab3 end" // string contains 3 tabs
+ _ = "tab1 tab2 tab3 end" // same string with 3 blanks - may be unaligned because editors see tabs in strings
+ _ = "" // this comment should be aligned with the one on the previous line
+ _ = ``
+ _ = `
+`
+ _ = `foo
+ bar`
+ _ = `three spaces before the end of the line starting here:
+they must not be removed`
+}
+
+func _() {
+ // smart handling of indentation for multi-line raw strings
+ var _ = ``
+ var _ = `foo`
+ var _ = `foo
+bar`
+
+ var _ = ``
+ var _ = `foo`
+ var _ =
+ // the next line should remain indented
+ `foo
+bar`
+
+ var _ = // comment
+ ``
+ var _ = // comment
+ `foo`
+ var _ = // comment
+ // the next line should remain indented
+ `foo
+bar`
+
+ var _ = /* comment */ ``
+ var _ = /* comment */ `foo`
+ var _ = /* comment */ `foo
+bar`
+
+ var _ = /* comment */
+ ``
+ var _ = /* comment */
+ `foo`
+ var _ = /* comment */
+ // the next line should remain indented
+ `foo
+bar`
+
+ var board = []int(
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`)
+
+ var state = S{
+ "foo",
+ // the next line should remain indented
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`,
+ "bar",
+ }
+}
+
+func _() {
+ // one-line function literals (body is on a single line)
+ _ = func() {}
+ _ = func() int { return 0 }
+ _ = func(x, y int) bool { m := (x + y) / 2; return m < 0 }
+
+ // multi-line function literals (body is not on one line)
+ _ = func() {
+ }
+ _ = func() int {
+ return 0
+ }
+ _ = func(x, y int) bool {
+ m := (x + y) / 2
+ return x < y
+ }
+
+ f(func() {
+ })
+ f(func() int {
+ return 0
+ })
+ f(func(x, y int) bool {
+ m := (x + y) / 2
+ return x < y
+ })
+}
+
+func _() {
+ _ = [][]int{
+ []int{1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int{
+ {1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int{
+ {1},
+ {1, 2},
+ {1, 2, 3},
+ }
+ _ = [][]int{{1}, {1, 2}, {1, 2, 3}}
+}
+
+// various multi-line expressions
+func _() {
+ // do not add extra indentation to multi-line string lists
+ _ = "foo" + "bar"
+ _ = "foo" +
+ "bar" +
+ "bah"
+ _ = []string{
+ "abc" +
+ "def",
+ "foo" +
+ "bar",
+ }
+}
+
+const _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+const _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+const _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+func _() {
+ _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+ _ =
+ `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+ _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+}
+
+func _() {
+ // respect source lines in multi-line expressions
+ _ = a +
+ b +
+ c
+ _ = a < b ||
+ b < a
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+}
+
+// Alignment after overlong lines
+const (
+ _ = "991"
+ _ = "2432902008176640000" // 20!
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+)
+
+// Correct placement of operators and comments in multi-line expressions
+func _() {
+ _ = a + // comment
+ b + // comment
+ c
+ _ = "a" +
+ "b" + // comment
+ "c"
+ _ = "ba0408" + "7265717569726564" // field 71, encoding 2, string "required"
+}
+
+// Correct placement of terminating comma/closing parentheses in multi-line calls.
+func _() {
+ f(1,
+ 2,
+ 3)
+ f(1,
+ 2,
+ 3,
+ )
+ f(1,
+ 2,
+ 3) // comment
+ f(1,
+ 2,
+ 3, // comment
+ )
+ f(1,
+ 2,
+ 3) // comment
+ f(1,
+ 2,
+ 3, // comment
+ )
+}
+
+// Align comments in multi-line lists of single-line expressions.
+var txpix = [NCOL]draw.Color{
+ draw.Yellow, // yellow
+ draw.Cyan, // cyan
+ draw.Green, // lime green
+ draw.GreyBlue, // slate
+ draw.Red, /* red */
+ draw.GreyGreen, /* olive green */
+ draw.Blue, /* blue */
+ draw.Color(0xFF55AAFF), /* pink */
+ draw.Color(0xFFAAFFFF), /* lavender */
+ draw.Color(0xBB005DFF), /* maroon */
+}
+
+func same(t, u *Time) bool {
+ // respect source lines in multi-line expressions
+ return t.Year == u.Year &&
+ t.Month == u.Month &&
+ t.Day == u.Day &&
+ t.Hour == u.Hour &&
+ t.Minute == u.Minute &&
+ t.Second == u.Second &&
+ t.Weekday == u.Weekday &&
+ t.ZoneOffset == u.ZoneOffset &&
+ t.Zone == u.Zone
+}
+
+func (p *parser) charClass() {
+ // respect source lines in multi-line expressions
+ if cc.negate && len(cc.ranges) == 2 &&
+ cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
+ nl := new(_NotNl)
+ p.re.add(nl)
+ }
+}
+
+func addState(s []state, inst instr, match []int) {
+ // handle comments correctly in multi-line expressions
+ for i := 0; i < l; i++ {
+ if s[i].inst.index() == index && // same instruction
+ s[i].match[0] < pos { // earlier match already going; leftmost wins
+ return s
+ }
+ }
+}
+
+func (self *T) foo(x int) *T { return self }
+
+func _() { module.Func1().Func2() }
+
+func _() {
+ _ = new(T).
+ foo(1).
+ foo(2).
+ foo(3)
+
+ _ = new(T).
+ foo(1).
+ foo(2). // inline comments
+ foo(3)
+
+ _ = new(T).foo(1).foo(2).foo(3)
+
+ // handle multiline argument list correctly
+ _ = new(T).
+ foo(
+ 1).
+ foo(2)
+
+ _ = new(T).foo(
+ 1).foo(2)
+
+ _ = Array[3+
+ 4]
+
+ _ = Method(1, 2,
+ 3)
+
+ _ = new(T).
+ foo().
+ bar().(*Type)
+
+ _ = new(T).
+ foo().
+ bar().(*Type).
+ baz()
+
+ _ = new(T).
+ foo().
+ bar()["idx"]
+
+ _ = new(T).
+ foo().
+ bar()["idx"].
+ baz()
+
+ _ = new(T).
+ foo().
+ bar()[1:2]
+
+ _ = new(T).
+ foo().
+ bar()[1:2].
+ baz()
+
+ _ = new(T).
+ Field.
+ Array[3+
+ 4].
+ Table["foo"].
+ Blob.(*Type).
+ Slices[1:4].
+ Method(1, 2,
+ 3).
+ Thingy
+
+ _ = a.b.c
+ _ = a.
+ b.
+ c
+ _ = a.b().c
+ _ = a.
+ b().
+ c
+ _ = a.b[0].c
+ _ = a.
+ b[0].
+ c
+ _ = a.b[0:].c
+ _ = a.
+ b[0:].
+ c
+ _ = a.b.(T).c
+ _ = a.
+ b.(T).
+ c
+}
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC, 0666); err != nil {
+ log.Fatal(err)
+ }
+}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+ _ = append(s, a...)
+ _ = append(
+ s, a...)
+ _ = append(s,
+ a...)
+ _ = append(
+ s,
+ a...)
+ _ = append(s, a...,
+ )
+ _ = append(s,
+ a...,
+ )
+ _ = append(
+ s,
+ a...,
+ )
+}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+ // these conversions should be rewritten to look
+ // the same as the parenthesized conversions below
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+}
diff --git a/src/go/printer/testdata/expressions.input b/src/go/printer/testdata/expressions.input
new file mode 100644
index 000000000..f4d20fa0f
--- /dev/null
+++ b/src/go/printer/testdata/expressions.input
@@ -0,0 +1,710 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expressions
+
+type T struct {
+ x, y, z int
+}
+
+var (
+ a, b, c, d, e int
+ under_bar int
+ longIdentifier1, longIdentifier2, longIdentifier3 int
+ t0, t1, t2 T
+ s string
+ p *int
+)
+
+
+func _() {
+ // no spaces around simple or parenthesized expressions
+ _ = (a+0)
+ _ = a+b
+ _ = a+b+c
+ _ = a+b-c
+ _ = a-b-c
+ _ = a+(b*c)
+ _ = a+(b/c)
+ _ = a-(b%c)
+ _ = 1+a
+ _ = a+1
+ _ = a+b+1
+ _ = s[a]
+ _ = s[a:]
+ _ = s[:b]
+ _ = s[1:2]
+ _ = s[a:b]
+ _ = s[0:len(s)]
+ _ = s[0]<<1
+ _ = (s[0]<<1)&0xf
+ _ = s[0] << 2 | s[1] >> 4
+ _ = "foo"+s
+ _ = s+"foo"
+ _ = 'a'+'b'
+ _ = len(s)/2
+ _ = len(t0.x)/a
+
+ // spaces around expressions of different precedence or expressions containing spaces
+ _ = a + -b
+ _ = a - ^b
+ _ = a / *p
+ _ = a + b*c
+ _ = 1 + b*c
+ _ = a + 2*c
+ _ = a + c*2
+ _ = 1 + 2*3
+ _ = s[1 : 2*3]
+ _ = s[a : b-c]
+ _ = s[0:]
+ _ = s[a+b]
+ _ = s[: b-c]
+ _ = s[a+b :]
+ _ = a[a<<b+1]
+ _ = a[a<<b+1 :]
+ _ = s[a+b : len(s)]
+ _ = s[len(s) : -a]
+ _ = s[a : len(s)+1]
+ _ = s[a : len(s)+1]+s
+
+ // spaces around operators with equal or lower precedence than comparisons
+ _ = a == b
+ _ = a != b
+ _ = a > b
+ _ = a >= b
+ _ = a < b
+ _ = a <= b
+ _ = a < b && c > d
+ _ = a < b || c > d
+
+ // spaces around "long" operands
+ _ = a + longIdentifier1
+ _ = longIdentifier1 + a
+ _ = longIdentifier1 + longIdentifier2 * longIdentifier3
+ _ = s + "a longer string"
+
+ // some selected cases
+ _ = a + t0.x
+ _ = a + t0.x + t1.x * t2.x
+ _ = a + b + c + d + e + 2*3
+ _ = a + b + c + 2*3 + d + e
+ _ = (a+b+c)*2
+ _ = a - b + c - d + (a+b+c) + d&e
+ _ = under_bar-1
+ _ = Open(dpath + "/file", O_WRONLY | O_CREAT, 0666)
+ _ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
+
+ // the parser does not restrict expressions that may appear as statements
+ true
+ 42
+ "foo"
+ x
+ (x)
+ a+b
+ a+b+c
+ a+(b*c)
+ a+(b/c)
+ 1+a
+ a+1
+ s[a]
+ x<<1
+ (s[0]<<1)&0xf
+ "foo"+s
+ x == y
+ x < y || z > 42
+}
+
+
+// slice expressions with cap
+func _() {
+ _ = x[a:b:c]
+ _ = x[a:b:c+d]
+ _ = x[a:b+d:c]
+ _ = x[a:b+d:c+d]
+ _ = x[a+d:b:c]
+ _ = x[a+d:b:c+d]
+ _ = x[a+d:b+d:c]
+ _ = x[a+d:b+d:c+d]
+
+ _ = x[:b:c]
+ _ = x[:b:c+d]
+ _ = x[:b+d:c]
+ _ = x[:b+d:c+d]
+}
+
+func _() {
+ _ = a+b
+ _ = a+b+c
+ _ = a+b*c
+ _ = a+(b*c)
+ _ = (a+b)*c
+ _ = a+(b*c*d)
+ _ = a+(b*c+d)
+
+ _ = 1<<x
+ _ = -1<<x
+ _ = 1<<x-1
+ _ = -1<<x-1
+
+ _ = f(a+b)
+ _ = f(a+b+c)
+ _ = f(a+b*c)
+ _ = f(a+(b*c))
+ _ = f(1<<x-1, 1<<x-2)
+
+ _ = 1<<d.logWindowSize-1
+
+ buf = make(x, 2*cap(b.buf) + n)
+
+ dst[i*3+2] = dbuf[0]<<2
+ dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
+
+ b.buf = b.buf[0:b.off+m+n]
+ b.buf = b.buf[0:b.off+m*n]
+ f(b.buf[0:b.off+m+n])
+
+ signed += ' '*8
+ tw.octal(header[148:155], chksum)
+
+ _ = x > 0 && i >= 0
+
+ x1, x0 := x>>w2, x&m2
+ z0 = t1<<w2+t0
+ z1 = (t1+t0>>w2)>>w2
+ q1, r1 := x1/d1, x1%d1
+ r1 = r1*b2 | x0>>w2
+ x1 = (x1<<z)|(x0>>(uint(w)-z))
+ x1 = x1<<z | x0>>(uint(w)-z)
+
+ _ = buf[0:len(buf)+1]
+ _ = buf[0:n+1]
+
+ a,b = b,a
+ a = b+c
+ a = b*c+d
+ _ = a*b+c
+ _ = a-b-c
+ _ = a-(b-c)
+ _ = a-b*c
+ _ = a-(b*c)
+ _ = a*b/c
+ _ = a/ *b
+ _ = x[a|^b]
+ _ = x[a/ *b]
+ _ = a& ^b
+ _ = a+ +b
+ _ = a- -b
+ _ = x[a*-b]
+ _ = x[a+ +b]
+ _ = x^y^z
+ _ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+ _ = len(longVariableName)*2
+
+ _ = token(matchType + xlength<<lengthShift + xoffset)
+}
+
+
+func f(x int, args ...int) {
+ f(0, args...)
+ f(1, args)
+ f(2, args[0])
+
+ // make sure syntactically legal code remains syntactically legal
+ f(3, 42 ...) // a blank must remain between 42 and ...
+ f(4, 42. ...)
+ f(5, 42....)
+ f(6, 42.0 ...)
+ f(7, 42.0...)
+ f(8, .42 ...)
+ f(9, .42...)
+ f(10, 42e0 ...)
+ f(11, 42e0...)
+
+ _ = 42 .x // a blank must remain between 42 and .x
+ _ = 42. .x
+ _ = 42..x
+ _ = 42.0 .x
+ _ = 42.0.x
+ _ = .42 .x
+ _ = .42.x
+ _ = 42e0 .x
+ _ = 42e0.x
+
+ // a blank must remain between the binary operator and the 2nd operand
+ _ = x/ *y
+ _ = x< -1
+ _ = x< <-1
+ _ = x+ +1
+ _ = x- -1
+ _ = x& &x
+ _ = x& ^x
+
+ _ = f(x/ *y, x< -1, x< <-1, x+ +1, x- -1, x& &x, x& ^x)
+}
+
+
+func _() {
+ _ = T{}
+ _ = struct{}{}
+ _ = [10]T{}
+ _ = [...]T{}
+ _ = []T{}
+ _ = map[int]T{}
+}
+
+
+// one-line structs/interfaces in composite literals (up to a threshold)
+func _() {
+ _ = struct{}{}
+ _ = struct{ x int }{0}
+ _ = struct{ x, y, z int }{0, 1, 2}
+ _ = struct{ int }{0}
+ _ = struct{ s struct { int } }{struct{ int}{0} }
+}
+
+
+func _() {
+ // do not modify literals
+ _ = "tab1 tab2 tab3 end" // string contains 3 tabs
+ _ = "tab1 tab2 tab3 end" // same string with 3 blanks - may be unaligned because editors see tabs in strings
+ _ = "" // this comment should be aligned with the one on the previous line
+ _ = ``
+ _ = `
+`
+_ = `foo
+ bar`
+ _ = `three spaces before the end of the line starting here:
+they must not be removed`
+}
+
+
+func _() {
+ // smart handling of indentation for multi-line raw strings
+ var _ = ``
+ var _ = `foo`
+ var _ = `foo
+bar`
+
+
+var _ =
+ ``
+var _ =
+ `foo`
+var _ =
+ // the next line should remain indented
+ `foo
+bar`
+
+
+ var _ = // comment
+ ``
+ var _ = // comment
+ `foo`
+ var _ = // comment
+ // the next line should remain indented
+ `foo
+bar`
+
+
+var _ = /* comment */ ``
+var _ = /* comment */ `foo`
+var _ = /* comment */ `foo
+bar`
+
+
+ var _ = /* comment */
+ ``
+ var _ = /* comment */
+ `foo`
+ var _ = /* comment */
+ // the next line should remain indented
+ `foo
+bar`
+
+
+var board = []int(
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`)
+
+
+ var state = S{
+ "foo",
+ // the next line should remain indented
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`,
+ "bar",
+ }
+}
+
+
+func _() {
+ // one-line function literals (body is on a single line)
+ _ = func() {}
+ _ = func() int { return 0 }
+ _ = func(x, y int) bool { m := (x+y)/2; return m < 0 }
+
+ // multi-line function literals (body is not on one line)
+ _ = func() {
+ }
+ _ = func() int {
+ return 0
+ }
+ _ = func(x, y int) bool {
+ m := (x+y)/2; return x < y }
+
+ f(func() {
+ })
+ f(func() int {
+ return 0
+ })
+ f(func(x, y int) bool {
+ m := (x+y)/2; return x < y })
+}
+
+
+func _() {
+ _ = [][]int {
+ []int{1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int {
+ {1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int {
+ {1},
+ {1, 2},
+ {1, 2, 3},
+ }
+ _ = [][]int {{1}, {1, 2}, {1, 2, 3}}
+}
+
+
+// various multi-line expressions
+func _() {
+ // do not add extra indentation to multi-line string lists
+ _ = "foo" + "bar"
+ _ = "foo" +
+ "bar" +
+ "bah"
+ _ = []string {
+ "abc" +
+ "def",
+ "foo" +
+ "bar",
+ }
+}
+
+
+const _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+
+const _ =
+ `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+
+const _ = `datafmt "datafmt";` +
+`default = "%v";` +
+`array = *;` +
+`datafmt.T3 = s {" " a a / ","};`
+
+
+func _() {
+ _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+ _ =
+ `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+ _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+}
+
+
+func _() {
+ // respect source lines in multi-line expressions
+ _ = a+
+ b+
+ c
+ _ = a < b ||
+ b < a
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+}
+
+
+// Alignment after overlong lines
+const (
+ _ = "991"
+ _ = "2432902008176640000" // 20!
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+)
+
+
+// Correct placement of operators and comments in multi-line expressions
+func _() {
+ _ = a + // comment
+ b + // comment
+ c
+ _ = "a" +
+ "b" + // comment
+ "c"
+ _ = "ba0408" + "7265717569726564" // field 71, encoding 2, string "required"
+}
+
+
+// Correct placement of terminating comma/closing parentheses in multi-line calls.
+func _() {
+ f(1,
+ 2,
+ 3)
+ f(1,
+ 2,
+ 3,
+ )
+ f(1,
+ 2,
+ 3) // comment
+ f(1,
+ 2,
+ 3, // comment
+ )
+ f(1,
+ 2,
+ 3)// comment
+ f(1,
+ 2,
+ 3,// comment
+ )
+}
+
+
+// Align comments in multi-line lists of single-line expressions.
+var txpix = [NCOL]draw.Color{
+ draw.Yellow, // yellow
+ draw.Cyan, // cyan
+ draw.Green, // lime green
+ draw.GreyBlue, // slate
+ draw.Red, /* red */
+ draw.GreyGreen, /* olive green */
+ draw.Blue, /* blue */
+ draw.Color(0xFF55AAFF), /* pink */
+ draw.Color(0xFFAAFFFF), /* lavender */
+ draw.Color(0xBB005DFF), /* maroon */
+}
+
+
+func same(t, u *Time) bool {
+ // respect source lines in multi-line expressions
+ return t.Year == u.Year &&
+ t.Month == u.Month &&
+ t.Day == u.Day &&
+ t.Hour == u.Hour &&
+ t.Minute == u.Minute &&
+ t.Second == u.Second &&
+ t.Weekday == u.Weekday &&
+ t.ZoneOffset == u.ZoneOffset &&
+ t.Zone == u.Zone
+}
+
+
+func (p *parser) charClass() {
+ // respect source lines in multi-line expressions
+ if cc.negate && len(cc.ranges) == 2 &&
+ cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
+ nl := new(_NotNl)
+ p.re.add(nl)
+ }
+}
+
+
+func addState(s []state, inst instr, match []int) {
+ // handle comments correctly in multi-line expressions
+ for i := 0; i < l; i++ {
+ if s[i].inst.index() == index && // same instruction
+ s[i].match[0] < pos { // earlier match already going; leftmost wins
+ return s
+ }
+ }
+}
+
+func (self *T) foo(x int) *T { return self }
+
+func _() { module.Func1().Func2() }
+
+func _() {
+ _ = new(T).
+ foo(1).
+ foo(2).
+ foo(3)
+
+ _ = new(T).
+ foo(1).
+ foo(2). // inline comments
+ foo(3)
+
+ _ = new(T).foo(1).foo(2).foo(3)
+
+ // handle multiline argument list correctly
+ _ = new(T).
+ foo(
+ 1).
+ foo(2)
+
+ _ = new(T).foo(
+ 1).foo(2)
+
+ _ = Array[3 +
+4]
+
+ _ = Method(1, 2,
+ 3)
+
+ _ = new(T).
+ foo().
+ bar() . (*Type)
+
+ _ = new(T).
+foo().
+bar().(*Type).
+baz()
+
+ _ = new(T).
+ foo().
+ bar()["idx"]
+
+ _ = new(T).
+ foo().
+ bar()["idx"] .
+ baz()
+
+ _ = new(T).
+ foo().
+ bar()[1:2]
+
+ _ = new(T).
+ foo().
+ bar()[1:2].
+ baz()
+
+ _ = new(T).
+ Field.
+ Array[3+
+ 4].
+ Table ["foo"].
+ Blob. (*Type).
+ Slices[1:4].
+ Method(1, 2,
+ 3).
+ Thingy
+
+ _ = a.b.c
+ _ = a.
+ b.
+ c
+ _ = a.b().c
+ _ = a.
+ b().
+ c
+ _ = a.b[0].c
+ _ = a.
+ b[0].
+ c
+ _ = a.b[0:].c
+ _ = a.
+ b[0:].
+ c
+ _ = a.b.(T).c
+ _ = a.
+ b.
+ (T).
+ c
+}
+
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC, 0666); err != nil {
+ log.Fatal(err)
+ }
+}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+ _ = append(s, a...)
+ _ = append(
+ s, a...)
+ _ = append(s,
+ a...)
+ _ = append(
+ s,
+ a...)
+ _ = append(s, a...,
+ )
+ _ = append(s,
+ a...,
+ )
+ _ = append(
+ s,
+ a...,
+ )
+}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+ // these conversions should be rewritten to look
+ // the same as the parenthesized conversions below
+ _ = func()()(nil)
+ _ = func(x int)(float)(nil)
+ _ = func() func() func()()(nil)
+
+ _ = (func()())(nil)
+ _ = (func(x int)(float))(nil)
+ _ = (func() func() func()())(nil)
+}
diff --git a/src/go/printer/testdata/expressions.raw b/src/go/printer/testdata/expressions.raw
new file mode 100644
index 000000000..97bc81dad
--- /dev/null
+++ b/src/go/printer/testdata/expressions.raw
@@ -0,0 +1,681 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expressions
+
+type T struct {
+ x, y, z int
+}
+
+var (
+ a, b, c, d, e int
+ under_bar int
+ longIdentifier1, longIdentifier2, longIdentifier3 int
+ t0, t1, t2 T
+ s string
+ p *int
+)
+
+func _() {
+ // no spaces around simple or parenthesized expressions
+ _ = (a + 0)
+ _ = a + b
+ _ = a + b + c
+ _ = a + b - c
+ _ = a - b - c
+ _ = a + (b * c)
+ _ = a + (b / c)
+ _ = a - (b % c)
+ _ = 1 + a
+ _ = a + 1
+ _ = a + b + 1
+ _ = s[a]
+ _ = s[a:]
+ _ = s[:b]
+ _ = s[1:2]
+ _ = s[a:b]
+ _ = s[0:len(s)]
+ _ = s[0] << 1
+ _ = (s[0] << 1) & 0xf
+ _ = s[0]<<2 | s[1]>>4
+ _ = "foo" + s
+ _ = s + "foo"
+ _ = 'a' + 'b'
+ _ = len(s) / 2
+ _ = len(t0.x) / a
+
+ // spaces around expressions of different precedence or expressions containing spaces
+ _ = a + -b
+ _ = a - ^b
+ _ = a / *p
+ _ = a + b*c
+ _ = 1 + b*c
+ _ = a + 2*c
+ _ = a + c*2
+ _ = 1 + 2*3
+ _ = s[1 : 2*3]
+ _ = s[a : b-c]
+ _ = s[0:]
+ _ = s[a+b]
+ _ = s[:b-c]
+ _ = s[a+b:]
+ _ = a[a<<b+1]
+ _ = a[a<<b+1:]
+ _ = s[a+b : len(s)]
+ _ = s[len(s):-a]
+ _ = s[a : len(s)+1]
+ _ = s[a:len(s)+1] + s
+
+ // spaces around operators with equal or lower precedence than comparisons
+ _ = a == b
+ _ = a != b
+ _ = a > b
+ _ = a >= b
+ _ = a < b
+ _ = a <= b
+ _ = a < b && c > d
+ _ = a < b || c > d
+
+ // spaces around "long" operands
+ _ = a + longIdentifier1
+ _ = longIdentifier1 + a
+ _ = longIdentifier1 + longIdentifier2*longIdentifier3
+ _ = s + "a longer string"
+
+ // some selected cases
+ _ = a + t0.x
+ _ = a + t0.x + t1.x*t2.x
+ _ = a + b + c + d + e + 2*3
+ _ = a + b + c + 2*3 + d + e
+ _ = (a + b + c) * 2
+ _ = a - b + c - d + (a + b + c) + d&e
+ _ = under_bar - 1
+ _ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666)
+ _ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
+
+ // the parser does not restrict expressions that may appear as statements
+ true
+ 42
+ "foo"
+ x
+ (x)
+ a + b
+ a + b + c
+ a + (b * c)
+ a + (b / c)
+ 1 + a
+ a + 1
+ s[a]
+ x << 1
+ (s[0] << 1) & 0xf
+ "foo" + s
+ x == y
+ x < y || z > 42
+}
+
+// slice expressions with cap
+func _() {
+ _ = x[a:b:c]
+ _ = x[a:b : c+d]
+ _ = x[a : b+d : c]
+ _ = x[a : b+d : c+d]
+ _ = x[a+d : b:c]
+ _ = x[a+d : b : c+d]
+ _ = x[a+d : b+d : c]
+ _ = x[a+d : b+d : c+d]
+
+ _ = x[:b:c]
+ _ = x[:b : c+d]
+ _ = x[:b+d : c]
+ _ = x[:b+d : c+d]
+}
+
+func _() {
+ _ = a + b
+ _ = a + b + c
+ _ = a + b*c
+ _ = a + (b * c)
+ _ = (a + b) * c
+ _ = a + (b * c * d)
+ _ = a + (b*c + d)
+
+ _ = 1 << x
+ _ = -1 << x
+ _ = 1<<x - 1
+ _ = -1<<x - 1
+
+ _ = f(a + b)
+ _ = f(a + b + c)
+ _ = f(a + b*c)
+ _ = f(a + (b * c))
+ _ = f(1<<x-1, 1<<x-2)
+
+ _ = 1<<d.logWindowSize - 1
+
+ buf = make(x, 2*cap(b.buf)+n)
+
+ dst[i*3+2] = dbuf[0] << 2
+ dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
+
+ b.buf = b.buf[0 : b.off+m+n]
+ b.buf = b.buf[0 : b.off+m*n]
+ f(b.buf[0 : b.off+m+n])
+
+ signed += ' ' * 8
+ tw.octal(header[148:155], chksum)
+
+ _ = x > 0 && i >= 0
+
+ x1, x0 := x>>w2, x&m2
+ z0 = t1<<w2 + t0
+ z1 = (t1 + t0>>w2) >> w2
+ q1, r1 := x1/d1, x1%d1
+ r1 = r1*b2 | x0>>w2
+ x1 = (x1 << z) | (x0 >> (uint(w) - z))
+ x1 = x1<<z | x0>>(uint(w)-z)
+
+ _ = buf[0 : len(buf)+1]
+ _ = buf[0 : n+1]
+
+ a, b = b, a
+ a = b + c
+ a = b*c + d
+ _ = a*b + c
+ _ = a - b - c
+ _ = a - (b - c)
+ _ = a - b*c
+ _ = a - (b * c)
+ _ = a * b / c
+ _ = a / *b
+ _ = x[a|^b]
+ _ = x[a / *b]
+ _ = a & ^b
+ _ = a + +b
+ _ = a - -b
+ _ = x[a*-b]
+ _ = x[a + +b]
+ _ = x ^ y ^ z
+ _ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+ _ = len(longVariableName) * 2
+
+ _ = token(matchType + xlength<<lengthShift + xoffset)
+}
+
+func f(x int, args ...int) {
+ f(0, args...)
+ f(1, args)
+ f(2, args[0])
+
+ // make sure syntactically legal code remains syntactically legal
+ f(3, 42 ...) // a blank must remain between 42 and ...
+ f(4, 42....)
+ f(5, 42....)
+ f(6, 42.0...)
+ f(7, 42.0...)
+ f(8, .42...)
+ f(9, .42...)
+ f(10, 42e0...)
+ f(11, 42e0...)
+
+ _ = 42 .x // a blank must remain between 42 and .x
+ _ = 42..x
+ _ = 42..x
+ _ = 42.0.x
+ _ = 42.0.x
+ _ = .42.x
+ _ = .42.x
+ _ = 42e0.x
+ _ = 42e0.x
+
+ // a blank must remain between the binary operator and the 2nd operand
+ _ = x / *y
+ _ = x < -1
+ _ = x < <-1
+ _ = x + +1
+ _ = x - -1
+ _ = x & &x
+ _ = x & ^x
+
+ _ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x)
+}
+
+func _() {
+ _ = T{}
+ _ = struct{}{}
+ _ = [10]T{}
+ _ = [...]T{}
+ _ = []T{}
+ _ = map[int]T{}
+}
+
+// one-line structs/interfaces in composite literals (up to a threshold)
+func _() {
+ _ = struct{}{}
+ _ = struct{ x int }{0}
+ _ = struct{ x, y, z int }{0, 1, 2}
+ _ = struct{ int }{0}
+ _ = struct{ s struct{ int } }{struct{ int }{0}}
+}
+
+func _() {
+ // do not modify literals
+ _ = "tab1 tab2 tab3 end" // string contains 3 tabs
+ _ = "tab1 tab2 tab3 end" // same string with 3 blanks - may be unaligned because editors see tabs in strings
+ _ = "" // this comment should be aligned with the one on the previous line
+ _ = ``
+ _ = `
+`
+ _ = `foo
+ bar`
+ _ = `three spaces before the end of the line starting here:
+they must not be removed`
+}
+
+func _() {
+ // smart handling of indentation for multi-line raw strings
+ var _ = ``
+ var _ = `foo`
+ var _ = `foo
+bar`
+
+ var _ = ``
+ var _ = `foo`
+ var _ =
+ // the next line should remain indented
+ `foo
+bar`
+
+ var _ = // comment
+ ``
+ var _ = // comment
+ `foo`
+ var _ = // comment
+ // the next line should remain indented
+ `foo
+bar`
+
+ var _ = /* comment */ ``
+ var _ = /* comment */ `foo`
+ var _ = /* comment */ `foo
+bar`
+
+ var _ = /* comment */
+ ``
+ var _ = /* comment */
+ `foo`
+ var _ = /* comment */
+ // the next line should remain indented
+ `foo
+bar`
+
+ var board = []int(
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`)
+
+ var state = S{
+ "foo",
+ // the next line should remain indented
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`,
+ "bar",
+ }
+}
+
+func _() {
+ // one-line function literals (body is on a single line)
+ _ = func() {}
+ _ = func() int { return 0 }
+ _ = func(x, y int) bool { m := (x + y) / 2; return m < 0 }
+
+ // multi-line function literals (body is not on one line)
+ _ = func() {
+ }
+ _ = func() int {
+ return 0
+ }
+ _ = func(x, y int) bool {
+ m := (x + y) / 2
+ return x < y
+ }
+
+ f(func() {
+ })
+ f(func() int {
+ return 0
+ })
+ f(func(x, y int) bool {
+ m := (x + y) / 2
+ return x < y
+ })
+}
+
+func _() {
+ _ = [][]int{
+ []int{1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int{
+ {1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int{
+ {1},
+ {1, 2},
+ {1, 2, 3},
+ }
+ _ = [][]int{{1}, {1, 2}, {1, 2, 3}}
+}
+
+// various multi-line expressions
+func _() {
+ // do not add extra indentation to multi-line string lists
+ _ = "foo" + "bar"
+ _ = "foo" +
+ "bar" +
+ "bah"
+ _ = []string{
+ "abc" +
+ "def",
+ "foo" +
+ "bar",
+ }
+}
+
+const _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+const _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+const _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+func _() {
+ _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+ _ =
+ `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+ _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+}
+
+func _() {
+ // respect source lines in multi-line expressions
+ _ = a +
+ b +
+ c
+ _ = a < b ||
+ b < a
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+}
+
+// Alignment after overlong lines
+const (
+ _ = "991"
+ _ = "2432902008176640000" // 20!
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+)
+
+// Correct placement of operators and comments in multi-line expressions
+func _() {
+ _ = a + // comment
+ b + // comment
+ c
+ _ = "a" +
+ "b" + // comment
+ "c"
+ _ = "ba0408" + "7265717569726564" // field 71, encoding 2, string "required"
+}
+
+// Correct placement of terminating comma/closing parentheses in multi-line calls.
+func _() {
+ f(1,
+ 2,
+ 3)
+ f(1,
+ 2,
+ 3,
+ )
+ f(1,
+ 2,
+ 3) // comment
+ f(1,
+ 2,
+ 3, // comment
+ )
+ f(1,
+ 2,
+ 3) // comment
+ f(1,
+ 2,
+ 3, // comment
+ )
+}
+
+// Align comments in multi-line lists of single-line expressions.
+var txpix = [NCOL]draw.Color{
+ draw.Yellow, // yellow
+ draw.Cyan, // cyan
+ draw.Green, // lime green
+ draw.GreyBlue, // slate
+ draw.Red, /* red */
+ draw.GreyGreen, /* olive green */
+ draw.Blue, /* blue */
+ draw.Color(0xFF55AAFF), /* pink */
+ draw.Color(0xFFAAFFFF), /* lavender */
+ draw.Color(0xBB005DFF), /* maroon */
+}
+
+func same(t, u *Time) bool {
+ // respect source lines in multi-line expressions
+ return t.Year == u.Year &&
+ t.Month == u.Month &&
+ t.Day == u.Day &&
+ t.Hour == u.Hour &&
+ t.Minute == u.Minute &&
+ t.Second == u.Second &&
+ t.Weekday == u.Weekday &&
+ t.ZoneOffset == u.ZoneOffset &&
+ t.Zone == u.Zone
+}
+
+func (p *parser) charClass() {
+ // respect source lines in multi-line expressions
+ if cc.negate && len(cc.ranges) == 2 &&
+ cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
+ nl := new(_NotNl)
+ p.re.add(nl)
+ }
+}
+
+func addState(s []state, inst instr, match []int) {
+ // handle comments correctly in multi-line expressions
+ for i := 0; i < l; i++ {
+ if s[i].inst.index() == index && // same instruction
+ s[i].match[0] < pos { // earlier match already going; leftmost wins
+ return s
+ }
+ }
+}
+
+func (self *T) foo(x int) *T { return self }
+
+func _() { module.Func1().Func2() }
+
+func _() {
+ _ = new(T).
+ foo(1).
+ foo(2).
+ foo(3)
+
+ _ = new(T).
+ foo(1).
+ foo(2). // inline comments
+ foo(3)
+
+ _ = new(T).foo(1).foo(2).foo(3)
+
+ // handle multiline argument list correctly
+ _ = new(T).
+ foo(
+ 1).
+ foo(2)
+
+ _ = new(T).foo(
+ 1).foo(2)
+
+ _ = Array[3+
+ 4]
+
+ _ = Method(1, 2,
+ 3)
+
+ _ = new(T).
+ foo().
+ bar().(*Type)
+
+ _ = new(T).
+ foo().
+ bar().(*Type).
+ baz()
+
+ _ = new(T).
+ foo().
+ bar()["idx"]
+
+ _ = new(T).
+ foo().
+ bar()["idx"].
+ baz()
+
+ _ = new(T).
+ foo().
+ bar()[1:2]
+
+ _ = new(T).
+ foo().
+ bar()[1:2].
+ baz()
+
+ _ = new(T).
+ Field.
+ Array[3+
+ 4].
+ Table["foo"].
+ Blob.(*Type).
+ Slices[1:4].
+ Method(1, 2,
+ 3).
+ Thingy
+
+ _ = a.b.c
+ _ = a.
+ b.
+ c
+ _ = a.b().c
+ _ = a.
+ b().
+ c
+ _ = a.b[0].c
+ _ = a.
+ b[0].
+ c
+ _ = a.b[0:].c
+ _ = a.
+ b[0:].
+ c
+ _ = a.b.(T).c
+ _ = a.
+ b.(T).
+ c
+}
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, os.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC, 0666); err != nil {
+ log.Fatal(err)
+ }
+}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+ _ = append(s, a...)
+ _ = append(
+ s, a...)
+ _ = append(s,
+ a...)
+ _ = append(
+ s,
+ a...)
+ _ = append(s, a...,
+ )
+ _ = append(s,
+ a...,
+ )
+ _ = append(
+ s,
+ a...,
+ )
+}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+ // these conversions should be rewritten to look
+ // the same as the parenthesized conversions below
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+}
diff --git a/src/go/printer/testdata/linebreaks.golden b/src/go/printer/testdata/linebreaks.golden
new file mode 100644
index 000000000..006cf1718
--- /dev/null
+++ b/src/go/printer/testdata/linebreaks.golden
@@ -0,0 +1,275 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package linebreaks
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+type writerTestEntry struct {
+ header *Header
+ contents string
+}
+
+type writerTest struct {
+ file string // filename of expected output
+ entries []*writerTestEntry
+}
+
+var writerTests = []*writerTest{
+ &writerTest{
+ file: "testdata/writer.tar",
+ entries: []*writerTestEntry{
+ &writerTestEntry{
+ header: &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1246508266,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Kilts",
+ },
+ &writerTestEntry{
+ header: &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1245217492,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Google.com\n",
+ },
+ },
+ },
+ // The truncated test file was produced using these commands:
+ // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
+ // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
+ &writerTest{
+ file: "testdata/writer-big.tar",
+ entries: []*writerTestEntry{
+ &writerTestEntry{
+ header: &Header{
+ Name: "tmp/16gig.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 16 << 30,
+ Mtime: 1254699560,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ // no contents
+ },
+ },
+ },
+}
+
+type untarTest struct {
+ file string
+ headers []*Header
+}
+
+var untarTests = []*untarTest{
+ &untarTest{
+ file: "testdata/gnu.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244428340,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244436044,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ },
+ },
+ &untarTest{
+ file: "testdata/star.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244592783,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ Atime: 1244592783,
+ Ctime: 1244592783,
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244592783,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ Atime: 1244592783,
+ Ctime: 1244592783,
+ },
+ },
+ },
+ &untarTest{
+ file: "testdata/v7.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244593104,
+ Typeflag: '\x00',
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244593104,
+ Typeflag: '\x00',
+ },
+ },
+ },
+}
+
+var facts = map[int]string{
+ 0: "1",
+ 1: "1",
+ 2: "2",
+ 10: "3628800",
+ 20: "2432902008176640000",
+ 100: "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000",
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr,
+ // TODO(gri): the 2nd string of this string list should not be indented
+ "usage: godoc package [name ...]\n"+
+ " godoc -http=:6060\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+func TestReader(t *testing.T) {
+testLoop:
+ for i, test := range untarTests {
+ f, err := os.Open(test.file, os.O_RDONLY, 0444)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ continue
+ }
+ tr := NewReader(f)
+ for j, header := range test.headers {
+ hdr, err := tr.Next()
+ if err != nil || hdr == nil {
+ t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
+ f.Close()
+ continue testLoop
+ }
+ if !reflect.DeepEqual(hdr, header) {
+ t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
+ i, j, *hdr, *header)
+ }
+ }
+ hdr, err := tr.Next()
+ if hdr != nil || err != nil {
+ t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, err)
+ }
+ f.Close()
+ }
+}
+
+// Respect line breaks in function calls.
+func _() {
+ f(x)
+ f(x,
+ x)
+ f(x,
+ x,
+ )
+ f(
+ x,
+ x)
+ f(
+ x,
+ x,
+ )
+}
+
+// Respect line breaks in function declarations.
+func _(x T) {}
+func _(x T,
+ y T) {
+}
+func _(x T,
+ y T,
+) {
+}
+func _(
+ x T,
+ y T) {
+}
+func _(
+ x T,
+ y T,
+) {
+}
+
+// Example from issue 2597.
+func ManageStatus0(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int) {
+}
+
+func ManageStatus1(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int,
+) {
+}
+
+// There should be exactly one linebreak after this comment.
diff --git a/src/go/printer/testdata/linebreaks.input b/src/go/printer/testdata/linebreaks.input
new file mode 100644
index 000000000..e782bb044
--- /dev/null
+++ b/src/go/printer/testdata/linebreaks.input
@@ -0,0 +1,271 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package linebreaks
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+type writerTestEntry struct {
+ header *Header
+ contents string
+}
+
+type writerTest struct {
+ file string // filename of expected output
+ entries []*writerTestEntry
+}
+
+var writerTests = []*writerTest{
+ &writerTest{
+ file: "testdata/writer.tar",
+ entries: []*writerTestEntry{
+ &writerTestEntry{
+ header: &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1246508266,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Kilts",
+ },
+ &writerTestEntry{
+ header: &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1245217492,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Google.com\n",
+ },
+ },
+ },
+ // The truncated test file was produced using these commands:
+ // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
+ // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
+ &writerTest{
+ file: "testdata/writer-big.tar",
+ entries: []*writerTestEntry{
+ &writerTestEntry{
+ header: &Header{
+ Name: "tmp/16gig.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 16 << 30,
+ Mtime: 1254699560,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ // no contents
+ },
+ },
+ },
+}
+
+type untarTest struct {
+ file string
+ headers []*Header
+}
+
+var untarTests = []*untarTest{
+ &untarTest{
+ file: "testdata/gnu.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244428340,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244436044,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ },
+ },
+ &untarTest{
+ file: "testdata/star.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244592783,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ Atime: 1244592783,
+ Ctime: 1244592783,
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244592783,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ Atime: 1244592783,
+ Ctime: 1244592783,
+ },
+ },
+ },
+ &untarTest{
+ file: "testdata/v7.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244593104,
+ Typeflag: '\x00',
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244593104,
+ Typeflag: '\x00',
+ },
+ },
+ },
+}
+
+var facts = map[int] string {
+ 0: "1",
+ 1: "1",
+ 2: "2",
+ 10: "3628800",
+ 20: "2432902008176640000",
+ 100: "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000",
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr,
+ // TODO(gri): the 2nd string of this string list should not be indented
+ "usage: godoc package [name ...]\n" +
+ " godoc -http=:6060\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+func TestReader(t *testing.T) {
+testLoop:
+ for i, test := range untarTests {
+ f, err := os.Open(test.file, os.O_RDONLY, 0444)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ continue
+ }
+ tr := NewReader(f)
+ for j, header := range test.headers {
+ hdr, err := tr.Next()
+ if err != nil || hdr == nil {
+ t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
+ f.Close()
+ continue testLoop
+ }
+ if !reflect.DeepEqual(hdr, header) {
+ t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
+ i, j, *hdr, *header)
+ }
+ }
+ hdr, err := tr.Next()
+ if hdr != nil || err != nil {
+ t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, err)
+ }
+ f.Close()
+ }
+}
+
+// Respect line breaks in function calls.
+func _() {
+ f(x)
+ f(x,
+ x)
+ f(x,
+ x,
+ )
+ f(
+ x,
+ x)
+ f(
+ x,
+ x,
+ )
+}
+
+// Respect line breaks in function declarations.
+func _(x T) {}
+func _(x T,
+ y T) {}
+func _(x T,
+ y T,
+) {}
+func _(
+ x T,
+ y T) {}
+func _(
+ x T,
+ y T,
+) {}
+
+// Example from issue 2597.
+func ManageStatus0(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int) {
+}
+
+func ManageStatus1(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int,
+) {
+}
+
+// There should be exactly one linebreak after this comment.
diff --git a/src/go/printer/testdata/parser.go b/src/go/printer/testdata/parser.go
new file mode 100644
index 000000000..dba8bbd43
--- /dev/null
+++ b/src/go/printer/testdata/parser.go
@@ -0,0 +1,2153 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parser implements a parser for Go source files. Input may be
+// provided in a variety of forms (see the various Parse* functions); the
+// output is an abstract syntax tree (AST) representing the Go source. The
+// parser is invoked through one of the Parse* functions.
+
+package parser
+
+import (
+ "fmt"
+ "go/ast"
+ "go/scanner"
+ "go/token"
+)
+
+// The mode parameter to the Parse* functions is a set of flags (or 0).
+// They control the amount of source code parsed and other optional
+// parser functionality.
+//
+const (
+ PackageClauseOnly uint = 1 << iota // parsing stops after package clause
+ ImportsOnly // parsing stops after import declarations
+ ParseComments // parse comments and add them to AST
+ Trace // print a trace of parsed productions
+ DeclarationErrors // report declaration errors
+)
+
+// The parser structure holds the parser's internal state.
+type parser struct {
+ file *token.File
+ scanner.ErrorVector
+ scanner scanner.Scanner
+
+ // Tracing/debugging
+ mode uint // parsing mode
+ trace bool // == (mode & Trace != 0)
+ indent uint // indentation used for tracing output
+
+ // Comments
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ // Next token
+ pos token.Pos // token position
+ tok token.Token // one token look-ahead
+ lit string // token literal
+
+ // Non-syntactic parser control
+ exprLev int // < 0: in control clause, >= 0: in expression
+
+ // Ordinary identifier scopes
+ pkgScope *ast.Scope // pkgScope.Outer == nil
+ topScope *ast.Scope // top-most scope; may be pkgScope
+ unresolved []*ast.Ident // unresolved identifiers
+ imports []*ast.ImportSpec // list of imports
+
+ // Label scope
+ // (maintained by open/close LabelScope)
+ labelScope *ast.Scope // label scope for current function
+ targetStack [][]*ast.Ident // stack of unresolved labels
+}
+
+// scannerMode returns the scanner mode bits given the parser's mode bits.
+func scannerMode(mode uint) uint {
+ var m uint = scanner.InsertSemis
+ if mode&ParseComments != 0 {
+ m |= scanner.ScanComments
+ }
+ return m
+}
+
+func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
+ p.file = fset.AddFile(filename, fset.Base(), len(src))
+ p.scanner.Init(p.file, src, p, scannerMode(mode))
+
+ p.mode = mode
+ p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
+
+ p.next()
+
+ // set up the pkgScope here (as opposed to in parseFile) because
+ // there are other parser entry points (ParseExpr, etc.)
+ p.openScope()
+ p.pkgScope = p.topScope
+
+ // for the same reason, set up a label scope
+ p.openLabelScope()
+}
+
+// ----------------------------------------------------------------------------
+// Scoping support
+
+func (p *parser) openScope() {
+ p.topScope = ast.NewScope(p.topScope)
+}
+
+func (p *parser) closeScope() {
+ p.topScope = p.topScope.Outer
+}
+
+func (p *parser) openLabelScope() {
+ p.labelScope = ast.NewScope(p.labelScope)
+ p.targetStack = append(p.targetStack, nil)
+}
+
+func (p *parser) closeLabelScope() {
+ // resolve labels
+ n := len(p.targetStack) - 1
+ scope := p.labelScope
+ for _, ident := range p.targetStack[n] {
+ ident.Obj = scope.Lookup(ident.Name)
+ if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
+ p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
+ }
+ }
+ // pop label scope
+ p.targetStack = p.targetStack[0:n]
+ p.labelScope = p.labelScope.Outer
+}
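+
+// For illustration only (not in the original source): in
+//
+//	func f() {
+//		goto done // forward reference; done is not yet declared
+//	done:
+//	}
+//
+// parseBranchStmt records done in targetStack, and closeLabelScope resolves
+// it against the function's label scope once the body has been parsed.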
+
+func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
+ for _, ident := range idents {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name != "_" {
+ obj := ast.NewObj(kind, ident.Name)
+ // remember the corresponding declaration for redeclaration
+ // errors and global variable resolution/typechecking phase
+ obj.Decl = decl
+ if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
+ prevDecl := ""
+ if pos := alt.Pos(); pos.IsValid() {
+ prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
+ }
+ p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
+ }
+ ident.Obj = obj
+ }
+ }
+}
+
+func (p *parser) shortVarDecl(idents []*ast.Ident) {
+ // Go spec: A short variable declaration may redeclare variables
+ // provided they were originally declared in the same block with
+ // the same type, and at least one of the non-blank variables is new.
+ n := 0 // number of new variables
+ for _, ident := range idents {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name != "_" {
+ obj := ast.NewObj(ast.Var, ident.Name)
+ // short var declarations cannot have redeclaration errors
+ // and are not global => no need to remember the respective
+ // declaration
+ alt := p.topScope.Insert(obj)
+ if alt == nil {
+ n++ // new declaration
+ alt = obj
+ }
+ ident.Obj = alt
+ }
+ }
+ if n == 0 && p.mode&DeclarationErrors != 0 {
+ p.error(idents[0].Pos(), "no new variables on left side of :=")
+ }
+}
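+
+// For illustration only (not in the original source): under the rule above,
+//
+//	a, err := f() // declares a and err
+//	b, err := g() // ok: b is new, err is redeclared
+//	a, err := h() // error: no new variables on left side of :=
+//
+// only the last line is reported, since neither a nor err is new there.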
+
+// The unresolved object is a sentinel to mark identifiers that have been added
+// to the list of unresolved identifiers. The sentinel is only used for verifying
+// internal consistency.
+var unresolved = new(ast.Object)
+
+func (p *parser) resolve(x ast.Expr) {
+ // nothing to do if x is not an identifier or the blank identifier
+ ident, _ := x.(*ast.Ident)
+ if ident == nil {
+ return
+ }
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name == "_" {
+ return
+ }
+ // try to resolve the identifier
+ for s := p.topScope; s != nil; s = s.Outer {
+ if obj := s.Lookup(ident.Name); obj != nil {
+ ident.Obj = obj
+ return
+ }
+ }
+ // all local scopes are known, so any unresolved identifier
+ // must be found either in the file scope, package scope
+ // (perhaps in another file), or universe scope --- collect
+ // them so that they can be resolved later
+ ident.Obj = unresolved
+ p.unresolved = append(p.unresolved, ident)
+}
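+
+// For illustration only (not in the original source): while parsing
+//
+//	func f() int { return g() }
+//
+// g is not found in any open local scope, so resolve marks it with the
+// unresolved sentinel and appends it to p.unresolved; it may later be found
+// in the file, package (perhaps another file), or universe scope.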
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *parser) printTrace(a ...interface{}) {
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
+ ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = uint(len(dots))
+ pos := p.file.Position(p.pos)
+ fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
+ i := 2 * p.indent
+ for ; i > n; i -= n {
+ fmt.Print(dots)
+ }
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *parser, msg string) *parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."));
+func un(p *parser) {
+ p.indent--
+ p.printTrace(")")
+}
+
+// Advance to the next token.
+func (p *parser) next0() {
+ // Because of one-token look-ahead, print the previous token
+ // when tracing as it provides a more readable output. The
+ // very first token (!p.pos.IsValid()) is not initialized
+ // (it is token.ILLEGAL), so don't print it.
+ if p.trace && p.pos.IsValid() {
+ s := p.tok.String()
+ switch {
+ case p.tok.IsLiteral():
+ p.printTrace(s, p.lit)
+ case p.tok.IsOperator(), p.tok.IsKeyword():
+ p.printTrace("\"" + s + "\"")
+ default:
+ p.printTrace(s)
+ }
+ }
+
+ p.pos, p.tok, p.lit = p.scanner.Scan()
+}
+
+// Consume a comment and return it and the line on which it ends.
+func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
+ // /*-style comments may end on a different line than where they start.
+ // Scan the comment for '\n' chars and adjust endline accordingly.
+ endline = p.file.Line(p.pos)
+ if p.lit[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.lit); i++ {
+ if p.lit[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{p.pos, p.lit}
+ p.next0()
+
+ return
+}
+
+// Consume a group of adjacent comments, add it to the parser's
+// comments list, and return it together with the line at which
+// the last comment in the group ends. An empty line or non-comment
+// token terminates a comment group.
+//
+func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.file.Line(p.pos)
+ for p.tok == token.COMMENT && endline+1 >= p.file.Line(p.pos) {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ // add comment group to the comments list
+ comments = &ast.CommentGroup{list}
+ p.comments = append(p.comments, comments)
+
+ return
+}
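+
+// For illustration only (not in the original source):
+//
+//	// first line of a comment group
+//	// second line of the same group
+//
+//	// a new group; the empty line above terminated the previous one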
+
+// Advance to the next non-comment token. In the process, collect
+// any comment groups encountered, and remember the last lead
+// and line comments.
+//
+// A lead comment is a comment group that starts and ends in a
+// line without any other tokens and that is followed by a non-comment
+// token on the line immediately after the comment group.
+//
+// A line comment is a comment group that follows a non-comment
+// token on the same line, and that has no tokens after it on the line
+// where it ends.
+//
+// Lead and line comments may be considered documentation that is
+// stored in the AST.
+//
+func (p *parser) next() {
+ p.leadComment = nil
+ p.lineComment = nil
+ line := p.file.Line(p.pos) // current line
+ p.next0()
+
+ if p.tok == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ if p.file.Line(p.pos) == line {
+ // The comment is on same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup()
+ if p.file.Line(p.pos) != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ p.lineComment = comment
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok == token.COMMENT {
+ comment, endline = p.consumeCommentGroup()
+ }
+
+ if endline+1 == p.file.Line(p.pos) {
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ p.leadComment = comment
+ }
+ }
+}
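+
+// For illustration only (not in the original source): given
+//
+//	// doc for f (a lead comment)
+//	func f() {} // trailing note (a line comment)
+//
+// after next() has advanced past f's declaration, p.leadComment holds the
+// doc group and p.lineComment holds the trailing group.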
+
+func (p *parser) error(pos token.Pos, msg string) {
+ p.Error(p.file.Position(pos), msg)
+}
+
+func (p *parser) errorExpected(pos token.Pos, msg string) {
+ msg = "expected " + msg
+ if pos == p.pos {
+ // the error happened at the current position;
+ // make the error message more specific
+ if p.tok == token.SEMICOLON && p.lit[0] == '\n' {
+ msg += ", found newline"
+ } else {
+ msg += ", found '" + p.tok.String() + "'"
+ if p.tok.IsLiteral() {
+ msg += " " + p.lit
+ }
+ }
+ }
+ p.error(pos, msg)
+}
+
+func (p *parser) expect(tok token.Token) token.Pos {
+ pos := p.pos
+ if p.tok != tok {
+ p.errorExpected(pos, "'"+tok.String()+"'")
+ }
+ p.next() // make progress
+ return pos
+}
+
+func (p *parser) expectSemi() {
+ if p.tok != token.RPAREN && p.tok != token.RBRACE {
+ p.expect(token.SEMICOLON)
+ }
+}
+
+func assert(cond bool, msg string) {
+ if !cond {
+ panic("go/parser internal error: " + msg)
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Identifiers
+
+func (p *parser) parseIdent() *ast.Ident {
+ pos := p.pos
+ name := "_"
+ if p.tok == token.IDENT {
+ name = p.lit
+ p.next()
+ } else {
+ p.expect(token.IDENT) // use expect() error handling
+ }
+ return &ast.Ident{pos, name, nil}
+}
+
+func (p *parser) parseIdentList() (list []*ast.Ident) {
+ if p.trace {
+ defer un(trace(p, "IdentList"))
+ }
+
+ list = append(list, p.parseIdent())
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.parseIdent())
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Common productions
+
+// If lhs is set, result list elements which are identifiers are not resolved.
+func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "ExpressionList"))
+ }
+
+ list = append(list, p.parseExpr(lhs))
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.parseExpr(lhs))
+ }
+
+ return
+}
+
+func (p *parser) parseLhsList() []ast.Expr {
+ list := p.parseExprList(true)
+ switch p.tok {
+ case token.DEFINE:
+ // lhs of a short variable declaration
+ p.shortVarDecl(p.makeIdentList(list))
+ case token.COLON:
+ // lhs of a label declaration or a communication clause of a select
+ // statement (parseLhsList is not called when parsing the case clause
+ // of a switch statement):
+ // - labels are declared by the caller of parseLhsList
+ // - for communication clauses, if there is a stand-alone identifier
+ // followed by a colon, we have a syntax error; there is no need
+ // to resolve the identifier in that case
+ default:
+ // identifiers must be declared elsewhere
+ for _, x := range list {
+ p.resolve(x)
+ }
+ }
+ return list
+}
+
+func (p *parser) parseRhsList() []ast.Expr {
+ return p.parseExprList(false)
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+func (p *parser) parseType() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Type"))
+ }
+
+ typ := p.tryType()
+
+ if typ == nil {
+ pos := p.pos
+ p.errorExpected(pos, "type")
+ p.next() // make progress
+ return &ast.BadExpr{pos, p.pos}
+ }
+
+ return typ
+}
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) parseTypeName() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "TypeName"))
+ }
+
+ ident := p.parseIdent()
+ // don't resolve ident yet - it may be a parameter or field name
+
+ if p.tok == token.PERIOD {
+ // ident is a package name
+ p.next()
+ p.resolve(ident)
+ sel := p.parseIdent()
+ return &ast.SelectorExpr{ident, sel}
+ }
+
+ return ident
+}
+
+func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "ArrayType"))
+ }
+
+ lbrack := p.expect(token.LBRACK)
+ var len ast.Expr
+ if ellipsisOk && p.tok == token.ELLIPSIS {
+ len = &ast.Ellipsis{p.pos, nil}
+ p.next()
+ } else if p.tok != token.RBRACK {
+ len = p.parseRhs()
+ }
+ p.expect(token.RBRACK)
+ elt := p.parseType()
+
+ return &ast.ArrayType{lbrack, len, elt}
+}
+
+func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
+ idents := make([]*ast.Ident, len(list))
+ for i, x := range list {
+ ident, isIdent := x.(*ast.Ident)
+ if !isIdent {
+ pos := x.Pos()
+ p.errorExpected(pos, "identifier")
+ ident = &ast.Ident{pos, "_", nil}
+ }
+ idents[i] = ident
+ }
+ return idents
+}
+
+func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
+ if p.trace {
+ defer un(trace(p, "FieldDecl"))
+ }
+
+ doc := p.leadComment
+
+ // fields
+ list, typ := p.parseVarList(false)
+
+ // optional tag
+ var tag *ast.BasicLit
+ if p.tok == token.STRING {
+ tag = &ast.BasicLit{p.pos, p.tok, p.lit}
+ p.next()
+ }
+
+ // analyze case
+ var idents []*ast.Ident
+ if typ != nil {
+ // IdentifierList Type
+ idents = p.makeIdentList(list)
+ } else {
+ // ["*"] TypeName (AnonymousField)
+ typ = list[0] // we always have at least one element
+ p.resolve(typ)
+ if n := len(list); n > 1 || !isTypeName(deref(typ)) {
+ pos := typ.Pos()
+ p.errorExpected(pos, "anonymous field")
+ typ = &ast.BadExpr{pos, list[n-1].End()}
+ }
+ }
+
+ p.expectSemi() // call before accessing p.linecomment
+
+ field := &ast.Field{doc, idents, typ, tag, p.lineComment}
+ p.declare(field, scope, ast.Var, idents...)
+
+ return field
+}
+
+func (p *parser) parseStructType() *ast.StructType {
+ if p.trace {
+ defer un(trace(p, "StructType"))
+ }
+
+ pos := p.expect(token.STRUCT)
+ lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // struct scope
+ var list []*ast.Field
+ for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
+ // a field declaration cannot start with a '(' but we accept
+ // it here for more robust parsing and better error messages
+ // (parseFieldDecl will check and complain if necessary)
+ list = append(list, p.parseFieldDecl(scope))
+ }
+ rbrace := p.expect(token.RBRACE)
+
+ // TODO(gri): store struct scope in AST
+ return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
+}
+
+func (p *parser) parsePointerType() *ast.StarExpr {
+ if p.trace {
+ defer un(trace(p, "PointerType"))
+ }
+
+ star := p.expect(token.MUL)
+ base := p.parseType()
+
+ return &ast.StarExpr{star, base}
+}
+
+func (p *parser) tryVarType(isParam bool) ast.Expr {
+ if isParam && p.tok == token.ELLIPSIS {
+ pos := p.pos
+ p.next()
+ typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
+ if typ == nil {
+ p.error(pos, "'...' parameter is missing type")
+ typ = &ast.BadExpr{pos, p.pos}
+ }
+ if p.tok != token.RPAREN {
+ p.error(pos, "can use '...' with last parameter type only")
+ }
+ return &ast.Ellipsis{pos, typ}
+ }
+ return p.tryIdentOrType(false)
+}
+
+func (p *parser) parseVarType(isParam bool) ast.Expr {
+ typ := p.tryVarType(isParam)
+ if typ == nil {
+ pos := p.pos
+ p.errorExpected(pos, "type")
+ p.next() // make progress
+ typ = &ast.BadExpr{pos, p.pos}
+ }
+ return typ
+}
+
+func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "VarList"))
+ }
+
+ // a list of identifiers looks like a list of type names
+ for {
+ // parseVarType accepts any type (including parenthesized ones)
+ // even though the syntax does not permit them here: we
+ // accept them all for more robust parsing and complain
+ // afterwards
+ list = append(list, p.parseVarType(isParam))
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+
+ // if we had a list of identifiers, it must be followed by a type
+ typ = p.tryVarType(isParam)
+ if typ != nil {
+ p.resolve(typ)
+ }
+
+ return
+}
+
+func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
+ if p.trace {
+ defer un(trace(p, "ParameterList"))
+ }
+
+ list, typ := p.parseVarList(ellipsisOk)
+ if typ != nil {
+ // IdentifierList Type
+ idents := p.makeIdentList(list)
+ field := &ast.Field{nil, idents, typ, nil, nil}
+ params = append(params, field)
+ // Go spec: The scope of an identifier denoting a function
+ // parameter or result variable is the function body.
+ p.declare(field, scope, ast.Var, idents...)
+ if p.tok == token.COMMA {
+ p.next()
+ }
+
+ for p.tok != token.RPAREN && p.tok != token.EOF {
+ idents := p.parseIdentList()
+ typ := p.parseVarType(ellipsisOk)
+ field := &ast.Field{nil, idents, typ, nil, nil}
+ params = append(params, field)
+ // Go spec: The scope of an identifier denoting a function
+ // parameter or result variable is the function body.
+ p.declare(field, scope, ast.Var, idents...)
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+
+ } else {
+ // Type { "," Type } (anonymous parameters)
+ params = make([]*ast.Field, len(list))
+ for i, x := range list {
+ p.resolve(x)
+ params[i] = &ast.Field{Type: x}
+ }
+ }
+
+ return
+}
+
+func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
+ if p.trace {
+ defer un(trace(p, "Parameters"))
+ }
+
+ var params []*ast.Field
+ lparen := p.expect(token.LPAREN)
+ if p.tok != token.RPAREN {
+ params = p.parseParameterList(scope, ellipsisOk)
+ }
+ rparen := p.expect(token.RPAREN)
+
+ return &ast.FieldList{lparen, params, rparen}
+}
+
+func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
+ if p.trace {
+ defer un(trace(p, "Result"))
+ }
+
+ if p.tok == token.LPAREN {
+ return p.parseParameters(scope, false)
+ }
+
+ typ := p.tryType()
+ if typ != nil {
+ list := make([]*ast.Field, 1)
+ list[0] = &ast.Field{Type: typ}
+ return &ast.FieldList{List: list}
+ }
+
+ return nil
+}
+
+func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
+ if p.trace {
+ defer un(trace(p, "Signature"))
+ }
+
+ params = p.parseParameters(scope, true)
+ results = p.parseResult(scope)
+
+ return
+}
+
+func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
+ if p.trace {
+ defer un(trace(p, "FuncType"))
+ }
+
+ pos := p.expect(token.FUNC)
+ scope := ast.NewScope(p.topScope) // function scope
+ params, results := p.parseSignature(scope)
+
+ return &ast.FuncType{pos, params, results}, scope
+}
+
+func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
+ if p.trace {
+ defer un(trace(p, "MethodSpec"))
+ }
+
+ doc := p.leadComment
+ var idents []*ast.Ident
+ var typ ast.Expr
+ x := p.parseTypeName()
+ if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
+ // method
+ idents = []*ast.Ident{ident}
+ scope := ast.NewScope(nil) // method scope
+ params, results := p.parseSignature(scope)
+ typ = &ast.FuncType{token.NoPos, params, results}
+ } else {
+ // embedded interface
+ typ = x
+ }
+ p.expectSemi() // call before accessing p.linecomment
+
+ spec := &ast.Field{doc, idents, typ, nil, p.lineComment}
+ p.declare(spec, scope, ast.Fun, idents...)
+
+ return spec
+}
+
+func (p *parser) parseInterfaceType() *ast.InterfaceType {
+ if p.trace {
+ defer un(trace(p, "InterfaceType"))
+ }
+
+ pos := p.expect(token.INTERFACE)
+ lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // interface scope
+ var list []*ast.Field
+ for p.tok == token.IDENT {
+ list = append(list, p.parseMethodSpec(scope))
+ }
+ rbrace := p.expect(token.RBRACE)
+
+ // TODO(gri): store interface scope in AST
+ return &ast.InterfaceType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
+}
+
+func (p *parser) parseMapType() *ast.MapType {
+ if p.trace {
+ defer un(trace(p, "MapType"))
+ }
+
+ pos := p.expect(token.MAP)
+ p.expect(token.LBRACK)
+ key := p.parseType()
+ p.expect(token.RBRACK)
+ value := p.parseType()
+
+ return &ast.MapType{pos, key, value}
+}
+
+func (p *parser) parseChanType() *ast.ChanType {
+ if p.trace {
+ defer un(trace(p, "ChanType"))
+ }
+
+ pos := p.pos
+ dir := ast.SEND | ast.RECV
+ if p.tok == token.CHAN {
+ p.next()
+ if p.tok == token.ARROW {
+ p.next()
+ dir = ast.SEND
+ }
+ } else {
+ p.expect(token.ARROW)
+ p.expect(token.CHAN)
+ dir = ast.RECV
+ }
+ value := p.parseType()
+
+ return &ast.ChanType{pos, dir, value}
+}
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
+ switch p.tok {
+ case token.IDENT:
+ return p.parseTypeName()
+ case token.LBRACK:
+ return p.parseArrayType(ellipsisOk)
+ case token.STRUCT:
+ return p.parseStructType()
+ case token.MUL:
+ return p.parsePointerType()
+ case token.FUNC:
+ typ, _ := p.parseFuncType()
+ return typ
+ case token.INTERFACE:
+ return p.parseInterfaceType()
+ case token.MAP:
+ return p.parseMapType()
+ case token.CHAN, token.ARROW:
+ return p.parseChanType()
+ case token.LPAREN:
+ lparen := p.pos
+ p.next()
+ typ := p.parseType()
+ rparen := p.expect(token.RPAREN)
+ return &ast.ParenExpr{lparen, typ, rparen}
+ }
+
+ // no type found
+ return nil
+}
+
+func (p *parser) tryType() ast.Expr {
+ typ := p.tryIdentOrType(false)
+ if typ != nil {
+ p.resolve(typ)
+ }
+ return typ
+}
+
+// ----------------------------------------------------------------------------
+// Blocks
+
+func (p *parser) parseStmtList() (list []ast.Stmt) {
+ if p.trace {
+ defer un(trace(p, "StatementList"))
+ }
+
+ for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
+ list = append(list, p.parseStmt())
+ }
+
+ return
+}
+
+func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
+ if p.trace {
+ defer un(trace(p, "Body"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ p.topScope = scope // open function scope
+ p.openLabelScope()
+ list := p.parseStmtList()
+ p.closeLabelScope()
+ p.closeScope()
+ rbrace := p.expect(token.RBRACE)
+
+ return &ast.BlockStmt{lbrace, list, rbrace}
+}
+
+func (p *parser) parseBlockStmt() *ast.BlockStmt {
+ if p.trace {
+ defer un(trace(p, "BlockStmt"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ p.openScope()
+ list := p.parseStmtList()
+ p.closeScope()
+ rbrace := p.expect(token.RBRACE)
+
+ return &ast.BlockStmt{lbrace, list, rbrace}
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func (p *parser) parseFuncTypeOrLit() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "FuncTypeOrLit"))
+ }
+
+ typ, scope := p.parseFuncType()
+ if p.tok != token.LBRACE {
+ // function type only
+ return typ
+ }
+
+ p.exprLev++
+ body := p.parseBody(scope)
+ p.exprLev--
+
+ return &ast.FuncLit{typ, body}
+}
+
+// parseOperand may return an expression or a raw type (incl. array
+// types of the form [...]T). Callers must verify the result.
+// If lhs is set and the result is an identifier, it is not resolved.
+//
+func (p *parser) parseOperand(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Operand"))
+ }
+
+ switch p.tok {
+ case token.IDENT:
+ x := p.parseIdent()
+ if !lhs {
+ p.resolve(x)
+ }
+ return x
+
+ case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
+ x := &ast.BasicLit{p.pos, p.tok, p.lit}
+ p.next()
+ return x
+
+ case token.LPAREN:
+ lparen := p.pos
+ p.next()
+ p.exprLev++
+ x := p.parseRhs()
+ p.exprLev--
+ rparen := p.expect(token.RPAREN)
+ return &ast.ParenExpr{lparen, x, rparen}
+
+ case token.FUNC:
+ return p.parseFuncTypeOrLit()
+
+ default:
+ if typ := p.tryIdentOrType(true); typ != nil {
+ // could be type for composite literal or conversion
+ _, isIdent := typ.(*ast.Ident)
+ assert(!isIdent, "type cannot be identifier")
+ return typ
+ }
+ }
+
+ pos := p.pos
+ p.errorExpected(pos, "operand")
+ p.next() // make progress
+ return &ast.BadExpr{pos, p.pos}
+}
+
+func (p *parser) parseSelector(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Selector"))
+ }
+
+ sel := p.parseIdent()
+
+ return &ast.SelectorExpr{x, sel}
+}
+
+func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "TypeAssertion"))
+ }
+
+ p.expect(token.LPAREN)
+ var typ ast.Expr
+ if p.tok == token.TYPE {
+ // type switch: typ == nil
+ p.next()
+ } else {
+ typ = p.parseType()
+ }
+ p.expect(token.RPAREN)
+
+ return &ast.TypeAssertExpr{x, typ}
+}
+
+func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "IndexOrSlice"))
+ }
+
+ lbrack := p.expect(token.LBRACK)
+ p.exprLev++
+ var low, high ast.Expr
+ isSlice := false
+ if p.tok != token.COLON {
+ low = p.parseRhs()
+ }
+ if p.tok == token.COLON {
+ isSlice = true
+ p.next()
+ if p.tok != token.RBRACK {
+ high = p.parseRhs()
+ }
+ }
+ p.exprLev--
+ rbrack := p.expect(token.RBRACK)
+
+ if isSlice {
+ return &ast.SliceExpr{x, lbrack, low, high, rbrack}
+ }
+ return &ast.IndexExpr{x, lbrack, low, rbrack}
+}
+
+func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
+ if p.trace {
+ defer un(trace(p, "CallOrConversion"))
+ }
+
+ lparen := p.expect(token.LPAREN)
+ p.exprLev++
+ var list []ast.Expr
+ var ellipsis token.Pos
+ for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
+ list = append(list, p.parseRhs())
+ if p.tok == token.ELLIPSIS {
+ ellipsis = p.pos
+ p.next()
+ }
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+ p.exprLev--
+ rparen := p.expect(token.RPAREN)
+
+ return &ast.CallExpr{fun, lparen, list, ellipsis, rparen}
+}
+
+func (p *parser) parseElement(keyOk bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Element"))
+ }
+
+ if p.tok == token.LBRACE {
+ return p.parseLiteralValue(nil)
+ }
+
+ x := p.parseExpr(keyOk) // don't resolve if map key
+ if keyOk {
+ if p.tok == token.COLON {
+ colon := p.pos
+ p.next()
+ return &ast.KeyValueExpr{x, colon, p.parseElement(false)}
+ }
+ p.resolve(x) // not a map key
+ }
+
+ return x
+}
+
+func (p *parser) parseElementList() (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "ElementList"))
+ }
+
+ for p.tok != token.RBRACE && p.tok != token.EOF {
+ list = append(list, p.parseElement(true))
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+
+ return
+}
+
+func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "LiteralValue"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ var elts []ast.Expr
+ p.exprLev++
+ if p.tok != token.RBRACE {
+ elts = p.parseElementList()
+ }
+ p.exprLev--
+ rbrace := p.expect(token.RBRACE)
+ return &ast.CompositeLit{typ, lbrace, elts, rbrace}
+}
+
+// checkExpr checks that x is an expression (and not a type).
+func (p *parser) checkExpr(x ast.Expr) ast.Expr {
+ switch t := unparen(x).(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.BasicLit:
+ case *ast.FuncLit:
+ case *ast.CompositeLit:
+ case *ast.ParenExpr:
+ panic("unreachable")
+ case *ast.SelectorExpr:
+ case *ast.IndexExpr:
+ case *ast.SliceExpr:
+ case *ast.TypeAssertExpr:
+ if t.Type == nil {
+ // the form X.(type) is only allowed in type switch expressions
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ case *ast.CallExpr:
+ case *ast.StarExpr:
+ case *ast.UnaryExpr:
+ if t.Op == token.RANGE {
+ // the range operator is only allowed at the top of a for statement
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ case *ast.BinaryExpr:
+ default:
+ // all other nodes are not proper expressions
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ return x
+}
+
+// isTypeName returns true iff x is a (qualified) TypeName.
+func isTypeName(x ast.Expr) bool {
+ switch t := x.(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.SelectorExpr:
+ _, isIdent := t.X.(*ast.Ident)
+ return isIdent
+ default:
+ return false // all other nodes are not type names
+ }
+ return true
+}
+
+// isLiteralType returns true iff x is a legal composite literal type.
+func isLiteralType(x ast.Expr) bool {
+ switch t := x.(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.SelectorExpr:
+ _, isIdent := t.X.(*ast.Ident)
+ return isIdent
+ case *ast.ArrayType:
+ case *ast.StructType:
+ case *ast.MapType:
+ default:
+ return false // all other nodes are not legal composite literal types
+ }
+ return true
+}
+
+// If x is of the form *T, deref returns T, otherwise it returns x.
+func deref(x ast.Expr) ast.Expr {
+ if p, isPtr := x.(*ast.StarExpr); isPtr {
+ x = p.X
+ }
+ return x
+}
+
+// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
+func unparen(x ast.Expr) ast.Expr {
+ if p, isParen := x.(*ast.ParenExpr); isParen {
+ x = unparen(p.X)
+ }
+ return x
+}
+
+// checkExprOrType checks that x is an expression or a type
+// (and not a raw type such as [...]T).
+//
+func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
+ switch t := unparen(x).(type) {
+ case *ast.ParenExpr:
+ panic("unreachable")
+ case *ast.UnaryExpr:
+ if t.Op == token.RANGE {
+ // the range operator is only allowed at the top of a for statement
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ case *ast.ArrayType:
+ if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
+ p.error(len.Pos(), "expected array length, found '...'")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ }
+
+ // all other nodes are expressions or types
+ return x
+}
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "PrimaryExpr"))
+ }
+
+ x := p.parseOperand(lhs)
+L:
+ for {
+ switch p.tok {
+ case token.PERIOD:
+ p.next()
+ if lhs {
+ p.resolve(x)
+ }
+ switch p.tok {
+ case token.IDENT:
+ x = p.parseSelector(p.checkExpr(x))
+ case token.LPAREN:
+ x = p.parseTypeAssertion(p.checkExpr(x))
+ default:
+ pos := p.pos
+ p.next() // make progress
+ p.errorExpected(pos, "selector or type assertion")
+ x = &ast.BadExpr{pos, p.pos}
+ }
+ case token.LBRACK:
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseIndexOrSlice(p.checkExpr(x))
+ case token.LPAREN:
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseCallOrConversion(p.checkExprOrType(x))
+ case token.LBRACE:
+ if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseLiteralValue(x)
+ } else {
+ break L
+ }
+ default:
+ break L
+ }
+ lhs = false // no need to try to resolve again
+ }
+
+ return x
+}
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "UnaryExpr"))
+ }
+
+ switch p.tok {
+ case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.RANGE:
+ pos, op := p.pos, p.tok
+ p.next()
+ x := p.parseUnaryExpr(false)
+ return &ast.UnaryExpr{pos, op, p.checkExpr(x)}
+
+ case token.ARROW:
+ // channel type or receive expression
+ pos := p.pos
+ p.next()
+ if p.tok == token.CHAN {
+ p.next()
+ value := p.parseType()
+ return &ast.ChanType{pos, ast.RECV, value}
+ }
+
+ x := p.parseUnaryExpr(false)
+ return &ast.UnaryExpr{pos, token.ARROW, p.checkExpr(x)}
+
+ case token.MUL:
+ // pointer type or unary "*" expression
+ pos := p.pos
+ p.next()
+ x := p.parseUnaryExpr(false)
+ return &ast.StarExpr{pos, p.checkExprOrType(x)}
+ }
+
+ return p.parsePrimaryExpr(lhs)
+}
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "BinaryExpr"))
+ }
+
+ x := p.parseUnaryExpr(lhs)
+ for prec := p.tok.Precedence(); prec >= prec1; prec-- {
+ for p.tok.Precedence() == prec {
+ pos, op := p.pos, p.tok
+ p.next()
+ if lhs {
+ p.resolve(x)
+ lhs = false
+ }
+ y := p.parseBinaryExpr(false, prec+1)
+ x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)}
+ }
+ }
+
+ return x
+}
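+
+// Worked example, for illustration only (not in the original source):
+// parsing "a + b*c" with prec1 == token.LowestPrec+1, parseBinaryExpr reads
+// "a", sees "+" (precedence 4), and recursively parses the right operand
+// with prec1 == 5; that call consumes all of "b*c" because "*" has
+// precedence 5, so the result is a + (b*c) rather than (a + b)*c.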
+
+// If lhs is set and the result is an identifier, it is not resolved.
+// TODO(gri): parseExpr may return a type or even a raw type ([...]int) -
+// should reject when a type/raw type is obviously not allowed
+func (p *parser) parseExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Expression"))
+ }
+
+ return p.parseBinaryExpr(lhs, token.LowestPrec+1)
+}
+
+func (p *parser) parseRhs() ast.Expr {
+ return p.parseExpr(false)
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "SimpleStmt"))
+ }
+
+ x := p.parseLhsList()
+
+ switch p.tok {
+ case
+ token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
+ token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
+ token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
+ token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
+ // assignment statement
+ pos, tok := p.pos, p.tok
+ p.next()
+ y := p.parseRhsList()
+ return &ast.AssignStmt{x, pos, tok, y}
+ }
+
+ if len(x) > 1 {
+ p.errorExpected(x[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+
+ switch p.tok {
+ case token.COLON:
+ // labeled statement
+ colon := p.pos
+ p.next()
+ if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent {
+ // Go spec: The scope of a label is the body of the function
+ // in which it is declared and excludes the body of any nested
+ // function.
+ stmt := &ast.LabeledStmt{label, colon, p.parseStmt()}
+ p.declare(stmt, p.labelScope, ast.Lbl, label)
+ return stmt
+ }
+ p.error(x[0].Pos(), "illegal label declaration")
+ return &ast.BadStmt{x[0].Pos(), colon + 1}
+
+ case token.ARROW:
+ // send statement
+ arrow := p.pos
+ p.next() // consume "<-"
+ y := p.parseRhs()
+ return &ast.SendStmt{x[0], arrow, y}
+
+ case token.INC, token.DEC:
+ // increment or decrement
+ s := &ast.IncDecStmt{x[0], p.pos, p.tok}
+ p.next() // consume "++" or "--"
+ return s
+ }
+
+ // expression
+ return &ast.ExprStmt{x[0]}
+}
+
+func (p *parser) parseCallExpr() *ast.CallExpr {
+ x := p.parseRhs()
+ if call, isCall := x.(*ast.CallExpr); isCall {
+ return call
+ }
+ p.errorExpected(x.Pos(), "function/method call")
+ return nil
+}
+
+func (p *parser) parseGoStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "GoStmt"))
+ }
+
+ pos := p.expect(token.GO)
+ call := p.parseCallExpr()
+ p.expectSemi()
+ if call == nil {
+ return &ast.BadStmt{pos, pos + 2} // len("go")
+ }
+
+ return &ast.GoStmt{pos, call}
+}
+
+func (p *parser) parseDeferStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "DeferStmt"))
+ }
+
+ pos := p.expect(token.DEFER)
+ call := p.parseCallExpr()
+ p.expectSemi()
+ if call == nil {
+ return &ast.BadStmt{pos, pos + 5} // len("defer")
+ }
+
+ return &ast.DeferStmt{pos, call}
+}
+
+func (p *parser) parseReturnStmt() *ast.ReturnStmt {
+ if p.trace {
+ defer un(trace(p, "ReturnStmt"))
+ }
+
+ pos := p.pos
+ p.expect(token.RETURN)
+ var x []ast.Expr
+ if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
+ x = p.parseRhsList()
+ }
+ p.expectSemi()
+
+ return &ast.ReturnStmt{pos, x}
+}
+
+func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
+ if p.trace {
+ defer un(trace(p, "BranchStmt"))
+ }
+
+ pos := p.expect(tok)
+ var label *ast.Ident
+ if tok != token.FALLTHROUGH && p.tok == token.IDENT {
+ label = p.parseIdent()
+ // add to list of unresolved targets
+ n := len(p.targetStack) - 1
+ p.targetStack[n] = append(p.targetStack[n], label)
+ }
+ p.expectSemi()
+
+ return &ast.BranchStmt{pos, tok, label}
+}
+
+func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
+ if s == nil {
+ return nil
+ }
+ if es, isExpr := s.(*ast.ExprStmt); isExpr {
+ return p.checkExpr(es.X)
+ }
+ p.error(s.Pos(), "expected condition, found simple statement")
+ return &ast.BadExpr{s.Pos(), s.End()}
+}
+
+func (p *parser) parseIfStmt() *ast.IfStmt {
+ if p.trace {
+ defer un(trace(p, "IfStmt"))
+ }
+
+ pos := p.expect(token.IF)
+ p.openScope()
+ defer p.closeScope()
+
+ var s ast.Stmt
+ var x ast.Expr
+ {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok == token.SEMICOLON {
+ p.next()
+ x = p.parseRhs()
+ } else {
+ s = p.parseSimpleStmt(false)
+ if p.tok == token.SEMICOLON {
+ p.next()
+ x = p.parseRhs()
+ } else {
+ x = p.makeExpr(s)
+ s = nil
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ body := p.parseBlockStmt()
+ var else_ ast.Stmt
+ if p.tok == token.ELSE {
+ p.next()
+ else_ = p.parseStmt()
+ } else {
+ p.expectSemi()
+ }
+
+ return &ast.IfStmt{pos, s, x, body, else_}
+}
+
+func (p *parser) parseTypeList() (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "TypeList"))
+ }
+
+ list = append(list, p.parseType())
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.parseType())
+ }
+
+ return
+}
+
+func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
+ if p.trace {
+ defer un(trace(p, "CaseClause"))
+ }
+
+ pos := p.pos
+ var list []ast.Expr
+ if p.tok == token.CASE {
+ p.next()
+ if exprSwitch {
+ list = p.parseRhsList()
+ } else {
+ list = p.parseTypeList()
+ }
+ } else {
+ p.expect(token.DEFAULT)
+ }
+
+ colon := p.expect(token.COLON)
+ p.openScope()
+ body := p.parseStmtList()
+ p.closeScope()
+
+ return &ast.CaseClause{pos, list, colon, body}
+}
+
+func isExprSwitch(s ast.Stmt) bool {
+ if s == nil {
+ return true
+ }
+ if e, ok := s.(*ast.ExprStmt); ok {
+ if a, ok := e.X.(*ast.TypeAssertExpr); ok {
+ return a.Type != nil // regular type assertion
+ }
+ return true
+ }
+ return false
+}
+
+func (p *parser) parseSwitchStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "SwitchStmt"))
+ }
+
+ pos := p.expect(token.SWITCH)
+ p.openScope()
+ defer p.closeScope()
+
+ var s1, s2 ast.Stmt
+ if p.tok != token.LBRACE {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok != token.SEMICOLON {
+ s2 = p.parseSimpleStmt(false)
+ }
+ if p.tok == token.SEMICOLON {
+ p.next()
+ s1 = s2
+ s2 = nil
+ if p.tok != token.LBRACE {
+ s2 = p.parseSimpleStmt(false)
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ exprSwitch := isExprSwitch(s2)
+ lbrace := p.expect(token.LBRACE)
+ var list []ast.Stmt
+ for p.tok == token.CASE || p.tok == token.DEFAULT {
+ list = append(list, p.parseCaseClause(exprSwitch))
+ }
+ rbrace := p.expect(token.RBRACE)
+ p.expectSemi()
+ body := &ast.BlockStmt{lbrace, list, rbrace}
+
+ if exprSwitch {
+ return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
+ }
+ // type switch
+ // TODO(gri): do all the checks!
+ return &ast.TypeSwitchStmt{pos, s1, s2, body}
+}
+
+func (p *parser) parseCommClause() *ast.CommClause {
+ if p.trace {
+ defer un(trace(p, "CommClause"))
+ }
+
+ p.openScope()
+ pos := p.pos
+ var comm ast.Stmt
+ if p.tok == token.CASE {
+ p.next()
+ lhs := p.parseLhsList()
+ if p.tok == token.ARROW {
+ // SendStmt
+ if len(lhs) > 1 {
+ p.errorExpected(lhs[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+ arrow := p.pos
+ p.next()
+ rhs := p.parseRhs()
+ comm = &ast.SendStmt{lhs[0], arrow, rhs}
+ } else {
+ // RecvStmt
+ pos := p.pos
+ tok := p.tok
+ var rhs ast.Expr
+ if tok == token.ASSIGN || tok == token.DEFINE {
+ // RecvStmt with assignment
+ if len(lhs) > 2 {
+ p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
+ // continue with first two expressions
+ lhs = lhs[0:2]
+ }
+ p.next()
+ rhs = p.parseRhs()
+ } else {
+ // rhs must be single receive operation
+ if len(lhs) > 1 {
+ p.errorExpected(lhs[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+ rhs = lhs[0]
+ lhs = nil // there is no lhs
+ }
+ if x, isUnary := rhs.(*ast.UnaryExpr); !isUnary || x.Op != token.ARROW {
+ p.errorExpected(rhs.Pos(), "send or receive operation")
+ rhs = &ast.BadExpr{rhs.Pos(), rhs.End()}
+ }
+ if lhs != nil {
+ comm = &ast.AssignStmt{lhs, pos, tok, []ast.Expr{rhs}}
+ } else {
+ comm = &ast.ExprStmt{rhs}
+ }
+ }
+ } else {
+ p.expect(token.DEFAULT)
+ }
+
+ colon := p.expect(token.COLON)
+ body := p.parseStmtList()
+ p.closeScope()
+
+ return &ast.CommClause{pos, comm, colon, body}
+}
+
+func (p *parser) parseSelectStmt() *ast.SelectStmt {
+ if p.trace {
+ defer un(trace(p, "SelectStmt"))
+ }
+
+ pos := p.expect(token.SELECT)
+ lbrace := p.expect(token.LBRACE)
+ var list []ast.Stmt
+ for p.tok == token.CASE || p.tok == token.DEFAULT {
+ list = append(list, p.parseCommClause())
+ }
+ rbrace := p.expect(token.RBRACE)
+ p.expectSemi()
+ body := &ast.BlockStmt{lbrace, list, rbrace}
+
+ return &ast.SelectStmt{pos, body}
+}
+
+func (p *parser) parseForStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "ForStmt"))
+ }
+
+ pos := p.expect(token.FOR)
+ p.openScope()
+ defer p.closeScope()
+
+ var s1, s2, s3 ast.Stmt
+ if p.tok != token.LBRACE {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok != token.SEMICOLON {
+ s2 = p.parseSimpleStmt(false)
+ }
+ if p.tok == token.SEMICOLON {
+ p.next()
+ s1 = s2
+ s2 = nil
+ if p.tok != token.SEMICOLON {
+ s2 = p.parseSimpleStmt(false)
+ }
+ p.expectSemi()
+ if p.tok != token.LBRACE {
+ s3 = p.parseSimpleStmt(false)
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ body := p.parseBlockStmt()
+ p.expectSemi()
+
+ if as, isAssign := s2.(*ast.AssignStmt); isAssign {
+ // possibly a for statement with a range clause; check assignment operator
+ if as.Tok != token.ASSIGN && as.Tok != token.DEFINE {
+ p.errorExpected(as.TokPos, "'=' or ':='")
+ return &ast.BadStmt{pos, body.End()}
+ }
+ // check lhs
+ var key, value ast.Expr
+ switch len(as.Lhs) {
+ case 2:
+ key, value = as.Lhs[0], as.Lhs[1]
+ case 1:
+ key = as.Lhs[0]
+ default:
+ p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
+ return &ast.BadStmt{pos, body.End()}
+ }
+ // check rhs
+ if len(as.Rhs) != 1 {
+ p.errorExpected(as.Rhs[0].Pos(), "1 expression")
+ return &ast.BadStmt{pos, body.End()}
+ }
+ if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
+ // rhs is range expression
+		// (any short variable declaration was handled by parseSimpleStmt above)
+ return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}
+ }
+ p.errorExpected(s2.Pos(), "range clause")
+ return &ast.BadStmt{pos, body.End()}
+ }
+
+ // regular for statement
+ return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
+}
+
+func (p *parser) parseStmt() (s ast.Stmt) {
+ if p.trace {
+ defer un(trace(p, "Statement"))
+ }
+
+ switch p.tok {
+ case token.CONST, token.TYPE, token.VAR:
+ s = &ast.DeclStmt{p.parseDecl()}
+ case
+ // tokens that may start a top-level expression
+ token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand
+ token.LBRACK, token.STRUCT, // composite type
+ token.MUL, token.AND, token.ARROW, token.ADD, token.SUB, token.XOR: // unary operators
+ s = p.parseSimpleStmt(true)
+ // because of the required look-ahead, labeled statements are
+ // parsed by parseSimpleStmt - don't expect a semicolon after
+ // them
+ if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
+ p.expectSemi()
+ }
+ case token.GO:
+ s = p.parseGoStmt()
+ case token.DEFER:
+ s = p.parseDeferStmt()
+ case token.RETURN:
+ s = p.parseReturnStmt()
+ case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
+ s = p.parseBranchStmt(p.tok)
+ case token.LBRACE:
+ s = p.parseBlockStmt()
+ p.expectSemi()
+ case token.IF:
+ s = p.parseIfStmt()
+ case token.SWITCH:
+ s = p.parseSwitchStmt()
+ case token.SELECT:
+ s = p.parseSelectStmt()
+ case token.FOR:
+ s = p.parseForStmt()
+ case token.SEMICOLON:
+ s = &ast.EmptyStmt{p.pos}
+ p.next()
+ case token.RBRACE:
+ // a semicolon may be omitted before a closing "}"
+ s = &ast.EmptyStmt{p.pos}
+ default:
+ // no statement found
+ pos := p.pos
+ p.errorExpected(pos, "statement")
+ p.next() // make progress
+ s = &ast.BadStmt{pos, p.pos}
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
+
+func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "ImportSpec"))
+ }
+
+ var ident *ast.Ident
+ switch p.tok {
+ case token.PERIOD:
+ ident = &ast.Ident{p.pos, ".", nil}
+ p.next()
+ case token.IDENT:
+ ident = p.parseIdent()
+ }
+
+ var path *ast.BasicLit
+ if p.tok == token.STRING {
+ path = &ast.BasicLit{p.pos, p.tok, p.lit}
+ p.next()
+ } else {
+ p.expect(token.STRING) // use expect() error handling
+ }
+	p.expectSemi() // call before accessing p.lineComment
+
+ // collect imports
+ spec := &ast.ImportSpec{doc, ident, path, p.lineComment}
+ p.imports = append(p.imports, spec)
+
+ return spec
+}
+
+func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "ConstSpec"))
+ }
+
+ idents := p.parseIdentList()
+ typ := p.tryType()
+ var values []ast.Expr
+ if typ != nil || p.tok == token.ASSIGN || iota == 0 {
+ p.expect(token.ASSIGN)
+ values = p.parseRhsList()
+ }
+	p.expectSemi() // call before accessing p.lineComment
+
+ // Go spec: The scope of a constant or variable identifier declared inside
+ // a function begins at the end of the ConstSpec or VarSpec and ends at
+ // the end of the innermost containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ p.declare(spec, p.topScope, ast.Con, idents...)
+
+ return spec
+}
+
+func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "TypeSpec"))
+ }
+
+ ident := p.parseIdent()
+
+ // Go spec: The scope of a type identifier declared inside a function begins
+ // at the identifier in the TypeSpec and ends at the end of the innermost
+ // containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.TypeSpec{doc, ident, nil, nil}
+ p.declare(spec, p.topScope, ast.Typ, ident)
+
+ spec.Type = p.parseType()
+	p.expectSemi() // call before accessing p.lineComment
+ spec.Comment = p.lineComment
+
+ return spec
+}
+
+func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "VarSpec"))
+ }
+
+ idents := p.parseIdentList()
+ typ := p.tryType()
+ var values []ast.Expr
+ if typ == nil || p.tok == token.ASSIGN {
+ p.expect(token.ASSIGN)
+ values = p.parseRhsList()
+ }
+	p.expectSemi() // call before accessing p.lineComment
+
+ // Go spec: The scope of a constant or variable identifier declared inside
+ // a function begins at the end of the ConstSpec or VarSpec and ends at
+ // the end of the innermost containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ p.declare(spec, p.topScope, ast.Var, idents...)
+
+ return spec
+}
+
+func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
+ if p.trace {
+ defer un(trace(p, "GenDecl("+keyword.String()+")"))
+ }
+
+ doc := p.leadComment
+ pos := p.expect(keyword)
+ var lparen, rparen token.Pos
+ var list []ast.Spec
+ if p.tok == token.LPAREN {
+ lparen = p.pos
+ p.next()
+ for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
+ list = append(list, f(p, p.leadComment, iota))
+ }
+ rparen = p.expect(token.RPAREN)
+ p.expectSemi()
+ } else {
+ list = append(list, f(p, nil, 0))
+ }
+
+ return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen}
+}
+
+func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
+ if p.trace {
+ defer un(trace(p, "Receiver"))
+ }
+
+ pos := p.pos
+ par := p.parseParameters(scope, false)
+
+ // must have exactly one receiver
+ if par.NumFields() != 1 {
+ p.errorExpected(pos, "exactly one receiver")
+ // TODO determine a better range for BadExpr below
+ par.List = []*ast.Field{{Type: &ast.BadExpr{pos, pos}}}
+ return par
+ }
+
+ // recv type must be of the form ["*"] identifier
+ recv := par.List[0]
+ base := deref(recv.Type)
+ if _, isIdent := base.(*ast.Ident); !isIdent {
+ p.errorExpected(base.Pos(), "(unqualified) identifier")
+ par.List = []*ast.Field{{Type: &ast.BadExpr{recv.Pos(), recv.End()}}}
+ }
+
+ return par
+}
+
+func (p *parser) parseFuncDecl() *ast.FuncDecl {
+ if p.trace {
+ defer un(trace(p, "FunctionDecl"))
+ }
+
+ doc := p.leadComment
+ pos := p.expect(token.FUNC)
+ scope := ast.NewScope(p.topScope) // function scope
+
+ var recv *ast.FieldList
+ if p.tok == token.LPAREN {
+ recv = p.parseReceiver(scope)
+ }
+
+ ident := p.parseIdent()
+
+ params, results := p.parseSignature(scope)
+
+ var body *ast.BlockStmt
+ if p.tok == token.LBRACE {
+ body = p.parseBody(scope)
+ }
+ p.expectSemi()
+
+ decl := &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}
+ if recv == nil {
+ // Go spec: The scope of an identifier denoting a constant, type,
+ // variable, or function (but not method) declared at top level
+ // (outside any function) is the package block.
+ //
+ // init() functions cannot be referred to and there may
+ // be more than one - don't put them in the pkgScope
+ if ident.Name != "init" {
+ p.declare(decl, p.pkgScope, ast.Fun, ident)
+ }
+ }
+
+ return decl
+}
+
+func (p *parser) parseDecl() ast.Decl {
+ if p.trace {
+ defer un(trace(p, "Declaration"))
+ }
+
+ var f parseSpecFunction
+ switch p.tok {
+ case token.CONST:
+ f = parseConstSpec
+
+ case token.TYPE:
+ f = parseTypeSpec
+
+ case token.VAR:
+ f = parseVarSpec
+
+ case token.FUNC:
+ return p.parseFuncDecl()
+
+ default:
+ pos := p.pos
+ p.errorExpected(pos, "declaration")
+ p.next() // make progress
+ decl := &ast.BadDecl{pos, p.pos}
+ return decl
+ }
+
+ return p.parseGenDecl(p.tok, f)
+}
+
+func (p *parser) parseDeclList() (list []ast.Decl) {
+ if p.trace {
+ defer un(trace(p, "DeclList"))
+ }
+
+ for p.tok != token.EOF {
+ list = append(list, p.parseDecl())
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Source files
+
+func (p *parser) parseFile() *ast.File {
+ if p.trace {
+ defer un(trace(p, "File"))
+ }
+
+ // package clause
+ doc := p.leadComment
+ pos := p.expect(token.PACKAGE)
+ // Go spec: The package clause is not a declaration;
+ // the package name does not appear in any scope.
+ ident := p.parseIdent()
+ if ident.Name == "_" {
+ p.error(p.pos, "invalid package name _")
+ }
+ p.expectSemi()
+
+ var decls []ast.Decl
+
+ // Don't bother parsing the rest if we had errors already.
+ // Likely not a Go source file at all.
+
+ if p.ErrorCount() == 0 && p.mode&PackageClauseOnly == 0 {
+ // import decls
+ for p.tok == token.IMPORT {
+ decls = append(decls, p.parseGenDecl(token.IMPORT, parseImportSpec))
+ }
+
+ if p.mode&ImportsOnly == 0 {
+ // rest of package body
+ for p.tok != token.EOF {
+ decls = append(decls, p.parseDecl())
+ }
+ }
+ }
+
+ assert(p.topScope == p.pkgScope, "imbalanced scopes")
+
+ // resolve global identifiers within the same file
+ i := 0
+ for _, ident := range p.unresolved {
+ // i <= index for current ident
+ assert(ident.Obj == unresolved, "object already resolved")
+ ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
+ if ident.Obj == nil {
+ p.unresolved[i] = ident
+ i++
+ }
+ }
+
+ // TODO(gri): store p.imports in AST
+ return &ast.File{doc, pos, ident, decls, p.pkgScope, p.imports, p.unresolved[0:i], p.comments}
+}
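For orientation, parseFile and parseDeclList above are not called directly by clients; they are driven through the package's exported entry points. A minimal sketch of that intended use, assuming the standard go/parser and go/token APIs (the file name and source string are illustrative):

	package main

	import (
		"fmt"
		"go/parser"
		"go/token"
	)

	func main() {
		src := "package main\n\nfunc main() {}\n"
		fset := token.NewFileSet() // positions are resolved through the file set
		f, err := parser.ParseFile(fset, "hello.go", src, 0)
		if err != nil {
			fmt.Println(err) // a scanner.ErrorList, sorted by position
			return
		}
		// f.Decls holds what parseDeclList collected above.
		for _, d := range f.Decls {
			fmt.Printf("%T at %s\n", d, fset.Position(d.Pos()))
		}
	}
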
diff --git a/src/go/printer/testdata/slow.golden b/src/go/printer/testdata/slow.golden
new file mode 100644
index 000000000..43a15cb1d
--- /dev/null
+++ b/src/go/printer/testdata/slow.golden
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package deepequal_test
+
+import (
+ "testing"
+ "google3/spam/archer/frontend/deepequal"
+)
+
+func TestTwoNilValues(t *testing.T) {
+ if err := deepequal.Check(nil, nil); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
+
+type Foo struct {
+ bar *Bar
+ bang *Bar
+}
+
+type Bar struct {
+ baz *Baz
+ foo []*Foo
+}
+
+type Baz struct {
+ entries map[int]interface{}
+ whatever string
+}
+
+func newFoo() *Foo {
+ return &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 42: &Foo{},
+ 21: &Bar{},
+ 11: &Baz{whatever: "it's just a test"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 43: &Foo{},
+ 22: &Bar{},
+ 13: &Baz{whatever: "this is nuts"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 61: &Foo{},
+ 71: &Bar{},
+ 11: &Baz{whatever: "no, it's Go"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 0: &Foo{},
+ -2: &Bar{},
+ -11: &Baz{whatever: "we need to go deeper"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ -2: &Foo{},
+ -5: &Bar{},
+ -7: &Baz{whatever: "are you serious?"}}}},
+ bang: &Bar{foo: []*Foo{}}},
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ -100: &Foo{},
+ 50: &Bar{},
+ 20: &Baz{whatever: "na, not really ..."}}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}},
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 2: &Foo{},
+ 1: &Bar{},
+ -1: &Baz{whatever: "... it's just a test."}}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}}
+}
+
+func TestElaborate(t *testing.T) {
+ a := newFoo()
+ b := newFoo()
+
+ if err := deepequal.Check(a, b); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
diff --git a/src/go/printer/testdata/slow.input b/src/go/printer/testdata/slow.input
new file mode 100644
index 000000000..0e5a23d88
--- /dev/null
+++ b/src/go/printer/testdata/slow.input
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package deepequal_test
+
+import (
+ "testing"
+ "google3/spam/archer/frontend/deepequal"
+)
+
+func TestTwoNilValues(t *testing.T) {
+ if err := deepequal.Check(nil, nil); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
+
+type Foo struct {
+ bar *Bar
+ bang *Bar
+}
+
+type Bar struct {
+ baz *Baz
+ foo []*Foo
+}
+
+type Baz struct {
+ entries map[int]interface{}
+ whatever string
+}
+
+func newFoo() (*Foo) {
+return &Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+42: &Foo{},
+21: &Bar{},
+11: &Baz{ whatever: "it's just a test" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+43: &Foo{},
+22: &Bar{},
+13: &Baz{ whatever: "this is nuts" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+61: &Foo{},
+71: &Bar{},
+11: &Baz{ whatever: "no, it's Go" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+0: &Foo{},
+-2: &Bar{},
+-11: &Baz{ whatever: "we need to go deeper" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+-2: &Foo{},
+-5: &Bar{},
+-7: &Baz{ whatever: "are you serious?" }}}},
+ bang: &Bar{foo: []*Foo{}}},
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+-100: &Foo{},
+50: &Bar{},
+20: &Baz{ whatever: "na, not really ..." }}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}},
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+2: &Foo{},
+1: &Bar{},
+-1: &Baz{ whatever: "... it's just a test." }}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}}
+}
+
+func TestElaborate(t *testing.T) {
+ a := newFoo()
+ b := newFoo()
+
+ if err := deepequal.Check(a, b); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
diff --git a/src/go/printer/testdata/statements.golden b/src/go/printer/testdata/statements.golden
new file mode 100644
index 000000000..324b6cdd0
--- /dev/null
+++ b/src/go/printer/testdata/statements.golden
@@ -0,0 +1,644 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package statements
+
+var expr bool
+
+func use(x interface{}) {}
+
+// Formatting of multi-line return statements.
+func _f() {
+ return
+ return x, y, z
+ return T{}
+ return T{1, 2, 3},
+ x, y, z
+ return T{1, 2, 3},
+ x, y,
+ z
+ return T{1,
+ 2,
+ 3}
+ return T{1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ 3}
+ return T{
+ 1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ T{1, 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2,
+ 3},
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ },
+ nil
+ return x + y +
+ z
+ return func() {}
+ return func() {
+ _ = 0
+ }, T{
+ 1, 2,
+ }
+ return func() {
+ _ = 0
+ }
+ return func() T {
+ return T{
+ 1, 2,
+ }
+ }
+}
+
+// Formatting of multi-line returns: test cases from issue 1207.
+func F() (*T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ nil
+}
+
+func G() (*T, *T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ &T{
+ X: 3,
+ Y: 4,
+ },
+ nil
+}
+
+func _() interface{} {
+ return &fileStat{
+ name: basename(file.name),
+ size: mkSize(d.FileSizeHigh, d.FileSizeLow),
+ modTime: mkModTime(d.LastWriteTime),
+ mode: mkMode(d.FileAttributes),
+ sys: mkSysFromFI(&d),
+ }, nil
+}
+
+// Formatting of if-statement headers.
+func _() {
+ if true {
+ }
+ if true {
+ } // no semicolon printed
+ if expr {
+ }
+ if expr {
+ } // no semicolon printed
+ if expr {
+ } // no parens printed
+ if expr {
+ } // no semicolon and parens printed
+ if x := expr; true {
+ use(x)
+ }
+ if x := expr; expr {
+ use(x)
+ }
+}
+
+// Formatting of switch-statement headers.
+func _() {
+ switch {
+ }
+ switch {
+ } // no semicolon printed
+ switch expr {
+ }
+ switch expr {
+ } // no semicolon printed
+ switch expr {
+ } // no parens printed
+ switch expr {
+ } // no semicolon and parens printed
+ switch x := expr; {
+ default:
+ use(
+ x)
+ }
+ switch x := expr; expr {
+ default:
+ use(x)
+ }
+}
+
+// Formatting of switch statement bodies.
+func _() {
+ switch {
+ }
+
+ switch x := 0; x {
+ case 1:
+ use(x)
+ use(x) // followed by an empty line
+
+ case 2: // followed by an empty line
+
+ use(x) // followed by an empty line
+
+ case 3: // no empty lines
+ use(x)
+ use(x)
+ }
+
+ switch x {
+ case 0:
+ use(x)
+ case 1: // this comment should have no effect on the previous or next line
+ use(x)
+ }
+
+ switch x := 0; x {
+ case 1:
+ x = 0
+ // this comment should be indented
+ case 2:
+ x = 0
+ // this comment should not be indented, it is aligned with the next case
+ case 3:
+ x = 0
+ /* indented comment
+ aligned
+ aligned
+ */
+ // bla
+ /* and more */
+ case 4:
+ x = 0
+ /* not indented comment
+ aligned
+ aligned
+ */
+ // bla
+ /* and more */
+ case 5:
+ }
+}
+
+// Formatting of selected select statements.
+func _() {
+ select {}
+ select { /* this comment should not be tab-aligned because the closing } is on the same line */
+ }
+ select { /* this comment should be tab-aligned */
+ }
+ select { // this comment should be tab-aligned
+ }
+ select {
+ case <-c:
+ }
+}
+
+// Formatting of for-statement headers for single-line for-loops.
+func _() {
+ for {
+ }
+ for expr {
+ }
+ for expr {
+ } // no parens printed
+ for {
+ } // no semicolons printed
+ for x := expr; ; {
+ use(x)
+ }
+ for expr {
+ } // no semicolons printed
+ for expr {
+ } // no semicolons and parens printed
+ for ; ; expr = false {
+ }
+ for x := expr; expr; {
+ use(x)
+ }
+ for x := expr; ; expr = false {
+ use(x)
+ }
+ for ; expr; expr = false {
+ }
+ for x := expr; expr; expr = false {
+ use(x)
+ }
+ for x := range []int{} {
+ use(x)
+ }
+ for x := range []int{} {
+ use(x)
+ } // no parens printed
+}
+
+// Formatting of for-statement headers for multi-line for-loops.
+func _() {
+ for {
+ }
+ for expr {
+ }
+ for expr {
+ } // no parens printed
+ for {
+ } // no semicolons printed
+ for x := expr; ; {
+ use(x)
+ }
+ for expr {
+ } // no semicolons printed
+ for expr {
+ } // no semicolons and parens printed
+ for ; ; expr = false {
+ }
+ for x := expr; expr; {
+ use(x)
+ }
+ for x := expr; ; expr = false {
+ use(x)
+ }
+ for ; expr; expr = false {
+ }
+ for x := expr; expr; expr = false {
+ use(x)
+ }
+ for range []int{} {
+ println("foo")
+ }
+ for x := range []int{} {
+ use(x)
+ }
+ for x := range []int{} {
+ use(x)
+ } // no parens printed
+}
+
+// Formatting of selected short single- and multi-line statements.
+func _() {
+ if cond {
+ }
+ if cond {
+ } // multiple lines
+ if cond {
+ } else {
+ } // else clause always requires multiple lines
+
+ for {
+ }
+ for i := 0; i < len(a); 1++ {
+ }
+ for i := 0; i < len(a); 1++ {
+ a[i] = i
+ }
+ for i := 0; i < len(a); 1++ {
+ a[i] = i
+ } // multiple lines
+
+ for range a {
+ }
+ for _ = range a {
+ }
+ for _, _ = range a {
+ }
+ for i := range a {
+ }
+ for i := range a {
+ a[i] = i
+ }
+ for i := range a {
+ a[i] = i
+ } // multiple lines
+
+ go func() {
+ for {
+ a <- <-b
+ }
+ }()
+ defer func() {
+ if x := recover(); x != nil {
+ err = fmt.Sprintf("error: %s", x.msg)
+ }
+ }()
+}
+
+// Don't remove mandatory parentheses around composite literals in control clauses.
+func _() {
+ // strip parentheses - no composite literals or composite literals don't start with a type name
+ if x {
+ }
+ if x {
+ }
+ if []T{} {
+ }
+ if []T{} {
+ }
+ if []T{} {
+ }
+
+ for x {
+ }
+ for x {
+ }
+ for []T{} {
+ }
+ for []T{} {
+ }
+ for []T{} {
+ }
+
+ switch x {
+ }
+ switch x {
+ }
+ switch []T{} {
+ }
+ switch []T{} {
+ }
+
+ for _ = range []T{T{42}} {
+ }
+
+ // leave parentheses - composite literals start with a type name
+ if (T{}) {
+ }
+ if (T{}) {
+ }
+ if (T{}) {
+ }
+
+ for (T{}) {
+ }
+ for (T{}) {
+ }
+ for (T{}) {
+ }
+
+ switch (T{}) {
+ }
+ switch (T{}) {
+ }
+
+ for _ = range (T1{T{42}}) {
+ }
+
+ if x == (T{42}[0]) {
+ }
+ if (x == T{42}[0]) {
+ }
+ if x == (T{42}[0]) {
+ }
+ if x == (T{42}[0]) {
+ }
+ if x == (T{42}[0]) {
+ }
+ if x == a+b*(T{42}[0]) {
+ }
+ if (x == a+b*T{42}[0]) {
+ }
+ if x == a+b*(T{42}[0]) {
+ }
+ if x == a+(b * (T{42}[0])) {
+ }
+ if x == a+b*(T{42}[0]) {
+ }
+ if (a + b*(T{42}[0])) == x {
+ }
+ if (a + b*(T{42}[0])) == x {
+ }
+
+ if struct{ x bool }{false}.x {
+ }
+ if (struct{ x bool }{false}.x) == false {
+ }
+ if struct{ x bool }{false}.x == false {
+ }
+}
+
+// Extra empty lines inside functions. Do respect source code line
+// breaks between statement boundaries but print at most one empty
+// line at a time.
+func _() {
+
+ const _ = 0
+
+ const _ = 1
+ type _ int
+ type _ float
+
+ var _ = 0
+ var x = 1
+
+ // Each use(x) call below should have at most one empty line before and after.
+ // Known bug: The first use call may have more than one empty line before
+ // (see go/printer/nodes.go, func linebreak).
+
+ use(x)
+
+ if x < x {
+
+ use(x)
+
+ } else {
+
+ use(x)
+
+ }
+}
+
+// Formatting around labels.
+func _() {
+L:
+}
+
+func _() {
+ // this comment should be indented
+L: // no semicolon needed
+}
+
+func _() {
+ switch 0 {
+ case 0:
+ L0:
+ ; // semicolon required
+ case 1:
+ L1:
+ ; // semicolon required
+ default:
+ L2: // no semicolon needed
+ }
+}
+
+func _() {
+ f()
+L1:
+ f()
+L2:
+ ;
+L3:
+}
+
+func _() {
+ // this comment should be indented
+L:
+}
+
+func _() {
+L:
+ _ = 0
+}
+
+func _() {
+ // this comment should be indented
+L:
+ _ = 0
+}
+
+func _() {
+ for {
+ L1:
+ _ = 0
+ L2:
+ _ = 0
+ }
+}
+
+func _() {
+ // this comment should be indented
+ for {
+ L1:
+ _ = 0
+ L2:
+ _ = 0
+ }
+}
+
+func _() {
+ if true {
+ _ = 0
+ }
+ _ = 0 // the indentation here should not be affected by the long label name
+AnOverlongLabel:
+ _ = 0
+
+ if true {
+ _ = 0
+ }
+ _ = 0
+
+L:
+ _ = 0
+}
+
+func _() {
+ for {
+ goto L
+ }
+L:
+
+ MoreCode()
+}
+
+func _() {
+ for {
+ goto L
+ }
+L: // A comment on the same line as the label, followed by a single empty line.
+ // Known bug: There may be more than one empty line before MoreCode()
+ // (see go/printer/nodes.go, func linebreak).
+
+ MoreCode()
+}
+
+func _() {
+ for {
+ goto L
+ }
+L:
+
+ // There should be a single empty line before this comment.
+ MoreCode()
+}
+
+func _() {
+ for {
+ goto AVeryLongLabelThatShouldNotAffectFormatting
+ }
+AVeryLongLabelThatShouldNotAffectFormatting:
+ // There should be a single empty line after this comment.
+
+ // There should be a single empty line before this comment.
+ MoreCode()
+}
+
+// Formatting of empty statements.
+func _() {
+
+}
+
+func _() {
+}
+
+func _() {
+}
+
+func _() {
+ f()
+}
+
+func _() {
+L:
+ ;
+}
+
+func _() {
+L:
+ ;
+ f()
+}
diff --git a/src/go/printer/testdata/statements.input b/src/go/printer/testdata/statements.input
new file mode 100644
index 000000000..cade1576b
--- /dev/null
+++ b/src/go/printer/testdata/statements.input
@@ -0,0 +1,555 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package statements
+
+var expr bool
+
+func use(x interface{}) {}
+
+// Formatting of multi-line return statements.
+func _f() {
+ return
+ return x, y, z
+ return T{}
+ return T{1, 2, 3},
+ x, y, z
+ return T{1, 2, 3},
+ x, y,
+ z
+ return T{1,
+ 2,
+ 3}
+ return T{1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ 3}
+ return T{
+ 1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ T{1, 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2,
+ 3},
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ },
+ nil
+ return x + y +
+ z
+ return func() {}
+ return func() {
+ _ = 0
+ }, T{
+ 1, 2,
+ }
+ return func() {
+ _ = 0
+ }
+ return func() T {
+ return T {
+ 1, 2,
+ }
+ }
+}
+
+// Formatting of multi-line returns: test cases from issue 1207.
+func F() (*T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ nil
+}
+
+func G() (*T, *T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ &T{
+ X: 3,
+ Y: 4,
+ },
+ nil
+}
+
+func _() interface{} {
+ return &fileStat{
+ name: basename(file.name),
+ size: mkSize(d.FileSizeHigh, d.FileSizeLow),
+ modTime: mkModTime(d.LastWriteTime),
+ mode: mkMode(d.FileAttributes),
+ sys: mkSysFromFI(&d),
+ }, nil
+}
+
+// Formatting of if-statement headers.
+func _() {
+ if true {}
+ if; true {} // no semicolon printed
+ if expr{}
+ if;expr{} // no semicolon printed
+ if (expr){} // no parens printed
+ if;((expr)){} // no semicolon and parens printed
+ if x:=expr;true{
+ use(x)}
+ if x:=expr; expr {use(x)}
+}
+
+
+// Formatting of switch-statement headers.
+func _() {
+ switch {}
+ switch;{} // no semicolon printed
+ switch expr {}
+ switch;expr{} // no semicolon printed
+ switch (expr) {} // no parens printed
+ switch;((expr)){} // no semicolon and parens printed
+ switch x := expr; { default:use(
+x)
+ }
+ switch x := expr; expr {default:use(x)}
+}
+
+
+// Formatting of switch statement bodies.
+func _() {
+ switch {
+ }
+
+ switch x := 0; x {
+ case 1:
+ use(x)
+ use(x) // followed by an empty line
+
+ case 2: // followed by an empty line
+
+ use(x) // followed by an empty line
+
+ case 3: // no empty lines
+ use(x)
+ use(x)
+ }
+
+ switch x {
+ case 0:
+ use(x)
+ case 1: // this comment should have no effect on the previous or next line
+ use(x)
+ }
+
+ switch x := 0; x {
+ case 1:
+ x = 0
+ // this comment should be indented
+ case 2:
+ x = 0
+ // this comment should not be indented, it is aligned with the next case
+ case 3:
+ x = 0
+ /* indented comment
+ aligned
+ aligned
+ */
+ // bla
+ /* and more */
+ case 4:
+ x = 0
+ /* not indented comment
+ aligned
+ aligned
+ */
+ // bla
+ /* and more */
+ case 5:
+ }
+}
+
+
+// Formatting of selected select statements.
+func _() {
+ select {
+ }
+ select { /* this comment should not be tab-aligned because the closing } is on the same line */ }
+ select { /* this comment should be tab-aligned */
+ }
+ select { // this comment should be tab-aligned
+ }
+ select { case <-c: }
+}
+
+
+// Formatting of for-statement headers for single-line for-loops.
+func _() {
+ for{}
+ for expr {}
+ for (expr) {} // no parens printed
+ for;;{} // no semicolons printed
+ for x :=expr;; {use( x)}
+ for; expr;{} // no semicolons printed
+ for; ((expr));{} // no semicolons and parens printed
+ for; ; expr = false {}
+ for x :=expr; expr; {use(x)}
+ for x := expr;; expr=false {use(x)}
+ for;expr;expr =false {}
+ for x := expr;expr;expr = false { use(x) }
+ for x := range []int{} { use(x) }
+ for x := range (([]int{})) { use(x) } // no parens printed
+}
+
+
+// Formatting of for-statement headers for multi-line for-loops.
+func _() {
+ for{
+ }
+ for expr {
+ }
+ for (expr) {
+ } // no parens printed
+ for;;{
+ } // no semicolons printed
+ for x :=expr;; {use( x)
+ }
+ for; expr;{
+ } // no semicolons printed
+ for; ((expr));{
+ } // no semicolons and parens printed
+ for; ; expr = false {
+ }
+ for x :=expr; expr; {use(x)
+ }
+ for x := expr;; expr=false {use(x)
+ }
+ for;expr;expr =false {
+ }
+ for x := expr;expr;expr = false {
+ use(x)
+ }
+ for range []int{} {
+ println("foo")}
+ for x := range []int{} {
+ use(x) }
+ for x := range (([]int{})) {
+ use(x) } // no parens printed
+}
+
+
+// Formatting of selected short single- and multi-line statements.
+func _() {
+ if cond {}
+ if cond {
+ } // multiple lines
+ if cond {} else {} // else clause always requires multiple lines
+
+ for {}
+ for i := 0; i < len(a); 1++ {}
+ for i := 0; i < len(a); 1++ { a[i] = i }
+ for i := 0; i < len(a); 1++ { a[i] = i
+ } // multiple lines
+
+ for range a{}
+ for _ = range a{}
+ for _, _ = range a{}
+ for i := range a {}
+ for i := range a { a[i] = i }
+ for i := range a { a[i] = i
+ } // multiple lines
+
+ go func() { for { a <- <-b } }()
+ defer func() { if x := recover(); x != nil { err = fmt.Sprintf("error: %s", x.msg) } }()
+}
+
+
+// Don't remove mandatory parentheses around composite literals in control clauses.
+func _() {
+ // strip parentheses - no composite literals or composite literals don't start with a type name
+ if (x) {}
+ if (((x))) {}
+ if ([]T{}) {}
+ if (([]T{})) {}
+ if ; (((([]T{})))) {}
+
+ for (x) {}
+ for (((x))) {}
+ for ([]T{}) {}
+ for (([]T{})) {}
+ for ; (((([]T{})))) ; {}
+
+ switch (x) {}
+ switch (((x))) {}
+ switch ([]T{}) {}
+ switch ; (((([]T{})))) {}
+
+ for _ = range ((([]T{T{42}}))) {}
+
+ // leave parentheses - composite literals start with a type name
+ if (T{}) {}
+ if ((T{})) {}
+ if ; ((((T{})))) {}
+
+ for (T{}) {}
+ for ((T{})) {}
+ for ; ((((T{})))) ; {}
+
+ switch (T{}) {}
+ switch ; ((((T{})))) {}
+
+ for _ = range (((T1{T{42}}))) {}
+
+ if x == (T{42}[0]) {}
+ if (x == T{42}[0]) {}
+ if (x == (T{42}[0])) {}
+ if (x == (((T{42}[0])))) {}
+ if (((x == (T{42}[0])))) {}
+ if x == a + b*(T{42}[0]) {}
+ if (x == a + b*T{42}[0]) {}
+ if (x == a + b*(T{42}[0])) {}
+ if (x == a + ((b * (T{42}[0])))) {}
+ if (((x == a + b * (T{42}[0])))) {}
+ if (((a + b * (T{42}[0])) == x)) {}
+ if (((a + b * (T{42}[0])))) == x {}
+
+ if (struct{x bool}{false}.x) {}
+ if (struct{x bool}{false}.x) == false {}
+ if (struct{x bool}{false}.x == false) {}
+}
+
+
+// Extra empty lines inside functions. Do respect source code line
+// breaks between statement boundaries but print at most one empty
+// line at a time.
+func _() {
+
+ const _ = 0
+
+ const _ = 1
+ type _ int
+ type _ float
+
+ var _ = 0
+ var x = 1
+
+ // Each use(x) call below should have at most one empty line before and after.
+ // Known bug: The first use call may have more than one empty line before
+ // (see go/printer/nodes.go, func linebreak).
+
+
+
+ use(x)
+
+ if x < x {
+
+ use(x)
+
+ } else {
+
+ use(x)
+
+ }
+}
+
+
+// Formatting around labels.
+func _() {
+ L:
+}
+
+
+func _() {
+ // this comment should be indented
+ L: ; // no semicolon needed
+}
+
+
+func _() {
+ switch 0 {
+ case 0:
+ L0: ; // semicolon required
+ case 1:
+ L1: ; // semicolon required
+ default:
+ L2: ; // no semicolon needed
+ }
+}
+
+
+func _() {
+ f()
+L1:
+ f()
+L2:
+ ;
+L3:
+}
+
+
+func _() {
+ // this comment should be indented
+ L:
+}
+
+
+func _() {
+ L: _ = 0
+}
+
+
+func _() {
+ // this comment should be indented
+ L: _ = 0
+}
+
+
+func _() {
+ for {
+ L1: _ = 0
+ L2:
+ _ = 0
+ }
+}
+
+
+func _() {
+ // this comment should be indented
+ for {
+ L1: _ = 0
+ L2:
+ _ = 0
+ }
+}
+
+
+func _() {
+ if true {
+ _ = 0
+ }
+ _ = 0 // the indentation here should not be affected by the long label name
+AnOverlongLabel:
+ _ = 0
+
+ if true {
+ _ = 0
+ }
+ _ = 0
+
+L: _ = 0
+}
+
+
+func _() {
+ for {
+ goto L
+ }
+L:
+
+ MoreCode()
+}
+
+
+func _() {
+ for {
+ goto L
+ }
+L: // A comment on the same line as the label, followed by a single empty line.
+ // Known bug: There may be more than one empty line before MoreCode()
+ // (see go/printer/nodes.go, func linebreak).
+
+
+
+
+ MoreCode()
+}
+
+
+func _() {
+ for {
+ goto L
+ }
+L:
+
+
+
+
+ // There should be a single empty line before this comment.
+ MoreCode()
+}
+
+
+func _() {
+ for {
+ goto AVeryLongLabelThatShouldNotAffectFormatting
+ }
+AVeryLongLabelThatShouldNotAffectFormatting:
+ // There should be a single empty line after this comment.
+
+ // There should be a single empty line before this comment.
+ MoreCode()
+}
+
+
+// Formatting of empty statements.
+func _() {
+ ;;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {;;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {;;;;;;;;;;;;;;;;;;;;;;;;;}
+
+func _() {
+f();;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {
+L:;;;;;;;;;;;;
+}
+
+func _() {
+L:;;;;;;;;;;;;
+ f()
+}
diff --git a/src/go/scanner/errors.go b/src/go/scanner/errors.go
new file mode 100644
index 000000000..22de69c3c
--- /dev/null
+++ b/src/go/scanner/errors.go
@@ -0,0 +1,126 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+ "fmt"
+ "go/token"
+ "io"
+ "sort"
+)
+
+// In an ErrorList, an error is represented by an *Error.
+// The position Pos, if valid, points to the beginning of
+// the offending token, and the error condition is described
+// by Msg.
+//
+type Error struct {
+ Pos token.Position
+ Msg string
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+ if e.Pos.Filename != "" || e.Pos.IsValid() {
+ // don't print "<unknown position>"
+ // TODO(gri) reconsider the semantics of Position.IsValid
+ return e.Pos.String() + ": " + e.Msg
+ }
+ return e.Msg
+}
+
+// ErrorList is a list of *Errors.
+// The zero value for an ErrorList is an empty ErrorList ready to use.
+//
+type ErrorList []*Error
+
+// Add adds an Error with given position and error message to an ErrorList.
+func (p *ErrorList) Add(pos token.Position, msg string) {
+ *p = append(*p, &Error{pos, msg})
+}
+
+// Reset resets an ErrorList to no errors.
+func (p *ErrorList) Reset() { *p = (*p)[0:0] }
+
+// ErrorList implements sort.Interface.
+func (p ErrorList) Len() int { return len(p) }
+func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p ErrorList) Less(i, j int) bool {
+ e := &p[i].Pos
+ f := &p[j].Pos
+ // Note that it is not sufficient to simply compare file offsets because
+ // the offsets do not reflect modified line information (through //line
+ // comments).
+ if e.Filename < f.Filename {
+ return true
+ }
+ if e.Filename == f.Filename {
+ if e.Line < f.Line {
+ return true
+ }
+ if e.Line == f.Line {
+ return e.Column < f.Column
+ }
+ }
+ return false
+}
+
+// Sort sorts an ErrorList. *Error entries are sorted by position;
+// other errors are sorted by error message and placed before any
+// *Error entry.
+//
+func (p ErrorList) Sort() {
+ sort.Sort(p)
+}
+
+// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
+func (p *ErrorList) RemoveMultiples() {
+ sort.Sort(p)
+ var last token.Position // initial last.Line is != any legal error line
+ i := 0
+ for _, e := range *p {
+ if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
+ last = e.Pos
+ (*p)[i] = e
+ i++
+ }
+ }
+ (*p) = (*p)[0:i]
+}
+
+// An ErrorList implements the error interface.
+func (p ErrorList) Error() string {
+ switch len(p) {
+ case 0:
+ return "no errors"
+ case 1:
+ return p[0].Error()
+ }
+ return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
+}
+
+// Err returns an error equivalent to this error list.
+// If the list is empty, Err returns nil.
+func (p ErrorList) Err() error {
+ if len(p) == 0 {
+ return nil
+ }
+ return p
+}
+
+// PrintError is a utility function that prints a list of errors to w,
+// one error per line, if the err parameter is an ErrorList. Otherwise
+// it prints the err string.
+//
+func PrintError(w io.Writer, err error) {
+ if list, ok := err.(ErrorList); ok {
+ for _, e := range list {
+ fmt.Fprintf(w, "%s\n", e)
+ }
+ } else if err != nil {
+ fmt.Fprintf(w, "%s\n", err)
+ }
+}
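A minimal sketch of how an ErrorList is typically built up and reported, using only the methods defined in this file (the positions and messages are illustrative):

	package main

	import (
		"go/scanner"
		"go/token"
		"os"
	)

	func main() {
		var list scanner.ErrorList
		list.Add(token.Position{Filename: "f.go", Line: 2, Column: 1}, "expected ';'")
		list.Add(token.Position{Filename: "f.go", Line: 1, Column: 5}, "expected '('")
		list.Sort() // orders by filename, then line, then column

		// Err returns nil for an empty list, so callers can return it directly.
		scanner.PrintError(os.Stderr, list.Err()) // prints one error per line
	}
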
diff --git a/src/go/scanner/example_test.go b/src/go/scanner/example_test.go
new file mode 100644
index 000000000..9004a4ad3
--- /dev/null
+++ b/src/go/scanner/example_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner_test
+
+import (
+ "fmt"
+ "go/scanner"
+ "go/token"
+)
+
+func ExampleScanner_Scan() {
+ // src is the input that we want to tokenize.
+ src := []byte("cos(x) + 1i*sin(x) // Euler")
+
+ // Initialize the scanner.
+ var s scanner.Scanner
+ fset := token.NewFileSet() // positions are relative to fset
+ file := fset.AddFile("", fset.Base(), len(src)) // register input "file"
+ s.Init(file, src, nil /* no error handler */, scanner.ScanComments)
+
+ // Repeated calls to Scan yield the token sequence found in the input.
+ for {
+ pos, tok, lit := s.Scan()
+ if tok == token.EOF {
+ break
+ }
+ fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
+ }
+
+ // output:
+ // 1:1 IDENT "cos"
+ // 1:4 ( ""
+ // 1:5 IDENT "x"
+ // 1:6 ) ""
+ // 1:8 + ""
+ // 1:10 IMAG "1i"
+ // 1:12 * ""
+ // 1:13 IDENT "sin"
+ // 1:16 ( ""
+ // 1:17 IDENT "x"
+ // 1:18 ) ""
+ // 1:20 ; "\n"
+ // 1:20 COMMENT "// Euler"
+}
diff --git a/src/go/scanner/scanner.go b/src/go/scanner/scanner.go
new file mode 100644
index 000000000..cec82ea10
--- /dev/null
+++ b/src/go/scanner/scanner.go
@@ -0,0 +1,760 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scanner implements a scanner for Go source text.
+// It takes a []byte as source which can then be tokenized
+// through repeated calls to the Scan method.
+//
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "go/token"
+ "path/filepath"
+ "strconv"
+ "unicode"
+ "unicode/utf8"
+)
+
+// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
+// encountered and a handler was installed, the handler is called with a
+// position and an error message. The position points to the beginning of
+// the offending token.
+//
+type ErrorHandler func(pos token.Position, msg string)
+
+// A Scanner holds the scanner's internal state while processing
+// a given text. It can be allocated as part of another data
+// structure but must be initialized via Init before use.
+//
+type Scanner struct {
+ // immutable state
+ file *token.File // source file handle
+ dir string // directory portion of file.Name()
+ src []byte // source
+ err ErrorHandler // error reporting; or nil
+ mode Mode // scanning mode
+
+ // scanning state
+ ch rune // current character
+ offset int // character offset
+ rdOffset int // reading offset (position after current character)
+ lineOffset int // current line offset
+ insertSemi bool // insert a semicolon before next newline
+
+ // public state - ok to modify
+ ErrorCount int // number of errors encountered
+}
+
+const bom = 0xFEFF // byte order mark, only permitted as very first character
+
+// Read the next Unicode char into s.ch.
+// s.ch < 0 means end-of-file.
+//
+func (s *Scanner) next() {
+ if s.rdOffset < len(s.src) {
+ s.offset = s.rdOffset
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
+ }
+ r, w := rune(s.src[s.rdOffset]), 1
+ switch {
+ case r == 0:
+ s.error(s.offset, "illegal character NUL")
+ case r >= 0x80:
+ // not ASCII
+ r, w = utf8.DecodeRune(s.src[s.rdOffset:])
+ if r == utf8.RuneError && w == 1 {
+ s.error(s.offset, "illegal UTF-8 encoding")
+ } else if r == bom && s.offset > 0 {
+ s.error(s.offset, "illegal byte order mark")
+ }
+ }
+ s.rdOffset += w
+ s.ch = r
+ } else {
+ s.offset = len(s.src)
+ if s.ch == '\n' {
+ s.lineOffset = s.offset
+ s.file.AddLine(s.offset)
+ }
+ s.ch = -1 // eof
+ }
+}
+
+// A mode value is a set of flags (or 0).
+// They control scanner behavior.
+//
+type Mode uint
+
+const (
+ ScanComments Mode = 1 << iota // return comments as COMMENT tokens
+ dontInsertSemis // do not automatically insert semicolons - for testing only
+)
+
+// Init prepares the scanner s to tokenize the text src by setting the
+// scanner at the beginning of src. The scanner uses the file set file
+// for position information and it adds line information for each line.
+// It is ok to re-use the same file when re-scanning the same source, as
+// line information that is already present is ignored. Init causes a
+// panic if the file size does not match the src size.
+//
+// Calls to Scan will invoke the error handler err if they encounter a
+// syntax error and err is not nil. Also, for each error encountered,
+// the Scanner field ErrorCount is incremented by one. The mode parameter
+// determines how comments are handled.
+//
+// Note that Init may call err if there is an error in the first character
+// of the file.
+//
+func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
+ // Explicitly initialize all fields since a scanner may be reused.
+ if file.Size() != len(src) {
+ panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
+ }
+ s.file = file
+ s.dir, _ = filepath.Split(file.Name())
+ s.src = src
+ s.err = err
+ s.mode = mode
+
+ s.ch = ' '
+ s.offset = 0
+ s.rdOffset = 0
+ s.lineOffset = 0
+ s.insertSemi = false
+ s.ErrorCount = 0
+
+ s.next()
+ if s.ch == bom {
+ s.next() // ignore BOM at file beginning
+ }
+}
+
+func (s *Scanner) error(offs int, msg string) {
+ if s.err != nil {
+ s.err(s.file.Position(s.file.Pos(offs)), msg)
+ }
+ s.ErrorCount++
+}
+
+var prefix = []byte("//line ")
+
+func (s *Scanner) interpretLineComment(text []byte) {
+ if bytes.HasPrefix(text, prefix) {
+ // get filename and line number, if any
+ if i := bytes.LastIndex(text, []byte{':'}); i > 0 {
+ if line, err := strconv.Atoi(string(text[i+1:])); err == nil && line > 0 {
+ // valid //line filename:line comment
+ filename := string(bytes.TrimSpace(text[len(prefix):i]))
+ if filename != "" {
+ filename = filepath.Clean(filename)
+ if !filepath.IsAbs(filename) {
+ // make filename relative to current directory
+ filename = filepath.Join(s.dir, filename)
+ }
+ }
+ // update scanner position
+ s.file.AddLineInfo(s.lineOffset+len(text)+1, filename, line) // +len(text)+1 since comment applies to next line
+ }
+ }
+ }
+}
+
+func (s *Scanner) scanComment() string {
+ // initial '/' already consumed; s.ch == '/' || s.ch == '*'
+ offs := s.offset - 1 // position of initial '/'
+ hasCR := false
+
+ if s.ch == '/' {
+ //-style comment
+ s.next()
+ for s.ch != '\n' && s.ch >= 0 {
+ if s.ch == '\r' {
+ hasCR = true
+ }
+ s.next()
+ }
+ if offs == s.lineOffset {
+ // comment starts at the beginning of the current line
+ s.interpretLineComment(s.src[offs:s.offset])
+ }
+ goto exit
+ }
+
+ /*-style comment */
+ s.next()
+ for s.ch >= 0 {
+ ch := s.ch
+ if ch == '\r' {
+ hasCR = true
+ }
+ s.next()
+ if ch == '*' && s.ch == '/' {
+ s.next()
+ goto exit
+ }
+ }
+
+ s.error(offs, "comment not terminated")
+
+exit:
+ lit := s.src[offs:s.offset]
+ if hasCR {
+ lit = stripCR(lit)
+ }
+
+ return string(lit)
+}
+
+func (s *Scanner) findLineEnd() bool {
+ // initial '/' already consumed
+
+ defer func(offs int) {
+ // reset scanner state to where it was upon calling findLineEnd
+ s.ch = '/'
+ s.offset = offs
+ s.rdOffset = offs + 1
+ s.next() // consume initial '/' again
+ }(s.offset - 1)
+
+ // read ahead until a newline, EOF, or non-comment token is found
+ for s.ch == '/' || s.ch == '*' {
+ if s.ch == '/' {
+ //-style comment always contains a newline
+ return true
+ }
+ /*-style comment: look for newline */
+ s.next()
+ for s.ch >= 0 {
+ ch := s.ch
+ if ch == '\n' {
+ return true
+ }
+ s.next()
+ if ch == '*' && s.ch == '/' {
+ s.next()
+ break
+ }
+ }
+ s.skipWhitespace() // s.insertSemi is set
+ if s.ch < 0 || s.ch == '\n' {
+ return true
+ }
+ if s.ch != '/' {
+ // non-comment token
+ return false
+ }
+ s.next() // consume '/'
+ }
+
+ return false
+}
+
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+func (s *Scanner) scanIdentifier() string {
+ offs := s.offset
+ for isLetter(s.ch) || isDigit(s.ch) {
+ s.next()
+ }
+ return string(s.src[offs:s.offset])
+}
+
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
+
+func (s *Scanner) scanMantissa(base int) {
+ for digitVal(s.ch) < base {
+ s.next()
+ }
+}
+
+func (s *Scanner) scanNumber(seenDecimalPoint bool) (token.Token, string) {
+ // digitVal(s.ch) < 10
+ offs := s.offset
+ tok := token.INT
+
+ if seenDecimalPoint {
+ offs--
+ tok = token.FLOAT
+ s.scanMantissa(10)
+ goto exponent
+ }
+
+ if s.ch == '0' {
+ // int or float
+ offs := s.offset
+ s.next()
+ if s.ch == 'x' || s.ch == 'X' {
+ // hexadecimal int
+ s.next()
+ s.scanMantissa(16)
+ if s.offset-offs <= 2 {
+ // only scanned "0x" or "0X"
+ s.error(offs, "illegal hexadecimal number")
+ }
+ } else {
+ // octal int or float
+ seenDecimalDigit := false
+ s.scanMantissa(8)
+ if s.ch == '8' || s.ch == '9' {
+ // illegal octal int or float
+ seenDecimalDigit = true
+ s.scanMantissa(10)
+ }
+ if s.ch == '.' || s.ch == 'e' || s.ch == 'E' || s.ch == 'i' {
+ goto fraction
+ }
+ // octal int
+ if seenDecimalDigit {
+ s.error(offs, "illegal octal number")
+ }
+ }
+ goto exit
+ }
+
+ // decimal int or float
+ s.scanMantissa(10)
+
+fraction:
+ if s.ch == '.' {
+ tok = token.FLOAT
+ s.next()
+ s.scanMantissa(10)
+ }
+
+exponent:
+ if s.ch == 'e' || s.ch == 'E' {
+ tok = token.FLOAT
+ s.next()
+ if s.ch == '-' || s.ch == '+' {
+ s.next()
+ }
+ s.scanMantissa(10)
+ }
+
+ if s.ch == 'i' {
+ tok = token.IMAG
+ s.next()
+ }
+
+exit:
+ return tok, string(s.src[offs:s.offset])
+}
+
+// scanEscape parses an escape sequence, where quote is the accepted
+// escaped quote character. In case of a syntax error, it stops at the
+// offending character (without consuming it) and returns false.
+// Otherwise it returns true.
+func (s *Scanner) scanEscape(quote rune) bool {
+ offs := s.offset
+
+ var n int
+ var base, max uint32
+ switch s.ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
+ s.next()
+ return true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ n, base, max = 3, 8, 255
+ case 'x':
+ s.next()
+ n, base, max = 2, 16, 255
+ case 'u':
+ s.next()
+ n, base, max = 4, 16, unicode.MaxRune
+ case 'U':
+ s.next()
+ n, base, max = 8, 16, unicode.MaxRune
+ default:
+ msg := "unknown escape sequence"
+ if s.ch < 0 {
+ msg = "escape sequence not terminated"
+ }
+ s.error(offs, msg)
+ return false
+ }
+
+ var x uint32
+ for n > 0 {
+ d := uint32(digitVal(s.ch))
+ if d >= base {
+ msg := fmt.Sprintf("illegal character %#U in escape sequence", s.ch)
+ if s.ch < 0 {
+ msg = "escape sequence not terminated"
+ }
+ s.error(s.offset, msg)
+ return false
+ }
+ x = x*base + d
+ s.next()
+ n--
+ }
+
+ if x > max || 0xD800 <= x && x < 0xE000 {
+ s.error(offs, "escape sequence is invalid Unicode code point")
+ return false
+ }
+
+ return true
+}
+
+func (s *Scanner) scanRune() string {
+ // '\'' opening already consumed
+ offs := s.offset - 1
+
+ valid := true
+ n := 0
+ for {
+ ch := s.ch
+ if ch == '\n' || ch < 0 {
+ // only report error if we don't have one already
+ if valid {
+ s.error(offs, "rune literal not terminated")
+ valid = false
+ }
+ break
+ }
+ s.next()
+ if ch == '\'' {
+ break
+ }
+ n++
+ if ch == '\\' {
+ if !s.scanEscape('\'') {
+ valid = false
+ }
+ // continue to read to closing quote
+ }
+ }
+
+ if valid && n != 1 {
+ s.error(offs, "illegal rune literal")
+ }
+
+ return string(s.src[offs:s.offset])
+}
+
+func (s *Scanner) scanString() string {
+ // '"' opening already consumed
+ offs := s.offset - 1
+
+ for {
+ ch := s.ch
+ if ch == '\n' || ch < 0 {
+ s.error(offs, "string literal not terminated")
+ break
+ }
+ s.next()
+ if ch == '"' {
+ break
+ }
+ if ch == '\\' {
+ s.scanEscape('"')
+ }
+ }
+
+ return string(s.src[offs:s.offset])
+}
+
+func stripCR(b []byte) []byte {
+ c := make([]byte, len(b))
+ i := 0
+ for _, ch := range b {
+ if ch != '\r' {
+ c[i] = ch
+ i++
+ }
+ }
+ return c[:i]
+}
+
+func (s *Scanner) scanRawString() string {
+ // '`' opening already consumed
+ offs := s.offset - 1
+
+ hasCR := false
+ for {
+ ch := s.ch
+ if ch < 0 {
+ s.error(offs, "raw string literal not terminated")
+ break
+ }
+ s.next()
+ if ch == '`' {
+ break
+ }
+ if ch == '\r' {
+ hasCR = true
+ }
+ }
+
+ lit := s.src[offs:s.offset]
+ if hasCR {
+ lit = stripCR(lit)
+ }
+
+ return string(lit)
+}
+
+func (s *Scanner) skipWhitespace() {
+ for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !s.insertSemi || s.ch == '\r' {
+ s.next()
+ }
+}
+
+// Helper functions for scanning multi-byte tokens such as >> += >>= .
+// Different routines recognize different length tok_i based on matches
+// of ch_i. If a token ends in '=', the result is tok1 or tok3
+// respectively. Otherwise, the result is tok0 if there was no other
+// matching character, or tok2 if the matching character was ch2.
+
+func (s *Scanner) switch2(tok0, tok1 token.Token) token.Token {
+ if s.ch == '=' {
+ s.next()
+ return tok1
+ }
+ return tok0
+}
+
+func (s *Scanner) switch3(tok0, tok1 token.Token, ch2 rune, tok2 token.Token) token.Token {
+ if s.ch == '=' {
+ s.next()
+ return tok1
+ }
+ if s.ch == ch2 {
+ s.next()
+ return tok2
+ }
+ return tok0
+}
+
+func (s *Scanner) switch4(tok0, tok1 token.Token, ch2 rune, tok2, tok3 token.Token) token.Token {
+ if s.ch == '=' {
+ s.next()
+ return tok1
+ }
+ if s.ch == ch2 {
+ s.next()
+ if s.ch == '=' {
+ s.next()
+ return tok3
+ }
+ return tok2
+ }
+ return tok0
+}
+
+// Scan scans the next token and returns the token position, the token,
+// and its literal string if applicable. The source end is indicated by
+// token.EOF.
+//
+// If the returned token is a literal (token.IDENT, token.INT, token.FLOAT,
+// token.IMAG, token.CHAR, token.STRING) or token.COMMENT, the literal string
+// has the corresponding value.
+//
+// If the returned token is a keyword, the literal string is the keyword.
+//
+// If the returned token is token.SEMICOLON, the corresponding
+// literal string is ";" if the semicolon was present in the source,
+// and "\n" if the semicolon was inserted because of a newline or
+// at EOF.
+//
+// If the returned token is token.ILLEGAL, the literal string is the
+// offending character.
+//
+// In all other cases, Scan returns an empty literal string.
+//
+// For more tolerant parsing, Scan will return a valid token if
+// possible even if a syntax error was encountered. Thus, even
+// if the resulting token sequence contains no illegal tokens,
+// a client may not assume that no error occurred. Instead it
+// must check the scanner's ErrorCount or the number of calls
+// of the error handler, if there was one installed.
+//
+// Scan adds line information to the file added to the file
+// set with Init. Token positions are relative to that file
+// and thus relative to the file set.
+//
+func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
+scanAgain:
+ s.skipWhitespace()
+
+ // current token start
+ pos = s.file.Pos(s.offset)
+
+ // determine token value
+ insertSemi := false
+ switch ch := s.ch; {
+ case isLetter(ch):
+ lit = s.scanIdentifier()
+ if len(lit) > 1 {
+ // keywords are longer than one letter - avoid lookup otherwise
+ tok = token.Lookup(lit)
+ switch tok {
+ case token.IDENT, token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN:
+ insertSemi = true
+ }
+ } else {
+ insertSemi = true
+ tok = token.IDENT
+ }
+ case '0' <= ch && ch <= '9':
+ insertSemi = true
+ tok, lit = s.scanNumber(false)
+ default:
+ s.next() // always make progress
+ switch ch {
+ case -1:
+ if s.insertSemi {
+ s.insertSemi = false // EOF consumed
+ return pos, token.SEMICOLON, "\n"
+ }
+ tok = token.EOF
+ case '\n':
+ // we only reach here if s.insertSemi was
+ // set in the first place and exited early
+ // from s.skipWhitespace()
+ s.insertSemi = false // newline consumed
+ return pos, token.SEMICOLON, "\n"
+ case '"':
+ insertSemi = true
+ tok = token.STRING
+ lit = s.scanString()
+ case '\'':
+ insertSemi = true
+ tok = token.CHAR
+ lit = s.scanRune()
+ case '`':
+ insertSemi = true
+ tok = token.STRING
+ lit = s.scanRawString()
+ case ':':
+ tok = s.switch2(token.COLON, token.DEFINE)
+ case '.':
+ if '0' <= s.ch && s.ch <= '9' {
+ insertSemi = true
+ tok, lit = s.scanNumber(true)
+ } else if s.ch == '.' {
+ s.next()
+ if s.ch == '.' {
+ s.next()
+ tok = token.ELLIPSIS
+ }
+ } else {
+ tok = token.PERIOD
+ }
+ case ',':
+ tok = token.COMMA
+ case ';':
+ tok = token.SEMICOLON
+ lit = ";"
+ case '(':
+ tok = token.LPAREN
+ case ')':
+ insertSemi = true
+ tok = token.RPAREN
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ insertSemi = true
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ insertSemi = true
+ tok = token.RBRACE
+ case '+':
+ tok = s.switch3(token.ADD, token.ADD_ASSIGN, '+', token.INC)
+ if tok == token.INC {
+ insertSemi = true
+ }
+ case '-':
+ tok = s.switch3(token.SUB, token.SUB_ASSIGN, '-', token.DEC)
+ if tok == token.DEC {
+ insertSemi = true
+ }
+ case '*':
+ tok = s.switch2(token.MUL, token.MUL_ASSIGN)
+ case '/':
+ if s.ch == '/' || s.ch == '*' {
+ // comment
+ if s.insertSemi && s.findLineEnd() {
+ // reset position to the beginning of the comment
+ s.ch = '/'
+ s.offset = s.file.Offset(pos)
+ s.rdOffset = s.offset + 1
+ s.insertSemi = false // newline consumed
+ return pos, token.SEMICOLON, "\n"
+ }
+ lit = s.scanComment()
+ if s.mode&ScanComments == 0 {
+ // skip comment
+ s.insertSemi = false // newline consumed
+ goto scanAgain
+ }
+ tok = token.COMMENT
+ } else {
+ tok = s.switch2(token.QUO, token.QUO_ASSIGN)
+ }
+ case '%':
+ tok = s.switch2(token.REM, token.REM_ASSIGN)
+ case '^':
+ tok = s.switch2(token.XOR, token.XOR_ASSIGN)
+ case '<':
+ if s.ch == '-' {
+ s.next()
+ tok = token.ARROW
+ } else {
+ tok = s.switch4(token.LSS, token.LEQ, '<', token.SHL, token.SHL_ASSIGN)
+ }
+ case '>':
+ tok = s.switch4(token.GTR, token.GEQ, '>', token.SHR, token.SHR_ASSIGN)
+ case '=':
+ tok = s.switch2(token.ASSIGN, token.EQL)
+ case '!':
+ tok = s.switch2(token.NOT, token.NEQ)
+ case '&':
+ if s.ch == '^' {
+ s.next()
+ tok = s.switch2(token.AND_NOT, token.AND_NOT_ASSIGN)
+ } else {
+ tok = s.switch3(token.AND, token.AND_ASSIGN, '&', token.LAND)
+ }
+ case '|':
+ tok = s.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR)
+ default:
+ // next reports unexpected BOMs - don't repeat
+ if ch != bom {
+ s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
+ }
+ insertSemi = s.insertSemi // preserve insertSemi info
+ tok = token.ILLEGAL
+ lit = string(ch)
+ }
+ }
+ if s.mode&dontInsertSemis == 0 {
+ s.insertSemi = insertSemi
+ }
+
+ return
+}
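The automatic semicolon insertion documented on Scan is easiest to see in a small sketch (the input string is illustrative): an inserted semicolon carries the literal "\n", while one written in the source carries ";".

	package main

	import (
		"fmt"
		"go/scanner"
		"go/token"
	)

	func main() {
		src := []byte("x := 1\ny := 2;")
		fset := token.NewFileSet()
		file := fset.AddFile("", fset.Base(), len(src))

		var s scanner.Scanner
		s.Init(file, src, nil, 0)
		for {
			_, tok, lit := s.Scan()
			if tok == token.EOF {
				break
			}
			if tok == token.SEMICOLON {
				// prints SEMICOLON "\n" for the inserted one,
				// then SEMICOLON ";" for the explicit one.
				fmt.Printf("SEMICOLON %q\n", lit)
			}
		}
	}
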
diff --git a/src/go/scanner/scanner_test.go b/src/go/scanner/scanner_test.go
new file mode 100644
index 000000000..fc450d8a6
--- /dev/null
+++ b/src/go/scanner/scanner_test.go
@@ -0,0 +1,775 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+ "go/token"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+)
+
+var fset = token.NewFileSet()
+
+const /* class */ (
+ special = iota
+ literal
+ operator
+ keyword
+)
+
+func tokenclass(tok token.Token) int {
+ switch {
+ case tok.IsLiteral():
+ return literal
+ case tok.IsOperator():
+ return operator
+ case tok.IsKeyword():
+ return keyword
+ }
+ return special
+}
+
+type elt struct {
+ tok token.Token
+ lit string
+ class int
+}
+
+var tokens = [...]elt{
+ // Special tokens
+ {token.COMMENT, "/* a comment */", special},
+ {token.COMMENT, "// a comment \n", special},
+ {token.COMMENT, "/*\r*/", special},
+ {token.COMMENT, "//\r\n", special},
+
+ // Identifiers and basic type literals
+ {token.IDENT, "foobar", literal},
+ {token.IDENT, "a۰۱۸", literal},
+ {token.IDENT, "foo६४", literal},
+ {token.IDENT, "bar9876", literal},
+ {token.IDENT, "ŝ", literal}, // was bug (issue 4000)
+ {token.IDENT, "ŝfoo", literal}, // was bug (issue 4000)
+ {token.INT, "0", literal},
+ {token.INT, "1", literal},
+ {token.INT, "123456789012345678890", literal},
+ {token.INT, "01234567", literal},
+ {token.INT, "0xcafebabe", literal},
+ {token.FLOAT, "0.", literal},
+ {token.FLOAT, ".0", literal},
+ {token.FLOAT, "3.14159265", literal},
+ {token.FLOAT, "1e0", literal},
+ {token.FLOAT, "1e+100", literal},
+ {token.FLOAT, "1e-100", literal},
+ {token.FLOAT, "2.71828e-1000", literal},
+ {token.IMAG, "0i", literal},
+ {token.IMAG, "1i", literal},
+ {token.IMAG, "012345678901234567889i", literal},
+ {token.IMAG, "123456789012345678890i", literal},
+ {token.IMAG, "0.i", literal},
+ {token.IMAG, ".0i", literal},
+ {token.IMAG, "3.14159265i", literal},
+ {token.IMAG, "1e0i", literal},
+ {token.IMAG, "1e+100i", literal},
+ {token.IMAG, "1e-100i", literal},
+ {token.IMAG, "2.71828e-1000i", literal},
+ {token.CHAR, "'a'", literal},
+ {token.CHAR, "'\\000'", literal},
+ {token.CHAR, "'\\xFF'", literal},
+ {token.CHAR, "'\\uff16'", literal},
+ {token.CHAR, "'\\U0000ff16'", literal},
+ {token.STRING, "`foobar`", literal},
+ {token.STRING, "`" + `foo
+ bar` +
+ "`",
+ literal,
+ },
+ {token.STRING, "`\r`", literal},
+ {token.STRING, "`foo\r\nbar`", literal},
+
+ // Operators and delimiters
+ {token.ADD, "+", operator},
+ {token.SUB, "-", operator},
+ {token.MUL, "*", operator},
+ {token.QUO, "/", operator},
+ {token.REM, "%", operator},
+
+ {token.AND, "&", operator},
+ {token.OR, "|", operator},
+ {token.XOR, "^", operator},
+ {token.SHL, "<<", operator},
+ {token.SHR, ">>", operator},
+ {token.AND_NOT, "&^", operator},
+
+ {token.ADD_ASSIGN, "+=", operator},
+ {token.SUB_ASSIGN, "-=", operator},
+ {token.MUL_ASSIGN, "*=", operator},
+ {token.QUO_ASSIGN, "/=", operator},
+ {token.REM_ASSIGN, "%=", operator},
+
+ {token.AND_ASSIGN, "&=", operator},
+ {token.OR_ASSIGN, "|=", operator},
+ {token.XOR_ASSIGN, "^=", operator},
+ {token.SHL_ASSIGN, "<<=", operator},
+ {token.SHR_ASSIGN, ">>=", operator},
+ {token.AND_NOT_ASSIGN, "&^=", operator},
+
+ {token.LAND, "&&", operator},
+ {token.LOR, "||", operator},
+ {token.ARROW, "<-", operator},
+ {token.INC, "++", operator},
+ {token.DEC, "--", operator},
+
+ {token.EQL, "==", operator},
+ {token.LSS, "<", operator},
+ {token.GTR, ">", operator},
+ {token.ASSIGN, "=", operator},
+ {token.NOT, "!", operator},
+
+ {token.NEQ, "!=", operator},
+ {token.LEQ, "<=", operator},
+ {token.GEQ, ">=", operator},
+ {token.DEFINE, ":=", operator},
+ {token.ELLIPSIS, "...", operator},
+
+ {token.LPAREN, "(", operator},
+ {token.LBRACK, "[", operator},
+ {token.LBRACE, "{", operator},
+ {token.COMMA, ",", operator},
+ {token.PERIOD, ".", operator},
+
+ {token.RPAREN, ")", operator},
+ {token.RBRACK, "]", operator},
+ {token.RBRACE, "}", operator},
+ {token.SEMICOLON, ";", operator},
+ {token.COLON, ":", operator},
+
+ // Keywords
+ {token.BREAK, "break", keyword},
+ {token.CASE, "case", keyword},
+ {token.CHAN, "chan", keyword},
+ {token.CONST, "const", keyword},
+ {token.CONTINUE, "continue", keyword},
+
+ {token.DEFAULT, "default", keyword},
+ {token.DEFER, "defer", keyword},
+ {token.ELSE, "else", keyword},
+ {token.FALLTHROUGH, "fallthrough", keyword},
+ {token.FOR, "for", keyword},
+
+ {token.FUNC, "func", keyword},
+ {token.GO, "go", keyword},
+ {token.GOTO, "goto", keyword},
+ {token.IF, "if", keyword},
+ {token.IMPORT, "import", keyword},
+
+ {token.INTERFACE, "interface", keyword},
+ {token.MAP, "map", keyword},
+ {token.PACKAGE, "package", keyword},
+ {token.RANGE, "range", keyword},
+ {token.RETURN, "return", keyword},
+
+ {token.SELECT, "select", keyword},
+ {token.STRUCT, "struct", keyword},
+ {token.SWITCH, "switch", keyword},
+ {token.TYPE, "type", keyword},
+ {token.VAR, "var", keyword},
+}
+
+const whitespace = " \t \n\n\n" // to separate tokens
+
+var source = func() []byte {
+ var src []byte
+ for _, t := range tokens {
+ src = append(src, t.lit...)
+ src = append(src, whitespace...)
+ }
+ return src
+}()
+
+func newlineCount(s string) int {
+ n := 0
+ for i := 0; i < len(s); i++ {
+ if s[i] == '\n' {
+ n++
+ }
+ }
+ return n
+}
+
+func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
+ pos := fset.Position(p)
+ if pos.Filename != expected.Filename {
+ t.Errorf("bad filename for %q: got %s, expected %s", lit, pos.Filename, expected.Filename)
+ }
+ if pos.Offset != expected.Offset {
+ t.Errorf("bad position for %q: got %d, expected %d", lit, pos.Offset, expected.Offset)
+ }
+ if pos.Line != expected.Line {
+ t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line)
+ }
+ if pos.Column != expected.Column {
+ t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Column, expected.Column)
+ }
+}
+
+// Verify that calling Scan() provides the correct results.
+func TestScan(t *testing.T) {
+ whitespace_linecount := newlineCount(whitespace)
+
+ // error handler
+ eh := func(_ token.Position, msg string) {
+ t.Errorf("error handler called (msg = %s)", msg)
+ }
+
+ // verify scan
+ var s Scanner
+ s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments|dontInsertSemis)
+
+ // set up expected position
+ epos := token.Position{
+ Filename: "",
+ Offset: 0,
+ Line: 1,
+ Column: 1,
+ }
+
+ index := 0
+ for {
+ pos, tok, lit := s.Scan()
+
+ // check position
+ if tok == token.EOF {
+ // correction for EOF
+ epos.Line = newlineCount(string(source))
+ epos.Column = 2
+ }
+ checkPos(t, lit, pos, epos)
+
+ // check token
+ e := elt{token.EOF, "", special}
+ if index < len(tokens) {
+ e = tokens[index]
+ index++
+ }
+ if tok != e.tok {
+ t.Errorf("bad token for %q: got %s, expected %s", lit, tok, e.tok)
+ }
+
+ // check token class
+ if tokenclass(tok) != e.class {
+ t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
+ }
+
+ // check literal
+ elit := ""
+ switch e.tok {
+ case token.COMMENT:
+ // no CRs in comments
+ elit = string(stripCR([]byte(e.lit)))
+ // a //-style comment literal doesn't contain the final newline
+ if elit[1] == '/' {
+ elit = elit[0 : len(elit)-1]
+ }
+ case token.IDENT:
+ elit = e.lit
+ case token.SEMICOLON:
+ elit = ";"
+ default:
+ if e.tok.IsLiteral() {
+ // no CRs in raw string literals
+ elit = e.lit
+ if elit[0] == '`' {
+ elit = string(stripCR([]byte(elit)))
+ }
+ } else if e.tok.IsKeyword() {
+ elit = e.lit
+ }
+ }
+ if lit != elit {
+ t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
+ }
+
+ if tok == token.EOF {
+ break
+ }
+
+ // update position
+ epos.Offset += len(e.lit) + len(whitespace)
+ epos.Line += newlineCount(e.lit) + whitespace_linecount
+
+ }
+
+ if s.ErrorCount != 0 {
+ t.Errorf("found %d errors", s.ErrorCount)
+ }
+}
+
+func checkSemi(t *testing.T, line string, mode Mode) {
+ var S Scanner
+ file := fset.AddFile("TestSemis", fset.Base(), len(line))
+ S.Init(file, []byte(line), nil, mode)
+ pos, tok, lit := S.Scan()
+ for tok != token.EOF {
+ if tok == token.ILLEGAL {
+ // the illegal token literal indicates what
+ // kind of semicolon literal to expect
+ semiLit := "\n"
+ if lit[0] == '#' {
+ semiLit = ";"
+ }
+ // next token must be a semicolon
+ semiPos := file.Position(pos)
+ semiPos.Offset++
+ semiPos.Column++
+ pos, tok, lit = S.Scan()
+ if tok == token.SEMICOLON {
+ if lit != semiLit {
+ t.Errorf(`bad literal for %q: got %q, expected %q`, line, lit, semiLit)
+ }
+ checkPos(t, line, pos, semiPos)
+ } else {
+ t.Errorf("bad token for %q: got %s, expected ;", line, tok)
+ }
+ } else if tok == token.SEMICOLON {
+ t.Errorf("bad token for %q: got ;, expected no ;", line)
+ }
+ pos, tok, lit = S.Scan()
+ }
+}
+
+var lines = []string{
+ // # indicates a semicolon present in the source
+ // $ indicates an automatically inserted semicolon
+ "",
+ "\ufeff#;", // first BOM is ignored
+ "#;",
+ "foo$\n",
+ "123$\n",
+ "1.2$\n",
+ "'x'$\n",
+ `"x"` + "$\n",
+ "`x`$\n",
+
+ "+\n",
+ "-\n",
+ "*\n",
+ "/\n",
+ "%\n",
+
+ "&\n",
+ "|\n",
+ "^\n",
+ "<<\n",
+ ">>\n",
+ "&^\n",
+
+ "+=\n",
+ "-=\n",
+ "*=\n",
+ "/=\n",
+ "%=\n",
+
+ "&=\n",
+ "|=\n",
+ "^=\n",
+ "<<=\n",
+ ">>=\n",
+ "&^=\n",
+
+ "&&\n",
+ "||\n",
+ "<-\n",
+ "++$\n",
+ "--$\n",
+
+ "==\n",
+ "<\n",
+ ">\n",
+ "=\n",
+ "!\n",
+
+ "!=\n",
+ "<=\n",
+ ">=\n",
+ ":=\n",
+ "...\n",
+
+ "(\n",
+ "[\n",
+ "{\n",
+ ",\n",
+ ".\n",
+
+ ")$\n",
+ "]$\n",
+ "}$\n",
+ "#;\n",
+ ":\n",
+
+ "break$\n",
+ "case\n",
+ "chan\n",
+ "const\n",
+ "continue$\n",
+
+ "default\n",
+ "defer\n",
+ "else\n",
+ "fallthrough$\n",
+ "for\n",
+
+ "func\n",
+ "go\n",
+ "goto\n",
+ "if\n",
+ "import\n",
+
+ "interface\n",
+ "map\n",
+ "package\n",
+ "range\n",
+ "return$\n",
+
+ "select\n",
+ "struct\n",
+ "switch\n",
+ "type\n",
+ "var\n",
+
+ "foo$//comment\n",
+ "foo$//comment",
+ "foo$/*comment*/\n",
+ "foo$/*\n*/",
+ "foo$/*comment*/ \n",
+ "foo$/*\n*/ ",
+
+ "foo $// comment\n",
+ "foo $// comment",
+ "foo $/*comment*/\n",
+ "foo $/*\n*/",
+ "foo $/* */ /* \n */ bar$/**/\n",
+ "foo $/*0*/ /*1*/ /*2*/\n",
+
+ "foo $/*comment*/ \n",
+ "foo $/*0*/ /*1*/ /*2*/ \n",
+ "foo $/**/ /*-------------*/ /*----\n*/bar $/* \n*/baa$\n",
+ "foo $/* an EOF terminates a line */",
+ "foo $/* an EOF terminates a line */ /*",
+ "foo $/* an EOF terminates a line */ //",
+
+ "package main$\n\nfunc main() {\n\tif {\n\t\treturn /* */ }$\n}$\n",
+ "package main$",
+}
+
+func TestSemis(t *testing.T) {
+ for _, line := range lines {
+ checkSemi(t, line, 0)
+ checkSemi(t, line, ScanComments)
+
+ // if the input ended in newlines, the input must tokenize the
+ // same with or without those newlines
+ for i := len(line) - 1; i >= 0 && line[i] == '\n'; i-- {
+ checkSemi(t, line[0:i], 0)
+ checkSemi(t, line[0:i], ScanComments)
+ }
+ }
+}
+
+type segment struct {
+ srcline string // a line of source text
+ filename string // filename for current token
+ line int // line number for current token
+}
+
+var segments = []segment{
+ // exactly one token per line since the test consumes one token per segment
+ {" line1", filepath.Join("dir", "TestLineComments"), 1},
+ {"\nline2", filepath.Join("dir", "TestLineComments"), 2},
+ {"\nline3 //line File1.go:100", filepath.Join("dir", "TestLineComments"), 3}, // bad line comment, ignored
+ {"\nline4", filepath.Join("dir", "TestLineComments"), 4},
+ {"\n//line File1.go:100\n line100", filepath.Join("dir", "File1.go"), 100},
+ {"\n//line \t :42\n line1", "", 42},
+ {"\n//line File2.go:200\n line200", filepath.Join("dir", "File2.go"), 200},
+ {"\n//line foo\t:42\n line42", filepath.Join("dir", "foo"), 42},
+ {"\n //line foo:42\n line44", filepath.Join("dir", "foo"), 44}, // bad line comment, ignored
+ {"\n//line foo 42\n line46", filepath.Join("dir", "foo"), 46}, // bad line comment, ignored
+ {"\n//line foo:42 extra text\n line48", filepath.Join("dir", "foo"), 48}, // bad line comment, ignored
+ {"\n//line ./foo:42\n line42", filepath.Join("dir", "foo"), 42},
+ {"\n//line a/b/c/File1.go:100\n line100", filepath.Join("dir", "a", "b", "c", "File1.go"), 100},
+}
+
+var unixsegments = []segment{
+ {"\n//line /bar:42\n line42", "/bar", 42},
+}
+
+var winsegments = []segment{
+ {"\n//line c:\\bar:42\n line42", "c:\\bar", 42},
+ {"\n//line c:\\dir\\File1.go:100\n line100", "c:\\dir\\File1.go", 100},
+}
+
+// Verify that comments of the form "//line filename:line" are interpreted correctly.
+func TestLineComments(t *testing.T) {
+ segs := segments
+ if runtime.GOOS == "windows" {
+ segs = append(segs, winsegments...)
+ } else {
+ segs = append(segs, unixsegments...)
+ }
+
+ // make source
+ var src string
+ for _, e := range segs {
+ src += e.srcline
+ }
+
+ // verify scan
+ var S Scanner
+ file := fset.AddFile(filepath.Join("dir", "TestLineComments"), fset.Base(), len(src))
+ S.Init(file, []byte(src), nil, dontInsertSemis)
+ for _, s := range segs {
+ p, _, lit := S.Scan()
+ pos := file.Position(p)
+ checkPos(t, lit, p, token.Position{
+ Filename: s.filename,
+ Offset: pos.Offset,
+ Line: s.line,
+ Column: pos.Column,
+ })
+ }
+
+ if S.ErrorCount != 0 {
+ t.Errorf("found %d errors", S.ErrorCount)
+ }
+}
+
+// Verify that initializing the same scanner more than once works correctly.
+func TestInit(t *testing.T) {
+ var s Scanner
+
+ // 1st init
+ src1 := "if true { }"
+ f1 := fset.AddFile("src1", fset.Base(), len(src1))
+ s.Init(f1, []byte(src1), nil, dontInsertSemis)
+ if f1.Size() != len(src1) {
+ t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
+ }
+ s.Scan() // if
+ s.Scan() // true
+ _, tok, _ := s.Scan() // {
+ if tok != token.LBRACE {
+ t.Errorf("bad token: got %s, expected %s", tok, token.LBRACE)
+ }
+
+ // 2nd init
+ src2 := "go true { ]"
+ f2 := fset.AddFile("src2", fset.Base(), len(src2))
+ s.Init(f2, []byte(src2), nil, dontInsertSemis)
+ if f2.Size() != len(src2) {
+ t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
+ }
+ _, tok, _ = s.Scan() // go
+ if tok != token.GO {
+ t.Errorf("bad token: got %s, expected %s", tok, token.GO)
+ }
+
+ if s.ErrorCount != 0 {
+ t.Errorf("found %d errors", s.ErrorCount)
+ }
+}
+
+func TestStdErrorHandler(t *testing.T) {
+ const src = "@\n" + // illegal character, causes an error
+ "@ @\n" + // two errors on the same line
+ "//line File2:20\n" +
+ "@\n" + // different file, but same line
+ "//line File2:1\n" +
+ "@ @\n" + // same file, decreasing line number
+ "//line File1:1\n" +
+ "@ @ @" // original file, line 1 again
+
+ var list ErrorList
+ eh := func(pos token.Position, msg string) { list.Add(pos, msg) }
+
+ var s Scanner
+ s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), eh, dontInsertSemis)
+ for {
+ if _, tok, _ := s.Scan(); tok == token.EOF {
+ break
+ }
+ }
+
+ if len(list) != s.ErrorCount {
+ t.Errorf("found %d errors, expected %d", len(list), s.ErrorCount)
+ }
+
+ if len(list) != 9 {
+ t.Errorf("found %d raw errors, expected 9", len(list))
+ PrintError(os.Stderr, list)
+ }
+
+ list.Sort()
+ if len(list) != 9 {
+ t.Errorf("found %d sorted errors, expected 9", len(list))
+ PrintError(os.Stderr, list)
+ }
+
+ list.RemoveMultiples()
+ if len(list) != 4 {
+ t.Errorf("found %d one-per-line errors, expected 4", len(list))
+ PrintError(os.Stderr, list)
+ }
+}
+
+type errorCollector struct {
+ cnt int // number of errors encountered
+ msg string // last error message encountered
+ pos token.Position // last error position encountered
+}
+
+func checkError(t *testing.T, src string, tok token.Token, pos int, lit, err string) {
+ var s Scanner
+ var h errorCollector
+ eh := func(pos token.Position, msg string) {
+ h.cnt++
+ h.msg = msg
+ h.pos = pos
+ }
+ s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), eh, ScanComments|dontInsertSemis)
+ _, tok0, lit0 := s.Scan()
+ if tok0 != tok {
+ t.Errorf("%q: got %s, expected %s", src, tok0, tok)
+ }
+ if tok0 != token.ILLEGAL && lit0 != lit {
+ t.Errorf("%q: got literal %q, expected %q", src, lit0, lit)
+ }
+ cnt := 0
+ if err != "" {
+ cnt = 1
+ }
+ if h.cnt != cnt {
+ t.Errorf("%q: got cnt %d, expected %d", src, h.cnt, cnt)
+ }
+ if h.msg != err {
+ t.Errorf("%q: got msg %q, expected %q", src, h.msg, err)
+ }
+ if h.pos.Offset != pos {
+ t.Errorf("%q: got offset %d, expected %d", src, h.pos.Offset, pos)
+ }
+}
+
+var errors = []struct {
+ src string
+ tok token.Token
+ pos int
+ lit string
+ err string
+}{
+ {"\a", token.ILLEGAL, 0, "", "illegal character U+0007"},
+ {`#`, token.ILLEGAL, 0, "", "illegal character U+0023 '#'"},
+ {`…`, token.ILLEGAL, 0, "", "illegal character U+2026 '…'"},
+ {`' '`, token.CHAR, 0, `' '`, ""},
+ {`''`, token.CHAR, 0, `''`, "illegal rune literal"},
+ {`'12'`, token.CHAR, 0, `'12'`, "illegal rune literal"},
+ {`'123'`, token.CHAR, 0, `'123'`, "illegal rune literal"},
+ {`'\0'`, token.CHAR, 3, `'\0'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\07'`, token.CHAR, 4, `'\07'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\8'`, token.CHAR, 2, `'\8'`, "unknown escape sequence"},
+ {`'\08'`, token.CHAR, 3, `'\08'`, "illegal character U+0038 '8' in escape sequence"},
+ {`'\x'`, token.CHAR, 3, `'\x'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\x0'`, token.CHAR, 4, `'\x0'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\x0g'`, token.CHAR, 4, `'\x0g'`, "illegal character U+0067 'g' in escape sequence"},
+ {`'\u'`, token.CHAR, 3, `'\u'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\u0'`, token.CHAR, 4, `'\u0'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\u00'`, token.CHAR, 5, `'\u00'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\u000'`, token.CHAR, 6, `'\u000'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\u000`, token.CHAR, 6, `'\u000`, "escape sequence not terminated"},
+ {`'\u0000'`, token.CHAR, 0, `'\u0000'`, ""},
+ {`'\U'`, token.CHAR, 3, `'\U'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\U0'`, token.CHAR, 4, `'\U0'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\U00'`, token.CHAR, 5, `'\U00'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\U000'`, token.CHAR, 6, `'\U000'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\U0000'`, token.CHAR, 7, `'\U0000'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\U00000'`, token.CHAR, 8, `'\U00000'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\U000000'`, token.CHAR, 9, `'\U000000'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\U0000000'`, token.CHAR, 10, `'\U0000000'`, "illegal character U+0027 ''' in escape sequence"},
+ {`'\U0000000`, token.CHAR, 10, `'\U0000000`, "escape sequence not terminated"},
+ {`'\U00000000'`, token.CHAR, 0, `'\U00000000'`, ""},
+ {`'\Uffffffff'`, token.CHAR, 2, `'\Uffffffff'`, "escape sequence is invalid Unicode code point"},
+ {`'`, token.CHAR, 0, `'`, "rune literal not terminated"},
+ {`'\`, token.CHAR, 2, `'\`, "escape sequence not terminated"},
+ {"'\n", token.CHAR, 0, "'", "rune literal not terminated"},
+ {"'\n ", token.CHAR, 0, "'", "rune literal not terminated"},
+ {`""`, token.STRING, 0, `""`, ""},
+ {`"abc`, token.STRING, 0, `"abc`, "string literal not terminated"},
+ {"\"abc\n", token.STRING, 0, `"abc`, "string literal not terminated"},
+ {"\"abc\n ", token.STRING, 0, `"abc`, "string literal not terminated"},
+ {"``", token.STRING, 0, "``", ""},
+ {"`", token.STRING, 0, "`", "raw string literal not terminated"},
+ {"/**/", token.COMMENT, 0, "/**/", ""},
+ {"/*", token.COMMENT, 0, "/*", "comment not terminated"},
+ {"077", token.INT, 0, "077", ""},
+ {"078.", token.FLOAT, 0, "078.", ""},
+ {"07801234567.", token.FLOAT, 0, "07801234567.", ""},
+ {"078e0", token.FLOAT, 0, "078e0", ""},
+ {"078", token.INT, 0, "078", "illegal octal number"},
+ {"07800000009", token.INT, 0, "07800000009", "illegal octal number"},
+ {"0x", token.INT, 0, "0x", "illegal hexadecimal number"},
+ {"0X", token.INT, 0, "0X", "illegal hexadecimal number"},
+ {"\"abc\x00def\"", token.STRING, 4, "\"abc\x00def\"", "illegal character NUL"},
+ {"\"abc\x80def\"", token.STRING, 4, "\"abc\x80def\"", "illegal UTF-8 encoding"},
+ {"\ufeff\ufeff", token.ILLEGAL, 3, "\ufeff\ufeff", "illegal byte order mark"}, // only first BOM is ignored
+ {"//\ufeff", token.COMMENT, 2, "//\ufeff", "illegal byte order mark"}, // only first BOM is ignored
+ {"'\ufeff" + `'`, token.CHAR, 1, "'\ufeff" + `'`, "illegal byte order mark"}, // only first BOM is ignored
+ {`"` + "abc\ufeffdef" + `"`, token.STRING, 4, `"` + "abc\ufeffdef" + `"`, "illegal byte order mark"}, // only first BOM is ignored
+}
+
+func TestScanErrors(t *testing.T) {
+ for _, e := range errors {
+ checkError(t, e.src, e.tok, e.pos, e.lit, e.err)
+ }
+}
+
+func BenchmarkScan(b *testing.B) {
+ b.StopTimer()
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(source))
+ var s Scanner
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ s.Init(file, source, nil, ScanComments)
+ for {
+ _, tok, _ := s.Scan()
+ if tok == token.EOF {
+ break
+ }
+ }
+ }
+}
+
+func BenchmarkScanFile(b *testing.B) {
+ b.StopTimer()
+ const filename = "scanner.go"
+ src, err := ioutil.ReadFile(filename)
+ if err != nil {
+ panic(err)
+ }
+ fset := token.NewFileSet()
+ file := fset.AddFile(filename, fset.Base(), len(src))
+ b.SetBytes(int64(len(src)))
+ var s Scanner
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ s.Init(file, src, nil, ScanComments)
+ for {
+ _, tok, _ := s.Scan()
+ if tok == token.EOF {
+ break
+ }
+ }
+ }
+}
diff --git a/src/go/token/position.go b/src/go/token/position.go
new file mode 100644
index 000000000..82d90eeb7
--- /dev/null
+++ b/src/go/token/position.go
@@ -0,0 +1,485 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+ "fmt"
+ "sort"
+ "sync"
+)
+
+// -----------------------------------------------------------------------------
+// Positions
+
+// Position describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+//
+type Position struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (pos *Position) IsValid() bool { return pos.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+//
+func (pos Position) String() string {
+ s := pos.Filename
+ if pos.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
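+
+// positionStringForms is an illustrative sketch (not part of the original
+// sources), spelling out the four String forms listed above; the filename
+// "a.go" is hypothetical.
+func positionStringForms() {
+ _ = Position{Filename: "a.go", Line: 2, Column: 5}.String() // "a.go:2:5"
+ _ = Position{Line: 2, Column: 5}.String()                   // "2:5"
+ _ = Position{Filename: "a.go"}.String()                     // "a.go" (invalid position)
+ _ = Position{}.String()                                     // "-"
+}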
+
+// Pos is a compact encoding of a source position within a file set.
+// It can be converted into a Position for a more convenient, but much
+// larger, representation.
+//
+// The Pos value for a given file is a number in the range [base, base+size],
+// where base and size are specified when adding the file to the file set via
+// AddFile.
+//
+// To create the Pos value for a specific source offset, first add
+// the respective file to the current file set (via FileSet.AddFile)
+// and then call File.Pos(offset) for that file. Given a Pos value p
+// for a specific file set fset, the corresponding Position value is
+// obtained by calling fset.Position(p).
+//
+// Pos values can be compared directly with the usual comparison operators:
+// If two Pos values p and q are in the same file, comparing p and q is
+// equivalent to comparing the respective source file offsets. If p and q
+// are in different files, p < q is true if the file implied by p was added
+// to the respective file set before the file implied by q.
+//
+type Pos int
+
+// The zero value for Pos is NoPos; there is no file and line information
+// associated with it, and NoPos.IsValid() is false. NoPos is always
+// smaller than any other Pos value. The corresponding Position value
+// for NoPos is the zero value for Position.
+//
+const NoPos Pos = 0
+
+// IsValid returns true if the position is valid.
+func (p Pos) IsValid() bool {
+ return p != NoPos
+}
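+
+// posRoundTrip is an illustrative sketch (not part of the original sources)
+// of the offset/Pos relationship documented above; the filename "f.go" and
+// the file size are hypothetical.
+func posRoundTrip() {
+ fset := NewFileSet()
+ f := fset.AddFile("f.go", fset.Base(), 10) // Pos range [base, base+10]
+ p := f.Pos(3)                              // file offset 3 -> Pos
+ offset := f.Offset(p)                      // Pos -> file offset 3
+ fmt.Println(offset, fset.Position(p))      // prints: 3 f.go:1:4
+}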
+
+// -----------------------------------------------------------------------------
+// File
+
+// A File is a handle for a file belonging to a FileSet.
+// A File has a name, size, and line offset table.
+//
+type File struct {
+ set *FileSet
+ name string // file name as provided to AddFile
+ base int // Pos value range for this file is [base...base+size]
+ size int // file size as provided to AddFile
+
+ // lines and infos are protected by set.mutex
+ lines []int // lines contains the offset of the first character for each line (the first entry is always 0)
+ infos []lineInfo
+}
+
+// Name returns the file name of file f as registered with AddFile.
+func (f *File) Name() string {
+ return f.name
+}
+
+// Base returns the base offset of file f as registered with AddFile.
+func (f *File) Base() int {
+ return f.base
+}
+
+// Size returns the size of file f as registered with AddFile.
+func (f *File) Size() int {
+ return f.size
+}
+
+// LineCount returns the number of lines in file f.
+func (f *File) LineCount() int {
+ f.set.mutex.RLock()
+ n := len(f.lines)
+ f.set.mutex.RUnlock()
+ return n
+}
+
+// AddLine adds the line offset for a new line.
+// The line offset must be larger than the offset for the previous line
+// and smaller than the file size; otherwise the line offset is ignored.
+//
+func (f *File) AddLine(offset int) {
+ f.set.mutex.Lock()
+ if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
+ f.lines = append(f.lines, offset)
+ }
+ f.set.mutex.Unlock()
+}
+
+// MergeLine merges a line with the following line. It is akin to replacing
+// the newline character at the end of the line with a space (to not change the
+// remaining offsets). To obtain the line number, consult e.g. Position.Line.
+// MergeLine will panic if given an invalid line number.
+//
+func (f *File) MergeLine(line int) {
+ if line <= 0 {
+ panic("illegal line number (line numbering starts at 1)")
+ }
+ f.set.mutex.Lock()
+ defer f.set.mutex.Unlock()
+ if line >= len(f.lines) {
+ panic("illegal line number")
+ }
+ // To merge the line numbered <line> with the line numbered <line+1>,
+ // we need to remove the entry in lines corresponding to the line
+ // numbered <line+1>. The entry in lines corresponding to the line
+ // numbered <line+1> is located at index <line>, since indices in lines
+ // are 0-based and line numbers are 1-based.
+ copy(f.lines[line:], f.lines[line+1:])
+ f.lines = f.lines[:len(f.lines)-1]
+}
+
+// SetLines sets the line offsets for a file and returns true if successful.
+// The line offsets are the offsets of the first character of each line;
+// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
+// An empty file has an empty line offset table.
+// Each line offset must be larger than the offset for the previous line
+// and smaller than the file size; otherwise SetLines fails and returns
+// false.
+//
+func (f *File) SetLines(lines []int) bool {
+ // verify validity of lines table
+ size := f.size
+ for i, offset := range lines {
+ if i > 0 && offset <= lines[i-1] || size <= offset {
+ return false
+ }
+ }
+
+ // set lines table
+ f.set.mutex.Lock()
+ f.lines = lines
+ f.set.mutex.Unlock()
+ return true
+}
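+
+// setLinesSketch is an illustrative sketch (not part of the original
+// sources) using the "ab\nc\n" example from the comment above.
+func setLinesSketch() {
+ fset := NewFileSet()
+ f := fset.AddFile("x.go", fset.Base(), 5) // len("ab\nc\n") == 5
+ ok := f.SetLines([]int{0, 3})             // offsets of 'a' and 'c'
+ fmt.Println(ok, f.LineCount())            // prints: true 2
+}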
+
+// SetLinesForContent sets the line offsets for the given file content.
+// It ignores position-altering //line comments.
+func (f *File) SetLinesForContent(content []byte) {
+ var lines []int
+ line := 0
+ for offset, b := range content {
+ if line >= 0 {
+ lines = append(lines, line)
+ }
+ line = -1
+ if b == '\n' {
+ line = offset + 1
+ }
+ }
+
+ // set lines table
+ f.set.mutex.Lock()
+ f.lines = lines
+ f.set.mutex.Unlock()
+}
+
+// A lineInfo object describes alternative file and line number
+// information (such as provided via a //line comment in a .go
+// file) for a given file offset.
+type lineInfo struct {
+ // fields are exported to make them accessible to gob
+ Offset int
+ Filename string
+ Line int
+}
+
+// AddLineInfo adds alternative file and line number information for
+// a given file offset. The offset must be larger than the offset for
+// the previously added alternative line info and smaller than the
+// file size; otherwise the information is ignored.
+//
+// AddLineInfo is typically used to register alternative position
+// information for //line filename:line comments in source files.
+//
+func (f *File) AddLineInfo(offset int, filename string, line int) {
+ f.set.mutex.Lock()
+ if i := len(f.infos); (i == 0 || f.infos[i-1].Offset < offset) && offset < f.size {
+ f.infos = append(f.infos, lineInfo{offset, filename, line})
+ }
+ f.set.mutex.Unlock()
+}
+
+// Pos returns the Pos value for the given file offset;
+// the offset must be <= f.Size().
+// f.Pos(f.Offset(p)) == p.
+//
+func (f *File) Pos(offset int) Pos {
+ if offset > f.size {
+ panic("illegal file offset")
+ }
+ return Pos(f.base + offset)
+}
+
+// Offset returns the offset for the given file position p;
+// p must be a valid Pos value in that file.
+// f.Offset(f.Pos(offset)) == offset.
+//
+func (f *File) Offset(p Pos) int {
+ if int(p) < f.base || int(p) > f.base+f.size {
+ panic("illegal Pos value")
+ }
+ return int(p) - f.base
+}
+
+// Line returns the line number for the given file position p;
+// p must be a Pos value in that file or NoPos.
+//
+func (f *File) Line(p Pos) int {
+ return f.Position(p).Line
+}
+
+func searchLineInfos(a []lineInfo, x int) int {
+ return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
+}
+
+// unpack returns the filename and line and column number for a file offset.
+// If adjusted is set, unpack will return the filename and line information
+// possibly adjusted by //line comments; otherwise those comments are ignored.
+//
+func (f *File) unpack(offset int, adjusted bool) (filename string, line, column int) {
+ filename = f.name
+ if i := searchInts(f.lines, offset); i >= 0 {
+ line, column = i+1, offset-f.lines[i]+1
+ }
+ if adjusted && len(f.infos) > 0 {
+ // almost no files have extra line infos
+ if i := searchLineInfos(f.infos, offset); i >= 0 {
+ alt := &f.infos[i]
+ filename = alt.Filename
+ if i := searchInts(f.lines, alt.Offset); i >= 0 {
+ line += alt.Line - i - 1
+ }
+ }
+ }
+ return
+}
+
+func (f *File) position(p Pos, adjusted bool) (pos Position) {
+ offset := int(p) - f.base
+ pos.Offset = offset
+ pos.Filename, pos.Line, pos.Column = f.unpack(offset, adjusted)
+ return
+}
+
+// PositionFor returns the Position value for the given file position p.
+// If adjusted is set, the position may be adjusted by position-altering
+// //line comments; otherwise those comments are ignored.
+// p must be a Pos value in f or NoPos.
+//
+func (f *File) PositionFor(p Pos, adjusted bool) (pos Position) {
+ if p != NoPos {
+ if int(p) < f.base || int(p) > f.base+f.size {
+ panic("illegal Pos value")
+ }
+ pos = f.position(p, adjusted)
+ }
+ return
+}
+
+// Position returns the Position value for the given file position p.
+// Calling f.Position(p) is equivalent to calling f.PositionFor(p, true).
+//
+func (f *File) Position(p Pos) (pos Position) {
+ return f.PositionFor(p, true)
+}
+
+// -----------------------------------------------------------------------------
+// FileSet
+
+// A FileSet represents a set of source files.
+// Methods of file sets are synchronized; multiple goroutines
+// may invoke them concurrently.
+//
+type FileSet struct {
+ mutex sync.RWMutex // protects the file set
+ base int // base offset for the next file
+ files []*File // list of files in the order added to the set
+ last *File // cache of last file looked up
+}
+
+// NewFileSet creates a new file set.
+func NewFileSet() *FileSet {
+ return &FileSet{
+ base: 1, // 0 == NoPos
+ }
+}
+
+// Base returns the minimum base offset that must be provided to
+// AddFile when adding the next file.
+//
+func (s *FileSet) Base() int {
+ s.mutex.RLock()
+ b := s.base
+ s.mutex.RUnlock()
+ return b
+}
+
+// AddFile adds a new file with a given filename, base offset, and file size
+// to the file set s and returns the file. Multiple files may have the same
+// name. The base offset must not be smaller than the FileSet's Base(), and
+// size must not be negative. As a special case, if a negative base is provided,
+// the current value of the FileSet's Base() is used instead.
+//
+// Adding the file will set the file set's Base() value to base + size + 1
+// as the minimum base value for the next file. The following relationship
+// exists between a Pos value p for a given file offset offs:
+//
+// int(p) = base + offs
+//
+// with offs in the range [0, size] and thus p in the range [base, base+size].
+// For convenience, File.Pos may be used to create file-specific position
+// values from a file offset.
+//
+func (s *FileSet) AddFile(filename string, base, size int) *File {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ if base < 0 {
+ base = s.base
+ }
+ if base < s.base || size < 0 {
+ panic("illegal base or size")
+ }
+ // base >= s.base && size >= 0
+ f := &File{s, filename, base, size, []int{0}, nil}
+ base += size + 1 // +1 because EOF also has a position
+ if base < 0 {
+ panic("token.Pos offset overflow (> 2G of source code in file set)")
+ }
+ // add the file to the file set
+ s.base = base
+ s.files = append(s.files, f)
+ s.last = f
+ return f
+}
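+
+// addFileSketch is an illustrative sketch (not part of the original sources)
+// of the base bookkeeping described above: each added file advances the
+// minimum base by size+1. The filenames are hypothetical.
+func addFileSketch() {
+ s := NewFileSet()                    // s.Base() == 1, since 0 is NoPos
+ _ = s.AddFile("a.go", s.Base(), 100) // occupies the Pos range [1, 101]
+ f := s.AddFile("b.go", -1, 50)       // negative base: s.Base() == 102 is used
+ fmt.Println(f.Base())                // prints: 102
+}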
+
+// Iterate calls f for the files in the file set in the order they were added
+// until f returns false.
+//
+func (s *FileSet) Iterate(f func(*File) bool) {
+ for i := 0; ; i++ {
+ var file *File
+ s.mutex.RLock()
+ if i < len(s.files) {
+ file = s.files[i]
+ }
+ s.mutex.RUnlock()
+ if file == nil || !f(file) {
+ break
+ }
+ }
+}
+
+func searchFiles(a []*File, x int) int {
+ return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
+}
+
+func (s *FileSet) file(p Pos) *File {
+ s.mutex.RLock()
+ // common case: p is in last file
+ if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
+ s.mutex.RUnlock()
+ return f
+ }
+ // p is not in last file - search all files
+ if i := searchFiles(s.files, int(p)); i >= 0 {
+ f := s.files[i]
+ // f.base <= int(p) by definition of searchFiles
+ if int(p) <= f.base+f.size {
+ s.mutex.RUnlock()
+ s.mutex.Lock()
+ s.last = f // race is ok - s.last is only a cache
+ s.mutex.Unlock()
+ return f
+ }
+ }
+ s.mutex.RUnlock()
+ return nil
+}
+
+// File returns the file that contains the position p.
+// If no such file is found (for instance for p == NoPos),
+// the result is nil.
+//
+func (s *FileSet) File(p Pos) (f *File) {
+ if p != NoPos {
+ f = s.file(p)
+ }
+ return
+}
+
+// PositionFor converts a Pos p in the fileset into a Position value.
+// If adjusted is set, the position may be adjusted by position-altering
+// //line comments; otherwise those comments are ignored.
+// p must be a Pos value in s or NoPos.
+//
+func (s *FileSet) PositionFor(p Pos, adjusted bool) (pos Position) {
+ if p != NoPos {
+ if f := s.file(p); f != nil {
+ pos = f.position(p, adjusted)
+ }
+ }
+ return
+}
+
+// Position converts a Pos p in the fileset into a Position value.
+// Calling s.Position(p) is equivalent to calling s.PositionFor(p, true).
+//
+func (s *FileSet) Position(p Pos) (pos Position) {
+ return s.PositionFor(p, true)
+}
+
+// -----------------------------------------------------------------------------
+// Helper functions
+
+func searchInts(a []int, x int) int {
+ // This function body is a manually inlined version of:
+ //
+ // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
+ //
+ // With better compiler optimizations, this may not be needed in the
+ // future, but at the moment this change improves the go/printer
+ // benchmark performance by ~30%. This has a direct impact on the
+ // speed of gofmt and thus seems worthwhile (2011-04-29).
+ // TODO(gri): Remove this when compilers have caught up.
+ i, j := 0, len(a)
+ for i < j {
+ h := i + (j-i)/2 // avoid overflow when computing h
+ // i ≤ h < j
+ if a[h] <= x {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ return i - 1
+}
diff --git a/src/go/token/position_test.go b/src/go/token/position_test.go
new file mode 100644
index 000000000..d26939ce2
--- /dev/null
+++ b/src/go/token/position_test.go
@@ -0,0 +1,297 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+ "fmt"
+ "math/rand"
+ "sync"
+ "testing"
+)
+
+func checkPos(t *testing.T, msg string, got, want Position) {
+ if got.Filename != want.Filename {
+ t.Errorf("%s: got filename = %q; want %q", msg, got.Filename, want.Filename)
+ }
+ if got.Offset != want.Offset {
+ t.Errorf("%s: got offset = %d; want %d", msg, got.Offset, want.Offset)
+ }
+ if got.Line != want.Line {
+ t.Errorf("%s: got line = %d; want %d", msg, got.Line, want.Line)
+ }
+ if got.Column != want.Column {
+ t.Errorf("%s: got column = %d; want %d", msg, got.Column, want.Column)
+ }
+}
+
+func TestNoPos(t *testing.T) {
+ if NoPos.IsValid() {
+ t.Errorf("NoPos should not be valid")
+ }
+ var fset *FileSet
+ checkPos(t, "nil NoPos", fset.Position(NoPos), Position{})
+ fset = NewFileSet()
+ checkPos(t, "fset NoPos", fset.Position(NoPos), Position{})
+}
+
+var tests = []struct {
+ filename string
+ source []byte // may be nil
+ size int
+ lines []int
+}{
+ {"a", []byte{}, 0, []int{}},
+ {"b", []byte("01234"), 5, []int{0}},
+ {"c", []byte("\n\n\n\n\n\n\n\n\n"), 9, []int{0, 1, 2, 3, 4, 5, 6, 7, 8}},
+ {"d", nil, 100, []int{0, 5, 10, 20, 30, 70, 71, 72, 80, 85, 90, 99}},
+ {"e", nil, 777, []int{0, 80, 100, 120, 130, 180, 267, 455, 500, 567, 620}},
+ {"f", []byte("package p\n\nimport \"fmt\""), 23, []int{0, 10, 11}},
+ {"g", []byte("package p\n\nimport \"fmt\"\n"), 24, []int{0, 10, 11}},
+ {"h", []byte("package p\n\nimport \"fmt\"\n "), 25, []int{0, 10, 11, 24}},
+}
+
+func linecol(lines []int, offs int) (int, int) {
+ prevLineOffs := 0
+ for line, lineOffs := range lines {
+ if offs < lineOffs {
+ return line, offs - prevLineOffs + 1
+ }
+ prevLineOffs = lineOffs
+ }
+ return len(lines), offs - prevLineOffs + 1
+}
+
+func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) {
+ for offs := 0; offs < f.Size(); offs++ {
+ p := f.Pos(offs)
+ offs2 := f.Offset(p)
+ if offs2 != offs {
+ t.Errorf("%s, Offset: got offset %d; want %d", f.Name(), offs2, offs)
+ }
+ line, col := linecol(lines, offs)
+ msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
+ checkPos(t, msg, f.Position(f.Pos(offs)), Position{f.Name(), offs, line, col})
+ checkPos(t, msg, fset.Position(p), Position{f.Name(), offs, line, col})
+ }
+}
+
+func makeTestSource(size int, lines []int) []byte {
+ src := make([]byte, size)
+ for _, offs := range lines {
+ if offs > 0 {
+ src[offs-1] = '\n'
+ }
+ }
+ return src
+}
+
+func TestPositions(t *testing.T) {
+ const delta = 7 // a non-zero base offset increment
+ fset := NewFileSet()
+ for _, test := range tests {
+ // verify consistency of test case
+ if test.source != nil && len(test.source) != test.size {
+ t.Errorf("%s: inconsistent test case: got file size %d; want %d", test.filename, len(test.source), test.size)
+ }
+
+ // add file and verify name and size
+ f := fset.AddFile(test.filename, fset.Base()+delta, test.size)
+ if f.Name() != test.filename {
+ t.Errorf("got filename %q; want %q", f.Name(), test.filename)
+ }
+ if f.Size() != test.size {
+ t.Errorf("%s: got file size %d; want %d", f.Name(), f.Size(), test.size)
+ }
+ if fset.File(f.Pos(0)) != f {
+ t.Errorf("%s: f.Pos(0) was not found in f", f.Name())
+ }
+
+ // add lines individually and verify all positions
+ for i, offset := range test.lines {
+ f.AddLine(offset)
+ if f.LineCount() != i+1 {
+ t.Errorf("%s, AddLine: got line count %d; want %d", f.Name(), f.LineCount(), i+1)
+ }
+ // adding the same offset again should be ignored
+ f.AddLine(offset)
+ if f.LineCount() != i+1 {
+ t.Errorf("%s, AddLine: got unchanged line count %d; want %d", f.Name(), f.LineCount(), i+1)
+ }
+ verifyPositions(t, fset, f, test.lines[0:i+1])
+ }
+
+ // add lines with SetLines and verify all positions
+ if ok := f.SetLines(test.lines); !ok {
+ t.Errorf("%s: SetLines failed", f.Name())
+ }
+ if f.LineCount() != len(test.lines) {
+ t.Errorf("%s, SetLines: got line count %d; want %d", f.Name(), f.LineCount(), len(test.lines))
+ }
+ verifyPositions(t, fset, f, test.lines)
+
+ // add lines with SetLinesForContent and verify all positions
+ src := test.source
+ if src == nil {
+ // no test source available - create one from scratch
+ src = makeTestSource(test.size, test.lines)
+ }
+ f.SetLinesForContent(src)
+ if f.LineCount() != len(test.lines) {
+ t.Errorf("%s, SetLinesForContent: got line count %d; want %d", f.Name(), f.LineCount(), len(test.lines))
+ }
+ verifyPositions(t, fset, f, test.lines)
+ }
+}
+
+func TestLineInfo(t *testing.T) {
+ fset := NewFileSet()
+ f := fset.AddFile("foo", fset.Base(), 500)
+ lines := []int{0, 42, 77, 100, 210, 220, 277, 300, 333, 401}
+ // add lines individually and provide alternative line information
+ for _, offs := range lines {
+ f.AddLine(offs)
+ f.AddLineInfo(offs, "bar", 42)
+ }
+ // verify positions for all offsets
+ for offs := 0; offs <= f.Size(); offs++ {
+ p := f.Pos(offs)
+ _, col := linecol(lines, offs)
+ msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
+ checkPos(t, msg, f.Position(f.Pos(offs)), Position{"bar", offs, 42, col})
+ checkPos(t, msg, fset.Position(p), Position{"bar", offs, 42, col})
+ }
+}
+
+func TestFiles(t *testing.T) {
+ fset := NewFileSet()
+ for i, test := range tests {
+ base := fset.Base()
+ if i%2 == 1 {
+ // Setting a negative base is equivalent to
+ // using fset.Base(), so test some of each.
+ base = -1
+ }
+ fset.AddFile(test.filename, base, test.size)
+ j := 0
+ fset.Iterate(func(f *File) bool {
+ if f.Name() != tests[j].filename {
+ t.Errorf("got filename = %s; want %s", f.Name(), tests[j].filename)
+ }
+ j++
+ return true
+ })
+ if j != i+1 {
+ t.Errorf("got %d files; want %d", j, i+1)
+ }
+ }
+}
+
+// FileSet.File should return nil if Pos is past the end of the FileSet.
+func TestFileSetPastEnd(t *testing.T) {
+ fset := NewFileSet()
+ for _, test := range tests {
+ fset.AddFile(test.filename, fset.Base(), test.size)
+ }
+ if f := fset.File(Pos(fset.Base())); f != nil {
+ t.Errorf("got %v, want nil", f)
+ }
+}
+
+func TestFileSetCacheUnlikely(t *testing.T) {
+ fset := NewFileSet()
+ offsets := make(map[string]int)
+ for _, test := range tests {
+ offsets[test.filename] = fset.Base()
+ fset.AddFile(test.filename, fset.Base(), test.size)
+ }
+ for file, pos := range offsets {
+ f := fset.File(Pos(pos))
+ if f.Name() != file {
+ t.Errorf("got %q at position %d, want %q", f.Name(), pos, file)
+ }
+ }
+}
+
+// issue 4345. Test that concurrent use of FileSet.Position does not
+// trigger a race in the FileSet position cache.
+func TestFileSetRace(t *testing.T) {
+ fset := NewFileSet()
+ for i := 0; i < 100; i++ {
+ fset.AddFile(fmt.Sprintf("file-%d", i), fset.Base(), 1031)
+ }
+ max := int32(fset.Base())
+ var stop sync.WaitGroup
+ r := rand.New(rand.NewSource(7))
+ for i := 0; i < 2; i++ {
+ r := rand.New(rand.NewSource(r.Int63()))
+ stop.Add(1)
+ go func() {
+ for i := 0; i < 1000; i++ {
+ fset.Position(Pos(r.Int31n(max)))
+ }
+ stop.Done()
+ }()
+ }
+ stop.Wait()
+}
+
+func TestPositionFor(t *testing.T) {
+ src := []byte(`
+foo
+b
+ar
+//line :100
+foobar
+//line bar:3
+done
+`)
+
+ const filename = "foo"
+ fset := NewFileSet()
+ f := fset.AddFile(filename, fset.Base(), len(src))
+ f.SetLinesForContent(src)
+
+ // verify position info
+ for i, offs := range f.lines {
+ got1 := f.PositionFor(f.Pos(offs), false)
+ got2 := f.PositionFor(f.Pos(offs), true)
+ got3 := f.Position(f.Pos(offs))
+ want := Position{filename, offs, i + 1, 1}
+ checkPos(t, "1. PositionFor unadjusted", got1, want)
+ checkPos(t, "1. PositionFor adjusted", got2, want)
+ checkPos(t, "1. Position", got3, want)
+ }
+
+ // manually add //line info on lines l1, l2
+ const l1, l2 = 5, 7
+ f.AddLineInfo(f.lines[l1-1], "", 100)
+ f.AddLineInfo(f.lines[l2-1], "bar", 3)
+
+ // unadjusted position info must remain unchanged
+ for i, offs := range f.lines {
+ got1 := f.PositionFor(f.Pos(offs), false)
+ want := Position{filename, offs, i + 1, 1}
+ checkPos(t, "2. PositionFor unadjusted", got1, want)
+ }
+
+ // adjusted position info should have changed
+ for i, offs := range f.lines {
+ got2 := f.PositionFor(f.Pos(offs), true)
+ got3 := f.Position(f.Pos(offs))
+ want := Position{filename, offs, i + 1, 1}
+ // manually compute wanted filename and line
+ line := want.Line
+ if i+1 >= l1 {
+ want.Filename = ""
+ want.Line = line - l1 + 100
+ }
+ if i+1 >= l2 {
+ want.Filename = "bar"
+ want.Line = line - l2 + 3
+ }
+ checkPos(t, "3. PositionFor adjusted", got2, want)
+ checkPos(t, "3. Position", got3, want)
+ }
+}
diff --git a/src/go/token/serialize.go b/src/go/token/serialize.go
new file mode 100644
index 000000000..4adc8f9e3
--- /dev/null
+++ b/src/go/token/serialize.go
@@ -0,0 +1,56 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+type serializedFile struct {
+ // fields correspond 1:1 to fields with same (lower-case) name in File
+ Name string
+ Base int
+ Size int
+ Lines []int
+ Infos []lineInfo
+}
+
+type serializedFileSet struct {
+ Base int
+ Files []serializedFile
+}
+
+// Read calls decode to deserialize a file set into s; s must not be nil.
+func (s *FileSet) Read(decode func(interface{}) error) error {
+ var ss serializedFileSet
+ if err := decode(&ss); err != nil {
+ return err
+ }
+
+ s.mutex.Lock()
+ s.base = ss.Base
+ files := make([]*File, len(ss.Files))
+ for i := 0; i < len(ss.Files); i++ {
+ f := &ss.Files[i]
+ files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
+ }
+ s.files = files
+ s.last = nil
+ s.mutex.Unlock()
+
+ return nil
+}
+
+// Write calls encode to serialize the file set s.
+func (s *FileSet) Write(encode func(interface{}) error) error {
+ var ss serializedFileSet
+
+ s.mutex.Lock()
+ ss.Base = s.base
+ files := make([]serializedFile, len(s.files))
+ for i, f := range s.files {
+ files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
+ }
+ ss.Files = files
+ s.mutex.Unlock()
+
+ return encode(ss)
+}
diff --git a/src/go/token/serialize_test.go b/src/go/token/serialize_test.go
new file mode 100644
index 000000000..4e925adb6
--- /dev/null
+++ b/src/go/token/serialize_test.go
@@ -0,0 +1,111 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package token
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "testing"
+)
+
+// equal returns nil if p and q describe the same file set;
+// otherwise it returns an error describing the discrepancy.
+func equal(p, q *FileSet) error {
+ if p == q {
+ // avoid deadlock if p == q
+ return nil
+ }
+
+ // not strictly needed for the test
+ p.mutex.Lock()
+ q.mutex.Lock()
+ defer q.mutex.Unlock()
+ defer p.mutex.Unlock()
+
+ if p.base != q.base {
+ return fmt.Errorf("different bases: %d != %d", p.base, q.base)
+ }
+
+ if len(p.files) != len(q.files) {
+ return fmt.Errorf("different number of files: %d != %d", len(p.files), len(q.files))
+ }
+
+ for i, f := range p.files {
+ g := q.files[i]
+ if f.set != p {
+ return fmt.Errorf("wrong fileset for %q", f.name)
+ }
+ if g.set != q {
+ return fmt.Errorf("wrong fileset for %q", g.name)
+ }
+ if f.name != g.name {
+ return fmt.Errorf("different filenames: %q != %q", f.name, g.name)
+ }
+ if f.base != g.base {
+ return fmt.Errorf("different base for %q: %d != %d", f.name, f.base, g.base)
+ }
+ if f.size != g.size {
+ return fmt.Errorf("different size for %q: %d != %d", f.name, f.size, g.size)
+ }
+ for j, l := range f.lines {
+ m := g.lines[j]
+ if l != m {
+ return fmt.Errorf("different offsets for %q", f.name)
+ }
+ }
+ for j, l := range f.infos {
+ m := g.infos[j]
+ if l.Offset != m.Offset || l.Filename != m.Filename || l.Line != m.Line {
+ return fmt.Errorf("different infos for %q", f.name)
+ }
+ }
+ }
+
+ // we don't care about .last - it's just a cache
+ return nil
+}
+
+func checkSerialize(t *testing.T, p *FileSet) {
+ var buf bytes.Buffer
+ encode := func(x interface{}) error {
+ return gob.NewEncoder(&buf).Encode(x)
+ }
+ if err := p.Write(encode); err != nil {
+ t.Errorf("writing fileset failed: %s", err)
+ return
+ }
+ q := NewFileSet()
+ decode := func(x interface{}) error {
+ return gob.NewDecoder(&buf).Decode(x)
+ }
+ if err := q.Read(decode); err != nil {
+ t.Errorf("reading fileset failed: %s", err)
+ return
+ }
+ if err := equal(p, q); err != nil {
+ t.Errorf("filesets not identical: %s", err)
+ }
+}
+
+func TestSerialization(t *testing.T) {
+ p := NewFileSet()
+ checkSerialize(t, p)
+ // add some files
+ for i := 0; i < 10; i++ {
+ f := p.AddFile(fmt.Sprintf("file%d", i), p.Base()+i, i*100)
+ checkSerialize(t, p)
+ // add some lines and alternative file infos
+ line := 1000
+ for offs := 0; offs < f.Size(); offs += 40 + i {
+ f.AddLine(offs)
+ if offs%7 == 0 {
+ f.AddLineInfo(offs, fmt.Sprintf("file%d", offs), line)
+ line += 33
+ }
+ }
+ checkSerialize(t, p)
+ }
+}
diff --git a/src/go/token/token.go b/src/go/token/token.go
new file mode 100644
index 000000000..865f63f4a
--- /dev/null
+++ b/src/go/token/token.go
@@ -0,0 +1,308 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package token defines constants representing the lexical tokens of the Go
+// programming language and basic operations on tokens (printing, predicates).
+//
+package token
+
+import "strconv"
+
+// Token is the set of lexical tokens of the Go programming language.
+type Token int
+
+// The list of tokens.
+const (
+ // Special tokens
+ ILLEGAL Token = iota
+ EOF
+ COMMENT
+
+ literal_beg
+ // Identifiers and basic type literals
+ // (these tokens stand for classes of literals)
+ IDENT // main
+ INT // 12345
+ FLOAT // 123.45
+ IMAG // 123.45i
+ CHAR // 'a'
+ STRING // "abc"
+ literal_end
+
+ operator_beg
+ // Operators and delimiters
+ ADD // +
+ SUB // -
+ MUL // *
+ QUO // /
+ REM // %
+
+ AND // &
+ OR // |
+ XOR // ^
+ SHL // <<
+ SHR // >>
+ AND_NOT // &^
+
+ ADD_ASSIGN // +=
+ SUB_ASSIGN // -=
+ MUL_ASSIGN // *=
+ QUO_ASSIGN // /=
+ REM_ASSIGN // %=
+
+ AND_ASSIGN // &=
+ OR_ASSIGN // |=
+ XOR_ASSIGN // ^=
+ SHL_ASSIGN // <<=
+ SHR_ASSIGN // >>=
+ AND_NOT_ASSIGN // &^=
+
+ LAND // &&
+ LOR // ||
+ ARROW // <-
+ INC // ++
+ DEC // --
+
+ EQL // ==
+ LSS // <
+ GTR // >
+ ASSIGN // =
+ NOT // !
+
+ NEQ // !=
+ LEQ // <=
+ GEQ // >=
+ DEFINE // :=
+ ELLIPSIS // ...
+
+ LPAREN // (
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+
+ RPAREN // )
+ RBRACK // ]
+ RBRACE // }
+ SEMICOLON // ;
+ COLON // :
+ operator_end
+
+ keyword_beg
+ // Keywords
+ BREAK
+ CASE
+ CHAN
+ CONST
+ CONTINUE
+
+ DEFAULT
+ DEFER
+ ELSE
+ FALLTHROUGH
+ FOR
+
+ FUNC
+ GO
+ GOTO
+ IF
+ IMPORT
+
+ INTERFACE
+ MAP
+ PACKAGE
+ RANGE
+ RETURN
+
+ SELECT
+ STRUCT
+ SWITCH
+ TYPE
+ VAR
+ keyword_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+ COMMENT: "COMMENT",
+
+ IDENT: "IDENT",
+ INT: "INT",
+ FLOAT: "FLOAT",
+ IMAG: "IMAG",
+ CHAR: "CHAR",
+ STRING: "STRING",
+
+ ADD: "+",
+ SUB: "-",
+ MUL: "*",
+ QUO: "/",
+ REM: "%",
+
+ AND: "&",
+ OR: "|",
+ XOR: "^",
+ SHL: "<<",
+ SHR: ">>",
+ AND_NOT: "&^",
+
+ ADD_ASSIGN: "+=",
+ SUB_ASSIGN: "-=",
+ MUL_ASSIGN: "*=",
+ QUO_ASSIGN: "/=",
+ REM_ASSIGN: "%=",
+
+ AND_ASSIGN: "&=",
+ OR_ASSIGN: "|=",
+ XOR_ASSIGN: "^=",
+ SHL_ASSIGN: "<<=",
+ SHR_ASSIGN: ">>=",
+ AND_NOT_ASSIGN: "&^=",
+
+ LAND: "&&",
+ LOR: "||",
+ ARROW: "<-",
+ INC: "++",
+ DEC: "--",
+
+ EQL: "==",
+ LSS: "<",
+ GTR: ">",
+ ASSIGN: "=",
+ NOT: "!",
+
+ NEQ: "!=",
+ LEQ: "<=",
+ GEQ: ">=",
+ DEFINE: ":=",
+ ELLIPSIS: "...",
+
+ LPAREN: "(",
+ LBRACK: "[",
+ LBRACE: "{",
+ COMMA: ",",
+ PERIOD: ".",
+
+ RPAREN: ")",
+ RBRACK: "]",
+ RBRACE: "}",
+ SEMICOLON: ";",
+ COLON: ":",
+
+ BREAK: "break",
+ CASE: "case",
+ CHAN: "chan",
+ CONST: "const",
+ CONTINUE: "continue",
+
+ DEFAULT: "default",
+ DEFER: "defer",
+ ELSE: "else",
+ FALLTHROUGH: "fallthrough",
+ FOR: "for",
+
+ FUNC: "func",
+ GO: "go",
+ GOTO: "goto",
+ IF: "if",
+ IMPORT: "import",
+
+ INTERFACE: "interface",
+ MAP: "map",
+ PACKAGE: "package",
+ RANGE: "range",
+ RETURN: "return",
+
+ SELECT: "select",
+ STRUCT: "struct",
+ SWITCH: "switch",
+ TYPE: "type",
+ VAR: "var",
+}
+
+// String returns the string corresponding to the token tok.
+// For operators, delimiters, and keywords the string is the actual
+// token character sequence (e.g., for the token ADD, the string is
+// "+"). For all other tokens the string corresponds to the token
+// constant name (e.g. for the token IDENT, the string is "IDENT").
+//
+func (tok Token) String() string {
+ s := ""
+ if 0 <= tok && tok < Token(len(tokens)) {
+ s = tokens[tok]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(tok)) + ")"
+ }
+ return s
+}
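+
+// stringSketch is an illustrative sketch (not part of the original sources)
+// of the two behaviors described above.
+func stringSketch() {
+ _ = ADD.String()         // "+": operators yield the token text
+ _ = IDENT.String()       // "IDENT": other tokens yield the constant name
+ _ = Token(1000).String() // "token(1000)": fallback for out-of-range values
+}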
+
+// A set of constants for precedence-based expression parsing.
+// Non-operators have lowest precedence, followed by operators
+// starting with precedence 1 up to unary operators. The highest
+// precedence serves as "catch-all" precedence for selector,
+// indexing, and other operator and delimiter tokens.
+//
+const (
+ LowestPrec = 0 // non-operators
+ UnaryPrec = 6
+ HighestPrec = 7
+)
+
+// Precedence returns the operator precedence of the binary
+// operator op. If op is not a binary operator, the result
+// is LowestPrec.
+//
+func (op Token) Precedence() int {
+ switch op {
+ case LOR:
+ return 1
+ case LAND:
+ return 2
+ case EQL, NEQ, LSS, LEQ, GTR, GEQ:
+ return 3
+ case ADD, SUB, OR, XOR:
+ return 4
+ case MUL, QUO, REM, SHL, SHR, AND, AND_NOT:
+ return 5
+ }
+ return LowestPrec
+}
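+
+// precedenceSketch is an illustrative sketch (not part of the original
+// sources): in "a + b * c", MUL binds tighter than ADD, so a parser using
+// Precedence groups the expression as "a + (b * c)".
+func precedenceSketch() {
+ _ = MUL.Precedence() > ADD.Precedence() // true: 5 > 4
+ _ = LPAREN.Precedence()                 // LowestPrec: not a binary operator
+}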
+
+var keywords map[string]Token
+
+func init() {
+ keywords = make(map[string]Token)
+ for i := keyword_beg + 1; i < keyword_end; i++ {
+ keywords[tokens[i]] = i
+ }
+}
+
+// Lookup maps an identifier to its keyword token or IDENT (if not a keyword).
+//
+func Lookup(ident string) Token {
+ if tok, is_keyword := keywords[ident]; is_keyword {
+ return tok
+ }
+ return IDENT
+}
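+
+// lookupSketch is an illustrative sketch (not part of the original sources):
+// Lookup separates keywords from ordinary identifiers.
+func lookupSketch() {
+ _ = Lookup("func") == FUNC   // true: "func" is a keyword
+ _ = Lookup("funky") == IDENT // true: not a keyword
+}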
+
+// Predicates
+
+// IsLiteral returns true for tokens corresponding to identifiers
+// and basic type literals; it returns false otherwise.
+//
+func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+//
+func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
+
+// IsKeyword returns true for tokens corresponding to keywords;
+// it returns false otherwise.
+//
+func (tok Token) IsKeyword() bool { return keyword_beg < tok && tok < keyword_end }