Diffstat (limited to 'src')
-rw-r--r--  src/cmd/6g/ggen.c | 40
-rw-r--r--  src/cmd/8g/ggen.c | 28
-rw-r--r--  src/cmd/gc/builtin.c | 1
-rw-r--r--  src/cmd/gc/gen.c | 12
-rw-r--r--  src/cmd/gc/go.h | 2
-rw-r--r--  src/cmd/gc/mparith2.c | 4
-rw-r--r--  src/cmd/gc/runtime.go | 1
-rw-r--r--  src/cmd/gc/sinit.c | 12
-rw-r--r--  src/cmd/gc/typecheck.c | 57
-rw-r--r--  src/cmd/gc/walk.c | 278
-rw-r--r--  src/cmd/go/build.go | 1
-rw-r--r--  src/cmd/go/generate.go | 4
-rw-r--r--  src/cmd/go/get.go | 25
-rwxr-xr-x  src/cmd/go/test.bash | 10
-rw-r--r--  src/cmd/go/vcs.go | 50
-rw-r--r--  src/cmd/ld/data.c | 4
-rw-r--r--  src/cmd/ld/ldelf.c | 2
-rw-r--r--  src/cmd/objdump/main.go | 2
-rw-r--r--  src/cmd/objdump/objdump_test.go | 19
-rw-r--r--  src/compress/flate/inflate.go | 27
-rw-r--r--  src/compress/flate/inflate_test.go | 39
-rw-r--r--  src/compress/gzip/gunzip.go | 33
-rw-r--r--  src/compress/gzip/gunzip_test.go | 41
-rw-r--r--  src/compress/zlib/reader.go | 80
-rw-r--r--  src/crypto/tls/alert.go | 2
-rw-r--r--  src/crypto/tls/cipher_suites.go | 5
-rw-r--r--  src/crypto/tls/handshake_server.go | 12
-rw-r--r--  src/crypto/tls/handshake_server_test.go | 27
-rw-r--r--  src/crypto/tls/testdata/Server-TLSv11-FallbackSCSV | 17
-rw-r--r--  src/debug/pe/file.go | 12
-rw-r--r--  src/encoding/csv/writer.go | 16
-rw-r--r--  src/encoding/csv/writer_test.go | 11
-rw-r--r--  src/encoding/gob/codec_test.go | 22
-rw-r--r--  src/encoding/gob/dec_helpers.go | 468
-rw-r--r--  src/encoding/gob/decgen.go | 240
-rw-r--r--  src/encoding/gob/decode.go | 96
-rw-r--r--  src/encoding/gob/decoder.go | 42
-rw-r--r--  src/encoding/gob/enc_helpers.go | 414
-rw-r--r--  src/encoding/gob/encgen.go | 218
-rw-r--r--  src/encoding/gob/encode.go | 86
-rw-r--r--  src/encoding/gob/encoder.go | 7
-rw-r--r--  src/encoding/gob/timing_test.go | 192
-rw-r--r--  src/encoding/json/encode.go | 8
-rw-r--r--  src/encoding/json/encode_test.go | 52
-rw-r--r--  src/flag/flag.go | 1
-rw-r--r--  src/fmt/doc.go | 4
-rw-r--r--  src/html/template/js_test.go | 2
-rw-r--r--  src/net/http/pprof/pprof.go | 4
-rw-r--r--  src/net/lookup.go | 51
-rw-r--r--  src/net/singleflight.go | 66
-rw-r--r--  src/net/z_last_test.go | 62
-rw-r--r--  src/os/dir_unix.go | 2
-rw-r--r--  src/os/exec/exec_test.go | 10
-rw-r--r--  src/os/file.go | 9
-rw-r--r--  src/os/file_plan9.go | 8
-rw-r--r--  src/os/file_posix.go | 2
-rw-r--r--  src/os/file_unix.go | 8
-rw-r--r--  src/os/file_windows.go | 4
-rw-r--r--  src/reflect/all_test.go | 37
-rw-r--r--  src/reflect/makefunc.go | 6
-rw-r--r--  src/reflect/type.go | 3
-rw-r--r--  src/reflect/value.go | 225
-rw-r--r--  src/regexp/all_test.go | 16
-rw-r--r--  src/regexp/regexp.go | 2
-rw-r--r--  src/regexp/syntax/parse.go | 7
-rw-r--r--  src/runtime/asm_386.s | 20
-rw-r--r--  src/runtime/asm_amd64.s | 21
-rw-r--r--  src/runtime/asm_amd64p32.s | 7
-rw-r--r--  src/runtime/asm_arm.s | 18
-rw-r--r--  src/runtime/cgo/gcc_arm.S | 15
-rw-r--r--  src/runtime/chan_test.go | 29
-rw-r--r--  src/runtime/crash_cgo_test.go | 52
-rw-r--r--  src/runtime/crash_test.go | 17
-rw-r--r--  src/runtime/debug/garbage.go | 30
-rw-r--r--  src/runtime/debug/garbage_test.go | 13
-rw-r--r--  src/runtime/env_plan9.go | 6
-rw-r--r--  src/runtime/extern.go | 6
-rw-r--r--  src/runtime/funcdata.h | 3
-rw-r--r--  src/runtime/malloc.h | 3
-rw-r--r--  src/runtime/mem.go | 3
-rw-r--r--  src/runtime/mgc0.c | 35
-rw-r--r--  src/runtime/mprof.go | 6
-rw-r--r--  src/runtime/pprof/mprof_test.go | 99
-rw-r--r--  src/runtime/pprof/pprof_test.go | 2
-rw-r--r--  src/runtime/print1.go | 27
-rw-r--r--  src/runtime/proc.c | 20
-rw-r--r--  src/runtime/rt0_nacl_amd64p32.s | 2
-rw-r--r--  src/runtime/runtime.c | 4
-rw-r--r--  src/runtime/runtime.h | 2
-rw-r--r--  src/runtime/select.go | 8
-rw-r--r--  src/runtime/stack.c | 4
-rw-r--r--  src/runtime/stubs.go | 4
-rw-r--r--  src/runtime/symtab.go | 11
-rw-r--r--  src/runtime/sys_nacl_amd64p32.s | 6
-rw-r--r--  src/runtime/time.go | 29
-rw-r--r--  src/sync/pool.go | 2
-rw-r--r--  src/sync/pool_test.go | 54
-rw-r--r--  src/syscall/env_plan9.go | 97
-rw-r--r--  src/syscall/fs_nacl.go | 6
-rw-r--r--  src/syscall/route_bsd.go | 2
-rw-r--r--  src/testing/testing.go | 1
-rw-r--r--  src/text/template/exec.go | 2
-rw-r--r--  src/text/template/exec_test.go | 12
-rw-r--r--  src/time/sleep.go | 6
-rw-r--r--  src/time/sleep_test.go | 21
-rw-r--r--  src/unicode/utf8/utf8.go | 26
-rw-r--r--  src/unsafe/unsafe.go | 3
107 files changed, 3282 insertions(+), 700 deletions(-)
diff --git a/src/cmd/6g/ggen.c b/src/cmd/6g/ggen.c
index 987473cca..363620769 100644
--- a/src/cmd/6g/ggen.c
+++ b/src/cmd/6g/ggen.c
@@ -1102,26 +1102,54 @@ clearfat(Node *nl)
c = w % 8; // bytes
q = w / 8; // quads
+ if(q < 4) {
+ // Write sequence of MOV 0, off(base) instead of using STOSQ.
+ // The hope is that although the code will be slightly longer,
+ // the MOVs will have no dependencies and pipeline better
+ // than the unrolled STOSQ loop.
+ // NOTE: Must use agen, not igen, so that optimizer sees address
+ // being taken. We are not writing on field boundaries.
+ agenr(nl, &n1, N);
+ n1.op = OINDREG;
+ nodconst(&z, types[TUINT64], 0);
+ while(q-- > 0) {
+ n1.type = z.type;
+ gins(AMOVQ, &z, &n1);
+ n1.xoffset += 8;
+ }
+ if(c >= 4) {
+ nodconst(&z, types[TUINT32], 0);
+ n1.type = z.type;
+ gins(AMOVL, &z, &n1);
+ n1.xoffset += 4;
+ c -= 4;
+ }
+ nodconst(&z, types[TUINT8], 0);
+ while(c-- > 0) {
+ n1.type = z.type;
+ gins(AMOVB, &z, &n1);
+ n1.xoffset++;
+ }
+ regfree(&n1);
+ return;
+ }
+
savex(D_DI, &n1, &oldn1, N, types[tptr]);
agen(nl, &n1);
savex(D_AX, &ax, &oldax, N, types[tptr]);
gconreg(AMOVL, 0, D_AX);
- if(q > 128 || (q >= 4 && nacl)) {
+ if(q > 128 || nacl) {
gconreg(movptr, q, D_CX);
gins(AREP, N, N); // repeat
gins(ASTOSQ, N, N); // STOQ AL,*(DI)+
- } else if(q >= 4) {
+ } else {
p = gins(ADUFFZERO, N, N);
p->to.type = D_ADDR;
p->to.sym = linksym(pkglookup("duffzero", runtimepkg));
// 2 and 128 = magic constants: see ../../runtime/asm_amd64.s
p->to.offset = 2*(128-q);
- } else
- while(q > 0) {
- gins(ASTOSQ, N, N); // STOQ AL,*(DI)+
- q--;
}
z = ax;
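
For a sense of the code this affects, a hedged Go illustration (the instruction counts describe amd64 and are the compiler's choice, nothing visible in the source itself):

	package main

	func main() {
		var a [5]int32 // 20 bytes: q = 2 quads, c = 4 trailing bytes
		// With the change above, zeroing a small "fat" value like this is
		// compiled on amd64 as two MOVQ $0 stores plus one MOVL $0 store,
		// instead of setting up DI/AX for an unrolled STOSQ loop.
		a = [5]int32{}
		_ = a
	}
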
diff --git a/src/cmd/8g/ggen.c b/src/cmd/8g/ggen.c
index 7c986cc64..6333a60bb 100644
--- a/src/cmd/8g/ggen.c
+++ b/src/cmd/8g/ggen.c
@@ -157,7 +157,7 @@ void
clearfat(Node *nl)
{
uint32 w, c, q;
- Node n1;
+ Node n1, z;
Prog *p;
/* clear a fat object */
@@ -172,6 +172,32 @@ clearfat(Node *nl)
c = w % 4; // bytes
q = w / 4; // quads
+ if(q < 4) {
+ // Write sequence of MOV 0, off(base) instead of using STOSL.
+ // The hope is that although the code will be slightly longer,
+ // the MOVs will have no dependencies and pipeline better
+ // than the unrolled STOSL loop.
+ // NOTE: Must use agen, not igen, so that optimizer sees address
+ // being taken. We are not writing on field boundaries.
+ regalloc(&n1, types[tptr], N);
+ agen(nl, &n1);
+ n1.op = OINDREG;
+ nodconst(&z, types[TUINT64], 0);
+ while(q-- > 0) {
+ n1.type = z.type;
+ gins(AMOVL, &z, &n1);
+ n1.xoffset += 4;
+ }
+ nodconst(&z, types[TUINT8], 0);
+ while(c-- > 0) {
+ n1.type = z.type;
+ gins(AMOVB, &z, &n1);
+ n1.xoffset++;
+ }
+ regfree(&n1);
+ return;
+ }
+
nodreg(&n1, types[tptr], D_DI);
agen(nl, &n1);
gconreg(AMOVL, 0, D_AX);
diff --git a/src/cmd/gc/builtin.c b/src/cmd/gc/builtin.c
index 5fbb4f0cf..fbca4ee5f 100644
--- a/src/cmd/gc/builtin.c
+++ b/src/cmd/gc/builtin.c
@@ -24,7 +24,6 @@ char *runtimeimport =
"func @\"\".printslice (? any)\n"
"func @\"\".printnl ()\n"
"func @\"\".printsp ()\n"
- "func @\"\".goprintf ()\n"
"func @\"\".concatstring2 (? string, ? string) (? string)\n"
"func @\"\".concatstring3 (? string, ? string, ? string) (? string)\n"
"func @\"\".concatstring4 (? string, ? string, ? string, ? string) (? string)\n"
diff --git a/src/cmd/gc/gen.c b/src/cmd/gc/gen.c
index eb9eacca8..c7c9fcdaf 100644
--- a/src/cmd/gc/gen.c
+++ b/src/cmd/gc/gen.c
@@ -731,14 +731,10 @@ cgen_as(Node *nl, Node *nr)
return;
}
- if(nr == N || isnil(nr)) {
- // externals and heaps should already be clear
- if(nr == N) {
- if(nl->class == PEXTERN)
- return;
- if(nl->class & PHEAP)
- return;
- }
+ if(nr == N || iszero(nr)) {
+ // heaps should already be clear
+ if(nr == N && (nl->class & PHEAP))
+ return;
tl = nl->type;
if(tl == T)
diff --git a/src/cmd/gc/go.h b/src/cmd/gc/go.h
index 8178f7272..965a0550d 100644
--- a/src/cmd/gc/go.h
+++ b/src/cmd/gc/go.h
@@ -283,6 +283,7 @@ struct Node
uchar addrtaken; // address taken, even if not moved to heap
uchar dupok; // duplicate definitions ok (for func)
uchar wrapper; // is method wrapper (for func)
+ uchar reslice; // this is a reslice x = x[0:y] or x = append(x, ...)
schar likely; // likeliness of if statement
uchar hasbreak; // has break statement
uchar needzero; // if it contains pointers, needs to be zeroed on function entry
@@ -1374,6 +1375,7 @@ int isnilinter(Type *t);
int isptrto(Type *t, int et);
int isslice(Type *t);
int istype(Type *t, int et);
+int iszero(Node *n);
void linehist(char *file, int32 off, int relative);
NodeList* list(NodeList *l, Node *n);
NodeList* list1(Node *n);
diff --git a/src/cmd/gc/mparith2.c b/src/cmd/gc/mparith2.c
index 5cf98c62c..fd9f591ce 100644
--- a/src/cmd/gc/mparith2.c
+++ b/src/cmd/gc/mparith2.c
@@ -656,7 +656,7 @@ mpdivmodfixfix(Mpint *q, Mpint *r, Mpint *n, Mpint *d)
}
static int
-iszero(Mpint *a)
+mpiszero(Mpint *a)
{
long *a1;
int i;
@@ -687,7 +687,7 @@ mpdivfract(Mpint *a, Mpint *b)
for(j=0; j<Mpscale; j++) {
x <<= 1;
if(mpcmp(&d, &n) <= 0) {
- if(!iszero(&d))
+ if(!mpiszero(&d))
x |= 1;
mpsubfixfix(&n, &d);
}
diff --git a/src/cmd/gc/runtime.go b/src/cmd/gc/runtime.go
index 86afe67f1..0fb15c265 100644
--- a/src/cmd/gc/runtime.go
+++ b/src/cmd/gc/runtime.go
@@ -36,7 +36,6 @@ func printeface(any)
func printslice(any)
func printnl()
func printsp()
-func goprintf()
func concatstring2(string, string) string
func concatstring3(string, string, string) string
diff --git a/src/cmd/gc/sinit.c b/src/cmd/gc/sinit.c
index f050026d9..8ad7ae7ab 100644
--- a/src/cmd/gc/sinit.c
+++ b/src/cmd/gc/sinit.c
@@ -17,7 +17,6 @@ enum
InitPending = 2,
};
-static int iszero(Node*);
static void initplan(Node*);
static NodeList *initlist;
static void init2(Node*, NodeList**);
@@ -1068,7 +1067,7 @@ anylit(int ctxt, Node *n, Node *var, NodeList **init)
if(t->etype != TSTRUCT)
fatal("anylit: not struct");
- if(simplename(var)) {
+ if(simplename(var) && count(n->list) > 4) {
if(ctxt == 0) {
// lay out static data
@@ -1091,7 +1090,7 @@ anylit(int ctxt, Node *n, Node *var, NodeList **init)
}
// initialize whatever is not completely specified
- if(count(n->list) < structcount(t)) {
+ if(simplename(var) || count(n->list) < structcount(t)) {
a = nod(OAS, var, N);
typecheck(&a, Etop);
walkexpr(&a, init);
@@ -1108,7 +1107,7 @@ anylit(int ctxt, Node *n, Node *var, NodeList **init)
break;
}
- if(simplename(var)) {
+ if(simplename(var) && count(n->list) > 4) {
if(ctxt == 0) {
// lay out static data
@@ -1131,7 +1130,7 @@ anylit(int ctxt, Node *n, Node *var, NodeList **init)
}
// initialize whatever is not completely specified
- if(count(n->list) < t->bound) {
+ if(simplename(var) || count(n->list) < t->bound) {
a = nod(OAS, var, N);
typecheck(&a, Etop);
walkexpr(&a, init);
@@ -1356,7 +1355,6 @@ no:
return 0;
}
-static int iszero(Node*);
static int isvaluelit(Node*);
static InitEntry* entry(InitPlan*);
static void addvalue(InitPlan*, vlong, Node*, Node*);
@@ -1440,7 +1438,7 @@ addvalue(InitPlan *p, vlong xoffset, Node *key, Node *n)
e->expr = n;
}
-static int
+int
iszero(Node *n)
{
NodeList *l;
diff --git a/src/cmd/gc/typecheck.c b/src/cmd/gc/typecheck.c
index ff49fe6f9..714c66268 100644
--- a/src/cmd/gc/typecheck.c
+++ b/src/cmd/gc/typecheck.c
@@ -2127,13 +2127,16 @@ lookdot(Node *n, Type *t, int dostrcmp)
n->left = nod(OADDR, n->left, N);
n->left->implicit = 1;
typecheck(&n->left, Etype|Erv);
- } else if(tt->etype == tptr && eqtype(tt->type, rcvr)) {
+ } else if(tt->etype == tptr && rcvr->etype != tptr && eqtype(tt->type, rcvr)) {
n->left = nod(OIND, n->left, N);
n->left->implicit = 1;
typecheck(&n->left, Etype|Erv);
- } else if(tt->etype == tptr && tt->type->etype == tptr && eqtype(derefall(tt), rcvr)) {
+ } else if(tt->etype == tptr && tt->type->etype == tptr && eqtype(derefall(tt), derefall(rcvr))) {
yyerror("calling method %N with receiver %lN requires explicit dereference", n->right, n->left);
while(tt->etype == tptr) {
+ // Stop one level early for method with pointer receiver.
+ if(rcvr->etype == tptr && tt->type->etype != tptr)
+ break;
n->left = nod(OIND, n->left, N);
n->left->implicit = 1;
typecheck(&n->left, Etype|Erv);
@@ -2814,6 +2817,33 @@ checkassignlist(NodeList *l)
checkassign(l->n);
}
+// Check whether l and r are the same side effect-free expression,
+// so that it is safe to reuse one instead of computing both.
+static int
+samesafeexpr(Node *l, Node *r)
+{
+ if(l->op != r->op || !eqtype(l->type, r->type))
+ return 0;
+
+ switch(l->op) {
+ case ONAME:
+ case OCLOSUREVAR:
+ return l == r;
+
+ case ODOT:
+ case ODOTPTR:
+ return l->right != nil && r->right != nil && l->right->sym == r->right->sym && samesafeexpr(l->left, r->left);
+
+ case OIND:
+ return samesafeexpr(l->left, r->left);
+
+ case OINDEX:
+ return samesafeexpr(l->left, r->left) && samesafeexpr(l->right, r->right);
+ }
+
+ return 0;
+}
+
/*
* type check assignment.
* if this assignment is the definition of a var on the left side,
@@ -2851,6 +2881,29 @@ typecheckas(Node *n)
n->typecheck = 1;
if(n->left->typecheck == 0)
typecheck(&n->left, Erv | Easgn);
+
+ // Recognize slices being updated in place, for better code generation later.
+ // Don't rewrite if using race detector, to avoid needing to teach race detector
+ // about this optimization.
+ if(n->left && n->left->op != OINDEXMAP && n->right && !flag_race) {
+ switch(n->right->op) {
+ case OSLICE:
+ case OSLICE3:
+ case OSLICESTR:
+ // For x = x[0:y], x can be updated in place, without touching pointer.
+ if(samesafeexpr(n->left, n->right->left) && (n->right->right->left == N || iszero(n->right->right->left)))
+ n->right->reslice = 1;
+ break;
+
+ case OAPPEND:
+ // For x = append(x, ...), x can be updated in place when there is capacity,
+ // without touching the pointer; otherwise the emitted code to growslice
+ // can take care of updating the pointer, and only in that case.
+ if(n->right->list != nil && samesafeexpr(n->left, n->right->list->n))
+ n->right->reslice = 1;
+ break;
+ }
+ }
}
static void
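
In Go source terms, a sketch of the assignments this pass now marks (variable names are arbitrary; the comments restate the compiler behavior described above):

	package main

	func main() {
		buf := make([]byte, 0, 64)
		n := 16

		// Reslice with a zero (or missing) low bound: only the len and cap
		// words of buf change, so the pointer word need not be rewritten.
		buf = buf[0:n]

		// In-place append: the pointer is written only when growslice has
		// to allocate a new backing array.
		buf = append(buf, 'x')

		// samesafeexpr also matches identical field chains, so this
		// assignment qualifies as well.
		var s struct{ data []byte }
		s.data = s.data[:0]

		_, _ = buf, s
	}
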
diff --git a/src/cmd/gc/walk.c b/src/cmd/gc/walk.c
index 241d7d74a..ff9b36208 100644
--- a/src/cmd/gc/walk.c
+++ b/src/cmd/gc/walk.c
@@ -7,7 +7,7 @@
#include "go.h"
#include "../ld/textflag.h"
-static Node* walkprint(Node*, NodeList**, int);
+static Node* walkprint(Node*, NodeList**);
static Node* writebarrierfn(char*, Type*, Type*);
static Node* applywritebarrier(Node*, NodeList**);
static Node* mapfn(char*, Type*);
@@ -32,6 +32,7 @@ static void walkmul(Node**, NodeList**);
static void walkdiv(Node**, NodeList**);
static int bounded(Node*, int64);
static Mpint mpzero;
+static void walkprintfunc(Node**, NodeList**);
void
walk(Node *fn)
@@ -226,8 +227,7 @@ walkstmt(Node **np)
switch(n->left->op) {
case OPRINT:
case OPRINTN:
- walkexprlist(n->left->list, &n->ninit);
- n->left = walkprint(n->left, &n->ninit, 1);
+ walkprintfunc(&n->left, &n->ninit);
break;
case OCOPY:
n->left = copyany(n->left, &n->ninit, 1);
@@ -260,8 +260,7 @@ walkstmt(Node **np)
switch(n->left->op) {
case OPRINT:
case OPRINTN:
- walkexprlist(n->left->list, &n->ninit);
- n->left = walkprint(n->left, &n->ninit, 1);
+ walkprintfunc(&n->left, &n->ninit);
break;
case OCOPY:
n->left = copyany(n->left, &n->ninit, 1);
@@ -543,7 +542,7 @@ walkexpr(Node **np, NodeList **init)
case OPRINT:
case OPRINTN:
walkexprlist(n->list, init);
- n = walkprint(n, init, 0);
+ n = walkprint(n, init);
goto ret;
case OPANIC:
@@ -614,7 +613,7 @@ walkexpr(Node **np, NodeList **init)
if(oaslit(n, init))
goto ret;
- if(n->right == N)
+ if(n->right == N || iszero(n->right) && !flag_race)
goto ret;
switch(n->right->op) {
@@ -1390,7 +1389,6 @@ walkexpr(Node **np, NodeList **init)
case OMAPLIT:
case OSTRUCTLIT:
case OPTRLIT:
- // XXX TODO do we need to clear var?
var = temp(n->type);
anylit(0, n, var, init);
n = var;
@@ -1494,7 +1492,7 @@ fncall(Node *l, Type *rt)
if(l->ullman >= UINF || l->op == OINDEXMAP)
return 1;
- r.op = 0;
+ memset(&r, 0, sizeof r);
if(needwritebarrier(l, &r))
return 1;
if(eqtype(l->type, rt))
@@ -1758,7 +1756,7 @@ ret:
// generate code for print
static Node*
-walkprint(Node *nn, NodeList **init, int defer)
+walkprint(Node *nn, NodeList **init)
{
Node *r;
Node *n;
@@ -1766,31 +1764,17 @@ walkprint(Node *nn, NodeList **init, int defer)
Node *on;
Type *t;
int notfirst, et, op;
- NodeList *calls, *intypes, *args;
- Fmt fmt;
+ NodeList *calls;
on = nil;
op = nn->op;
all = nn->list;
calls = nil;
notfirst = 0;
- intypes = nil;
- args = nil;
-
- memset(&fmt, 0, sizeof fmt);
- if(defer) {
- // defer print turns into defer printf with format string
- fmtstrinit(&fmt);
- intypes = list(intypes, nod(ODCLFIELD, N, typenod(types[TSTRING])));
- args = list1(nod(OXXX, N, N));
- }
for(l=all; l; l=l->next) {
if(notfirst) {
- if(defer)
- fmtprint(&fmt, " ");
- else
- calls = list(calls, mkcall("printsp", T, init));
+ calls = list(calls, mkcall("printsp", T, init));
}
notfirst = op == OPRINTN;
@@ -1818,122 +1802,63 @@ walkprint(Node *nn, NodeList **init, int defer)
t = n->type;
et = n->type->etype;
if(isinter(n->type)) {
- if(defer) {
- if(isnilinter(n->type))
- fmtprint(&fmt, "%%e");
- else
- fmtprint(&fmt, "%%i");
- } else {
- if(isnilinter(n->type))
- on = syslook("printeface", 1);
- else
- on = syslook("printiface", 1);
- argtype(on, n->type); // any-1
- }
+ if(isnilinter(n->type))
+ on = syslook("printeface", 1);
+ else
+ on = syslook("printiface", 1);
+ argtype(on, n->type); // any-1
} else if(isptr[et] || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR) {
- if(defer) {
- fmtprint(&fmt, "%%p");
- } else {
- on = syslook("printpointer", 1);
- argtype(on, n->type); // any-1
- }
+ on = syslook("printpointer", 1);
+ argtype(on, n->type); // any-1
} else if(isslice(n->type)) {
- if(defer) {
- fmtprint(&fmt, "%%a");
- } else {
- on = syslook("printslice", 1);
- argtype(on, n->type); // any-1
- }
+ on = syslook("printslice", 1);
+ argtype(on, n->type); // any-1
} else if(isint[et]) {
- if(defer) {
- if(et == TUINT64)
- fmtprint(&fmt, "%%U");
- else {
- fmtprint(&fmt, "%%D");
- t = types[TINT64];
- }
- } else {
- if(et == TUINT64) {
- if((t->sym->pkg == runtimepkg || compiling_runtime) && strcmp(t->sym->name, "hex") == 0)
- on = syslook("printhex", 0);
- else
- on = syslook("printuint", 0);
- } else
- on = syslook("printint", 0);
- }
- } else if(isfloat[et]) {
- if(defer) {
- fmtprint(&fmt, "%%f");
- t = types[TFLOAT64];
+ if(et == TUINT64) {
+ if((t->sym->pkg == runtimepkg || compiling_runtime) && strcmp(t->sym->name, "hex") == 0)
+ on = syslook("printhex", 0);
+ else
+ on = syslook("printuint", 0);
} else
- on = syslook("printfloat", 0);
+ on = syslook("printint", 0);
+ } else if(isfloat[et]) {
+ on = syslook("printfloat", 0);
} else if(iscomplex[et]) {
- if(defer) {
- fmtprint(&fmt, "%%C");
- t = types[TCOMPLEX128];
- } else
- on = syslook("printcomplex", 0);
+ on = syslook("printcomplex", 0);
} else if(et == TBOOL) {
- if(defer)
- fmtprint(&fmt, "%%t");
- else
- on = syslook("printbool", 0);
+ on = syslook("printbool", 0);
} else if(et == TSTRING) {
- if(defer)
- fmtprint(&fmt, "%%S");
- else
- on = syslook("printstring", 0);
+ on = syslook("printstring", 0);
} else {
badtype(OPRINT, n->type, T);
continue;
}
- if(!defer) {
- t = *getinarg(on->type);
- if(t != nil)
- t = t->type;
- if(t != nil)
- t = t->type;
- }
+ t = *getinarg(on->type);
+ if(t != nil)
+ t = t->type;
+ if(t != nil)
+ t = t->type;
if(!eqtype(t, n->type)) {
n = nod(OCONV, n, N);
n->type = t;
}
- if(defer) {
- intypes = list(intypes, nod(ODCLFIELD, N, typenod(t)));
- args = list(args, n);
- } else {
- r = nod(OCALL, on, N);
- r->list = list1(n);
- calls = list(calls, r);
- }
+ r = nod(OCALL, on, N);
+ r->list = list1(n);
+ calls = list(calls, r);
}
- if(defer) {
- if(op == OPRINTN)
- fmtprint(&fmt, "\n");
- on = syslook("goprintf", 1);
- on->type = functype(nil, intypes, nil);
- args->n = nod(OLITERAL, N, N);
- args->n->val.ctype = CTSTR;
- args->n->val.u.sval = strlit(fmtstrflush(&fmt));
- r = nod(OCALL, on, N);
- r->list = args;
- typecheck(&r, Etop);
- walkexpr(&r, init);
- } else {
- if(op == OPRINTN)
- calls = list(calls, mkcall("printnl", T, nil));
- typechecklist(calls, Etop);
- walkexprlist(calls, init);
+ if(op == OPRINTN)
+ calls = list(calls, mkcall("printnl", T, nil));
+ typechecklist(calls, Etop);
+ walkexprlist(calls, init);
- r = nod(OEMPTY, N, N);
- typecheck(&r, Etop);
- walkexpr(&r, init);
- r->ninit = calls;
- }
+ r = nod(OEMPTY, N, N);
+ typecheck(&r, Etop);
+ walkexpr(&r, init);
+ r->ninit = calls;
return r;
}
@@ -2009,8 +1934,8 @@ needwritebarrier(Node *l, Node *r)
if(isstack(l))
return 0;
- // No write barrier for zeroing.
- if(r == N)
+ // No write barrier for implicit or explicit zeroing.
+ if(r == N || iszero(r))
return 0;
// No write barrier for initialization to constant.
@@ -2031,6 +1956,28 @@ needwritebarrier(Node *l, Node *r)
if(r->op == OADDR && isglobal(r->left))
return 0;
+ // No write barrier for reslice: x = x[0:y] or x = append(x, ...).
+ // Both are compiled to modify x directly.
+ // In the case of append, a write barrier may still be needed
+ // if the underlying array grows, but the append code can
+ // generate the write barrier directly in that case.
+ // (It does not yet, but the cost of the write barrier will be
+ // small compared to the cost of the allocation.)
+ if(r->reslice) {
+ switch(r->op) {
+ case OSLICE:
+ case OSLICE3:
+ case OSLICESTR:
+ case OAPPEND:
+ break;
+ default:
+ dump("bad reslice-l", l);
+ dump("bad reslice-r", r);
+ break;
+ }
+ return 0;
+ }
+
// Otherwise, be conservative and use write barrier.
return 1;
}
@@ -3208,7 +3155,7 @@ countfield(Type *t)
static void
walkcompare(Node **np, NodeList **init)
{
- Node *n, *l, *r, *call, *a, *li, *ri, *expr;
+ Node *n, *l, *r, *call, *a, *li, *ri, *expr, *cmpl, *cmpr;
int andor, i;
Type *t, *t1;
@@ -3228,18 +3175,25 @@ walkcompare(Node **np, NodeList **init)
break;
}
- if(!islvalue(n->left) || !islvalue(n->right)) {
- fatal("arguments of comparison must be lvalues");
+ cmpl = n->left;
+ while(cmpl != N && cmpl->op == OCONVNOP)
+ cmpl = cmpl->left;
+ cmpr = n->right;
+ while(cmpr != N && cmpr->op == OCONVNOP)
+ cmpr = cmpr->left;
+
+ if(!islvalue(cmpl) || !islvalue(cmpr)) {
+ fatal("arguments of comparison must be lvalues - %N %N", cmpl, cmpr);
}
l = temp(ptrto(t));
- a = nod(OAS, l, nod(OADDR, n->left, N));
+ a = nod(OAS, l, nod(OADDR, cmpl, N));
a->right->etype = 1; // addr does not escape
typecheck(&a, Etop);
*init = list(*init, a);
r = temp(ptrto(t));
- a = nod(OAS, r, nod(OADDR, n->right, N));
+ a = nod(OAS, r, nod(OADDR, cmpr, N));
a->right->etype = 1; // addr does not escape
typecheck(&a, Etop);
*init = list(*init, a);
@@ -3913,3 +3867,71 @@ candiscard(Node *n)
return 1;
}
+
+// rewrite
+// print(x, y, z)
+// into
+// func(a1, a2, a3) {
+// print(a1, a2, a3)
+// }(x, y, z)
+// and same for println.
+static void
+walkprintfunc(Node **np, NodeList **init)
+{
+ Node *n;
+ Node *a, *fn, *t, *oldfn;
+ NodeList *l, *printargs;
+ int num;
+ char buf[100];
+ static int prgen;
+
+ n = *np;
+
+ if(n->ninit != nil) {
+ walkstmtlist(n->ninit);
+ *init = concat(*init, n->ninit);
+ n->ninit = nil;
+ }
+
+ t = nod(OTFUNC, N, N);
+ num = 0;
+ printargs = nil;
+ for(l=n->list; l != nil; l=l->next) {
+ snprint(buf, sizeof buf, "a%d", num++);
+ a = nod(ODCLFIELD, newname(lookup(buf)), typenod(l->n->type));
+ t->list = list(t->list, a);
+ printargs = list(printargs, a->left);
+ }
+
+ fn = nod(ODCLFUNC, N, N);
+ snprint(buf, sizeof buf, "print·%d", ++prgen);
+ fn->nname = newname(lookup(buf));
+ fn->nname->defn = fn;
+ fn->nname->ntype = t;
+ declare(fn->nname, PFUNC);
+
+ oldfn = curfn;
+ curfn = nil;
+ funchdr(fn);
+
+ a = nod(n->op, N, N);
+ a->list = printargs;
+ typecheck(&a, Etop);
+ walkstmt(&a);
+
+ fn->nbody = list1(a);
+
+ funcbody(fn);
+
+ typecheck(&fn, Etop);
+ typechecklist(fn->nbody, Etop);
+ xtop = list(xtop, fn);
+ curfn = oldfn;
+
+ a = nod(OCALL, N, N);
+ a->left = fn->nname;
+ a->list = n->list;
+ typecheck(&a, Etop);
+ walkexpr(&a, init);
+ *np = a;
+}
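
A sketch of the rewrite's effect on user code (the generated function and its argument names are compiler-internal; this is not literal compiler output):

	package main

	func main() {
		x, y := 1, "go"

		// As written by the user:
		defer println(x, y)

		// Compiled roughly as if written below: the arguments are
		// evaluated at the defer statement, and the printing happens
		// when the deferred call runs.
		defer func(a1 int, a2 string) {
			println(a1, a2)
		}(x, y)
	}
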
diff --git a/src/cmd/go/build.go b/src/cmd/go/build.go
index 49b84709e..79a27116a 100644
--- a/src/cmd/go/build.go
+++ b/src/cmd/go/build.go
@@ -1945,6 +1945,7 @@ func (tools gccgoToolchain) ld(b *builder, p *Package, out string, allactions []
}
ldflags = append(ldflags, afiles...)
ldflags = append(ldflags, cgoldflags...)
+ ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...)
ldflags = append(ldflags, p.CgoLDFLAGS...)
if usesCgo && goos == "linux" {
ldflags = append(ldflags, "-Wl,-E")
diff --git a/src/cmd/go/generate.go b/src/cmd/go/generate.go
index 167758207..4227abbe7 100644
--- a/src/cmd/go/generate.go
+++ b/src/cmd/go/generate.go
@@ -169,6 +169,7 @@ func (g *Generator) run() (ok bool) {
if e != stop {
panic(e)
}
+ setExitStatus(1)
}
}()
g.dir, g.file = filepath.Split(g.path)
@@ -267,7 +268,8 @@ Words:
var stop = fmt.Errorf("error in generation")
// errorf logs an error message prefixed with the file and line number.
-// It then exits the program because generation stops at the first error.
+// It then exits the program (with exit status 1) because generation stops
+// at the first error.
func (g *Generator) errorf(format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, "%s:%d: %s\n", shortPath(g.path), g.lineNum,
fmt.Sprintf(format, args...))
diff --git a/src/cmd/go/get.go b/src/cmd/go/get.go
index 264033941..86e169761 100644
--- a/src/cmd/go/get.go
+++ b/src/cmd/go/get.go
@@ -16,7 +16,7 @@ import (
)
var cmdGet = &Command{
- UsageLine: "get [-d] [-fix] [-t] [-u] [build flags] [packages]",
+ UsageLine: "get [-d] [-f] [-fix] [-t] [-u] [build flags] [packages]",
Short: "download and install packages and dependencies",
Long: `
Get downloads and installs the packages named by the import paths,
@@ -25,6 +25,11 @@ along with their dependencies.
The -d flag instructs get to stop after downloading the packages; that is,
it instructs get not to install the packages.
+The -f flag, valid only when -u is set, forces get -u not to verify that
+each package has been checked out from the source control repository
+implied by its import path. This can be useful if the source is a local fork
+of the original.
+
The -fix flag instructs get to run the fix tool on the downloaded packages
before resolving dependencies or building the code.
@@ -53,6 +58,7 @@ See also: go build, go install, go clean.
}
var getD = cmdGet.Flag.Bool("d", false, "")
+var getF = cmdGet.Flag.Bool("f", false, "")
var getT = cmdGet.Flag.Bool("t", false, "")
var getU = cmdGet.Flag.Bool("u", false, "")
var getFix = cmdGet.Flag.Bool("fix", false, "")
@@ -63,6 +69,10 @@ func init() {
}
func runGet(cmd *Command, args []string) {
+ if *getF && !*getU {
+ fatalf("go get: cannot use -f flag without -u")
+ }
+
// Phase 1. Download/update.
var stk importStack
for _, arg := range downloadPaths(args) {
@@ -268,12 +278,19 @@ func downloadPackage(p *Package) error {
repo = "<local>" // should be unused; make distinctive
// Double-check where it came from.
- if *getU && vcs.remoteRepo != nil {
+ if *getU && vcs.remoteRepo != nil && !*getF {
dir := filepath.Join(p.build.SrcRoot, rootPath)
if remote, err := vcs.remoteRepo(vcs, dir); err == nil {
if rr, err := repoRootForImportPath(p.ImportPath); err == nil {
- if remote != rr.repo {
- return fmt.Errorf("%s is from %s, should be from %s", dir, remote, rr.repo)
+ repo := rr.repo
+ if rr.vcs.resolveRepo != nil {
+ resolved, err := rr.vcs.resolveRepo(rr.vcs, dir, repo)
+ if err == nil {
+ repo = resolved
+ }
+ }
+ if remote != repo {
+ return fmt.Errorf("%s is from %s, should be from %s", dir, remote, repo)
}
}
}
diff --git a/src/cmd/go/test.bash b/src/cmd/go/test.bash
index 652ef3b5b..2b5230b1a 100755
--- a/src/cmd/go/test.bash
+++ b/src/cmd/go/test.bash
@@ -219,6 +219,16 @@ q' | ed $d/src/$config >/dev/null 2>&1
cat $d/err
ok=false
fi
+
+ if GOPATH=$d ./testgo get -d -f -u $url 2>$d/err; then
+ echo "go get -d -u $url succeeded with wrong remote repo"
+ cat $d/err
+ ok=false
+ elif ! egrep -i 'validating server certificate|not found' $d/err >/dev/null; then
+ echo "go get -d -f -u $url failed for wrong reason"
+ cat $d/err
+ ok=false
+ fi
fi
rm -rf $d
}
diff --git a/src/cmd/go/vcs.go b/src/cmd/go/vcs.go
index 0834a7d19..1cac61338 100644
--- a/src/cmd/go/vcs.go
+++ b/src/cmd/go/vcs.go
@@ -34,7 +34,8 @@ type vcsCmd struct {
scheme []string
pingCmd string
- remoteRepo func(v *vcsCmd, rootDir string) (remoteRepo string, err error)
+ remoteRepo func(v *vcsCmd, rootDir string) (remoteRepo string, err error)
+ resolveRepo func(v *vcsCmd, rootDir, remoteRepo string) (realRepo string, err error)
}
// A tagCmd describes a command to list available tags
@@ -164,8 +165,51 @@ var vcsBzr = &vcsCmd{
tagSyncCmd: "update -r {tag}",
tagSyncDefault: "update -r revno:-1",
- scheme: []string{"https", "http", "bzr", "bzr+ssh"},
- pingCmd: "info {scheme}://{repo}",
+ scheme: []string{"https", "http", "bzr", "bzr+ssh"},
+ pingCmd: "info {scheme}://{repo}",
+ remoteRepo: bzrRemoteRepo,
+ resolveRepo: bzrResolveRepo,
+}
+
+func bzrRemoteRepo(vcsBzr *vcsCmd, rootDir string) (remoteRepo string, err error) {
+ outb, err := vcsBzr.runOutput(rootDir, "config parent_location")
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(string(outb)), nil
+}
+
+func bzrResolveRepo(vcsBzr *vcsCmd, rootDir, remoteRepo string) (realRepo string, err error) {
+ outb, err := vcsBzr.runOutput(rootDir, "info "+remoteRepo)
+ if err != nil {
+ return "", err
+ }
+ out := string(outb)
+
+ // Expect:
+ // ...
+ // (branch root|repository branch): <URL>
+ // ...
+
+ found := false
+ for _, prefix := range []string{"\n branch root: ", "\n repository branch: "} {
+ i := strings.Index(out, prefix)
+ if i >= 0 {
+ out = out[i+len(prefix):]
+ found = true
+ break
+ }
+ }
+ if !found {
+ return "", fmt.Errorf("unable to parse output of bzr info")
+ }
+
+ i := strings.Index(out, "\n")
+ if i < 0 {
+ return "", fmt.Errorf("unable to parse output of bzr info")
+ }
+ out = out[:i]
+ return strings.TrimSpace(string(out)), nil
}
// vcsSvn describes how to use Subversion.
diff --git a/src/cmd/ld/data.c b/src/cmd/ld/data.c
index 9983a9281..61847546a 100644
--- a/src/cmd/ld/data.c
+++ b/src/cmd/ld/data.c
@@ -633,9 +633,7 @@ addstrdata(char *name, char *value)
s->dupok = 1;
reachable = s->reachable;
addaddr(ctxt, s, sp);
- adduint32(ctxt, s, strlen(value));
- if(PtrSize == 8)
- adduint32(ctxt, s, 0); // round struct to pointer width
+ adduintxx(ctxt, s, strlen(value), PtrSize);
// addstring, addaddr, etc., mark the symbols as reachable.
// In this case that is not necessarily true, so stick to what
diff --git a/src/cmd/ld/ldelf.c b/src/cmd/ld/ldelf.c
index 35f8b4985..b5d081949 100644
--- a/src/cmd/ld/ldelf.c
+++ b/src/cmd/ld/ldelf.c
@@ -819,7 +819,7 @@ readsym(ElfObj *obj, int i, ElfSym *sym, int needSym)
}
break;
case ElfSymBindLocal:
- if(!(thechar == '5' && (strcmp(sym->name, "$a") == 0 || strcmp(sym->name, "$d") == 0))) // binutils for arm generate these mapping symbols, ignore these
+ if(!(thechar == '5' && (strncmp(sym->name, "$a", 2) == 0 || strncmp(sym->name, "$d", 2) == 0))) // binutils for arm generate these mapping symbols, ignore these
if(needSym) {
// local names and hidden visibility global names are unique
// and should only reference by its index, not name, so we
diff --git a/src/cmd/objdump/main.go b/src/cmd/objdump/main.go
index aafc50111..0f66f20a4 100644
--- a/src/cmd/objdump/main.go
+++ b/src/cmd/objdump/main.go
@@ -159,7 +159,7 @@ func dump(tab *gosym.Table, lookup lookupFunc, disasm disasmFunc, goarch string,
printed := false
for _, sym := range syms {
- if sym.Code != 'T' || sym.Size == 0 || sym.Name == "_text" || sym.Name == "text" || sym.Addr < textStart || symRE != nil && !symRE.MatchString(sym.Name) {
+ if (sym.Code != 'T' && sym.Code != 't') || sym.Size == 0 || sym.Name == "_text" || sym.Name == "text" || sym.Addr < textStart || symRE != nil && !symRE.MatchString(sym.Name) {
continue
}
if sym.Addr >= textStart+uint64(len(textData)) || sym.Addr+uint64(sym.Size) > textStart+uint64(len(textData)) {
diff --git a/src/cmd/objdump/objdump_test.go b/src/cmd/objdump/objdump_test.go
index 541085626..0a2d2565a 100644
--- a/src/cmd/objdump/objdump_test.go
+++ b/src/cmd/objdump/objdump_test.go
@@ -157,12 +157,15 @@ var armNeed = []string{
// binary for the current system (only) and test that objdump
// can handle that one.
-func TestDisasm(t *testing.T) {
+func testDisasm(t *testing.T, flags ...string) {
tmp, exe := buildObjdump(t)
defer os.RemoveAll(tmp)
hello := filepath.Join(tmp, "hello.exe")
- out, err := exec.Command("go", "build", "-o", hello, "testdata/fmthello.go").CombinedOutput()
+ args := []string{"build", "-o", hello}
+ args = append(args, flags...)
+ args = append(args, "testdata/fmthello.go")
+ out, err := exec.Command("go", args...).CombinedOutput()
if err != nil {
t.Fatalf("go build fmthello.go: %v\n%s", err, out)
}
@@ -194,3 +197,15 @@ func TestDisasm(t *testing.T) {
t.Logf("full disassembly:\n%s", text)
}
}
+
+func TestDisasm(t *testing.T) {
+ testDisasm(t)
+}
+
+func TestDisasmExtld(t *testing.T) {
+ switch runtime.GOOS {
+ case "plan9", "windows":
+ t.Skipf("skipping on %s", runtime.GOOS)
+ }
+ testDisasm(t, "-ldflags=-linkmode=external")
+}
diff --git a/src/compress/flate/inflate.go b/src/compress/flate/inflate.go
index a7fe94c50..76519bbf4 100644
--- a/src/compress/flate/inflate.go
+++ b/src/compress/flate/inflate.go
@@ -56,6 +56,15 @@ func (e *WriteError) Error() string {
return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
}
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+ // Reset discards any buffered data and resets the Resetter as if it was
+ // newly initialized with the given reader.
+ Reset(r io.Reader, dict []byte) error
+}
+
// Note that much of the implementation of huffmanDecoder is also copied
// into gen.go (in package main) for the purpose of precomputing the
// fixed huffman tables so they can be included statically.
@@ -679,12 +688,28 @@ func makeReader(r io.Reader) Reader {
return bufio.NewReader(r)
}
+func (f *decompressor) Reset(r io.Reader, dict []byte) error {
+ *f = decompressor{
+ r: makeReader(r),
+ bits: f.bits,
+ codebits: f.codebits,
+ hist: f.hist,
+ step: (*decompressor).nextBlock,
+ }
+ if dict != nil {
+ f.setDict(dict)
+ }
+ return nil
+}
+
// NewReader returns a new ReadCloser that can be used
// to read the uncompressed version of r.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser
// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) io.ReadCloser {
var f decompressor
f.bits = new([maxLit + maxDist]int)
@@ -700,6 +725,8 @@ func NewReader(r io.Reader) io.ReadCloser {
// the uncompressed data stream started with the given dictionary,
// which has already been read. NewReaderDict is typically used
// to read data compressed by NewWriterDict.
+//
+// The ReadCloser returned by NewReaderDict also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
var f decompressor
f.r = makeReader(r)
diff --git a/src/compress/flate/inflate_test.go b/src/compress/flate/inflate_test.go
new file mode 100644
index 000000000..9f25d30b3
--- /dev/null
+++ b/src/compress/flate/inflate_test.go
@@ -0,0 +1,39 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "io"
+ "testing"
+)
+
+func TestReset(t *testing.T) {
+ ss := []string{
+ "lorem ipsum izzle fo rizzle",
+ "the quick brown fox jumped over",
+ }
+
+ deflated := make([]bytes.Buffer, 2)
+ for i, s := range ss {
+ w, _ := NewWriter(&deflated[i], 1)
+ w.Write([]byte(s))
+ w.Close()
+ }
+
+ inflated := make([]bytes.Buffer, 2)
+
+ f := NewReader(&deflated[0])
+ io.Copy(&inflated[0], f)
+ f.(Resetter).Reset(&deflated[1], nil)
+ io.Copy(&inflated[1], f)
+ f.Close()
+
+ for i, s := range ss {
+ if s != inflated[i].String() {
+ t.Errorf("inflated[%d]:\ngot %q\nwant %q", i, inflated[i], s)
+ }
+ }
+}
diff --git a/src/compress/gzip/gunzip.go b/src/compress/gzip/gunzip.go
index fc08c7a48..72ee55c4f 100644
--- a/src/compress/gzip/gunzip.go
+++ b/src/compress/gzip/gunzip.go
@@ -74,6 +74,7 @@ type Reader struct {
flg byte
buf [512]byte
err error
+ multistream bool
}
// NewReader creates a new Reader reading the given reader.
@@ -83,6 +84,7 @@ type Reader struct {
func NewReader(r io.Reader) (*Reader, error) {
z := new(Reader)
z.r = makeReader(r)
+ z.multistream = true
z.digest = crc32.NewIEEE()
if err := z.readHeader(true); err != nil {
return nil, err
@@ -102,9 +104,30 @@ func (z *Reader) Reset(r io.Reader) error {
}
z.size = 0
z.err = nil
+ z.multistream = true
return z.readHeader(true)
}
+// Multistream controls whether the reader supports multistream files.
+//
+// If enabled (the default), the Reader expects the input to be a sequence
+// of individually gzipped data streams, each with its own header and
+// trailer, ending at EOF. The effect is that the concatenation of a sequence
+// of gzipped files is treated as equivalent to the gzip of the concatenation
+// of the sequence. This is standard behavior for gzip readers.
+//
+// Calling Multistream(false) disables this behavior; disabling the behavior
+// can be useful when reading file formats that distinguish individual gzip
+// data streams or mix gzip data streams with other data streams.
+// In this mode, when the Reader reaches the end of the data stream,
+// Read returns io.EOF. If the underlying reader implements io.ByteReader,
+// it will be left positioned just after the gzip stream.
+// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
+// If there is no next stream, z.Reset(r) will return io.EOF.
+func (z *Reader) Multistream(ok bool) {
+ z.multistream = ok
+}
+
// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950).
func get4(p []byte) uint32 {
return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
@@ -208,7 +231,11 @@ func (z *Reader) readHeader(save bool) error {
}
z.digest.Reset()
- z.decompressor = flate.NewReader(z.r)
+ if z.decompressor == nil {
+ z.decompressor = flate.NewReader(z.r)
+ } else {
+ z.decompressor.(flate.Resetter).Reset(z.r, nil)
+ }
return nil
}
@@ -241,6 +268,10 @@ func (z *Reader) Read(p []byte) (n int, err error) {
}
// File is ok; is there another?
+ if !z.multistream {
+ return 0, io.EOF
+ }
+
if err = z.readHeader(false); err != nil {
z.err = err
return
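
A minimal usage sketch of the calling pattern the doc comment describes, assuming the input is two concatenated gzip members held in a bytes.Reader (which supplies the io.ByteReader positioning guarantee):

	package main

	import (
		"bytes"
		"compress/gzip"
		"fmt"
		"io"
		"io/ioutil"
	)

	func main() {
		// Build two concatenated gzip members (setup only).
		var buf bytes.Buffer
		for _, s := range []string{"hello\n", "world\n"} {
			w := gzip.NewWriter(&buf)
			w.Write([]byte(s))
			w.Close()
		}
		br := bytes.NewReader(buf.Bytes()) // implements io.ByteReader

		zr, err := gzip.NewReader(br)
		if err != nil {
			panic(err)
		}
		for {
			// Read exactly one member, then position br at the next one.
			zr.Multistream(false)
			data, err := ioutil.ReadAll(zr)
			if err != nil {
				panic(err)
			}
			fmt.Printf("member: %q\n", data)
			if err = zr.Reset(br); err == io.EOF {
				break // no next stream
			}
			if err != nil {
				panic(err)
			}
		}
	}
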
diff --git a/src/compress/gzip/gunzip_test.go b/src/compress/gzip/gunzip_test.go
index 2471038f5..0636dec9a 100644
--- a/src/compress/gzip/gunzip_test.go
+++ b/src/compress/gzip/gunzip_test.go
@@ -9,6 +9,7 @@ import (
"io"
"io/ioutil"
"os"
+ "strings"
"testing"
"time"
)
@@ -367,3 +368,43 @@ func TestInitialReset(t *testing.T) {
t.Errorf("got %q want %q", s, gunzipTests[1].raw)
}
}
+
+func TestMultistreamFalse(t *testing.T) {
+ // Find concatenation test.
+ var tt gunzipTest
+ for _, tt = range gunzipTests {
+ if strings.HasSuffix(tt.desc, " x2") {
+ goto Found
+ }
+ }
+ t.Fatal("cannot find hello.txt x2 in gunzip tests")
+
+Found:
+ br := bytes.NewReader(tt.gzip)
+ var r Reader
+ if err := r.Reset(br); err != nil {
+ t.Fatalf("first reset: %v", err)
+ }
+
+ // Expect two streams with "hello world\n", then real EOF.
+ const hello = "hello world\n"
+
+ r.Multistream(false)
+ data, err := ioutil.ReadAll(&r)
+ if string(data) != hello || err != nil {
+ t.Fatalf("first stream = %q, %v, want %q, %v", string(data), err, hello, nil)
+ }
+
+ if err := r.Reset(br); err != nil {
+ t.Fatalf("second reset: %v", err)
+ }
+ r.Multistream(false)
+ data, err = ioutil.ReadAll(&r)
+ if string(data) != hello || err != nil {
+ t.Fatalf("second stream = %q, %v, want %q, %v", string(data), err, hello, nil)
+ }
+
+ if err := r.Reset(br); err != io.EOF {
+ t.Fatalf("third reset: err=%v, want io.EOF", err)
+ }
+}
diff --git a/src/compress/zlib/reader.go b/src/compress/zlib/reader.go
index e1191816d..816f1bf6b 100644
--- a/src/compress/zlib/reader.go
+++ b/src/compress/zlib/reader.go
@@ -51,10 +51,21 @@ type reader struct {
scratch [4]byte
}
-// NewReader creates a new io.ReadCloser.
-// Reads from the returned io.ReadCloser read and decompress data from r.
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+ // Reset discards any buffered data and resets the Resetter as if it was
+ // newly initialized with the given reader.
+ Reset(r io.Reader, dict []byte) error
+}
+
+// NewReader creates a new ReadCloser.
+// Reads from the returned ReadCloser read and decompress data from r.
// The implementation buffers input and may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser when done.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) (io.ReadCloser, error) {
return NewReaderDict(r, nil)
}
@@ -62,35 +73,14 @@ func NewReader(r io.Reader) (io.ReadCloser, error) {
// NewReaderDict is like NewReader but uses a preset dictionary.
// NewReaderDict ignores the dictionary if the compressed data does not refer to it.
// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary.
+//
+// The ReadCloser returned by NewReaderDict also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) {
z := new(reader)
- if fr, ok := r.(flate.Reader); ok {
- z.r = fr
- } else {
- z.r = bufio.NewReader(r)
- }
- _, err := io.ReadFull(z.r, z.scratch[0:2])
+ err := z.Reset(r, dict)
if err != nil {
return nil, err
}
- h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
- if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
- return nil, ErrHeader
- }
- if z.scratch[1]&0x20 != 0 {
- _, err = io.ReadFull(z.r, z.scratch[0:4])
- if err != nil {
- return nil, err
- }
- checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
- if checksum != adler32.Checksum(dict) {
- return nil, ErrDictionary
- }
- z.decompressor = flate.NewReaderDict(z.r, dict)
- } else {
- z.decompressor = flate.NewReader(z.r)
- }
- z.digest = adler32.New()
return z, nil
}
@@ -131,3 +121,41 @@ func (z *reader) Close() error {
z.err = z.decompressor.Close()
return z.err
}
+
+func (z *reader) Reset(r io.Reader, dict []byte) error {
+ if fr, ok := r.(flate.Reader); ok {
+ z.r = fr
+ } else {
+ z.r = bufio.NewReader(r)
+ }
+ _, err := io.ReadFull(z.r, z.scratch[0:2])
+ if err != nil {
+ return err
+ }
+ h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
+ if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
+ return ErrHeader
+ }
+ haveDict := z.scratch[1]&0x20 != 0
+ if haveDict {
+ _, err = io.ReadFull(z.r, z.scratch[0:4])
+ if err != nil {
+ return err
+ }
+ checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
+ if checksum != adler32.Checksum(dict) {
+ return ErrDictionary
+ }
+ }
+ if z.decompressor == nil {
+ if haveDict {
+ z.decompressor = flate.NewReaderDict(z.r, dict)
+ } else {
+ z.decompressor = flate.NewReader(z.r)
+ }
+ } else {
+ z.decompressor.(flate.Resetter).Reset(z.r, dict)
+ }
+ z.digest = adler32.New()
+ return nil
+}
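
A minimal sketch of the reuse this enables: one ReadCloser decompresses two independent zlib streams via the Resetter interface (the payloads are illustrative):

	package main

	import (
		"bytes"
		"compress/zlib"
		"io"
		"os"
	)

	func main() {
		// Compress two independent payloads (setup only).
		var in1, in2 bytes.Buffer
		for _, p := range []struct {
			buf *bytes.Buffer
			s   string
		}{{&in1, "hello\n"}, {&in2, "world\n"}} {
			w := zlib.NewWriter(p.buf)
			w.Write([]byte(p.s))
			w.Close()
		}

		// Allocate one reader, then switch it to the second stream
		// instead of allocating a new one.
		r, err := zlib.NewReader(&in1)
		if err != nil {
			panic(err)
		}
		io.Copy(os.Stdout, r)
		if err := r.(zlib.Resetter).Reset(&in2, nil); err != nil {
			panic(err)
		}
		io.Copy(os.Stdout, r)
		r.Close()
	}
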
diff --git a/src/crypto/tls/alert.go b/src/crypto/tls/alert.go
index 0856311e4..3de4834d3 100644
--- a/src/crypto/tls/alert.go
+++ b/src/crypto/tls/alert.go
@@ -35,6 +35,7 @@ const (
alertProtocolVersion alert = 70
alertInsufficientSecurity alert = 71
alertInternalError alert = 80
+ alertInappropriateFallback alert = 86
alertUserCanceled alert = 90
alertNoRenegotiation alert = 100
)
@@ -60,6 +61,7 @@ var alertText = map[alert]string{
alertProtocolVersion: "protocol version not supported",
alertInsufficientSecurity: "insufficient security level",
alertInternalError: "internal error",
+ alertInappropriateFallback: "inappropriate fallback",
alertUserCanceled: "user canceled",
alertNoRenegotiation: "no renegotiation",
}
diff --git a/src/crypto/tls/cipher_suites.go b/src/crypto/tls/cipher_suites.go
index 39a51459d..226e06d68 100644
--- a/src/crypto/tls/cipher_suites.go
+++ b/src/crypto/tls/cipher_suites.go
@@ -267,4 +267,9 @@ const (
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b
+
+ // TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator
+ // that the client is doing version fallback. See
+ // https://tools.ietf.org/html/draft-ietf-tls-downgrade-scsv-00.
+ TLS_FALLBACK_SCSV uint16 = 0x5600
)
diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go
index 520675dfb..0d907656c 100644
--- a/src/crypto/tls/handshake_server.go
+++ b/src/crypto/tls/handshake_server.go
@@ -224,6 +224,18 @@ Curves:
return false, errors.New("tls: no cipher suite supported by both client and server")
}
+ // See https://tools.ietf.org/html/draft-ietf-tls-downgrade-scsv-00.
+ for _, id := range hs.clientHello.cipherSuites {
+ if id == TLS_FALLBACK_SCSV {
+ // The client is doing a fallback connection.
+ if hs.clientHello.vers < c.config.MaxVersion {
+ c.sendAlert(alertInappropriateFallback)
+ return false, errors.New("tls: client using inappropriate protocol fallback")
+ }
+ break
+ }
+ }
+
return false, nil
}
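
A standalone restatement of the server-side rule in plain Go, for illustration only (checkFallback is a hypothetical helper; the version constants are the standard TLS wire values):

	package main

	import (
		"errors"
		"fmt"
	)

	const tlsFallbackSCSV = 0x5600 // TLS_FALLBACK_SCSV

	// checkFallback mirrors the check above: a client that advertises the
	// SCSV while offering less than the server's maximum version is doing
	// an inappropriate fallback.
	func checkFallback(clientSuites []uint16, clientVers, maxVers uint16) error {
		for _, id := range clientSuites {
			if id == tlsFallbackSCSV {
				if clientVers < maxVers {
					return errors.New("tls: client using inappropriate protocol fallback")
				}
				break
			}
		}
		return nil
	}

	func main() {
		// TLS 1.1 (0x0302) offered with the SCSV against a TLS 1.2 (0x0303) server.
		err := checkFallback([]uint16{0x002f, tlsFallbackSCSV}, 0x0302, 0x0303)
		fmt.Println(err)
	}
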
diff --git a/src/crypto/tls/handshake_server_test.go b/src/crypto/tls/handshake_server_test.go
index 580fbc0bf..0338af457 100644
--- a/src/crypto/tls/handshake_server_test.go
+++ b/src/crypto/tls/handshake_server_test.go
@@ -260,6 +260,9 @@ type serverTest struct {
// expectAlert, if true, indicates that a fatal alert should be returned
// when handshaking with the server.
expectAlert bool
+ // expectHandshakeErrorIncluding, when not empty, contains a string
+ // that must be a substring of the error resulting from the handshake.
+ expectHandshakeErrorIncluding string
// validate, if not nil, is a function that will be called with the
// ConnectionState of the resulting connection. It returns false if the
// ConnectionState is unacceptable.
@@ -362,9 +365,17 @@ func (test *serverTest) run(t *testing.T, write bool) {
server := Server(serverConn, config)
connStateChan := make(chan ConnectionState, 1)
go func() {
- if _, err := server.Write([]byte("hello, world\n")); err != nil {
+ var err error
+ if _, err = server.Write([]byte("hello, world\n")); err != nil {
t.Logf("Error from Server.Write: %s", err)
}
+ if len(test.expectHandshakeErrorIncluding) > 0 {
+ if err == nil {
+ t.Errorf("Error expected, but no error returned")
+ } else if s := err.Error(); !strings.Contains(s, test.expectHandshakeErrorIncluding) {
+ t.Errorf("Error expected containing '%s' but got '%s'", test.expectHandshakeErrorIncluding, s)
+ }
+ }
server.Close()
serverConn.Close()
connStateChan <- server.ConnectionState()
@@ -429,7 +440,9 @@ func (test *serverTest) run(t *testing.T, write bool) {
recordingConn.Close()
if len(recordingConn.flows) < 3 {
childProcess.Stdout.(*bytes.Buffer).WriteTo(os.Stdout)
- t.Fatalf("Handshake failed")
+ if len(test.expectHandshakeErrorIncluding) == 0 {
+ t.Fatalf("Handshake failed")
+ }
}
recordingConn.WriteTo(out)
fmt.Printf("Wrote %s\n", path)
@@ -702,6 +715,16 @@ func TestResumptionDisabled(t *testing.T) {
// file for ResumeDisabled does not include a resumption handshake.
}
+func TestFallbackSCSV(t *testing.T) {
+ test := &serverTest{
+ name: "FallbackSCSV",
+ // OpenSSL 1.0.1j is needed for the -fallback_scsv option.
+ command: []string{"openssl", "s_client", "-fallback_scsv"},
+ expectHandshakeErrorIncluding: "inappropriate protocol fallback",
+ }
+ runServerTestTLS11(t, test)
+}
+
// cert.pem and key.pem were generated with generate_cert.go
// Thus, they have no ExtKeyUsage fields and trigger an error
// when verification is turned on.
diff --git a/src/crypto/tls/testdata/Server-TLSv11-FallbackSCSV b/src/crypto/tls/testdata/Server-TLSv11-FallbackSCSV
new file mode 100644
index 000000000..2d8dfbc3b
--- /dev/null
+++ b/src/crypto/tls/testdata/Server-TLSv11-FallbackSCSV
@@ -0,0 +1,17 @@
+>>> Flow 1 (client to server)
+00000000 16 03 01 00 d4 01 00 00 d0 03 02 74 2d da 6d 98 |...........t-.m.|
+00000010 ad 3e a5 ec 90 ea d1 5b f0 e0 a7 45 33 d9 5e 8d |.>.....[...E3.^.|
+00000020 0f 1d 01 16 6d 00 31 65 ed 50 88 00 00 5e c0 14 |....m.1e.P...^..|
+00000030 c0 0a 00 39 00 38 00 88 00 87 c0 0f c0 05 00 35 |...9.8.........5|
+00000040 00 84 c0 13 c0 09 00 33 00 32 00 9a 00 99 00 45 |.......3.2.....E|
+00000050 00 44 c0 0e c0 04 00 2f 00 96 00 41 00 07 c0 11 |.D...../...A....|
+00000060 c0 07 c0 0c c0 02 00 05 00 04 c0 12 c0 08 00 16 |................|
+00000070 00 13 c0 0d c0 03 00 0a 00 15 00 12 00 09 00 14 |................|
+00000080 00 11 00 08 00 06 00 03 00 ff 56 00 01 00 00 49 |..........V....I|
+00000090 00 0b 00 04 03 00 01 02 00 0a 00 34 00 32 00 0e |...........4.2..|
+000000a0 00 0d 00 19 00 0b 00 0c 00 18 00 09 00 0a 00 16 |................|
+000000b0 00 17 00 08 00 06 00 07 00 14 00 15 00 04 00 05 |................|
+000000c0 00 12 00 13 00 01 00 02 00 03 00 0f 00 10 00 11 |................|
+000000d0 00 23 00 00 00 0f 00 01 01 |.#.......|
+>>> Flow 2 (server to client)
+00000000 15 03 02 00 02 02 56 |......V|
diff --git a/src/debug/pe/file.go b/src/debug/pe/file.go
index ce6f1408f..759e5674f 100644
--- a/src/debug/pe/file.go
+++ b/src/debug/pe/file.go
@@ -13,7 +13,6 @@ import (
"io"
"os"
"strconv"
- "unsafe"
)
// A File represents an open PE file.
@@ -125,6 +124,11 @@ func (f *File) Close() error {
return err
}
+var (
+ sizeofOptionalHeader32 = uint16(binary.Size(OptionalHeader32{}))
+ sizeofOptionalHeader64 = uint16(binary.Size(OptionalHeader64{}))
+)
+
// NewFile creates a new File for accessing a PE binary in an underlying reader.
func NewFile(r io.ReaderAt) (*File, error) {
f := new(File)
@@ -205,8 +209,8 @@ func NewFile(r io.ReaderAt) (*File, error) {
}
var oh32 OptionalHeader32
var oh64 OptionalHeader64
- switch uintptr(f.FileHeader.SizeOfOptionalHeader) {
- case unsafe.Sizeof(oh32):
+ switch f.FileHeader.SizeOfOptionalHeader {
+ case sizeofOptionalHeader32:
if err := binary.Read(sr, binary.LittleEndian, &oh32); err != nil {
return nil, err
}
@@ -214,7 +218,7 @@ func NewFile(r io.ReaderAt) (*File, error) {
return nil, fmt.Errorf("pe32 optional header has unexpected Magic of 0x%x", oh32.Magic)
}
f.OptionalHeader = &oh32
- case unsafe.Sizeof(oh64):
+ case sizeofOptionalHeader64:
if err := binary.Read(sr, binary.LittleEndian, &oh64); err != nil {
return nil, err
}
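
binary.Size reports the encoded size of these fixed-layout structs, which is why it can stand in for unsafe.Sizeof here. A quick check of the values involved (expected sizes per the PE spec: 224 bytes for PE32, 240 for PE32+, assuming the usual 16 data directories):

	package main

	import (
		"debug/pe"
		"encoding/binary"
		"fmt"
	)

	func main() {
		fmt.Println(binary.Size(pe.OptionalHeader32{})) // 224 (0xE0)
		fmt.Println(binary.Size(pe.OptionalHeader64{})) // 240 (0xF0)
	}
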
diff --git a/src/encoding/csv/writer.go b/src/encoding/csv/writer.go
index 1faecb664..17e7bb7f5 100644
--- a/src/encoding/csv/writer.go
+++ b/src/encoding/csv/writer.go
@@ -115,10 +115,22 @@ func (w *Writer) WriteAll(records [][]string) (err error) {
}
// fieldNeedsQuotes returns true if our field must be enclosed in quotes.
-// Empty fields, files with a Comma, fields with a quote or newline, and
+// Fields with a Comma, fields with a quote or newline, and
// fields which start with a space must be enclosed in quotes.
+// We used to quote empty strings, but we do not anymore (as of Go 1.4).
+// The two representations should be equivalent, but Postgres distinguishes
+// quoted vs non-quoted empty string during database imports, and it has
+// an option to force the quoted behavior for non-quoted CSV but it has
+// no option to force the non-quoted behavior for quoted CSV, making
+// CSV with quoted empty strings strictly less useful.
+// Not quoting the empty string also makes this package match the behavior
+// of Microsoft Excel and Google Drive.
+// For Postgres, quote the data terminating string `\.`.
func (w *Writer) fieldNeedsQuotes(field string) bool {
- if len(field) == 0 || strings.IndexRune(field, w.Comma) >= 0 || strings.IndexAny(field, "\"\r\n") >= 0 {
+ if field == "" {
+ return false
+ }
+ if field == `\.` || strings.IndexRune(field, w.Comma) >= 0 || strings.IndexAny(field, "\"\r\n") >= 0 {
return true
}
diff --git a/src/encoding/csv/writer_test.go b/src/encoding/csv/writer_test.go
index 22b740c07..8ddca0abe 100644
--- a/src/encoding/csv/writer_test.go
+++ b/src/encoding/csv/writer_test.go
@@ -28,6 +28,17 @@ var writeTests = []struct {
{Input: [][]string{{"abc\ndef"}}, Output: "\"abc\r\ndef\"\r\n", UseCRLF: true},
{Input: [][]string{{"abc\rdef"}}, Output: "\"abcdef\"\r\n", UseCRLF: true},
{Input: [][]string{{"abc\rdef"}}, Output: "\"abc\rdef\"\n", UseCRLF: false},
+ {Input: [][]string{{""}}, Output: "\n"},
+ {Input: [][]string{{"", ""}}, Output: ",\n"},
+ {Input: [][]string{{"", "", ""}}, Output: ",,\n"},
+ {Input: [][]string{{"", "", "a"}}, Output: ",,a\n"},
+ {Input: [][]string{{"", "a", ""}}, Output: ",a,\n"},
+ {Input: [][]string{{"", "a", "a"}}, Output: ",a,a\n"},
+ {Input: [][]string{{"a", "", ""}}, Output: "a,,\n"},
+ {Input: [][]string{{"a", "", "a"}}, Output: "a,,a\n"},
+ {Input: [][]string{{"a", "a", ""}}, Output: "a,a,\n"},
+ {Input: [][]string{{"a", "a", "a"}}, Output: "a,a,a\n"},
+ {Input: [][]string{{`\.`}}, Output: "\"\\.\"\n"},
}
func TestWrite(t *testing.T) {
diff --git a/src/encoding/gob/codec_test.go b/src/encoding/gob/codec_test.go
index 4f17a2893..56a7298fa 100644
--- a/src/encoding/gob/codec_test.go
+++ b/src/encoding/gob/codec_test.go
@@ -50,10 +50,16 @@ func testError(t *testing.T) {
return
}
+func newDecBuffer(data []byte) *decBuffer {
+ return &decBuffer{
+ data: data,
+ }
+}
+
// Test basic encode/decode routines for unsigned integers
func TestUintCodec(t *testing.T) {
defer testError(t)
- b := new(bytes.Buffer)
+ b := new(encBuffer)
encState := newEncoderState(b)
for _, tt := range encodeT {
b.Reset()
@@ -62,10 +68,10 @@ func TestUintCodec(t *testing.T) {
t.Errorf("encodeUint: %#x encode: expected % x got % x", tt.x, tt.b, b.Bytes())
}
}
- decState := newDecodeState(b)
for u := uint64(0); ; u = (u + 1) * 7 {
b.Reset()
encState.encodeUint(u)
+ decState := newDecodeState(newDecBuffer(b.Bytes()))
v := decState.decodeUint()
if u != v {
t.Errorf("Encode/Decode: sent %#x received %#x", u, v)
@@ -78,10 +84,10 @@ func TestUintCodec(t *testing.T) {
func verifyInt(i int64, t *testing.T) {
defer testError(t)
- var b = new(bytes.Buffer)
+ var b = new(encBuffer)
encState := newEncoderState(b)
encState.encodeInt(i)
- decState := newDecodeState(b)
+ decState := newDecodeState(newDecBuffer(b.Bytes()))
decState.buf = make([]byte, 8)
j := decState.decodeInt()
if i != j {
@@ -118,14 +124,14 @@ var complexResult = []byte{0x07, 0xFE, 0x31, 0x40, 0xFE, 0x33, 0x40}
// The result of encoding "hello" with field number 7
var bytesResult = []byte{0x07, 0x05, 'h', 'e', 'l', 'l', 'o'}
-func newDecodeState(buf *bytes.Buffer) *decoderState {
+func newDecodeState(buf *decBuffer) *decoderState {
d := new(decoderState)
d.b = buf
d.buf = make([]byte, uint64Size)
return d
}
-func newEncoderState(b *bytes.Buffer) *encoderState {
+func newEncoderState(b *encBuffer) *encoderState {
b.Reset()
state := &encoderState{enc: nil, b: b}
state.fieldnum = -1
@@ -135,7 +141,7 @@ func newEncoderState(b *bytes.Buffer) *encoderState {
// Test instruction execution for encoding.
// Do not run the machine yet; instead do individual instructions crafted by hand.
func TestScalarEncInstructions(t *testing.T) {
- var b = new(bytes.Buffer)
+ var b = new(encBuffer)
// bool
{
@@ -328,7 +334,7 @@ func execDec(typ string, instr *decInstr, state *decoderState, t *testing.T, val
}
func newDecodeStateFromData(data []byte) *decoderState {
- b := bytes.NewBuffer(data)
+ b := newDecBuffer(data)
state := newDecodeState(b)
state.fieldnum = -1
return state
diff --git a/src/encoding/gob/dec_helpers.go b/src/encoding/gob/dec_helpers.go
new file mode 100644
index 000000000..a1b67661d
--- /dev/null
+++ b/src/encoding/gob/dec_helpers.go
@@ -0,0 +1,468 @@
+// Created by decgen --output dec_helpers.go; DO NOT EDIT
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "math"
+ "reflect"
+)
+
+var decArrayHelper = map[reflect.Kind]decHelper{
+ reflect.Bool: decBoolArray,
+ reflect.Complex64: decComplex64Array,
+ reflect.Complex128: decComplex128Array,
+ reflect.Float32: decFloat32Array,
+ reflect.Float64: decFloat64Array,
+ reflect.Int: decIntArray,
+ reflect.Int16: decInt16Array,
+ reflect.Int32: decInt32Array,
+ reflect.Int64: decInt64Array,
+ reflect.Int8: decInt8Array,
+ reflect.String: decStringArray,
+ reflect.Uint: decUintArray,
+ reflect.Uint16: decUint16Array,
+ reflect.Uint32: decUint32Array,
+ reflect.Uint64: decUint64Array,
+ reflect.Uintptr: decUintptrArray,
+}
+
+var decSliceHelper = map[reflect.Kind]decHelper{
+ reflect.Bool: decBoolSlice,
+ reflect.Complex64: decComplex64Slice,
+ reflect.Complex128: decComplex128Slice,
+ reflect.Float32: decFloat32Slice,
+ reflect.Float64: decFloat64Slice,
+ reflect.Int: decIntSlice,
+ reflect.Int16: decInt16Slice,
+ reflect.Int32: decInt32Slice,
+ reflect.Int64: decInt64Slice,
+ reflect.Int8: decInt8Slice,
+ reflect.String: decStringSlice,
+ reflect.Uint: decUintSlice,
+ reflect.Uint16: decUint16Slice,
+ reflect.Uint32: decUint32Slice,
+ reflect.Uint64: decUint64Slice,
+ reflect.Uintptr: decUintptrSlice,
+}
+
+func decBoolArray(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decBoolSlice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decBoolSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]bool)
+ if !ok {
+ // It is kind bool but not type bool. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding bool array or slice: length exceeds input size (%d elements)", length)
+ }
+ slice[i] = state.decodeUint() != 0
+ }
+ return true
+}
+
+func decComplex64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decComplex64Slice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decComplex64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]complex64)
+ if !ok {
+ // It is kind complex64 but not type complex64. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding complex64 array or slice: length exceeds input size (%d elements)", length)
+ }
+ real := float32FromBits(state.decodeUint(), ovfl)
+ imag := float32FromBits(state.decodeUint(), ovfl)
+ slice[i] = complex(float32(real), float32(imag))
+ }
+ return true
+}
+
+func decComplex128Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decComplex128Slice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decComplex128Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]complex128)
+ if !ok {
+ // It is kind complex128 but not type complex128. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding complex128 array or slice: length exceeds input size (%d elements)", length)
+ }
+ real := float64FromBits(state.decodeUint())
+ imag := float64FromBits(state.decodeUint())
+ slice[i] = complex(real, imag)
+ }
+ return true
+}
+
+func decFloat32Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decFloat32Slice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decFloat32Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]float32)
+ if !ok {
+ // It is kind float32 but not type float32. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding float32 array or slice: length exceeds input size (%d elements)", length)
+ }
+ slice[i] = float32(float32FromBits(state.decodeUint(), ovfl))
+ }
+ return true
+}
+
+func decFloat64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decFloat64Slice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decFloat64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]float64)
+ if !ok {
+ // It is kind float64 but not type float64. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding float64 array or slice: length exceeds input size (%d elements)", length)
+ }
+ slice[i] = float64FromBits(state.decodeUint())
+ }
+ return true
+}
+
+func decIntArray(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decIntSlice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decIntSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]int)
+ if !ok {
+ // It is kind int but not type int. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding int array or slice: length exceeds input size (%d elements)", length)
+ }
+ x := state.decodeInt()
+ // MinInt and MaxInt
+ if x < ^int64(^uint(0)>>1) || int64(^uint(0)>>1) < x {
+ error_(ovfl)
+ }
+ slice[i] = int(x)
+ }
+ return true
+}
+
+func decInt16Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decInt16Slice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decInt16Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]int16)
+ if !ok {
+ // It is kind int16 but not type int16. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding int16 array or slice: length exceeds input size (%d elements)", length)
+ }
+ x := state.decodeInt()
+ if x < math.MinInt16 || math.MaxInt16 < x {
+ error_(ovfl)
+ }
+ slice[i] = int16(x)
+ }
+ return true
+}
+
+func decInt32Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decInt32Slice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decInt32Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]int32)
+ if !ok {
+ // It is kind int32 but not type int32. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding int32 array or slice: length exceeds input size (%d elements)", length)
+ }
+ x := state.decodeInt()
+ if x < math.MinInt32 || math.MaxInt32 < x {
+ error_(ovfl)
+ }
+ slice[i] = int32(x)
+ }
+ return true
+}
+
+func decInt64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decInt64Slice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decInt64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]int64)
+ if !ok {
+ // It is kind int64 but not type int64. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding int64 array or slice: length exceeds input size (%d elements)", length)
+ }
+ slice[i] = state.decodeInt()
+ }
+ return true
+}
+
+func decInt8Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decInt8Slice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decInt8Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]int8)
+ if !ok {
+ // It is kind int8 but not type int8. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding int8 array or slice: length exceeds input size (%d elements)", length)
+ }
+ x := state.decodeInt()
+ if x < math.MinInt8 || math.MaxInt8 < x {
+ error_(ovfl)
+ }
+ slice[i] = int8(x)
+ }
+ return true
+}
+
+func decStringArray(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decStringSlice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decStringSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]string)
+ if !ok {
+ // It is kind string but not type string. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding string array or slice: length exceeds input size (%d elements)", length)
+ }
+ u := state.decodeUint()
+ n := int(u)
+ if n < 0 || uint64(n) != u || n > state.b.Len() {
+ errorf("length of string exceeds input size (%d bytes)", u)
+ }
+ if n > state.b.Len() {
+ errorf("string data too long for buffer: %d", n)
+ }
+ // Read the data.
+ data := make([]byte, n)
+ if _, err := state.b.Read(data); err != nil {
+ errorf("error decoding string: %s", err)
+ }
+ slice[i] = string(data)
+ }
+ return true
+}
+
+func decUintArray(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decUintSlice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decUintSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]uint)
+ if !ok {
+ // It is kind uint but not type uint. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding uint array or slice: length exceeds input size (%d elements)", length)
+ }
+ x := state.decodeUint()
+ /*TODO if math.MaxUint32 < x {
+ error_(ovfl)
+ }*/
+ slice[i] = uint(x)
+ }
+ return true
+}
+
+func decUint16Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decUint16Slice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decUint16Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]uint16)
+ if !ok {
+ // It is kind uint16 but not type uint16. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding uint16 array or slice: length exceeds input size (%d elements)", length)
+ }
+ x := state.decodeUint()
+ if math.MaxUint16 < x {
+ error_(ovfl)
+ }
+ slice[i] = uint16(x)
+ }
+ return true
+}
+
+func decUint32Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decUint32Slice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decUint32Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]uint32)
+ if !ok {
+ // It is kind uint32 but not type uint32. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding uint32 array or slice: length exceeds input size (%d elements)", length)
+ }
+ x := state.decodeUint()
+ if math.MaxUint32 < x {
+ error_(ovfl)
+ }
+ slice[i] = uint32(x)
+ }
+ return true
+}
+
+func decUint64Array(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decUint64Slice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decUint64Slice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]uint64)
+ if !ok {
+ // It is kind uint64 but not type uint64. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding uint64 array or slice: length exceeds input size (%d elements)", length)
+ }
+ slice[i] = state.decodeUint()
+ }
+ return true
+}
+
+func decUintptrArray(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return decUintptrSlice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+
+func decUintptrSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]uintptr)
+ if !ok {
+ // It is kind uintptr but not type uintptr. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding uintptr array or slice: length exceeds input size (%d elements)", length)
+ }
+ x := state.decodeUint()
+ if uint64(^uintptr(0)) < x {
+ error_(ovfl)
+ }
+ slice[i] = uintptr(x)
+ }
+ return true
+}
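
Each helper above takes the fast path only when the slice's dynamic type is exactly the built-in one; a named slice type of the same kind fails the type assertion, the helper returns false, and gob falls back to the generic per-element decode loop. A standalone sketch of that kind-versus-type distinction (illustrative only):

    package main

    import (
    	"fmt"
    	"reflect"
    )

    type Flags []bool // kind slice-of-Bool, but not type []bool

    func main() {
    	_, ok := reflect.ValueOf(Flags{true}).Interface().([]bool)
    	fmt.Println(ok) // false: the helper declines, the generic loop runs
    	_, ok = reflect.ValueOf([]bool{true}).Interface().([]bool)
    	fmt.Println(ok) // true: the helper decodes the whole slice in one pass
    }
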
diff --git a/src/encoding/gob/decgen.go b/src/encoding/gob/decgen.go
new file mode 100644
index 000000000..da41a899e
--- /dev/null
+++ b/src/encoding/gob/decgen.go
@@ -0,0 +1,240 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// decgen writes the helper functions for decoding. Intended to be
+// used with go generate; see the invocation in decode.go.
+
+// TODO: We could do more by being unsafe. Add a -unsafe flag?
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "log"
+ "os"
+)
+
+var output = flag.String("output", "dec_helpers.go", "file name to write")
+
+type Type struct {
+ lower string
+ upper string
+ decoder string
+}
+
+var types = []Type{
+ {
+ "bool",
+ "Bool",
+ `slice[i] = state.decodeUint() != 0`,
+ },
+ {
+ "complex64",
+ "Complex64",
+ `real := float32FromBits(state.decodeUint(), ovfl)
+ imag := float32FromBits(state.decodeUint(), ovfl)
+ slice[i] = complex(float32(real), float32(imag))`,
+ },
+ {
+ "complex128",
+ "Complex128",
+ `real := float64FromBits(state.decodeUint())
+ imag := float64FromBits(state.decodeUint())
+ slice[i] = complex(real, imag)`,
+ },
+ {
+ "float32",
+ "Float32",
+ `slice[i] = float32(float32FromBits(state.decodeUint(), ovfl))`,
+ },
+ {
+ "float64",
+ "Float64",
+ `slice[i] = float64FromBits(state.decodeUint())`,
+ },
+ {
+ "int",
+ "Int",
+ `x := state.decodeInt()
+ // MinInt and MaxInt
+ if x < ^int64(^uint(0)>>1) || int64(^uint(0)>>1) < x {
+ error_(ovfl)
+ }
+ slice[i] = int(x)`,
+ },
+ {
+ "int16",
+ "Int16",
+ `x := state.decodeInt()
+ if x < math.MinInt16 || math.MaxInt16 < x {
+ error_(ovfl)
+ }
+ slice[i] = int16(x)`,
+ },
+ {
+ "int32",
+ "Int32",
+ `x := state.decodeInt()
+ if x < math.MinInt32 || math.MaxInt32 < x {
+ error_(ovfl)
+ }
+ slice[i] = int32(x)`,
+ },
+ {
+ "int64",
+ "Int64",
+ `slice[i] = state.decodeInt()`,
+ },
+ {
+ "int8",
+ "Int8",
+ `x := state.decodeInt()
+ if x < math.MinInt8 || math.MaxInt8 < x {
+ error_(ovfl)
+ }
+ slice[i] = int8(x)`,
+ },
+ {
+ "string",
+ "String",
+ `u := state.decodeUint()
+ n := int(u)
+ if n < 0 || uint64(n) != u || n > state.b.Len() {
+ errorf("length of string exceeds input size (%d bytes)", u)
+ }
+ if n > state.b.Len() {
+ errorf("string data too long for buffer: %d", n)
+ }
+ // Read the data.
+ data := make([]byte, n)
+ if _, err := state.b.Read(data); err != nil {
+ errorf("error decoding string: %s", err)
+ }
+ slice[i] = string(data)`,
+ },
+ {
+ "uint",
+ "Uint",
+ `x := state.decodeUint()
+ /*TODO if math.MaxUint32 < x {
+ error_(ovfl)
+ }*/
+ slice[i] = uint(x)`,
+ },
+ {
+ "uint16",
+ "Uint16",
+ `x := state.decodeUint()
+ if math.MaxUint16 < x {
+ error_(ovfl)
+ }
+ slice[i] = uint16(x)`,
+ },
+ {
+ "uint32",
+ "Uint32",
+ `x := state.decodeUint()
+ if math.MaxUint32 < x {
+ error_(ovfl)
+ }
+ slice[i] = uint32(x)`,
+ },
+ {
+ "uint64",
+ "Uint64",
+ `slice[i] = state.decodeUint()`,
+ },
+ {
+ "uintptr",
+ "Uintptr",
+ `x := state.decodeUint()
+ if uint64(^uintptr(0)) < x {
+ error_(ovfl)
+ }
+ slice[i] = uintptr(x)`,
+ },
+ // uint8 Handled separately.
+}
+
+func main() {
+ log.SetFlags(0)
+ log.SetPrefix("decgen: ")
+ flag.Parse()
+ if flag.NArg() != 0 {
+ log.Fatal("usage: decgen [--output filename]")
+ }
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "// Created by decgen --output %s; DO NOT EDIT\n", *output)
+ fmt.Fprint(&b, header)
+ printMaps(&b, "Array")
+ fmt.Fprint(&b, "\n")
+ printMaps(&b, "Slice")
+ for _, t := range types {
+ fmt.Fprintf(&b, arrayHelper, t.lower, t.upper)
+ fmt.Fprintf(&b, sliceHelper, t.lower, t.upper, t.decoder)
+ }
+ source, err := format.Source(b.Bytes())
+ if err != nil {
+ log.Fatal("source format error:", err)
+ }
+ fd, err := os.Create(*output)
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, err = fd.Write(source)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func printMaps(b *bytes.Buffer, upperClass string) {
+ fmt.Fprintf(b, "var dec%sHelper = map[reflect.Kind]decHelper{\n", upperClass)
+ for _, t := range types {
+ fmt.Fprintf(b, "reflect.%s: dec%s%s,\n", t.upper, t.upper, upperClass)
+ }
+ fmt.Fprintf(b, "}\n")
+}
+
+const header = `
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "math"
+ "reflect"
+)
+
+`
+
+const arrayHelper = `
+func dec%[2]sArray(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return dec%[2]sSlice(state, v.Slice(0, v.Len()), length, ovfl)
+}
+`
+
+const sliceHelper = `
+func dec%[2]sSlice(state *decoderState, v reflect.Value, length int, ovfl error) bool {
+ slice, ok := v.Interface().([]%[1]s)
+ if !ok {
+ // It is kind %[1]s but not type %[1]s. TODO: We can handle this unsafely.
+ return false
+ }
+ for i := 0; i < length; i++ {
+ if state.b.Len() == 0 {
+ errorf("decoding %[1]s array or slice: length exceeds input size (%%d elements)", length)
+ }
+ %[3]s
+ }
+ return true
+}
+`
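
The templates above lean on fmt's indexed verbs: %[2]s pins the verb to the second argument and lets it be reused, which is why arrayHelper can mention the exported name twice while main passes t.lower and t.upper once each. A tiny demonstration of the mechanism (illustrative only):

    package main

    import "fmt"

    func main() {
    	const tmpl = "func dec%[2]sArray(...) bool { return dec%[2]sSlice(...) }\n"
    	// Prints: func decBoolArray(...) bool { return decBoolSlice(...) }
    	fmt.Printf(tmpl, "bool", "Bool")
    }
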
diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go
index 6a9213fb3..a5bef9314 100644
--- a/src/encoding/gob/decode.go
+++ b/src/encoding/gob/decode.go
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:generate go run decgen.go -output dec_helpers.go
+
package gob
import (
- "bytes"
"encoding"
"errors"
"io"
@@ -19,21 +20,79 @@ var (
errRange = errors.New("gob: bad data: field numbers out of bounds")
)
+type decHelper func(state *decoderState, v reflect.Value, length int, ovfl error) bool
+
// decoderState is the execution state of an instance of the decoder. A new state
// is created for nested objects.
type decoderState struct {
dec *Decoder
// The buffer is stored with an extra indirection because it may be replaced
// if we load a type during decode (when reading an interface value).
- b *bytes.Buffer
+ b *decBuffer
fieldnum int // the last field number read.
buf []byte
next *decoderState // for free list
}
+// decBuffer is an extremely simple, fast implementation of a read-only byte buffer.
+// It is initialized by calling Size and then copying the data into the slice returned by Bytes().
+type decBuffer struct {
+ data []byte
+ offset int // Read offset.
+}
+
+func (d *decBuffer) Read(p []byte) (int, error) {
+ n := copy(p, d.data[d.offset:])
+ if n == 0 && len(p) != 0 {
+ return 0, io.EOF
+ }
+ d.offset += n
+ return n, nil
+}
+
+func (d *decBuffer) Drop(n int) {
+ if n > d.Len() {
+ panic("drop")
+ }
+ d.offset += n
+}
+
+// Size grows the buffer to exactly n bytes, so d.Bytes() will
+// return a slice of length n. Existing data is first discarded.
+func (d *decBuffer) Size(n int) {
+ d.Reset()
+ if cap(d.data) < n {
+ d.data = make([]byte, n)
+ } else {
+ d.data = d.data[0:n]
+ }
+}
+
+func (d *decBuffer) ReadByte() (byte, error) {
+ if d.offset >= len(d.data) {
+ return 0, io.EOF
+ }
+ c := d.data[d.offset]
+ d.offset++
+ return c, nil
+}
+
+func (d *decBuffer) Len() int {
+ return len(d.data) - d.offset
+}
+
+func (d *decBuffer) Bytes() []byte {
+ return d.data[d.offset:]
+}
+
+func (d *decBuffer) Reset() {
+ d.data = d.data[0:0]
+ d.offset = 0
+}
+
// We pass the decBuffer separately for easier testing of the infrastructure
// without requiring a full Decoder.
-func (dec *Decoder) newDecoderState(buf *bytes.Buffer) *decoderState {
+func (dec *Decoder) newDecoderState(buf *decBuffer) *decoderState {
d := dec.freeList
if d == nil {
d = new(decoderState)
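
The decBuffer contract above is deliberately tiny: Size discards old contents and exposes exactly n bytes, Read consumes from the front, and Drop skips bytes without copying. A short fragment showing how the methods compose (illustrative, package-internal scope):

    func decBufferSketch() {
    	var d decBuffer
    	d.Size(4) // exactly 4 zero bytes; previous contents discarded
    	copy(d.Bytes(), []byte{1, 2, 3, 4})
    	var p [2]byte
    	n, _ := d.Read(p[:]) // n == 2, d.Len() == 2
    	d.Drop(1)            // skip one byte without copying; d.Len() == 1
    	_ = n
    }
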
@@ -257,7 +316,7 @@ func float64FromBits(u uint64) float64 {
// number, and returns it. It's a helper function for float32 and complex64.
// It returns a float64 because that's what reflection needs, but its return
// value is known to be accurately representable in a float32.
-func float32FromBits(i *decInstr, u uint64) float64 {
+func float32FromBits(u uint64, ovfl error) float64 {
v := float64FromBits(u)
av := v
if av < 0 {
@@ -265,7 +324,7 @@ func float32FromBits(i *decInstr, u uint64) float64 {
}
// +Inf is OK in both 32- and 64-bit floats. Underflow is always OK.
if math.MaxFloat32 < av && av <= math.MaxFloat64 {
- error_(i.ovfl)
+ error_(ovfl)
}
return v
}
@@ -273,7 +332,7 @@ func float32FromBits(i *decInstr, u uint64) float64 {
// decFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point
// number, and stores it in value.
func decFloat32(i *decInstr, state *decoderState, value reflect.Value) {
- value.SetFloat(float32FromBits(i, state.decodeUint()))
+ value.SetFloat(float32FromBits(state.decodeUint(), i.ovfl))
}
// decFloat64 decodes an unsigned integer, treats it as a 64-bit floating-point
@@ -286,8 +345,8 @@ func decFloat64(i *decInstr, state *decoderState, value reflect.Value) {
// pair of floating point numbers, and stores them as a complex64 in value.
// The real part comes first.
func decComplex64(i *decInstr, state *decoderState, value reflect.Value) {
- real := float32FromBits(i, state.decodeUint())
- imag := float32FromBits(i, state.decodeUint())
+ real := float32FromBits(state.decodeUint(), i.ovfl)
+ imag := float32FromBits(state.decodeUint(), i.ovfl)
value.SetComplex(complex(real, imag))
}
@@ -450,7 +509,10 @@ func (dec *Decoder) ignoreSingle(engine *decEngine) {
}
// decodeArrayHelper does the work for decoding arrays and slices.
-func (dec *Decoder) decodeArrayHelper(state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error) {
+func (dec *Decoder) decodeArrayHelper(state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error, helper decHelper) {
+ if helper != nil && helper(state, value, length, ovfl) {
+ return
+ }
instr := &decInstr{elemOp, 0, nil, ovfl}
isPtr := value.Type().Elem().Kind() == reflect.Ptr
for i := 0; i < length; i++ {
@@ -468,11 +530,11 @@ func (dec *Decoder) decodeArrayHelper(state *decoderState, value reflect.Value,
// decodeArray decodes an array and stores it in value.
// The length is an unsigned integer preceding the elements. Even though the length is redundant
// (it's part of the type), it's a useful check and is included in the encoding.
-func (dec *Decoder) decodeArray(atyp reflect.Type, state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error) {
+func (dec *Decoder) decodeArray(atyp reflect.Type, state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error, helper decHelper) {
if n := state.decodeUint(); n != uint64(length) {
errorf("length mismatch in decodeArray")
}
- dec.decodeArrayHelper(state, value, elemOp, length, ovfl)
+ dec.decodeArrayHelper(state, value, elemOp, length, ovfl, helper)
}
// decodeIntoValue is a helper for map decoding.
@@ -534,7 +596,7 @@ func (dec *Decoder) ignoreMap(state *decoderState, keyOp, elemOp decOp) {
// decodeSlice decodes a slice and stores it in value.
// Slices are encoded as an unsigned length followed by the elements.
-func (dec *Decoder) decodeSlice(state *decoderState, value reflect.Value, elemOp decOp, ovfl error) {
+func (dec *Decoder) decodeSlice(state *decoderState, value reflect.Value, elemOp decOp, ovfl error, helper decHelper) {
u := state.decodeUint()
typ := value.Type()
size := uint64(typ.Elem().Size())
@@ -551,7 +613,7 @@ func (dec *Decoder) decodeSlice(state *decoderState, value reflect.Value, elemOp
} else {
value.Set(value.Slice(0, n))
}
- dec.decodeArrayHelper(state, value, elemOp, n, ovfl)
+ dec.decodeArrayHelper(state, value, elemOp, n, ovfl, helper)
}
// ignoreSlice skips over the data for a slice value with no destination.
@@ -626,7 +688,7 @@ func (dec *Decoder) ignoreInterface(state *decoderState) {
error_(dec.err)
}
// At this point, the decoder buffer contains a delimited value. Just toss it.
- state.b.Next(int(state.decodeUint()))
+ state.b.Drop(int(state.decodeUint()))
}
// decodeGobDecoder decodes something implementing the GobDecoder interface.
@@ -720,8 +782,9 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
elemId := dec.wireType[wireId].ArrayT.Elem
elemOp := dec.decOpFor(elemId, t.Elem(), name, inProgress)
ovfl := overflow(name)
+ helper := decArrayHelper[t.Elem().Kind()]
op = func(i *decInstr, state *decoderState, value reflect.Value) {
- state.dec.decodeArray(t, state, value, *elemOp, t.Len(), ovfl)
+ state.dec.decodeArray(t, state, value, *elemOp, t.Len(), ovfl, helper)
}
case reflect.Map:
@@ -748,8 +811,9 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg
}
elemOp := dec.decOpFor(elemId, t.Elem(), name, inProgress)
ovfl := overflow(name)
+ helper := decSliceHelper[t.Elem().Kind()]
op = func(i *decInstr, state *decoderState, value reflect.Value) {
- state.dec.decodeSlice(state, value, *elemOp, ovfl)
+ state.dec.decodeSlice(state, value, *elemOp, ovfl, helper)
}
case reflect.Struct:
diff --git a/src/encoding/gob/decoder.go b/src/encoding/gob/decoder.go
index dcad7a0e4..c453e9ba3 100644
--- a/src/encoding/gob/decoder.go
+++ b/src/encoding/gob/decoder.go
@@ -6,7 +6,6 @@ package gob
import (
"bufio"
- "bytes"
"errors"
"io"
"reflect"
@@ -23,13 +22,12 @@ const tooBig = 1 << 30
type Decoder struct {
mutex sync.Mutex // each item must be received atomically
r io.Reader // source of the data
- buf bytes.Buffer // buffer for more efficient i/o from r
+ buf decBuffer // buffer for more efficient i/o from r
wireType map[typeId]*wireType // map from remote ID to local description
decoderCache map[reflect.Type]map[typeId]**decEngine // cache of compiled engines
ignorerCache map[typeId]**decEngine // ditto for ignored objects
freeList *decoderState // list of free decoderStates; avoids reallocation
countBuf []byte // used for decoding integers while parsing messages
- tmp []byte // temporary storage for i/o; saves reallocating
err error
}
@@ -90,37 +88,17 @@ func (dec *Decoder) recvMessage() bool {
// readMessage reads the next nbytes bytes from the input.
func (dec *Decoder) readMessage(nbytes int) {
- // Allocate the dec.tmp buffer, up to 10KB.
- const maxBuf = 10 * 1024
- nTmp := nbytes
- if nTmp > maxBuf {
- nTmp = maxBuf
+ if dec.buf.Len() != 0 {
+ // The buffer should always be empty now.
+ panic("non-empty decoder buffer")
}
- if cap(dec.tmp) < nTmp {
- nAlloc := nTmp + 100 // A little extra for growth.
- if nAlloc > maxBuf {
- nAlloc = maxBuf
- }
- dec.tmp = make([]byte, nAlloc)
- }
- dec.tmp = dec.tmp[:nTmp]
-
// Read the data
- dec.buf.Grow(nbytes)
- for nbytes > 0 {
- if nbytes < nTmp {
- dec.tmp = dec.tmp[:nbytes]
- }
- var nRead int
- nRead, dec.err = io.ReadFull(dec.r, dec.tmp)
- if dec.err != nil {
- if dec.err == io.EOF {
- dec.err = io.ErrUnexpectedEOF
- }
- return
+ dec.buf.Size(nbytes)
+ _, dec.err = io.ReadFull(dec.r, dec.buf.Bytes())
+ if dec.err != nil {
+ if dec.err == io.EOF {
+ dec.err = io.ErrUnexpectedEOF
}
- dec.buf.Write(dec.tmp)
- nbytes -= nRead
}
}
@@ -212,7 +190,7 @@ func (dec *Decoder) Decode(e interface{}) error {
// Otherwise, it stores the value into v. In that case, v must represent
// a non-nil pointer to data or be an assignable reflect.Value (v.CanSet())
// If the input is at EOF, DecodeValue returns io.EOF and
-// does not modify e.
+// does not modify v.
func (dec *Decoder) DecodeValue(v reflect.Value) error {
if v.IsValid() {
if v.Kind() == reflect.Ptr && !v.IsNil() {
diff --git a/src/encoding/gob/enc_helpers.go b/src/encoding/gob/enc_helpers.go
new file mode 100644
index 000000000..804e539d8
--- /dev/null
+++ b/src/encoding/gob/enc_helpers.go
@@ -0,0 +1,414 @@
+// Created by encgen --output enc_helpers.go; DO NOT EDIT
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "reflect"
+)
+
+var encArrayHelper = map[reflect.Kind]encHelper{
+ reflect.Bool: encBoolArray,
+ reflect.Complex64: encComplex64Array,
+ reflect.Complex128: encComplex128Array,
+ reflect.Float32: encFloat32Array,
+ reflect.Float64: encFloat64Array,
+ reflect.Int: encIntArray,
+ reflect.Int16: encInt16Array,
+ reflect.Int32: encInt32Array,
+ reflect.Int64: encInt64Array,
+ reflect.Int8: encInt8Array,
+ reflect.String: encStringArray,
+ reflect.Uint: encUintArray,
+ reflect.Uint16: encUint16Array,
+ reflect.Uint32: encUint32Array,
+ reflect.Uint64: encUint64Array,
+ reflect.Uintptr: encUintptrArray,
+}
+
+var encSliceHelper = map[reflect.Kind]encHelper{
+ reflect.Bool: encBoolSlice,
+ reflect.Complex64: encComplex64Slice,
+ reflect.Complex128: encComplex128Slice,
+ reflect.Float32: encFloat32Slice,
+ reflect.Float64: encFloat64Slice,
+ reflect.Int: encIntSlice,
+ reflect.Int16: encInt16Slice,
+ reflect.Int32: encInt32Slice,
+ reflect.Int64: encInt64Slice,
+ reflect.Int8: encInt8Slice,
+ reflect.String: encStringSlice,
+ reflect.Uint: encUintSlice,
+ reflect.Uint16: encUint16Slice,
+ reflect.Uint32: encUint32Slice,
+ reflect.Uint64: encUint64Slice,
+ reflect.Uintptr: encUintptrSlice,
+}
+
+func encBoolArray(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encBoolSlice(state, v.Slice(0, v.Len()))
+}
+
+func encBoolSlice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]bool)
+ if !ok {
+ // It is kind bool but not type bool. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != false || state.sendZero {
+ if x {
+ state.encodeUint(1)
+ } else {
+ state.encodeUint(0)
+ }
+ }
+ }
+ return true
+}
+
+func encComplex64Array(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encComplex64Slice(state, v.Slice(0, v.Len()))
+}
+
+func encComplex64Slice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]complex64)
+ if !ok {
+ // It is kind complex64 but not type complex64. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0+0i || state.sendZero {
+ rpart := floatBits(float64(real(x)))
+ ipart := floatBits(float64(imag(x)))
+ state.encodeUint(rpart)
+ state.encodeUint(ipart)
+ }
+ }
+ return true
+}
+
+func encComplex128Array(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encComplex128Slice(state, v.Slice(0, v.Len()))
+}
+
+func encComplex128Slice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]complex128)
+ if !ok {
+ // It is kind complex128 but not type complex128. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0+0i || state.sendZero {
+ rpart := floatBits(real(x))
+ ipart := floatBits(imag(x))
+ state.encodeUint(rpart)
+ state.encodeUint(ipart)
+ }
+ }
+ return true
+}
+
+func encFloat32Array(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encFloat32Slice(state, v.Slice(0, v.Len()))
+}
+
+func encFloat32Slice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]float32)
+ if !ok {
+ // It is kind float32 but not type float32. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ bits := floatBits(float64(x))
+ state.encodeUint(bits)
+ }
+ }
+ return true
+}
+
+func encFloat64Array(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encFloat64Slice(state, v.Slice(0, v.Len()))
+}
+
+func encFloat64Slice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]float64)
+ if !ok {
+ // It is kind float64 but not type float64. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ bits := floatBits(x)
+ state.encodeUint(bits)
+ }
+ }
+ return true
+}
+
+func encIntArray(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encIntSlice(state, v.Slice(0, v.Len()))
+}
+
+func encIntSlice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]int)
+ if !ok {
+ // It is kind int but not type int. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ state.encodeInt(int64(x))
+ }
+ }
+ return true
+}
+
+func encInt16Array(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encInt16Slice(state, v.Slice(0, v.Len()))
+}
+
+func encInt16Slice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]int16)
+ if !ok {
+ // It is kind int16 but not type int16. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ state.encodeInt(int64(x))
+ }
+ }
+ return true
+}
+
+func encInt32Array(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encInt32Slice(state, v.Slice(0, v.Len()))
+}
+
+func encInt32Slice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]int32)
+ if !ok {
+ // It is kind int32 but not type int32. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ state.encodeInt(int64(x))
+ }
+ }
+ return true
+}
+
+func encInt64Array(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encInt64Slice(state, v.Slice(0, v.Len()))
+}
+
+func encInt64Slice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]int64)
+ if !ok {
+ // It is kind int64 but not type int64. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ state.encodeInt(x)
+ }
+ }
+ return true
+}
+
+func encInt8Array(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encInt8Slice(state, v.Slice(0, v.Len()))
+}
+
+func encInt8Slice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]int8)
+ if !ok {
+ // It is kind int8 but not type int8. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ state.encodeInt(int64(x))
+ }
+ }
+ return true
+}
+
+func encStringArray(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encStringSlice(state, v.Slice(0, v.Len()))
+}
+
+func encStringSlice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]string)
+ if !ok {
+ // It is kind string but not type string. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != "" || state.sendZero {
+ state.encodeUint(uint64(len(x)))
+ state.b.WriteString(x)
+ }
+ }
+ return true
+}
+
+func encUintArray(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encUintSlice(state, v.Slice(0, v.Len()))
+}
+
+func encUintSlice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]uint)
+ if !ok {
+ // It is kind uint but not type uint. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ state.encodeUint(uint64(x))
+ }
+ }
+ return true
+}
+
+func encUint16Array(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encUint16Slice(state, v.Slice(0, v.Len()))
+}
+
+func encUint16Slice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]uint16)
+ if !ok {
+ // It is kind uint16 but not type uint16. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ state.encodeUint(uint64(x))
+ }
+ }
+ return true
+}
+
+func encUint32Array(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encUint32Slice(state, v.Slice(0, v.Len()))
+}
+
+func encUint32Slice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]uint32)
+ if !ok {
+ // It is kind uint32 but not type uint32. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ state.encodeUint(uint64(x))
+ }
+ }
+ return true
+}
+
+func encUint64Array(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encUint64Slice(state, v.Slice(0, v.Len()))
+}
+
+func encUint64Slice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]uint64)
+ if !ok {
+ // It is kind uint64 but not type uint64. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ state.encodeUint(x)
+ }
+ }
+ return true
+}
+
+func encUintptrArray(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return encUintptrSlice(state, v.Slice(0, v.Len()))
+}
+
+func encUintptrSlice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]uintptr)
+ if !ok {
+ // It is kind uintptr but not type uintptr. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != 0 || state.sendZero {
+ state.encodeUint(uint64(x))
+ }
+ }
+ return true
+}
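
The array variants above decline whenever the value cannot be sliced, which reflect only permits for addressable arrays. A standalone sketch of why an array reached by value falls back while one reached through a pointer takes the fast path (illustrative only):

    package main

    import (
    	"fmt"
    	"reflect"
    )

    func main() {
    	a := [3]int{1, 2, 3}
    	fmt.Println(reflect.ValueOf(a).CanAddr())         // false: a copy; helper declines
    	fmt.Println(reflect.ValueOf(&a).Elem().CanAddr()) // true: sliceable; fast path
    }
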
diff --git a/src/encoding/gob/encgen.go b/src/encoding/gob/encgen.go
new file mode 100644
index 000000000..efdd92829
--- /dev/null
+++ b/src/encoding/gob/encgen.go
@@ -0,0 +1,218 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// encgen writes the helper functions for encoding. Intended to be
+// used with go generate; see the invocation in encode.go.
+
+// TODO: We could do more by being unsafe. Add a -unsafe flag?
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "log"
+ "os"
+)
+
+var output = flag.String("output", "enc_helpers.go", "file name to write")
+
+type Type struct {
+ lower string
+ upper string
+ zero string
+ encoder string
+}
+
+var types = []Type{
+ {
+ "bool",
+ "Bool",
+ "false",
+ `if x {
+ state.encodeUint(1)
+ } else {
+ state.encodeUint(0)
+ }`,
+ },
+ {
+ "complex64",
+ "Complex64",
+ "0+0i",
+ `rpart := floatBits(float64(real(x)))
+ ipart := floatBits(float64(imag(x)))
+ state.encodeUint(rpart)
+ state.encodeUint(ipart)`,
+ },
+ {
+ "complex128",
+ "Complex128",
+ "0+0i",
+ `rpart := floatBits(real(x))
+ ipart := floatBits(imag(x))
+ state.encodeUint(rpart)
+ state.encodeUint(ipart)`,
+ },
+ {
+ "float32",
+ "Float32",
+ "0",
+ `bits := floatBits(float64(x))
+ state.encodeUint(bits)`,
+ },
+ {
+ "float64",
+ "Float64",
+ "0",
+ `bits := floatBits(x)
+ state.encodeUint(bits)`,
+ },
+ {
+ "int",
+ "Int",
+ "0",
+ `state.encodeInt(int64(x))`,
+ },
+ {
+ "int16",
+ "Int16",
+ "0",
+ `state.encodeInt(int64(x))`,
+ },
+ {
+ "int32",
+ "Int32",
+ "0",
+ `state.encodeInt(int64(x))`,
+ },
+ {
+ "int64",
+ "Int64",
+ "0",
+ `state.encodeInt(x)`,
+ },
+ {
+ "int8",
+ "Int8",
+ "0",
+ `state.encodeInt(int64(x))`,
+ },
+ {
+ "string",
+ "String",
+ `""`,
+ `state.encodeUint(uint64(len(x)))
+ state.b.WriteString(x)`,
+ },
+ {
+ "uint",
+ "Uint",
+ "0",
+ `state.encodeUint(uint64(x))`,
+ },
+ {
+ "uint16",
+ "Uint16",
+ "0",
+ `state.encodeUint(uint64(x))`,
+ },
+ {
+ "uint32",
+ "Uint32",
+ "0",
+ `state.encodeUint(uint64(x))`,
+ },
+ {
+ "uint64",
+ "Uint64",
+ "0",
+ `state.encodeUint(x)`,
+ },
+ {
+ "uintptr",
+ "Uintptr",
+ "0",
+ `state.encodeUint(uint64(x))`,
+ },
+ // uint8 Handled separately.
+}
+
+func main() {
+ log.SetFlags(0)
+ log.SetPrefix("encgen: ")
+ flag.Parse()
+ if flag.NArg() != 0 {
+ log.Fatal("usage: encgen [--output filename]")
+ }
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "// Created by encgen --output %s; DO NOT EDIT\n", *output)
+ fmt.Fprint(&b, header)
+ printMaps(&b, "Array")
+ fmt.Fprint(&b, "\n")
+ printMaps(&b, "Slice")
+ for _, t := range types {
+ fmt.Fprintf(&b, arrayHelper, t.lower, t.upper)
+ fmt.Fprintf(&b, sliceHelper, t.lower, t.upper, t.zero, t.encoder)
+ }
+ source, err := format.Source(b.Bytes())
+ if err != nil {
+ log.Fatal("source format error:", err)
+ }
+ fd, err := os.Create(*output)
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, err = fd.Write(source)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func printMaps(b *bytes.Buffer, upperClass string) {
+ fmt.Fprintf(b, "var enc%sHelper = map[reflect.Kind]encHelper{\n", upperClass)
+ for _, t := range types {
+ fmt.Fprintf(b, "reflect.%s: enc%s%s,\n", t.upper, t.upper, upperClass)
+ }
+ fmt.Fprintf(b, "}\n")
+}
+
+const header = `
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gob
+
+import (
+ "reflect"
+)
+
+`
+
+const arrayHelper = `
+func enc%[2]sArray(state *encoderState, v reflect.Value) bool {
+ // Can only slice if it is addressable.
+ if !v.CanAddr() {
+ return false
+ }
+ return enc%[2]sSlice(state, v.Slice(0, v.Len()))
+}
+`
+
+const sliceHelper = `
+func enc%[2]sSlice(state *encoderState, v reflect.Value) bool {
+ slice, ok := v.Interface().([]%[1]s)
+ if !ok {
+ // It is kind %[1]s but not type %[1]s. TODO: We can handle this unsafely.
+ return false
+ }
+ for _, x := range slice {
+ if x != %[3]s || state.sendZero {
+ %[4]s
+ }
+ }
+ return true
+}
+`
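
Both generators print loosely formatted text and hand it to go/format, which is why printMaps can emit map entries with no indentation at all. A minimal demonstration of that pattern (illustrative only):

    package main

    import (
    	"fmt"
    	"go/format"
    )

    func main() {
    	raw := []byte("package gob\nvar m = map[int]int{\n1: 2,\n}\n")
    	pretty, err := format.Source(raw) // gofmt the generated source
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%s", pretty)
    }
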
diff --git a/src/encoding/gob/encode.go b/src/encoding/gob/encode.go
index 04a85410c..f66279f14 100644
--- a/src/encoding/gob/encode.go
+++ b/src/encoding/gob/encode.go
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:generate go run encgen.go -output enc_helpers.go
+
package gob
import (
- "bytes"
"encoding"
"math"
"reflect"
@@ -13,20 +14,54 @@ import (
const uint64Size = 8
+type encHelper func(state *encoderState, v reflect.Value) bool
+
// encoderState is the global execution state of an instance of the encoder.
// Field numbers are delta encoded and always increase. The field
// number is initialized to -1 so 0 comes out as delta(1). A delta of
// 0 terminates the structure.
type encoderState struct {
enc *Encoder
- b *bytes.Buffer
+ b *encBuffer
sendZero bool // encoding an array element or map key/value pair; send zero values
fieldnum int // the last field number written.
buf [1 + uint64Size]byte // buffer used by the encoder; here to avoid allocation.
next *encoderState // for free list
}
-func (enc *Encoder) newEncoderState(b *bytes.Buffer) *encoderState {
+// encBuffer is an extremely simple, fast implementation of a write-only byte buffer.
+// It never returns a non-nil error, but Write returns an error value so it matches io.Writer.
+type encBuffer struct {
+ data []byte
+ scratch [64]byte
+}
+
+func (e *encBuffer) WriteByte(c byte) {
+ e.data = append(e.data, c)
+}
+
+func (e *encBuffer) Write(p []byte) (int, error) {
+ e.data = append(e.data, p...)
+ return len(p), nil
+}
+
+func (e *encBuffer) WriteString(s string) {
+ e.data = append(e.data, s...)
+}
+
+func (e *encBuffer) Len() int {
+ return len(e.data)
+}
+
+func (e *encBuffer) Bytes() []byte {
+ return e.data
+}
+
+func (e *encBuffer) Reset() {
+ e.data = e.data[0:0]
+}
+
+func (enc *Encoder) newEncoderState(b *encBuffer) *encoderState {
e := enc.freeList
if e == nil {
e = new(encoderState)
@@ -37,6 +72,9 @@ func (enc *Encoder) newEncoderState(b *bytes.Buffer) *encoderState {
e.sendZero = false
e.fieldnum = 0
e.b = b
+ if len(b.data) == 0 {
+ b.data = b.scratch[0:0]
+ }
return e
}
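
Seeding the buffer with the inline scratch array means a small top-level message is encoded without any heap allocation; only messages that outgrow 64 bytes cause append to allocate. A sketch of the trick with hypothetical names (not the package's actual types):

    type smallBuf struct {
    	data    []byte
    	scratch [64]byte
    }

    func (b *smallBuf) init() {
    	if len(b.data) == 0 {
    		b.data = b.scratch[:0] // small writes append in place, never touching the heap
    	}
    }
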
@@ -53,10 +91,7 @@ func (enc *Encoder) freeEncoderState(e *encoderState) {
// encodeUint writes an encoded unsigned integer to state.b.
func (state *encoderState) encodeUint(x uint64) {
if x <= 0x7F {
- err := state.b.WriteByte(uint8(x))
- if err != nil {
- error_(err)
- }
+ state.b.WriteByte(uint8(x))
return
}
i := uint64Size
@@ -66,10 +101,7 @@ func (state *encoderState) encodeUint(x uint64) {
i--
}
state.buf[i] = uint8(i - uint64Size) // = loop count, negated
- _, err := state.b.Write(state.buf[i : uint64Size+1])
- if err != nil {
- error_(err)
- }
+ state.b.Write(state.buf[i : uint64Size+1])
}
// encodeInt writes an encoded signed integer to state.b.
@@ -247,7 +279,7 @@ func valid(v reflect.Value) bool {
}
// encodeSingle encodes a single top-level non-struct value.
-func (enc *Encoder) encodeSingle(b *bytes.Buffer, engine *encEngine, value reflect.Value) {
+func (enc *Encoder) encodeSingle(b *encBuffer, engine *encEngine, value reflect.Value) {
state := enc.newEncoderState(b)
defer enc.freeEncoderState(state)
state.fieldnum = singletonField
@@ -264,7 +296,7 @@ func (enc *Encoder) encodeSingle(b *bytes.Buffer, engine *encEngine, value refle
}
// encodeStruct encodes a single struct value.
-func (enc *Encoder) encodeStruct(b *bytes.Buffer, engine *encEngine, value reflect.Value) {
+func (enc *Encoder) encodeStruct(b *encBuffer, engine *encEngine, value reflect.Value) {
if !valid(value) {
return
}
@@ -291,12 +323,15 @@ func (enc *Encoder) encodeStruct(b *bytes.Buffer, engine *encEngine, value refle
}
// encodeArray encodes an array.
-func (enc *Encoder) encodeArray(b *bytes.Buffer, value reflect.Value, op encOp, elemIndir int, length int) {
+func (enc *Encoder) encodeArray(b *encBuffer, value reflect.Value, op encOp, elemIndir int, length int, helper encHelper) {
state := enc.newEncoderState(b)
defer enc.freeEncoderState(state)
state.fieldnum = -1
state.sendZero = true
state.encodeUint(uint64(length))
+ if helper != nil && helper(state, value) {
+ return
+ }
for i := 0; i < length; i++ {
elem := value.Index(i)
if elemIndir > 0 {
@@ -322,7 +357,7 @@ func encodeReflectValue(state *encoderState, v reflect.Value, op encOp, indir in
}
// encodeMap encodes a map as unsigned count followed by key:value pairs.
-func (enc *Encoder) encodeMap(b *bytes.Buffer, mv reflect.Value, keyOp, elemOp encOp, keyIndir, elemIndir int) {
+func (enc *Encoder) encodeMap(b *encBuffer, mv reflect.Value, keyOp, elemOp encOp, keyIndir, elemIndir int) {
state := enc.newEncoderState(b)
state.fieldnum = -1
state.sendZero = true
@@ -340,7 +375,7 @@ func (enc *Encoder) encodeMap(b *bytes.Buffer, mv reflect.Value, keyOp, elemOp e
// by the type identifier (which might require defining that type right now), followed
// by the concrete value. A nil value gets sent as the empty string for the name,
// followed by no value.
-func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv reflect.Value) {
+func (enc *Encoder) encodeInterface(b *encBuffer, iv reflect.Value) {
// Gobs can encode nil interface values but not typed interface
// values holding nil pointers, since nil pointers point to no value.
elem := iv.Elem()
@@ -364,10 +399,7 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv reflect.Value) {
}
// Send the name.
state.encodeUint(uint64(len(name)))
- _, err := state.b.WriteString(name)
- if err != nil {
- error_(err)
- }
+ state.b.WriteString(name)
// Define the type id if necessary.
enc.sendTypeDescriptor(enc.writer(), state, ut)
// Send the type id.
@@ -375,7 +407,7 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv reflect.Value) {
// Encode the value into a new buffer. Any nested type definitions
// should be written to b, before the encoded value.
enc.pushWriter(b)
- data := new(bytes.Buffer)
+ data := new(encBuffer)
data.Write(spaceForLength)
enc.encode(data, elem, ut)
if enc.err != nil {
@@ -384,7 +416,7 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv reflect.Value) {
enc.popWriter()
enc.writeMessage(b, data)
if enc.err != nil {
- error_(err)
+ error_(enc.err)
}
enc.freeEncoderState(state)
}
@@ -426,7 +458,7 @@ func isZero(val reflect.Value) bool {
// encGobEncoder encodes a value that implements the GobEncoder interface.
// The data is sent as a byte array.
-func (enc *Encoder) encodeGobEncoder(b *bytes.Buffer, ut *userTypeInfo, v reflect.Value) {
+func (enc *Encoder) encodeGobEncoder(b *encBuffer, ut *userTypeInfo, v reflect.Value) {
// TODO: should we catch panics from the called method?
var data []byte
@@ -501,19 +533,21 @@ func encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp, building map[
}
// Slices have a header; we decode it to find the underlying array.
elemOp, elemIndir := encOpFor(t.Elem(), inProgress, building)
+ helper := encSliceHelper[t.Elem().Kind()]
op = func(i *encInstr, state *encoderState, slice reflect.Value) {
if !state.sendZero && slice.Len() == 0 {
return
}
state.update(i)
- state.enc.encodeArray(state.b, slice, *elemOp, elemIndir, slice.Len())
+ state.enc.encodeArray(state.b, slice, *elemOp, elemIndir, slice.Len(), helper)
}
case reflect.Array:
// True arrays have size in the type.
elemOp, elemIndir := encOpFor(t.Elem(), inProgress, building)
+ helper := encArrayHelper[t.Elem().Kind()]
op = func(i *encInstr, state *encoderState, array reflect.Value) {
state.update(i)
- state.enc.encodeArray(state.b, array, *elemOp, elemIndir, array.Len())
+ state.enc.encodeArray(state.b, array, *elemOp, elemIndir, array.Len(), helper)
}
case reflect.Map:
keyOp, keyIndir := encOpFor(t.Key(), inProgress, building)
@@ -644,7 +678,7 @@ func buildEncEngine(info *typeInfo, ut *userTypeInfo, building map[*typeInfo]boo
return enc
}
-func (enc *Encoder) encode(b *bytes.Buffer, value reflect.Value, ut *userTypeInfo) {
+func (enc *Encoder) encode(b *encBuffer, value reflect.Value, ut *userTypeInfo) {
defer catchError(&enc.err)
engine := getEncEngine(ut, nil)
indir := ut.indir
diff --git a/src/encoding/gob/encoder.go b/src/encoding/gob/encoder.go
index 4b5dc16c7..a340e47b5 100644
--- a/src/encoding/gob/encoder.go
+++ b/src/encoding/gob/encoder.go
@@ -5,7 +5,6 @@
package gob
import (
- "bytes"
"io"
"reflect"
"sync"
@@ -19,7 +18,7 @@ type Encoder struct {
sent map[reflect.Type]typeId // which types we've already sent
countState *encoderState // stage for writing counts
freeList *encoderState // list of free encoderStates; avoids reallocation
- byteBuf bytes.Buffer // buffer for top-level encoderState
+ byteBuf encBuffer // buffer for top-level encoderState
err error
}
@@ -34,7 +33,7 @@ func NewEncoder(w io.Writer) *Encoder {
enc := new(Encoder)
enc.w = []io.Writer{w}
enc.sent = make(map[reflect.Type]typeId)
- enc.countState = enc.newEncoderState(new(bytes.Buffer))
+ enc.countState = enc.newEncoderState(new(encBuffer))
return enc
}
@@ -60,7 +59,7 @@ func (enc *Encoder) setError(err error) {
}
// writeMessage sends the data item preceded by an unsigned count of its length.
-func (enc *Encoder) writeMessage(w io.Writer, b *bytes.Buffer) {
+func (enc *Encoder) writeMessage(w io.Writer, b *encBuffer) {
// Space has been reserved for the length at the head of the message.
// This is a little dirty: we grab the slice from the encBuffer and massage
// it by hand.
diff --git a/src/encoding/gob/timing_test.go b/src/encoding/gob/timing_test.go
index ec55c4d63..940e5ad41 100644
--- a/src/encoding/gob/timing_test.go
+++ b/src/encoding/gob/timing_test.go
@@ -131,3 +131,195 @@ func TestCountDecodeMallocs(t *testing.T) {
t.Fatalf("mallocs per decode of type Bench: %v; wanted 4\n", allocs)
}
}
+
+func BenchmarkEncodeComplex128Slice(b *testing.B) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ a := make([]complex128, 1000)
+ for i := range a {
+ a[i] = 1.2 + 3.4i
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ err := enc.Encode(a)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkEncodeFloat64Slice(b *testing.B) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ a := make([]float64, 1000)
+ for i := range a {
+ a[i] = 1.23e4
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ err := enc.Encode(a)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkEncodeInt32Slice(b *testing.B) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ a := make([]int32, 1000)
+ for i := range a {
+ a[i] = 1234
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ err := enc.Encode(a)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkEncodeStringSlice(b *testing.B) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ a := make([]string, 1000)
+ for i := range a {
+ a[i] = "now is the time"
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ err := enc.Encode(a)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// benchmarkBuf is a read buffer we can reset
+type benchmarkBuf struct {
+ offset int
+ data []byte
+}
+
+func (b *benchmarkBuf) Read(p []byte) (n int, err error) {
+ n = copy(p, b.data[b.offset:])
+ if n == 0 {
+ return 0, io.EOF
+ }
+ b.offset += n
+ return
+}
+
+func (b *benchmarkBuf) ReadByte() (c byte, err error) {
+ if b.offset >= len(b.data) {
+ return 0, io.EOF
+ }
+ c = b.data[b.offset]
+ b.offset++
+ return
+}
+
+func (b *benchmarkBuf) reset() {
+ b.offset = 0
+}
+
+func BenchmarkDecodeComplex128Slice(b *testing.B) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ a := make([]complex128, 1000)
+ for i := range a {
+ a[i] = 1.2 + 3.4i
+ }
+ err := enc.Encode(a)
+ if err != nil {
+ b.Fatal(err)
+ }
+ x := make([]complex128, 1000)
+ bbuf := benchmarkBuf{data: buf.Bytes()}
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bbuf.reset()
+ dec := NewDecoder(&bbuf)
+ err := dec.Decode(&x)
+ if err != nil {
+ b.Fatal(i, err)
+ }
+ }
+}
+
+func BenchmarkDecodeFloat64Slice(b *testing.B) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ a := make([]float64, 1000)
+ for i := range a {
+ a[i] = 1.23e4
+ }
+ err := enc.Encode(a)
+ if err != nil {
+ b.Fatal(err)
+ }
+ x := make([]float64, 1000)
+ bbuf := benchmarkBuf{data: buf.Bytes()}
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bbuf.reset()
+ dec := NewDecoder(&bbuf)
+ err := dec.Decode(&x)
+ if err != nil {
+ b.Fatal(i, err)
+ }
+ }
+}
+
+func BenchmarkDecodeInt32Slice(b *testing.B) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ a := make([]int32, 1000)
+ for i := range a {
+ a[i] = 1234
+ }
+ err := enc.Encode(a)
+ if err != nil {
+ b.Fatal(err)
+ }
+ x := make([]int32, 1000)
+ bbuf := benchmarkBuf{data: buf.Bytes()}
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bbuf.reset()
+ dec := NewDecoder(&bbuf)
+ err := dec.Decode(&x)
+ if err != nil {
+ b.Fatal(i, err)
+ }
+ }
+}
+
+func BenchmarkDecodeStringSlice(b *testing.B) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ a := make([]string, 1000)
+ for i := range a {
+ a[i] = "now is the time"
+ }
+ err := enc.Encode(a)
+ if err != nil {
+ b.Fatal(err)
+ }
+ x := make([]string, 1000)
+ bbuf := benchmarkBuf{data: buf.Bytes()}
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bbuf.reset()
+ dec := NewDecoder(&bbuf)
+ err := dec.Decode(&x)
+ if err != nil {
+ b.Fatal(i, err)
+ }
+ }
+}
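
The decode benchmarks encode the test slice once and then replay the same bytes through the resettable benchmarkBuf, so each iteration measures steady-state decoding rather than repeated encoding. They can be run with "go test -run=NONE -bench=Slice" in the encoding/gob directory.
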
diff --git a/src/encoding/json/encode.go b/src/encoding/json/encode.go
index 9b7b9d5fd..fca2a0980 100644
--- a/src/encoding/json/encode.go
+++ b/src/encoding/json/encode.go
@@ -805,6 +805,9 @@ func (e *encodeState) string(s string) (int, error) {
case '\r':
e.WriteByte('\\')
e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
default:
// This encodes bytes < 0x20 except for \n and \r,
// as well as <, > and &. The latter are escaped because they
@@ -878,9 +881,12 @@ func (e *encodeState) stringBytes(s []byte) (int, error) {
case '\r':
e.WriteByte('\\')
e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
default:
// This encodes bytes < 0x20 except for \n and \r,
- // as well as < and >. The latter are escaped because they
+ // as well as <, >, and &. The latter are escaped because they
// can lead to security holes when user-controlled strings
// are rendered into JSON and served to some browsers.
e.WriteString(`\u00`)
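
The visible effect of the change, as a quick sketch (the output matches the test table added below):

    b, _ := json.Marshal("a\tb")
    fmt.Println(string(b)) // prints "a\tb"; previously "a\u0009b"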
diff --git a/src/encoding/json/encode_test.go b/src/encoding/json/encode_test.go
index eb84cbae1..7abfa85db 100644
--- a/src/encoding/json/encode_test.go
+++ b/src/encoding/json/encode_test.go
@@ -478,3 +478,55 @@ func TestEncodePointerString(t *testing.T) {
t.Fatalf("*N = %d; want 42", *back.N)
}
}
+
+var encodeStringTests = []struct {
+ in string
+ out string
+}{
+ {"\x00", `"\u0000"`},
+ {"\x01", `"\u0001"`},
+ {"\x02", `"\u0002"`},
+ {"\x03", `"\u0003"`},
+ {"\x04", `"\u0004"`},
+ {"\x05", `"\u0005"`},
+ {"\x06", `"\u0006"`},
+ {"\x07", `"\u0007"`},
+ {"\x08", `"\u0008"`},
+ {"\x09", `"\t"`},
+ {"\x0a", `"\n"`},
+ {"\x0b", `"\u000b"`},
+ {"\x0c", `"\u000c"`},
+ {"\x0d", `"\r"`},
+ {"\x0e", `"\u000e"`},
+ {"\x0f", `"\u000f"`},
+ {"\x10", `"\u0010"`},
+ {"\x11", `"\u0011"`},
+ {"\x12", `"\u0012"`},
+ {"\x13", `"\u0013"`},
+ {"\x14", `"\u0014"`},
+ {"\x15", `"\u0015"`},
+ {"\x16", `"\u0016"`},
+ {"\x17", `"\u0017"`},
+ {"\x18", `"\u0018"`},
+ {"\x19", `"\u0019"`},
+ {"\x1a", `"\u001a"`},
+ {"\x1b", `"\u001b"`},
+ {"\x1c", `"\u001c"`},
+ {"\x1d", `"\u001d"`},
+ {"\x1e", `"\u001e"`},
+ {"\x1f", `"\u001f"`},
+}
+
+func TestEncodeString(t *testing.T) {
+ for _, tt := range encodeStringTests {
+ b, err := Marshal(tt.in)
+ if err != nil {
+ t.Errorf("Marshal(%q): %v", tt.in, err)
+ continue
+ }
+ out := string(b)
+ if out != tt.out {
+ t.Errorf("Marshal(%q) = %#q, want %#q", tt.in, out, tt.out)
+ }
+ }
+}
diff --git a/src/flag/flag.go b/src/flag/flag.go
index 323e452a8..60aef5d80 100644
--- a/src/flag/flag.go
+++ b/src/flag/flag.go
@@ -760,6 +760,7 @@ func (f *FlagSet) parseOne() (bool, error) {
}
return false, f.failf("flag provided but not defined: -%s", name)
}
+
if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
if has_value {
if err := fv.Set(value); err != nil {
diff --git a/src/fmt/doc.go b/src/fmt/doc.go
index 304b9e958..ee54463e2 100644
--- a/src/fmt/doc.go
+++ b/src/fmt/doc.go
@@ -38,8 +38,8 @@
%E scientific notation, e.g. -1234.456E+78
%f decimal point but no exponent, e.g. 123.456
%F synonym for %f
- %g whichever of %e or %f produces more compact output
- %G whichever of %E or %f produces more compact output
+ %g %e for large exponents, %f otherwise
+ %G %E for large exponents, %F otherwise
String and slice of bytes:
%s the uninterpreted bytes of the string or slice
%q a double-quoted string safely escaped with Go syntax
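
A quick illustration of the reworded verbs (values chosen for illustration):

    fmt.Printf("%g\n", 123.456) // 123.456  (small exponent: %f form)
    fmt.Printf("%g\n", 1e21)    // 1e+21    (large exponent: %e form)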
diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go
index 311e1d2c4..7af7997de 100644
--- a/src/html/template/js_test.go
+++ b/src/html/template/js_test.go
@@ -138,7 +138,7 @@ func TestJSValEscaper(t *testing.T) {
// Newlines.
{"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`},
// "\v" == "v" on IE 6 so use "\x0b" instead.
- {"\t\x0b", `"\u0009\u000b"`},
+ {"\t\x0b", `"\t\u000b"`},
{struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`},
{[]interface{}{}, "[]"},
{[]interface{}{42, "foo", nil}, `[42,"foo",null]`},
diff --git a/src/net/http/pprof/pprof.go b/src/net/http/pprof/pprof.go
index 0c7548e3e..a23f1bc4b 100644
--- a/src/net/http/pprof/pprof.go
+++ b/src/net/http/pprof/pprof.go
@@ -162,6 +162,10 @@ func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Unknown profile: %s\n", name)
return
}
+ gc, _ := strconv.Atoi(r.FormValue("gc"))
+ if name == "heap" && gc > 0 {
+ runtime.GC()
+ }
p.WriteTo(w, debug)
return
}
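
With this change the heap profile can be forced to run a garbage collection first by adding gc=1 to the query. A sketch, assuming the handlers are registered (import _ "net/http/pprof") and a server is listening on a hypothetical localhost:6060:

    resp, err := http.Get("http://localhost:6060/debug/pprof/heap?gc=1")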
diff --git a/src/net/lookup.go b/src/net/lookup.go
index 20f20578c..aeffe6c9b 100644
--- a/src/net/lookup.go
+++ b/src/net/lookup.go
@@ -40,10 +40,16 @@ func lookupIPMerge(host string) (addrs []IP, err error) {
addrsi, err, shared := lookupGroup.Do(host, func() (interface{}, error) {
return lookupIP(host)
})
+ return lookupIPReturn(addrsi, err, shared)
+}
+
+// lookupIPReturn turns the return values from singleflight.Do into
+// the return values from LookupIP.
+func lookupIPReturn(addrsi interface{}, err error, shared bool) ([]IP, error) {
if err != nil {
return nil, err
}
- addrs = addrsi.([]IP)
+ addrs := addrsi.([]IP)
if shared {
clone := make([]IP, len(addrs))
copy(clone, addrs)
@@ -52,41 +58,40 @@ func lookupIPMerge(host string) (addrs []IP, err error) {
return addrs, nil
}
+// lookupIPDeadline looks up a hostname with a deadline.
func lookupIPDeadline(host string, deadline time.Time) (addrs []IP, err error) {
if deadline.IsZero() {
return lookupIPMerge(host)
}
- // TODO(bradfitz): consider pushing the deadline down into the
- // name resolution functions. But that involves fixing it for
- // the native Go resolver, cgo, Windows, etc.
- //
- // In the meantime, just use a goroutine. Most users affected
- // by http://golang.org/issue/2631 are due to TCP connections
- // to unresponsive hosts, not DNS.
+ // We could push the deadline down into the name resolution
+ // functions. However, the most commonly used implementation
+ // calls getaddrinfo, which has no timeout.
+
timeout := deadline.Sub(time.Now())
if timeout <= 0 {
- err = errTimeout
- return
+ return nil, errTimeout
}
t := time.NewTimer(timeout)
defer t.Stop()
- type res struct {
- addrs []IP
- err error
- }
- resc := make(chan res, 1)
- go func() {
- a, err := lookupIPMerge(host)
- resc <- res{a, err}
- }()
+
+ ch := lookupGroup.DoChan(host, func() (interface{}, error) {
+ return lookupIP(host)
+ })
+
select {
case <-t.C:
- err = errTimeout
- case r := <-resc:
- addrs, err = r.addrs, r.err
+ // The DNS lookup timed out for some reason. Force
+ // future requests to start the DNS lookup again
+ // rather than waiting for the current lookup to
+ // complete. See issue 8602.
+ lookupGroup.Forget(host)
+
+ return nil, errTimeout
+
+ case r := <-ch:
+ return lookupIPReturn(r.v, r.err, r.shared)
}
- return
}
// LookupPort looks up the port for the given network and service.
diff --git a/src/net/singleflight.go b/src/net/singleflight.go
index dc58affda..bf599f0cc 100644
--- a/src/net/singleflight.go
+++ b/src/net/singleflight.go
@@ -8,10 +8,18 @@ import "sync"
// call is an in-flight or completed singleflight.Do call
type call struct {
- wg sync.WaitGroup
- val interface{}
- err error
- dups int
+ wg sync.WaitGroup
+
+ // These fields are written once before the WaitGroup is done
+ // and are only read after the WaitGroup is done.
+ val interface{}
+ err error
+
+ // These fields are read and written with the singleflight
+ // mutex held before the WaitGroup is done, and are read but
+ // not written after the WaitGroup is done.
+ dups int
+ chans []chan<- singleflightResult
}
// singleflight represents a class of work and forms a namespace in
@@ -21,6 +29,14 @@ type singleflight struct {
m map[string]*call // lazily initialized
}
+// singleflightResult holds the results of Do, so they can be passed
+// on a channel.
+type singleflightResult struct {
+ v interface{}
+ err error
+ shared bool
+}
+
// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
@@ -42,12 +58,52 @@ func (g *singleflight) Do(key string, fn func() (interface{}, error)) (v interfa
g.m[key] = c
g.mu.Unlock()
+ g.doCall(c, key, fn)
+ return c.val, c.err, c.dups > 0
+}
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+func (g *singleflight) DoChan(key string, fn func() (interface{}, error)) <-chan singleflightResult {
+ ch := make(chan singleflightResult, 1)
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ c.chans = append(c.chans, ch)
+ g.mu.Unlock()
+ return ch
+ }
+ c := &call{chans: []chan<- singleflightResult{ch}}
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ go g.doCall(c, key, fn)
+
+ return ch
+}
+
+// doCall handles the single call for a key.
+func (g *singleflight) doCall(c *call, key string, fn func() (interface{}, error)) {
c.val, c.err = fn()
c.wg.Done()
g.mu.Lock()
delete(g.m, key)
+ for _, ch := range c.chans {
+ ch <- singleflightResult{c.val, c.err, c.dups > 0}
+ }
g.mu.Unlock()
+}
- return c.val, c.err, c.dups > 0
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *singleflight) Forget(key string) {
+ g.mu.Lock()
+ delete(g.m, key)
+ g.mu.Unlock()
}
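
Taken together, the new surface mirrors its use in lookup.go above. A hedged sketch (group, timer, key, fn, and use are stand-ins, not names from the patch):

    ch := group.DoChan(key, fn)
    select {
    case <-timer.C:
        group.Forget(key) // let the next caller start a fresh call
    case r := <-ch:
        use(r.v, r.err, r.shared)
    }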
diff --git a/src/net/z_last_test.go b/src/net/z_last_test.go
index 4f6a54a56..716c103db 100644
--- a/src/net/z_last_test.go
+++ b/src/net/z_last_test.go
@@ -8,6 +8,7 @@ import (
"flag"
"fmt"
"testing"
+ "time"
)
var testDNSFlood = flag.Bool("dnsflood", false, "whether to test dns query flooding")
@@ -35,3 +36,64 @@ func TestDNSThreadLimit(t *testing.T) {
// If we're still here, it worked.
}
+
+func TestLookupIPDeadline(t *testing.T) {
+ if !*testDNSFlood {
+ t.Skip("test disabled; use -dnsflood to enable")
+ }
+
+ const N = 5000
+ const timeout = 3 * time.Second
+ c := make(chan error, 2*N)
+ for i := 0; i < N; i++ {
+ name := fmt.Sprintf("%d.net-test.golang.org", i)
+ go func() {
+ _, err := lookupIPDeadline(name, time.Now().Add(timeout/2))
+ c <- err
+ }()
+ go func() {
+ _, err := lookupIPDeadline(name, time.Now().Add(timeout))
+ c <- err
+ }()
+ }
+ qstats := struct {
+ succeeded, failed int
+ timeout, temporary, other int
+ unknown int
+ }{}
+ deadline := time.After(timeout + time.Second)
+ for i := 0; i < 2*N; i++ {
+ select {
+ case <-deadline:
+ t.Fatal("deadline exceeded")
+ case err := <-c:
+ switch err := err.(type) {
+ case nil:
+ qstats.succeeded++
+ case Error:
+ qstats.failed++
+ if err.Timeout() {
+ qstats.timeout++
+ }
+ if err.Temporary() {
+ qstats.temporary++
+ }
+ if !err.Timeout() && !err.Temporary() {
+ qstats.other++
+ }
+ default:
+ qstats.failed++
+ qstats.unknown++
+ }
+ }
+ }
+
+ // A high volume of DNS queries for a subdomain of golang.org
+ // will be rate-limited by the authoritative or recursive
+ // servers, or by a stub resolver that implements
+ // query-response rate limiting, so we can expect some queries
+ // to succeed and more to fail, including timeout, temporary
+ // and other errors. Unknown errors should never appear, but
+ // might for now due to issue 4856.
+ t.Logf("%v succeeded, %v failed (%v timeout, %v temporary, %v other, %v unknown)", qstats.succeeded, qstats.failed, qstats.timeout, qstats.temporary, qstats.other, qstats.unknown)
+}
diff --git a/src/os/dir_unix.go b/src/os/dir_unix.go
index d353e405e..589db8527 100644
--- a/src/os/dir_unix.go
+++ b/src/os/dir_unix.go
@@ -36,7 +36,7 @@ func (f *File) readdirnames(n int) (names []string, err error) {
if d.bufp >= d.nbuf {
d.bufp = 0
var errno error
- d.nbuf, errno = syscall.ReadDirent(f.fd, d.buf)
+ d.nbuf, errno = fixCount(syscall.ReadDirent(f.fd, d.buf))
if errno != nil {
return names, NewSyscallError("readdirent", errno)
}
diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go
index 5fd439b8b..bc9c00eff 100644
--- a/src/os/exec/exec_test.go
+++ b/src/os/exec/exec_test.go
@@ -258,15 +258,7 @@ var testedAlreadyLeaked = false
// basefds returns the number of expected file descriptors
// to be present in a process at start.
func basefds() uintptr {
- n := os.Stderr.Fd() + 1
-
- // Go runtime for 32-bit Plan 9 requires that /dev/bintime
- // be kept open.
- // See ../../runtime/time_plan9_386.c:/^runtime·nanotime
- if runtime.GOOS == "plan9" && runtime.GOARCH == "386" {
- n++
- }
- return n
+ return os.Stderr.Fd() + 1
}
func closeUnexpectedFds(t *testing.T, m string) {
diff --git a/src/os/file.go b/src/os/file.go
index b4a745801..e12428cbe 100644
--- a/src/os/file.go
+++ b/src/os/file.go
@@ -255,3 +255,12 @@ var lstat = Lstat
func Rename(oldpath, newpath string) error {
return rename(oldpath, newpath)
}
+
+// Many functions in package syscall return a count of -1 instead of 0.
+// Using fixCount(call()) instead of call() corrects the count.
+func fixCount(n int, err error) (int, error) {
+ if n < 0 {
+ n = 0
+ }
+ return n, err
+}
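
A contrived example of the helper (hypothetical values): io.Reader callers expect 0 <= n <= len(p), so a syscall that reports -1 alongside an error must not leak the negative count.

    n, err := fixCount(-1, syscall.EIO) // n == 0, err == syscall.EIO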
diff --git a/src/os/file_plan9.go b/src/os/file_plan9.go
index a804b8197..5efc2a4f1 100644
--- a/src/os/file_plan9.go
+++ b/src/os/file_plan9.go
@@ -244,14 +244,14 @@ func (f *File) Sync() (err error) {
// read reads up to len(b) bytes from the File.
// It returns the number of bytes read and an error, if any.
func (f *File) read(b []byte) (n int, err error) {
- return syscall.Read(f.fd, b)
+ return fixCount(syscall.Read(f.fd, b))
}
// pread reads len(b) bytes from the File starting at byte offset off.
// It returns the number of bytes read and the error, if any.
// EOF is signaled by a zero count with err set to nil.
func (f *File) pread(b []byte, off int64) (n int, err error) {
- return syscall.Pread(f.fd, b, off)
+ return fixCount(syscall.Pread(f.fd, b, off))
}
// write writes len(b) bytes to the File.
@@ -262,7 +262,7 @@ func (f *File) write(b []byte) (n int, err error) {
if len(b) == 0 {
return 0, nil
}
- return syscall.Write(f.fd, b)
+ return fixCount(syscall.Write(f.fd, b))
}
// pwrite writes len(b) bytes to the File starting at byte offset off.
@@ -273,7 +273,7 @@ func (f *File) pwrite(b []byte, off int64) (n int, err error) {
if len(b) == 0 {
return 0, nil
}
- return syscall.Pwrite(f.fd, b, off)
+ return fixCount(syscall.Pwrite(f.fd, b, off))
}
// seek sets the offset for the next Read or Write on file to offset, interpreted
diff --git a/src/os/file_posix.go b/src/os/file_posix.go
index 9cff7e5bc..fbb3b5e4d 100644
--- a/src/os/file_posix.go
+++ b/src/os/file_posix.go
@@ -18,7 +18,7 @@ func sigpipe() // implemented in package runtime
func Readlink(name string) (string, error) {
for len := 128; ; len *= 2 {
b := make([]byte, len)
- n, e := syscall.Readlink(name, b)
+ n, e := fixCount(syscall.Readlink(name, b))
if e != nil {
return "", &PathError{"readlink", name, e}
}
diff --git a/src/os/file_unix.go b/src/os/file_unix.go
index bba0d9c0f..f59d563e6 100644
--- a/src/os/file_unix.go
+++ b/src/os/file_unix.go
@@ -187,7 +187,7 @@ func (f *File) read(b []byte) (n int, err error) {
if needsMaxRW && len(b) > maxRW {
b = b[:maxRW]
}
- return syscall.Read(f.fd, b)
+ return fixCount(syscall.Read(f.fd, b))
}
// pread reads len(b) bytes from the File starting at byte offset off.
@@ -197,7 +197,7 @@ func (f *File) pread(b []byte, off int64) (n int, err error) {
if needsMaxRW && len(b) > maxRW {
b = b[:maxRW]
}
- return syscall.Pread(f.fd, b, off)
+ return fixCount(syscall.Pread(f.fd, b, off))
}
// write writes len(b) bytes to the File.
@@ -208,7 +208,7 @@ func (f *File) write(b []byte) (n int, err error) {
if needsMaxRW && len(bcap) > maxRW {
bcap = bcap[:maxRW]
}
- m, err := syscall.Write(f.fd, bcap)
+ m, err := fixCount(syscall.Write(f.fd, bcap))
n += m
// If the syscall wrote some data but not all (short write)
@@ -234,7 +234,7 @@ func (f *File) pwrite(b []byte, off int64) (n int, err error) {
if needsMaxRW && len(b) > maxRW {
b = b[:maxRW]
}
- return syscall.Pwrite(f.fd, b, off)
+ return fixCount(syscall.Pwrite(f.fd, b, off))
}
// seek sets the offset for the next Read or Write on file to offset, interpreted
diff --git a/src/os/file_windows.go b/src/os/file_windows.go
index e78d4abf6..3b5519390 100644
--- a/src/os/file_windows.go
+++ b/src/os/file_windows.go
@@ -295,7 +295,7 @@ func (f *File) read(b []byte) (n int, err error) {
if f.isConsole {
return f.readConsole(b)
}
- return syscall.Read(f.fd, b)
+ return fixCount(syscall.Read(f.fd, b))
}
// pread reads len(b) bytes from the File starting at byte offset off.
@@ -376,7 +376,7 @@ func (f *File) write(b []byte) (n int, err error) {
if f.isConsole {
return f.writeConsole(b)
}
- return syscall.Write(f.fd, b)
+ return fixCount(syscall.Write(f.fd, b))
}
// pwrite writes len(b) bytes to the File starting at byte offset off.
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index 6bdc9be9d..268a9e319 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -2502,10 +2502,21 @@ func TestAllocations(t *testing.T) {
noAlloc(t, 100, func(j int) {
var i interface{}
var v Value
- i = 42 + j
+
+ // We can uncomment this when compiler escape analysis
+ // is good enough to see that the integer assigned to i
+ // does not escape and therefore need not be allocated.
+ //
+ // i = 42 + j
+ // v = ValueOf(i)
+ // if int(v.Int()) != 42+j {
+ // panic("wrong int")
+ // }
+
+ i = func(j int) int { return j }
v = ValueOf(i)
- if int(v.Int()) != 42+j {
- panic("wrong int")
+ if v.Interface().(func(int) int)(j) != j {
+ panic("wrong result")
}
})
}
@@ -2678,6 +2689,26 @@ func TestFuncArg(t *testing.T) {
}
}
+func TestStructArg(t *testing.T) {
+ type padded struct {
+ B string
+ C int32
+ }
+ var (
+ gotA padded
+ gotB uint32
+ wantA = padded{"3", 4}
+ wantB = uint32(5)
+ )
+ f := func(a padded, b uint32) {
+ gotA, gotB = a, b
+ }
+ ValueOf(f).Call([]Value{ValueOf(wantA), ValueOf(wantB)})
+ if gotA != wantA || gotB != wantB {
+ t.Errorf("function called with (%v, %v), want (%v, %v)", gotA, gotB, wantA, wantB)
+ }
+}
+
var tagGetTests = []struct {
Tag StructTag
Key string
diff --git a/src/reflect/makefunc.go b/src/reflect/makefunc.go
index 1072c7fab..d89f7f681 100644
--- a/src/reflect/makefunc.go
+++ b/src/reflect/makefunc.go
@@ -60,7 +60,7 @@ func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
impl := &makeFuncImpl{code: code, stack: stack, typ: ftyp, fn: fn}
- return Value{t, unsafe.Pointer(impl), flag(Func) << flagKindShift}
+ return Value{t, unsafe.Pointer(impl), flag(Func)}
}
// makeFuncStub is an assembly function that is the code half of
@@ -91,7 +91,7 @@ func makeMethodValue(op string, v Value) Value {
// Ignoring the flagMethod bit, v describes the receiver, not the method type.
fl := v.flag & (flagRO | flagAddr | flagIndir)
- fl |= flag(v.typ.Kind()) << flagKindShift
+ fl |= flag(v.typ.Kind())
rcvr := Value{v.typ, v.ptr, fl}
// v.Type returns the actual type of the method value.
@@ -118,7 +118,7 @@ func makeMethodValue(op string, v Value) Value {
// but we want Interface() and other operations to fail early.
methodReceiver(op, fv.rcvr, fv.method)
- return Value{funcType, unsafe.Pointer(fv), v.flag&flagRO | flag(Func)<<flagKindShift}
+ return Value{funcType, unsafe.Pointer(fv), v.flag&flagRO | flag(Func)}
}
// methodValueCall is an assembly function that is the code half of
diff --git a/src/reflect/type.go b/src/reflect/type.go
index 26328e74b..2064922f6 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -490,7 +490,7 @@ func (t *uncommonType) Method(i int) (m Method) {
if p.name != nil {
m.Name = *p.name
}
- fl := flag(Func) << flagKindShift
+ fl := flag(Func)
if p.pkgPath != nil {
m.PkgPath = *p.pkgPath
fl |= flagRO
@@ -1540,6 +1540,7 @@ func (gc *gcProg) appendProg(t *rtype) {
for i := 0; i < c; i++ {
gc.appendProg(t.Field(i).Type.common())
}
+ gc.align(uintptr(t.align))
}
}
diff --git a/src/reflect/value.go b/src/reflect/value.go
index c6e8038eb..43843e963 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -61,18 +61,17 @@ type Value struct {
type flag uintptr
const (
- flagRO flag = 1 << iota
- flagIndir
- flagAddr
- flagMethod
- flagKindShift = iota
flagKindWidth = 5 // there are 27 kinds
flagKindMask flag = 1<<flagKindWidth - 1
- flagMethodShift = flagKindShift + flagKindWidth
+ flagRO flag = 1 << 5
+ flagIndir flag = 1 << 6
+ flagAddr flag = 1 << 7
+ flagMethod flag = 1 << 8
+ flagMethodShift = 9
)
func (f flag) kind() Kind {
- return Kind((f >> flagKindShift) & flagKindMask)
+ return Kind(f & flagKindMask)
}
// pointer returns the underlying pointer represented by v.
@@ -107,14 +106,14 @@ func packEface(v Value) interface{} {
memmove(c, ptr, t.size)
ptr = c
}
- e.word = iword(ptr)
+ e.word = ptr
case v.flag&flagIndir != 0:
// Value is indirect, but interface is direct. We need
// to load the data at v.ptr into the interface data word.
- e.word = iword(*(*unsafe.Pointer)(v.ptr))
+ e.word = *(*unsafe.Pointer)(v.ptr)
default:
// Value is direct, and so is the interface.
- e.word = iword(v.ptr)
+ e.word = v.ptr
}
// Now, fill in the type portion. We're very careful here not
// to have any operation between the e.word and e.typ assignments
@@ -132,7 +131,7 @@ func unpackEface(i interface{}) Value {
if t == nil {
return Value{}
}
- f := flag(t.Kind()) << flagKindShift
+ f := flag(t.Kind())
if ifaceIndir(t) {
f |= flagIndir
}
@@ -165,20 +164,10 @@ func methodName() string {
return f.Name()
}
-// An iword is the word that would be stored in an
-// interface to represent a given value v. Specifically, if v is
-// bigger than a pointer, its word is a pointer to v's data.
-// Otherwise, its word holds the data stored
-// in its leading bytes (so is not a pointer).
-// This type is very dangerous for the garbage collector because
-// it must be treated conservatively. We try to never expose it
-// to the GC here so that GC remains precise.
-type iword unsafe.Pointer
-
// emptyInterface is the header for an interface{} value.
type emptyInterface struct {
typ *rtype
- word iword
+ word unsafe.Pointer
}
// nonEmptyInterface is the header for a interface value with methods.
@@ -192,7 +181,7 @@ type nonEmptyInterface struct {
unused int32
fun [100000]unsafe.Pointer // method table
}
- word iword
+ word unsafe.Pointer
}
// mustBe panics if f's kind is not expected.
@@ -202,9 +191,8 @@ type nonEmptyInterface struct {
// v.flag.mustBe(Bool), which will only bother to copy the
// single important word for the receiver.
func (f flag) mustBe(expected Kind) {
- k := f.kind()
- if k != expected {
- panic(&ValueError{methodName(), k})
+ if f.kind() != expected {
+ panic(&ValueError{methodName(), f.kind()})
}
}
@@ -244,7 +232,7 @@ func (v Value) Addr() Value {
if v.flag&flagAddr == 0 {
panic("reflect.Value.Addr of unaddressable value")
}
- return Value{v.typ.ptrTo(), v.ptr, (v.flag & flagRO) | flag(Ptr)<<flagKindShift}
+ return Value{v.typ.ptrTo(), v.ptr, (v.flag & flagRO) | flag(Ptr)}
}
// Bool returns v's underlying value.
@@ -418,7 +406,7 @@ func (v Value) call(op string, in []Value) []Value {
off = (off + a - 1) &^ (a - 1)
n := targ.size
addr := unsafe.Pointer(uintptr(args) + off)
- v = v.assignTo("reflect.Value.Call", targ, (*interface{})(addr))
+ v = v.assignTo("reflect.Value.Call", targ, addr)
if v.flag&flagIndir != 0 {
memmove(addr, v.ptr, n)
} else {
@@ -442,7 +430,7 @@ func (v Value) call(op string, in []Value) []Value {
tv := t.Out(i)
a := uintptr(tv.Align())
off = (off + a - 1) &^ (a - 1)
- fl := flagIndir | flag(tv.Kind())<<flagKindShift
+ fl := flagIndir | flag(tv.Kind())
ret[i] = Value{tv.common(), unsafe.Pointer(uintptr(args) + off), fl}
off += tv.Size()
}
@@ -474,7 +462,7 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) {
typ := arg
off += -off & uintptr(typ.align-1)
addr := unsafe.Pointer(uintptr(ptr) + off)
- v := Value{typ, nil, flag(typ.Kind()) << flagKindShift}
+ v := Value{typ, nil, flag(typ.Kind())}
if ifaceIndir(typ) {
// value cannot be inlined in interface data.
// Must make a copy, because f might keep a reference to it,
@@ -537,7 +525,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn
i := methodIndex
if v.typ.Kind() == Interface {
tt := (*interfaceType)(unsafe.Pointer(v.typ))
- if i < 0 || i >= len(tt.methods) {
+ if uint(i) >= uint(len(tt.methods)) {
panic("reflect: internal error: invalid method index")
}
m := &tt.methods[i]
@@ -554,7 +542,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn
} else {
rcvrtype = v.typ
ut := v.typ.uncommon()
- if ut == nil || i < 0 || i >= len(ut.methods) {
+ if ut == nil || uint(i) >= uint(len(ut.methods)) {
panic("reflect: internal error: invalid method index")
}
m := &ut.methods[i]
@@ -652,7 +640,7 @@ func (v Value) Cap() int {
// Slice is always bigger than a word; assume flagIndir.
return (*sliceHeader)(v.ptr).Cap
}
- panic(&ValueError{"reflect.Value.Cap", k})
+ panic(&ValueError{"reflect.Value.Cap", v.kind()})
}
// Close closes the channel v.
@@ -673,7 +661,7 @@ func (v Value) Complex() complex128 {
case Complex128:
return *(*complex128)(v.ptr)
}
- panic(&ValueError{"reflect.Value.Complex", k})
+ panic(&ValueError{"reflect.Value.Complex", v.kind()})
}
// Elem returns the value that the interface v contains
@@ -709,42 +697,37 @@ func (v Value) Elem() Value {
tt := (*ptrType)(unsafe.Pointer(v.typ))
typ := tt.elem
fl := v.flag&flagRO | flagIndir | flagAddr
- fl |= flag(typ.Kind() << flagKindShift)
+ fl |= flag(typ.Kind())
return Value{typ, ptr, fl}
}
- panic(&ValueError{"reflect.Value.Elem", k})
+ panic(&ValueError{"reflect.Value.Elem", v.kind()})
}
// Field returns the i'th field of the struct v.
// It panics if v's Kind is not Struct or i is out of range.
func (v Value) Field(i int) Value {
- v.mustBe(Struct)
+ if v.kind() != Struct {
+ panic(&ValueError{"reflect.Value.Field", v.kind()})
+ }
tt := (*structType)(unsafe.Pointer(v.typ))
- if i < 0 || i >= len(tt.fields) {
+ if uint(i) >= uint(len(tt.fields)) {
panic("reflect: Field index out of range")
}
field := &tt.fields[i]
typ := field.typ
// Inherit permission bits from v.
- fl := v.flag & (flagRO | flagIndir | flagAddr)
+ fl := v.flag&(flagRO|flagIndir|flagAddr) | flag(typ.Kind())
// Using an unexported field forces flagRO.
if field.pkgPath != nil {
fl |= flagRO
}
- fl |= flag(typ.Kind()) << flagKindShift
-
- var ptr unsafe.Pointer
- if fl&flagIndir != 0 {
- // Indirect. Just bump pointer.
- ptr = unsafe.Pointer(uintptr(v.ptr) + field.offset)
- } else {
- if field.offset != 0 {
- panic("field access of ptr value isn't at offset 0")
- }
- ptr = v.ptr
- }
-
+ // Either flagIndir is set and v.ptr points at struct,
+ // or flagIndir is not set and v.ptr is the actual struct data.
+ // In the former case, we want v.ptr + offset.
+ // In the latter case, we must have field.offset = 0,
+ // so v.ptr + field.offset is still okay.
+ ptr := unsafe.Pointer(uintptr(v.ptr) + field.offset)
return Value{typ, ptr, fl}
}
@@ -785,7 +768,6 @@ func (v Value) FieldByName(name string) Value {
// It panics if v's Kind is not struct.
// It returns the zero Value if no field was found.
func (v Value) FieldByNameFunc(match func(string) bool) Value {
- v.mustBe(Struct)
if f, ok := v.typ.FieldByNameFunc(match); ok {
return v.FieldByIndex(f.Index)
}
@@ -802,7 +784,7 @@ func (v Value) Float() float64 {
case Float64:
return *(*float64)(v.ptr)
}
- panic(&ValueError{"reflect.Value.Float", k})
+ panic(&ValueError{"reflect.Value.Float", v.kind()})
}
var uint8Type = TypeOf(uint8(0)).(*rtype)
@@ -810,60 +792,47 @@ var uint8Type = TypeOf(uint8(0)).(*rtype)
// Index returns v's i'th element.
// It panics if v's Kind is not Array, Slice, or String or i is out of range.
func (v Value) Index(i int) Value {
- k := v.kind()
- switch k {
+ switch v.kind() {
case Array:
tt := (*arrayType)(unsafe.Pointer(v.typ))
- if i < 0 || i > int(tt.len) {
+ if uint(i) >= uint(tt.len) {
panic("reflect: array index out of range")
}
typ := tt.elem
- fl := v.flag & (flagRO | flagIndir | flagAddr) // bits same as overall array
- fl |= flag(typ.Kind()) << flagKindShift
offset := uintptr(i) * typ.size
- var val unsafe.Pointer
- if fl&flagIndir != 0 {
- // Indirect. Just bump pointer.
- val = unsafe.Pointer(uintptr(v.ptr) + offset)
- } else {
- if offset != 0 {
- // This is an array stored inline in an interface value.
- // And the array element type has pointers.
- // Since the inline storage space is only a single word,
- // this implies we must be holding an array of length 1
- // with an element type that is a single pointer.
- // If the offset is not 0, something has gone wrong.
- panic("reflect: internal error: unexpected array index")
- }
- val = v.ptr
- }
+ // Either flagIndir is set and v.ptr points at array,
+ // or flagIndir is not set and v.ptr is the actual array data.
+ // In the former case, we want v.ptr + offset.
+ // In the latter case, we must be doing Index(0), so offset = 0,
+ // so v.ptr + offset is still okay.
+ val := unsafe.Pointer(uintptr(v.ptr) + offset)
+ fl := v.flag&(flagRO|flagIndir|flagAddr) | flag(typ.Kind()) // bits same as overall array
return Value{typ, val, fl}
case Slice:
// Element flag same as Elem of Ptr.
// Addressable, indirect, possibly read-only.
- fl := flagAddr | flagIndir | v.flag&flagRO
s := (*sliceHeader)(v.ptr)
- if i < 0 || i >= s.Len {
+ if uint(i) >= uint(s.Len) {
panic("reflect: slice index out of range")
}
tt := (*sliceType)(unsafe.Pointer(v.typ))
typ := tt.elem
- fl |= flag(typ.Kind()) << flagKindShift
val := unsafe.Pointer(uintptr(s.Data) + uintptr(i)*typ.size)
+ fl := flagAddr | flagIndir | v.flag&flagRO | flag(typ.Kind())
return Value{typ, val, fl}
case String:
- fl := v.flag&flagRO | flag(Uint8<<flagKindShift) | flagIndir
s := (*stringHeader)(v.ptr)
- if i < 0 || i >= s.Len {
+ if uint(i) >= uint(s.Len) {
panic("reflect: string index out of range")
}
p := unsafe.Pointer(uintptr(s.Data) + uintptr(i))
+ fl := v.flag&flagRO | flag(Uint8) | flagIndir
return Value{uint8Type, p, fl}
}
- panic(&ValueError{"reflect.Value.Index", k})
+ panic(&ValueError{"reflect.Value.Index", v.kind()})
}
// Int returns v's underlying value, as an int64.
@@ -883,7 +852,7 @@ func (v Value) Int() int64 {
case Int64:
return int64(*(*int64)(p))
}
- panic(&ValueError{"reflect.Value.Int", k})
+ panic(&ValueError{"reflect.Value.Int", v.kind()})
}
// CanInterface returns true if Interface can be used without panicking.
@@ -970,7 +939,7 @@ func (v Value) IsNil() bool {
// Both are always bigger than a word; assume flagIndir.
return *(*unsafe.Pointer)(v.ptr) == nil
}
- panic(&ValueError{"reflect.Value.IsNil", k})
+ panic(&ValueError{"reflect.Value.IsNil", v.kind()})
}
// IsValid returns true if v represents a value.
@@ -1007,7 +976,7 @@ func (v Value) Len() int {
// String is bigger than a word; assume flagIndir.
return (*stringHeader)(v.ptr).Len
}
- panic(&ValueError{"reflect.Value.Len", k})
+ panic(&ValueError{"reflect.Value.Len", v.kind()})
}
// MapIndex returns the value associated with key in the map v.
@@ -1039,7 +1008,7 @@ func (v Value) MapIndex(key Value) Value {
}
typ := tt.elem
fl := (v.flag | key.flag) & flagRO
- fl |= flag(typ.Kind()) << flagKindShift
+ fl |= flag(typ.Kind())
if ifaceIndir(typ) {
// Copy result so future changes to the map
// won't change the underlying value.
@@ -1060,7 +1029,7 @@ func (v Value) MapKeys() []Value {
tt := (*mapType)(unsafe.Pointer(v.typ))
keyType := tt.key
- fl := v.flag&flagRO | flag(keyType.Kind())<<flagKindShift
+ fl := v.flag&flagRO | flag(keyType.Kind())
m := v.pointer()
mlen := int(0)
@@ -1100,14 +1069,14 @@ func (v Value) Method(i int) Value {
if v.typ == nil {
panic(&ValueError{"reflect.Value.Method", Invalid})
}
- if v.flag&flagMethod != 0 || i < 0 || i >= v.typ.NumMethod() {
+ if v.flag&flagMethod != 0 || uint(i) >= uint(v.typ.NumMethod()) {
panic("reflect: Method index out of range")
}
if v.typ.Kind() == Interface && v.IsNil() {
panic("reflect: Method on nil interface value")
}
fl := v.flag & (flagRO | flagIndir)
- fl |= flag(Func) << flagKindShift
+ fl |= flag(Func)
fl |= flag(i)<<flagMethodShift | flagMethod
return Value{v.typ, v.ptr, fl}
}
@@ -1160,7 +1129,7 @@ func (v Value) OverflowComplex(x complex128) bool {
case Complex128:
return false
}
- panic(&ValueError{"reflect.Value.OverflowComplex", k})
+ panic(&ValueError{"reflect.Value.OverflowComplex", v.kind()})
}
// OverflowFloat returns true if the float64 x cannot be represented by v's type.
@@ -1173,7 +1142,7 @@ func (v Value) OverflowFloat(x float64) bool {
case Float64:
return false
}
- panic(&ValueError{"reflect.Value.OverflowFloat", k})
+ panic(&ValueError{"reflect.Value.OverflowFloat", v.kind()})
}
func overflowFloat32(x float64) bool {
@@ -1193,7 +1162,7 @@ func (v Value) OverflowInt(x int64) bool {
trunc := (x << (64 - bitSize)) >> (64 - bitSize)
return x != trunc
}
- panic(&ValueError{"reflect.Value.OverflowInt", k})
+ panic(&ValueError{"reflect.Value.OverflowInt", v.kind()})
}
// OverflowUint returns true if the uint64 x cannot be represented by v's type.
@@ -1206,7 +1175,7 @@ func (v Value) OverflowUint(x uint64) bool {
trunc := (x << (64 - bitSize)) >> (64 - bitSize)
return x != trunc
}
- panic(&ValueError{"reflect.Value.OverflowUint", k})
+ panic(&ValueError{"reflect.Value.OverflowUint", v.kind()})
}
// Pointer returns v's value as a uintptr.
@@ -1251,7 +1220,7 @@ func (v Value) Pointer() uintptr {
case Slice:
return (*SliceHeader)(v.ptr).Data
}
- panic(&ValueError{"reflect.Value.Pointer", k})
+ panic(&ValueError{"reflect.Value.Pointer", v.kind()})
}
// Recv receives and returns a value from the channel v.
@@ -1273,7 +1242,7 @@ func (v Value) recv(nb bool) (val Value, ok bool) {
panic("reflect: recv on send-only channel")
}
t := tt.elem
- val = Value{t, nil, flag(t.Kind()) << flagKindShift}
+ val = Value{t, nil, flag(t.Kind())}
var p unsafe.Pointer
if ifaceIndir(t) {
p = unsafe_New(t)
@@ -1322,9 +1291,9 @@ func (v Value) send(x Value, nb bool) (selected bool) {
func (v Value) Set(x Value) {
v.mustBeAssignable()
x.mustBeExported() // do not let unexported x leak
- var target *interface{}
+ var target unsafe.Pointer
if v.kind() == Interface {
- target = (*interface{})(v.ptr)
+ target = v.ptr
}
x = x.assignTo("reflect.Set", v.typ, target)
if x.flag&flagIndir != 0 {
@@ -1370,7 +1339,7 @@ func (v Value) SetComplex(x complex128) {
v.mustBeAssignable()
switch k := v.kind(); k {
default:
- panic(&ValueError{"reflect.Value.SetComplex", k})
+ panic(&ValueError{"reflect.Value.SetComplex", v.kind()})
case Complex64:
*(*complex64)(v.ptr) = complex64(x)
case Complex128:
@@ -1384,7 +1353,7 @@ func (v Value) SetFloat(x float64) {
v.mustBeAssignable()
switch k := v.kind(); k {
default:
- panic(&ValueError{"reflect.Value.SetFloat", k})
+ panic(&ValueError{"reflect.Value.SetFloat", v.kind()})
case Float32:
*(*float32)(v.ptr) = float32(x)
case Float64:
@@ -1398,7 +1367,7 @@ func (v Value) SetInt(x int64) {
v.mustBeAssignable()
switch k := v.kind(); k {
default:
- panic(&ValueError{"reflect.Value.SetInt", k})
+ panic(&ValueError{"reflect.Value.SetInt", v.kind()})
case Int:
*(*int)(v.ptr) = int(x)
case Int8:
@@ -1419,7 +1388,7 @@ func (v Value) SetLen(n int) {
v.mustBeAssignable()
v.mustBe(Slice)
s := (*sliceHeader)(v.ptr)
- if n < 0 || n > int(s.Cap) {
+ if uint(n) > uint(s.Cap) {
panic("reflect: slice length out of range in SetLen")
}
s.Len = n
@@ -1477,7 +1446,7 @@ func (v Value) SetUint(x uint64) {
v.mustBeAssignable()
switch k := v.kind(); k {
default:
- panic(&ValueError{"reflect.Value.SetUint", k})
+ panic(&ValueError{"reflect.Value.SetUint", v.kind()})
case Uint:
*(*uint)(v.ptr) = uint(x)
case Uint8:
@@ -1520,7 +1489,7 @@ func (v Value) Slice(i, j int) Value {
)
switch kind := v.kind(); kind {
default:
- panic(&ValueError{"reflect.Value.Slice", kind})
+ panic(&ValueError{"reflect.Value.Slice", v.kind()})
case Array:
if v.flag&flagAddr == 0 {
@@ -1564,7 +1533,7 @@ func (v Value) Slice(i, j int) Value {
s.Data = base
}
- fl := v.flag&flagRO | flagIndir | flag(Slice)<<flagKindShift
+ fl := v.flag&flagRO | flagIndir | flag(Slice)
return Value{typ.common(), unsafe.Pointer(&x), fl}
}
@@ -1579,7 +1548,7 @@ func (v Value) Slice3(i, j, k int) Value {
)
switch kind := v.kind(); kind {
default:
- panic(&ValueError{"reflect.Value.Slice3", kind})
+ panic(&ValueError{"reflect.Value.Slice3", v.kind()})
case Array:
if v.flag&flagAddr == 0 {
@@ -1616,7 +1585,7 @@ func (v Value) Slice3(i, j, k int) Value {
s.Data = base
}
- fl := v.flag&flagRO | flagIndir | flag(Slice)<<flagKindShift
+ fl := v.flag&flagRO | flagIndir | flag(Slice)
return Value{typ.common(), unsafe.Pointer(&x), fl}
}
@@ -1674,7 +1643,7 @@ func (v Value) Type() Type {
if v.typ.Kind() == Interface {
// Method on interface.
tt := (*interfaceType)(unsafe.Pointer(v.typ))
- if i < 0 || i >= len(tt.methods) {
+ if uint(i) >= uint(len(tt.methods)) {
panic("reflect: internal error: invalid method index")
}
m := &tt.methods[i]
@@ -1682,7 +1651,7 @@ func (v Value) Type() Type {
}
// Method on concrete type.
ut := v.typ.uncommon()
- if ut == nil || i < 0 || i >= len(ut.methods) {
+ if ut == nil || uint(i) >= uint(len(ut.methods)) {
panic("reflect: internal error: invalid method index")
}
m := &ut.methods[i]
@@ -1708,7 +1677,7 @@ func (v Value) Uint() uint64 {
case Uintptr:
return uint64(*(*uintptr)(p))
}
- panic(&ValueError{"reflect.Value.Uint", k})
+ panic(&ValueError{"reflect.Value.Uint", v.kind()})
}
// UnsafeAddr returns a pointer to v's data.
@@ -1998,7 +1967,7 @@ func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
tt := (*chanType)(unsafe.Pointer(runcases[chosen].typ))
t := tt.elem
p := runcases[chosen].val
- fl := flag(t.Kind()) << flagKindShift
+ fl := flag(t.Kind())
if ifaceIndir(t) {
recv = Value{t, p, fl | flagIndir}
} else {
@@ -2033,7 +2002,7 @@ func MakeSlice(typ Type, len, cap int) Value {
}
s := sliceHeader{unsafe_NewArray(typ.Elem().(*rtype), cap), len, cap}
- return Value{typ.common(), unsafe.Pointer(&s), flagIndir | flag(Slice)<<flagKindShift}
+ return Value{typ.common(), unsafe.Pointer(&s), flagIndir | flag(Slice)}
}
// MakeChan creates a new channel with the specified type and buffer size.
@@ -2048,7 +2017,7 @@ func MakeChan(typ Type, buffer int) Value {
panic("reflect.MakeChan: unidirectional channel type")
}
ch := makechan(typ.(*rtype), uint64(buffer))
- return Value{typ.common(), ch, flag(Chan) << flagKindShift}
+ return Value{typ.common(), ch, flag(Chan)}
}
// MakeMap creates a new map of the specified type.
@@ -2057,7 +2026,7 @@ func MakeMap(typ Type) Value {
panic("reflect.MakeMap of non-map type")
}
m := makemap(typ.(*rtype))
- return Value{typ.common(), m, flag(Map) << flagKindShift}
+ return Value{typ.common(), m, flag(Map)}
}
// Indirect returns the value that v points to.
@@ -2097,7 +2066,7 @@ func Zero(typ Type) Value {
panic("reflect: Zero(nil)")
}
t := typ.common()
- fl := flag(t.Kind()) << flagKindShift
+ fl := flag(t.Kind())
if ifaceIndir(t) {
return Value{t, unsafe_New(typ.(*rtype)), fl | flagIndir}
}
@@ -2111,21 +2080,21 @@ func New(typ Type) Value {
panic("reflect: New(nil)")
}
ptr := unsafe_New(typ.(*rtype))
- fl := flag(Ptr) << flagKindShift
+ fl := flag(Ptr)
return Value{typ.common().ptrTo(), ptr, fl}
}
// NewAt returns a Value representing a pointer to a value of the
// specified type, using p as that pointer.
func NewAt(typ Type, p unsafe.Pointer) Value {
- fl := flag(Ptr) << flagKindShift
+ fl := flag(Ptr)
return Value{typ.common().ptrTo(), p, fl}
}
// assignTo returns a value v that can be assigned directly to typ.
// It panics if v is not assignable to typ.
// For a conversion to an interface type, target is a suggested scratch space to use.
-func (v Value) assignTo(context string, dst *rtype, target *interface{}) Value {
+func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value {
if v.flag&flagMethod != 0 {
v = makeMethodValue(context, v)
}
@@ -2136,20 +2105,20 @@ func (v Value) assignTo(context string, dst *rtype, target *interface{}) Value {
// Same memory layout, so no harm done.
v.typ = dst
fl := v.flag & (flagRO | flagAddr | flagIndir)
- fl |= flag(dst.Kind()) << flagKindShift
+ fl |= flag(dst.Kind())
return Value{dst, v.ptr, fl}
case implements(dst, v.typ):
if target == nil {
- target = new(interface{})
+ target = unsafe_New(dst)
}
x := valueInterface(v, false)
if dst.NumMethod() == 0 {
- *target = x
+ *(*interface{})(target) = x
} else {
- ifaceE2I(dst, x, unsafe.Pointer(target))
+ ifaceE2I(dst, x, target)
}
- return Value{dst, unsafe.Pointer(target), flagIndir | flag(Interface)<<flagKindShift}
+ return Value{dst, target, flagIndir | flag(Interface)}
}
// Failed.
@@ -2268,7 +2237,7 @@ func makeInt(f flag, bits uint64, t Type) Value {
case 8:
*(*uint64)(unsafe.Pointer(ptr)) = bits
}
- return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
}
// makeFloat returns a Value of type t equal to v (possibly truncated to float32),
@@ -2282,7 +2251,7 @@ func makeFloat(f flag, v float64, t Type) Value {
case 8:
*(*float64)(unsafe.Pointer(ptr)) = v
}
- return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
}
// makeComplex returns a Value of type t equal to v (possibly truncated to complex64),
@@ -2296,7 +2265,7 @@ func makeComplex(f flag, v complex128, t Type) Value {
case 16:
*(*complex128)(unsafe.Pointer(ptr)) = v
}
- return Value{typ, ptr, f | flagIndir | flag(typ.Kind())<<flagKindShift}
+ return Value{typ, ptr, f | flagIndir | flag(typ.Kind())}
}
func makeString(f flag, v string, t Type) Value {
@@ -2412,14 +2381,14 @@ func cvtDirect(v Value, typ Type) Value {
// convertOp: concrete -> interface
func cvtT2I(v Value, typ Type) Value {
- target := new(interface{})
+ target := unsafe_New(typ.common())
x := valueInterface(v, false)
if typ.NumMethod() == 0 {
- *target = x
+ *(*interface{})(target) = x
} else {
- ifaceE2I(typ.(*rtype), x, unsafe.Pointer(target))
+ ifaceE2I(typ.(*rtype), x, target)
}
- return Value{typ.common(), unsafe.Pointer(target), v.flag&flagRO | flagIndir | flag(Interface)<<flagKindShift}
+ return Value{typ.common(), target, v.flag&flagRO | flagIndir | flag(Interface)}
}
// convertOp: interface -> interface
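
The net effect in reflect/value.go is that the kind now occupies the low five bits of flag, so flag(Kind) needs no shift. A sketch of the resulting layout, restated from the constants in the patch:

    bits 0-4:  kind            (flagKindMask)
    bit  5:    flagRO
    bit  6:    flagIndir
    bit  7:    flagAddr
    bit  8:    flagMethod
    bits 9...: method index    (>> flagMethodShift)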
diff --git a/src/regexp/all_test.go b/src/regexp/all_test.go
index 5fadb67c0..01ea3742a 100644
--- a/src/regexp/all_test.go
+++ b/src/regexp/all_test.go
@@ -6,6 +6,7 @@ package regexp
import (
"reflect"
+ "regexp/syntax"
"strings"
"testing"
)
@@ -473,12 +474,19 @@ func TestSplit(t *testing.T) {
}
}
-// This ran out of stack before issue 7608 was fixed.
+// Check that one-pass cutoff does trigger.
func TestOnePassCutoff(t *testing.T) {
- if testing.Short() {
- t.Skip("Skipping in short mode")
+ re, err := syntax.Parse(`^x{1,1000}y{1,1000}$`, syntax.Perl)
+ if err != nil {
+ t.Fatalf("parse: %v", err)
+ }
+ p, err := syntax.Compile(re.Simplify())
+ if err != nil {
+ t.Fatalf("compile: %v", err)
+ }
+ if compileOnePass(p) != notOnePass {
+ t.Fatalf("makeOnePass succeeded; wanted notOnePass")
}
- MustCompile(`^(?:x{1,1000}){1,1000}$`)
}
func BenchmarkLiteral(b *testing.B) {
diff --git a/src/regexp/regexp.go b/src/regexp/regexp.go
index 0b8336a04..b615acdf0 100644
--- a/src/regexp/regexp.go
+++ b/src/regexp/regexp.go
@@ -452,7 +452,7 @@ func (re *Regexp) ReplaceAllString(src, repl string) string {
return string(b)
}
-// ReplaceAllStringLiteral returns a copy of src, replacing matches of the Regexp
+// ReplaceAllLiteralString returns a copy of src, replacing matches of the Regexp
// with the replacement string repl. The replacement repl is substituted directly,
// without using Expand.
func (re *Regexp) ReplaceAllLiteralString(src, repl string) string {
diff --git a/src/regexp/syntax/parse.go b/src/regexp/syntax/parse.go
index 3dc8ccf50..d579a4069 100644
--- a/src/regexp/syntax/parse.go
+++ b/src/regexp/syntax/parse.go
@@ -272,13 +272,18 @@ func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) (
func repeatIsValid(re *Regexp, n int) bool {
if re.Op == OpRepeat {
m := re.Max
+ if m == 0 {
+ return true
+ }
if m < 0 {
m = re.Min
}
if m > n {
return false
}
- n /= m
+ if m > 0 {
+ n /= m
+ }
}
for _, sub := range re.Sub {
if !repeatIsValid(sub, n) {
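
The new m == 0 checks guard against a division by zero when a sub-expression has a zero repeat count. Presumably a pattern along these lines exercised the bug (illustrative, not a confirmed repro):

    regexp.MustCompile(`(x{0}){2}`) // re.Max == 0 for the inner repeat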
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index b0ed2d8ce..0d46a9eff 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -732,6 +732,20 @@ needm:
MOVL g(CX), BP
MOVL g_m(BP), BP
+ // Set m->sched.sp = SP, so that if a panic happens
+ // during the function we are about to execute, it will
+ // have a valid SP to run on the g0 stack.
+ // The next few lines (after the havem label)
+ // will save this SP onto the stack and then write
+ // the same SP back to m->sched.sp. That seems redundant,
+ // but if an unrecovered panic happens, unwindm will
+ // restore the g->sched.sp from the stack location
+ // and then onM will try to use it. If we don't set it here,
+ // that restored SP will be uninitialized (typically 0) and
+ // will not be usable.
+ MOVL m_g0(BP), SI
+ MOVL SP, (g_sched+gobuf_sp)(SI)
+
havem:
// Now there's a valid m, and we're running on its m->g0.
// Save current m->g0->sched.sp on stack and then set it to SP.
@@ -871,12 +885,6 @@ TEXT runtime·cputicks(SB),NOSPLIT,$0-8
MOVL DX, ret_hi+4(FP)
RET
-TEXT runtime·gocputicks(SB),NOSPLIT,$0-8
- RDTSC
- MOVL AX, ret_lo+0(FP)
- MOVL DX, ret_hi+4(FP)
- RET
-
TEXT runtime·ldt0setup(SB),NOSPLIT,$16-0
// set up ldt 7 to point at tls0
// ldt 1 would be fine on Linux, but on OS X, 7 is as low as we can go.
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 2ee331208..a9b082beb 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -717,6 +717,20 @@ needm:
get_tls(CX)
MOVQ g(CX), BP
MOVQ g_m(BP), BP
+
+ // Set m->sched.sp = SP, so that if a panic happens
+ // during the function we are about to execute, it will
+ // have a valid SP to run on the g0 stack.
+ // The next few lines (after the havem label)
+ // will save this SP onto the stack and then write
+ // the same SP back to m->sched.sp. That seems redundant,
+ // but if an unrecovered panic happens, unwindm will
+ // restore the g->sched.sp from the stack location
+ // and then onM will try to use it. If we don't set it here,
+ // that restored SP will be uninitialized (typically 0) and
+ // will not be usable.
+ MOVQ m_g0(BP), SI
+ MOVQ SP, (g_sched+gobuf_sp)(SI)
havem:
// Now there's a valid m, and we're running on its m->g0.
@@ -855,13 +869,6 @@ TEXT runtime·cputicks(SB),NOSPLIT,$0-0
MOVQ AX, ret+0(FP)
RET
-TEXT runtime·gocputicks(SB),NOSPLIT,$0-8
- RDTSC
- SHLQ $32, DX
- ADDQ DX, AX
- MOVQ AX, ret+0(FP)
- RET
-
// hash function using AES hardware instructions
TEXT runtime·aeshash(SB),NOSPLIT,$0-32
MOVQ p+0(FP), AX // ptr to data
diff --git a/src/runtime/asm_amd64p32.s b/src/runtime/asm_amd64p32.s
index e27f67e1e..28875bc55 100644
--- a/src/runtime/asm_amd64p32.s
+++ b/src/runtime/asm_amd64p32.s
@@ -657,13 +657,6 @@ TEXT runtime·cputicks(SB),NOSPLIT,$0-0
MOVQ AX, ret+0(FP)
RET
-TEXT runtime·gocputicks(SB),NOSPLIT,$0-8
- RDTSC
- SHLQ $32, DX
- ADDQ DX, AX
- MOVQ AX, ret+0(FP)
- RET
-
// hash function using AES hardware instructions
// For now, our one amd64p32 system (NaCl) does not
// support using AES instructions, so have not bothered to
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index b21441488..e94b4c1ff 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -556,6 +556,21 @@ TEXT ·cgocallback_gofunc(SB),NOSPLIT,$8-12
MOVW $runtime·needm(SB), R0
BL (R0)
+ // Set m->sched.sp = SP, so that if a panic happens
+ // during the function we are about to execute, it will
+ // have a valid SP to run on the g0 stack.
+ // The next few lines (after the havem label)
+ // will save this SP onto the stack and then write
+ // the same SP back to m->sched.sp. That seems redundant,
+ // but if an unrecovered panic happens, unwindm will
+ // restore the g->sched.sp from the stack location
+ // and then onM will try to use it. If we don't set it here,
+ // that restored SP will be uninitialized (typically 0) and
+ // will not be usable.
+ MOVW g_m(g), R8
+ MOVW m_g0(R8), R3
+ MOVW R13, (g_sched+gobuf_sp)(R3)
+
havem:
MOVW g_m(g), R8
MOVW R8, savedm-4(SP)
@@ -1275,9 +1290,6 @@ TEXT runtime·fastrand1(SB),NOSPLIT,$-4-4
MOVW R0, ret+0(FP)
RET
-TEXT runtime·gocputicks(SB),NOSPLIT,$0
- B runtime·cputicks(SB)
-
TEXT runtime·return0(SB),NOSPLIT,$0
MOVW $0, R0
RET
diff --git a/src/runtime/cgo/gcc_arm.S b/src/runtime/cgo/gcc_arm.S
index 2e4b3528b..d5833bfad 100644
--- a/src/runtime/cgo/gcc_arm.S
+++ b/src/runtime/cgo/gcc_arm.S
@@ -12,13 +12,6 @@
#endif
/*
- * Because the assembler might target an earlier revision of the ISA
- * by default, we must explicitly specify the ISA revision to ensure
- * BLX is recognized as a valid instruction.
- */
-.arch armv5t
-
-/*
* void crosscall_arm1(void (*fn)(void), void (*setg_gcc)(void *g), void *g)
*
* Calling into the 5c tool chain, where all registers are caller save.
@@ -31,8 +24,12 @@ EXT(crosscall_arm1):
mov r4, r0
mov r5, r1
mov r0, r2
- blx r5 // setg(g)
- blx r4 // fn()
+
+ // Because the assembler might target an earlier revision of the ISA
+ // by default, we encode BLX as a .word.
+ .word 0xe12fff35 // blx r5 // setg(g)
+ .word 0xe12fff34 // blx r4 // fn()
+
pop {r4, r5, r6, r7, r8, r9, r10, r11, ip, pc}
.globl EXT(__stack_chk_fail_local)
diff --git a/src/runtime/chan_test.go b/src/runtime/chan_test.go
index 01632892e..e689ceaed 100644
--- a/src/runtime/chan_test.go
+++ b/src/runtime/chan_test.go
@@ -482,6 +482,35 @@ func TestShrinkStackDuringBlockedSend(t *testing.T) {
<-done
}
+func TestSelectDuplicateChannel(t *testing.T) {
+ // This test makes sure we can queue a G on
+ // the same channel multiple times.
+ c := make(chan int)
+ d := make(chan int)
+ e := make(chan int)
+
+ // goroutine A
+ go func() {
+ select {
+ case <-c:
+ case <-c:
+ case <-d:
+ }
+ e <- 9
+ }()
+ time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c
+
+ // goroutine B
+ go func() {
+ <-c
+ }()
+ time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing
+
+ d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq.
+ <-e // A tells us it's done
+ c <- 8 // wake up B. This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
+}
+
func BenchmarkChanNonblocking(b *testing.B) {
myc := make(chan int)
b.RunParallel(func(pb *testing.PB) {
diff --git a/src/runtime/crash_cgo_test.go b/src/runtime/crash_cgo_test.go
index 4ff0084c2..5958ad891 100644
--- a/src/runtime/crash_cgo_test.go
+++ b/src/runtime/crash_cgo_test.go
@@ -8,6 +8,7 @@ package runtime_test
import (
"runtime"
+ "strings"
"testing"
)
@@ -34,6 +35,17 @@ func TestCgoTraceback(t *testing.T) {
}
}
+func TestCgoExternalThreadPanic(t *testing.T) {
+ if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
+ t.Skipf("no pthreads on %s", runtime.GOOS)
+ }
+ got := executeTest(t, cgoExternalThreadPanicSource, nil, "main.c", cgoExternalThreadPanicC)
+ want := "panic: BOOM"
+ if !strings.Contains(got, want) {
+ t.Fatalf("want failure containing %q. output:\n%s\n", want, got)
+ }
+}
+
const cgoSignalDeadlockSource = `
package main
@@ -117,3 +129,43 @@ func main() {
fmt.Printf("OK\n")
}
`
+
+const cgoExternalThreadPanicSource = `
+package main
+
+// void start(void);
+import "C"
+
+func main() {
+ C.start()
+ select {}
+}
+
+//export gopanic
+func gopanic() {
+ panic("BOOM")
+}
+`
+
+const cgoExternalThreadPanicC = `
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+
+void gopanic(void);
+
+static void*
+die(void* x)
+{
+ gopanic();
+ return 0;
+}
+
+void
+start(void)
+{
+ pthread_t t;
+ if(pthread_create(&t, 0, die, 0) != 0)
+ printf("pthread_create failed\n");
+}
+`
diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go
index 783b4c48f..211a0476f 100644
--- a/src/runtime/crash_test.go
+++ b/src/runtime/crash_test.go
@@ -31,7 +31,7 @@ func testEnv(cmd *exec.Cmd) *exec.Cmd {
return cmd
}
-func executeTest(t *testing.T, templ string, data interface{}) string {
+func executeTest(t *testing.T, templ string, data interface{}, extra ...string) string {
switch runtime.GOOS {
case "android", "nacl":
t.Skipf("skipping on %s", runtime.GOOS)
@@ -61,7 +61,20 @@ func executeTest(t *testing.T, templ string, data interface{}) string {
t.Fatalf("failed to close file: %v", err)
}
- got, _ := testEnv(exec.Command("go", "run", src)).CombinedOutput()
+ for i := 0; i < len(extra); i += 2 {
+ if err := ioutil.WriteFile(filepath.Join(dir, extra[i]), []byte(extra[i+1]), 0666); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ cmd := exec.Command("go", "build", "-o", "a.exe")
+ cmd.Dir = dir
+ out, err := testEnv(cmd).CombinedOutput()
+ if err != nil {
+ t.Fatalf("building source: %v\n%s", err, out)
+ }
+
+ got, _ := testEnv(exec.Command(filepath.Join(dir, "a.exe"))).CombinedOutput()
return string(got)
}
diff --git a/src/runtime/debug/garbage.go b/src/runtime/debug/garbage.go
index 30994f219..4a77dcfcd 100644
--- a/src/runtime/debug/garbage.go
+++ b/src/runtime/debug/garbage.go
@@ -16,6 +16,7 @@ type GCStats struct {
NumGC int64 // number of garbage collections
PauseTotal time.Duration // total pause for all collections
Pause []time.Duration // pause history, most recent first
+ PauseEnd []time.Time // pause end times history, most recent first
PauseQuantiles []time.Duration
}
@@ -30,25 +31,36 @@ type GCStats struct {
func ReadGCStats(stats *GCStats) {
// Create a buffer with space for at least two copies of the
// pause history tracked by the runtime. One will be returned
- // to the caller and the other will be used as a temporary buffer
- // for computing quantiles.
+ // to the caller and the other will be used as a transfer buffer
+ // for end times history and as a temporary buffer for
+ // computing quantiles.
const maxPause = len(((*runtime.MemStats)(nil)).PauseNs)
- if cap(stats.Pause) < 2*maxPause {
- stats.Pause = make([]time.Duration, 2*maxPause)
+ if cap(stats.Pause) < 2*maxPause+3 {
+ stats.Pause = make([]time.Duration, 2*maxPause+3)
}
- // readGCStats fills in the pause history (up to maxPause entries)
- // and then three more: Unix ns time of last GC, number of GC,
- // and total pause time in nanoseconds. Here we depend on the
- // fact that time.Duration's native unit is nanoseconds, so the
- // pauses and the total pause time do not need any conversion.
+ // readGCStats fills in the pause and end times histories (up to
+ // maxPause entries) and then three more: Unix ns time of last GC,
+ // number of GC, and total pause time in nanoseconds. Here we
+ // depend on the fact that time.Duration's native unit is
+ // nanoseconds, so the pauses and the total pause time do not need
+ // any conversion.
readGCStats(&stats.Pause)
n := len(stats.Pause) - 3
stats.LastGC = time.Unix(0, int64(stats.Pause[n]))
stats.NumGC = int64(stats.Pause[n+1])
stats.PauseTotal = stats.Pause[n+2]
+ n /= 2 // buffer holds pauses and end times
stats.Pause = stats.Pause[:n]
+ if cap(stats.PauseEnd) < maxPause {
+ stats.PauseEnd = make([]time.Time, 0, maxPause)
+ }
+ stats.PauseEnd = stats.PauseEnd[:0]
+ for _, ns := range stats.Pause[n : n+n] {
+ stats.PauseEnd = append(stats.PauseEnd, time.Unix(0, int64(ns)))
+ }
+
if len(stats.PauseQuantiles) > 0 {
if n == 0 {
for i := range stats.PauseQuantiles {
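
Reading the new field back is straightforward; a minimal sketch:

    var s debug.GCStats
    debug.ReadGCStats(&s)
    if len(s.PauseEnd) > 0 {
        fmt.Println("last GC ended at", s.PauseEnd[0]) // most recent first
    }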
diff --git a/src/runtime/debug/garbage_test.go b/src/runtime/debug/garbage_test.go
index 149bafc6f..54c33bd4f 100644
--- a/src/runtime/debug/garbage_test.go
+++ b/src/runtime/debug/garbage_test.go
@@ -70,6 +70,19 @@ func TestReadGCStats(t *testing.T) {
t.Errorf("stats.PauseQuantiles[%d]=%d > stats.PauseQuantiles[%d]=%d", i, q[i], i+1, q[i+1])
}
}
+
+ // compare memory stats with gc stats:
+ if len(stats.PauseEnd) != n {
+ t.Fatalf("len(stats.PauseEnd) = %d, want %d", len(stats.PauseEnd), n)
+ }
+ off := (int(mstats.NumGC) + len(mstats.PauseEnd) - 1) % len(mstats.PauseEnd)
+ for i := 0; i < n; i++ {
+ dt := stats.PauseEnd[i]
+ if dt.UnixNano() != int64(mstats.PauseEnd[off]) {
+ t.Errorf("stats.PauseEnd[%d] = %d, want %d", i, dt, mstats.PauseEnd[off])
+ }
+ off = (off + len(mstats.PauseEnd) - 1) % len(mstats.PauseEnd)
+ }
}
var big = make([]byte, 1<<20)
diff --git a/src/runtime/env_plan9.go b/src/runtime/env_plan9.go
index 76e9867e0..e442c3483 100644
--- a/src/runtime/env_plan9.go
+++ b/src/runtime/env_plan9.go
@@ -30,7 +30,7 @@ func gogetenv(key string) string {
if fd < 0 {
return ""
}
- n := seek(fd, 0, 2) - 1
+ n := seek(fd, 0, 2)
if n <= 0 {
close(fd)
return ""
@@ -44,6 +44,10 @@ func gogetenv(key string) string {
return ""
}
+ if p[r-1] == 0 {
+ r--
+ }
+
var s string
sp := (*_string)(unsafe.Pointer(&s))
sp.str = &p[0]
diff --git a/src/runtime/extern.go b/src/runtime/extern.go
index b8db5d0c4..1b8052bb5 100644
--- a/src/runtime/extern.go
+++ b/src/runtime/extern.go
@@ -39,6 +39,12 @@ a comma-separated list of name=val pairs. Supported names are:
gcdead: setting gcdead=1 causes the garbage collector to clobber all stack slots
that it thinks are dead.
+ invalidptr: defaults to invalidptr=1, causing the garbage collector and stack
+ copier to crash the program if an invalid pointer value (for example, 1)
+ is found in a pointer-typed location. Setting invalidptr=0 disables this check.
+ This should only be used as a temporary workaround to diagnose buggy code.
+ The real fix is to not store integers in pointer-typed locations.
+
scheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit
detailed multiline info every X milliseconds, describing state of the scheduler,
processors, threads and goroutines.
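
For example, a program that crashes under the new check can be run with GODEBUG=invalidptr=0 in its environment (hypothetical invocation: GODEBUG=invalidptr=0 ./a.out) as a stopgap while the offending store is tracked down.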
diff --git a/src/runtime/funcdata.h b/src/runtime/funcdata.h
index a2667a4c0..d6c14fcb4 100644
--- a/src/runtime/funcdata.h
+++ b/src/runtime/funcdata.h
@@ -28,6 +28,9 @@
// defines the pointer map for the function's arguments.
// GO_ARGS should be the first instruction in a function that uses it.
// It can be omitted if there are no arguments at all.
+// GO_ARGS is inserted implicitly by the linker for any function
+// that also has a Go prototype, so it is usually unnecessary to
+// write explicitly.
#define GO_ARGS FUNCDATA $FUNCDATA_ArgsPointerMaps, go_args_stackmap(SB)
// GO_RESULTS_INITIALIZED indicates that the assembly function
diff --git a/src/runtime/malloc.h b/src/runtime/malloc.h
index e606b0c7a..522b11bba 100644
--- a/src/runtime/malloc.h
+++ b/src/runtime/malloc.h
@@ -268,7 +268,8 @@ struct MStats
uint64 next_gc; // next GC (in heap_alloc time)
uint64 last_gc; // last GC (in absolute time)
uint64 pause_total_ns;
- uint64 pause_ns[256];
+ uint64 pause_ns[256]; // circular buffer of recent GC pause lengths
+ uint64 pause_end[256]; // circular buffer of recent GC end times (nanoseconds since 1970)
uint32 numgc;
bool enablegc;
bool debuggc;
diff --git a/src/runtime/mem.go b/src/runtime/mem.go
index 438f22ec0..e6f1eb0e6 100644
--- a/src/runtime/mem.go
+++ b/src/runtime/mem.go
@@ -44,7 +44,8 @@ type MemStats struct {
NextGC uint64 // next collection will happen when HeapAlloc ≥ this amount
LastGC uint64 // end time of last collection (nanoseconds since 1970)
PauseTotalNs uint64
- PauseNs [256]uint64 // circular buffer of recent GC pause times, most recent at [(NumGC+255)%256]
+ PauseNs [256]uint64 // circular buffer of recent GC pause durations, most recent at [(NumGC+255)%256]
+ PauseEnd [256]uint64 // circular buffer of recent GC pause end times
NumGC uint32
EnableGC bool
DebugGC bool
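
PauseEnd mirrors PauseNs: both are circular buffers indexed by collection number, with the most recent entry at [(NumGC+255)%256]. A hedged sketch of walking them most-recent-first:

	package main

	import (
		"fmt"
		"runtime"
		"time"
	)

	func main() {
		var m runtime.MemStats
		runtime.ReadMemStats(&m)
		n := int(m.NumGC)
		if n > len(m.PauseEnd) {
			n = len(m.PauseEnd)
		}
		for i := 0; i < n; i++ {
			// The two buffers share an index per collection.
			j := (int(m.NumGC) + len(m.PauseEnd) - 1 - i) % len(m.PauseEnd)
			fmt.Printf("pause %v ended %v\n",
				time.Duration(m.PauseNs[j]), time.Unix(0, int64(m.PauseEnd[j])))
		}
	}
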
diff --git a/src/runtime/mgc0.c b/src/runtime/mgc0.c
index cc1f81123..bcc5a2f39 100644
--- a/src/runtime/mgc0.c
+++ b/src/runtime/mgc0.c
@@ -687,7 +687,7 @@ putpartial(Workbuf *b)
else if (b->nobj == nelem(b->obj))
runtime·lfstackpush(&runtime·work.full, &b->node);
else {
- runtime·printf("b=%p, b->nobj=%d, nelem(b->obj)=%d\n", b, b->nobj, (uint32)nelem(b->obj));
+ runtime·printf("b=%p, b->nobj=%d, nelem(b->obj)=%d\n", b, (uint32)b->nobj, (uint32)nelem(b->obj));
runtime·throw("putpartial: bad Workbuf b->nobj");
}
}
@@ -1725,6 +1725,7 @@ gc(struct gc_args *args)
t4 = runtime·nanotime();
runtime·atomicstore64(&mstats.last_gc, runtime·unixnanotime()); // must be Unix time to make sense to user
mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
+ mstats.pause_end[mstats.numgc%nelem(mstats.pause_end)] = t4;
mstats.pause_total_ns += t4 - t0;
mstats.numgc++;
if(mstats.debuggc)
@@ -1773,7 +1774,6 @@ gc(struct gc_args *args)
runtime·sweep.spanidx = 0;
runtime·unlock(&runtime·mheap.lock);
- // Temporary disable concurrent sweep, because we see failures on builders.
if(ConcurrentSweep && !args->eagersweep) {
runtime·lock(&runtime·gclock);
if(runtime·sweep.g == nil)
@@ -1787,6 +1787,8 @@ gc(struct gc_args *args)
// Sweep all spans eagerly.
while(runtime·sweepone() != -1)
runtime·sweep.npausesweep++;
+ // Do an additional mProf_GC, because all 'free' events are now real as well.
+ runtime·mProf_GC();
}
runtime·mProf_GC();
@@ -1834,7 +1836,7 @@ readgcstats_m(void)
{
Slice *pauses;
uint64 *p;
- uint32 i, n;
+ uint32 i, j, n;
pauses = g->m->ptrarg[0];
g->m->ptrarg[0] = nil;
@@ -1843,25 +1845,29 @@ readgcstats_m(void)
if(pauses->cap < nelem(mstats.pause_ns)+3)
runtime·throw("runtime: short slice passed to readGCStats");
- // Pass back: pauses, last GC (absolute time), number of GC, total pause ns.
+	// Pass back: pauses, pause ends, last GC (absolute time), number of GCs, total pause ns.
p = (uint64*)pauses->array;
runtime·lock(&runtime·mheap.lock);
+
n = mstats.numgc;
if(n > nelem(mstats.pause_ns))
n = nelem(mstats.pause_ns);
-
+
// The pause buffer is circular. The most recent pause is at
// pause_ns[(numgc-1)%nelem(pause_ns)], and then backward
// from there to go back farther in time. We deliver the times
// most recent first (in p[0]).
- for(i=0; i<n; i++)
- p[i] = mstats.pause_ns[(mstats.numgc-1-i)%nelem(mstats.pause_ns)];
+ for(i=0; i<n; i++) {
+ j = (mstats.numgc-1-i)%nelem(mstats.pause_ns);
+ p[i] = mstats.pause_ns[j];
+ p[n+i] = mstats.pause_end[j];
+ }
- p[n] = mstats.last_gc;
- p[n+1] = mstats.numgc;
- p[n+2] = mstats.pause_total_ns;
+ p[n+n] = mstats.last_gc;
+ p[n+n+1] = mstats.numgc;
+ p[n+n+2] = mstats.pause_total_ns;
runtime·unlock(&runtime·mheap.lock);
- pauses->len = n+3;
+ pauses->len = n+n+3;
}
void
@@ -2041,7 +2047,7 @@ runtime·unrollgcprog_m(void)
Type *typ;
byte *mask, *prog;
uintptr pos;
- uint32 x;
+ uintptr x;
typ = g->m->ptrarg[0];
g->m->ptrarg[0] = nil;
@@ -2060,8 +2066,9 @@ runtime·unrollgcprog_m(void)
unrollgcprog1(mask, prog, &pos, false, true);
}
// atomic way to say mask[0] = 1
- x = ((uint32*)mask)[0];
- runtime·atomicstore((uint32*)mask, x|1);
+ x = *(uintptr*)mask;
+ ((byte*)&x)[0] = 1;
+ runtime·atomicstorep((void**)mask, (void*)x);
}
runtime·unlock(&lock);
}
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index 89e991523..803da5667 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -234,7 +234,7 @@ func mProf_GC() {
// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
var stk [maxStack]uintptr
- nstk := callers(1, &stk[0], len(stk))
+ nstk := callers(4, &stk[0], len(stk))
lock(&proflock)
b := stkbucket(memProfile, size, stk[:nstk], true)
mp := b.mp()
@@ -284,6 +284,8 @@ func SetBlockProfileRate(rate int) {
var r int64
if rate <= 0 {
r = 0 // disable profiling
+ } else if rate == 1 {
+ r = 1 // profile everything
} else {
// convert ns to cycles, use float64 to prevent overflow during multiplication
r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
@@ -297,7 +299,7 @@ func SetBlockProfileRate(rate int) {
func blockevent(cycles int64, skip int) {
if cycles <= 0 {
- return
+ cycles = 1
}
rate := int64(atomicload64(&blockprofilerate))
if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) {
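
With the rate==1 special case, SetBlockProfileRate(1) now records every blocking event instead of rounding the cycle threshold down to zero and effectively disabling the profile. A minimal usage sketch (contention is scheduling-dependent, so output may vary):

	package main

	import (
		"os"
		"runtime"
		"runtime/pprof"
		"sync"
	)

	func main() {
		runtime.SetBlockProfileRate(1) // profile every blocking event
		var mu sync.Mutex
		mu.Lock()
		go mu.Unlock()
		mu.Lock() // may block briefly, giving the profile something to record
		pprof.Lookup("block").WriteTo(os.Stdout, 1)
	}
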
diff --git a/src/runtime/pprof/mprof_test.go b/src/runtime/pprof/mprof_test.go
new file mode 100644
index 000000000..ebf53dd66
--- /dev/null
+++ b/src/runtime/pprof/mprof_test.go
@@ -0,0 +1,99 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof_test
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "runtime"
+ . "runtime/pprof"
+ "testing"
+ "unsafe"
+)
+
+var memSink interface{}
+
+func allocateTransient1M() {
+ for i := 0; i < 1024; i++ {
+ memSink = &struct{ x [1024]byte }{}
+ }
+}
+
+func allocateTransient2M() {
+ // prevent inlining
+ if memSink == nil {
+ panic("bad")
+ }
+ memSink = make([]byte, 2<<20)
+}
+
+type Obj32 struct {
+ link *Obj32
+ pad [32 - unsafe.Sizeof(uintptr(0))]byte
+}
+
+var persistentMemSink *Obj32
+
+func allocatePersistent1K() {
+ for i := 0; i < 32; i++ {
+ // Can't use slice because that will introduce implicit allocations.
+ obj := &Obj32{link: persistentMemSink}
+ persistentMemSink = obj
+ }
+}
+
+var memoryProfilerRun = 0
+
+func TestMemoryProfiler(t *testing.T) {
+ // Disable sampling, otherwise it's difficult to assert anything.
+ oldRate := runtime.MemProfileRate
+ runtime.MemProfileRate = 1
+ defer func() {
+ runtime.MemProfileRate = oldRate
+ }()
+
+ // Allocate a meg to ensure that mcache.next_sample is updated to 1.
+ for i := 0; i < 1024; i++ {
+ memSink = make([]byte, 1024)
+ }
+
+ // Do the interesting allocations.
+ allocateTransient1M()
+ allocateTransient2M()
+ allocatePersistent1K()
+ memSink = nil
+
+ runtime.GC() // materialize stats
+ var buf bytes.Buffer
+ if err := Lookup("heap").WriteTo(&buf, 1); err != nil {
+ t.Fatalf("failed to write heap profile: %v", err)
+ }
+
+ memoryProfilerRun++
+
+ tests := []string{
+ fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.allocatePersistent1K\+0x[0-9,a-f]+ .*/runtime/pprof/mprof_test\.go:43
+# 0x[0-9,a-f]+ runtime/pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/runtime/pprof/mprof_test\.go:66
+`, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun),
+
+ fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.allocateTransient1M\+0x[0-9,a-f]+ .*/runtime/pprof/mprof_test.go:21
+# 0x[0-9,a-f]+ runtime/pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/runtime/pprof/mprof_test.go:64
+`, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun),
+
+ fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.allocateTransient2M\+0x[0-9,a-f]+ .*/runtime/pprof/mprof_test.go:30
+# 0x[0-9,a-f]+ runtime/pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/runtime/pprof/mprof_test.go:65
+`, memoryProfilerRun, (2<<20)*memoryProfilerRun),
+ }
+
+ for _, test := range tests {
+ if !regexp.MustCompile(test).Match(buf.Bytes()) {
+ t.Fatalf("The entry did not match:\n%v\n\nProfile:\n%v\n", test, buf.String())
+ }
+ }
+}
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index edd471a0c..8677cb30c 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -249,7 +249,7 @@ func TestGoroutineSwitch(t *testing.T) {
// exists to record a PC without a traceback. Those are okay.
if len(stk) == 2 {
f := runtime.FuncForPC(stk[1])
- if f != nil && (f.Name() == "System" || f.Name() == "ExternalCode") {
+ if f != nil && (f.Name() == "System" || f.Name() == "ExternalCode" || f.Name() == "GC") {
return
}
}
diff --git a/src/runtime/print1.go b/src/runtime/print1.go
index 0fa1fb63c..8f8268873 100644
--- a/src/runtime/print1.go
+++ b/src/runtime/print1.go
@@ -19,32 +19,17 @@ func bytes(s string) (ret []byte) {
return
}
-// goprintf is the function call that is actually deferred when you write
-// defer print(...)
-// It is otherwise unused. In particular it is not used for ordinary prints.
-// Right now a dynamically allocated string that is being passed as an
-// argument is invisible to the garbage collector and might be collected
-// if that argument list is the only reference. For now we ignore that possibility.
-// To fix, we should change to defer a call to vprintf with a pointer to
-// an argument list on the stack, stored in an appropriately typed
-// struct. golang.org/issue/8614.
-//go:nosplit
-func goprintf(s string) {
- vprintf(s, add(unsafe.Pointer(&s), unsafe.Sizeof(s)))
-}
-
-// printf is only called from C code. It has the same problem as goprintf
-// with strings possibly being collected from underneath.
-// However, the runtime never prints dynamically allocated
-// Go strings using printf. The strings it prints come from the symbol
-// and type tables.
+// printf is only called from C code. It has no type information for the args,
+// but C stacks are ignored by the garbage collector anyway, so having
+// type information would not add anything.
//go:nosplit
func printf(s *byte) {
vprintf(gostringnocopy(s), add(unsafe.Pointer(&s), unsafe.Sizeof(s)))
}
-// sprintf is only called from C code.
-// It has the same problem as goprintf.
+// sprintf is only called from C code. It has no type information for the args,
+// but C stacks are ignored by the garbage collector anyway, so having
+// type information would not add anything.
//go:nosplit
func snprintf(dst *byte, n int32, s *byte) {
buf := (*[1 << 30]byte)(unsafe.Pointer(dst))[0:n:n]
diff --git a/src/runtime/proc.c b/src/runtime/proc.c
index b824f574d..ab6812329 100644
--- a/src/runtime/proc.c
+++ b/src/runtime/proc.c
@@ -2764,6 +2764,8 @@ static void
checkdead(void)
{
G *gp;
+ P *p;
+ M *mp;
int32 run, grunning, s;
uintptr i;
@@ -2805,6 +2807,24 @@ checkdead(void)
runtime·unlock(&runtime·allglock);
if(grunning == 0) // possible if main goroutine calls runtime·Goexit()
runtime·throw("no goroutines (main called runtime.Goexit) - deadlock!");
+
+ // Maybe jump time forward for playground.
+ if((gp = runtime·timejump()) != nil) {
+ runtime·casgstatus(gp, Gwaiting, Grunnable);
+ globrunqput(gp);
+ p = pidleget();
+ if(p == nil)
+ runtime·throw("checkdead: no p for timer");
+ mp = mget();
+ if(mp == nil)
+ newm(nil, p);
+ else {
+ mp->nextp = p;
+ runtime·notewakeup(&mp->park);
+ }
+ return;
+ }
+
g->m->throwing = -1; // do not dump full stacks
runtime·throw("all goroutines are asleep - deadlock!");
}
diff --git a/src/runtime/rt0_nacl_amd64p32.s b/src/runtime/rt0_nacl_amd64p32.s
index d8703dc0f..54e4b1de8 100644
--- a/src/runtime/rt0_nacl_amd64p32.s
+++ b/src/runtime/rt0_nacl_amd64p32.s
@@ -26,5 +26,5 @@ TEXT _rt0_amd64p32_nacl(SB),NOSPLIT,$16
TEXT main(SB),NOSPLIT,$0
// Uncomment for fake time like on Go Playground.
//MOVQ $1257894000000000000, AX
- //MOVQ AX, runtime·timens(SB)
+ //MOVQ AX, runtime·faketime(SB)
JMP runtime·rt0_go(SB)
diff --git a/src/runtime/runtime.c b/src/runtime/runtime.c
index b3503fb90..c823691ec 100644
--- a/src/runtime/runtime.c
+++ b/src/runtime/runtime.c
@@ -276,9 +276,13 @@ struct DbgVar
int32* value;
};
+// Do we report invalid pointers found during stack or heap scans?
+int32 runtime·invalidptr = 1;
+
#pragma dataflag NOPTR /* dbgvar has no heap pointers */
static DbgVar dbgvar[] = {
{"allocfreetrace", &runtime·debug.allocfreetrace},
+ {"invalidptr", &runtime·invalidptr},
{"efence", &runtime·debug.efence},
{"gctrace", &runtime·debug.gctrace},
{"gcdead", &runtime·debug.gcdead},
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index c1bba423a..6a02ef1d3 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -672,6 +672,8 @@ enum {
byte* runtime·startup_random_data;
uint32 runtime·startup_random_data_len;
+int32 runtime·invalidptr;
+
enum {
// hashinit wants this many random bytes
HashRandomBytes = 32
diff --git a/src/runtime/select.go b/src/runtime/select.go
index 2d0787bd9..d703e1d79 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -393,9 +393,9 @@ loop:
} else {
c = k._chan
if k.kind == _CaseSend {
- c.sendq.dequeueg(gp)
+ c.sendq.dequeueSudoG(sglist)
} else {
- c.recvq.dequeueg(gp)
+ c.recvq.dequeueSudoG(sglist)
}
}
sgnext = sglist.waitlink
@@ -623,7 +623,7 @@ func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
return
}
-func (q *waitq) dequeueg(gp *g) {
+func (q *waitq) dequeueSudoG(s *sudog) {
var prevsgp *sudog
l := &q.first
for {
@@ -631,7 +631,7 @@ func (q *waitq) dequeueg(gp *g) {
if sgp == nil {
return
}
- if sgp.g == gp {
+ if sgp == s {
*l = sgp.next
if q.last == sgp {
q.last = prevsgp
diff --git a/src/runtime/stack.c b/src/runtime/stack.c
index e06e48a93..f18171ea5 100644
--- a/src/runtime/stack.c
+++ b/src/runtime/stack.c
@@ -401,12 +401,12 @@ adjustpointers(byte **scanp, BitVector *bv, AdjustInfo *adjinfo, Func *f)
break;
case BitsPointer:
p = scanp[i];
- if(f != nil && (byte*)0 < p && (p < (byte*)PageSize || (uintptr)p == PoisonGC || (uintptr)p == PoisonStack)) {
+ if(f != nil && (byte*)0 < p && (p < (byte*)PageSize && runtime·invalidptr || (uintptr)p == PoisonGC || (uintptr)p == PoisonStack)) {
// Looks like a junk value in a pointer slot.
// Live analysis wrong?
g->m->traceback = 2;
runtime·printf("runtime: bad pointer in frame %s at %p: %p\n", runtime·funcname(f), &scanp[i], p);
- runtime·throw("bad pointer!");
+ runtime·throw("invalid stack pointer");
}
if(minp <= p && p < maxp) {
if(StackDebug >= 3)
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index 32dfed7d3..2d5e41c1c 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -182,7 +182,11 @@ func exit(code int32)
func breakpoint()
func nanotime() int64
func usleep(usec uint32)
+
+// Careful: cputicks is not guaranteed to be monotonic! In particular, we have
+// noticed drift between CPUs on certain os/arch combinations. See issue 8976.
func cputicks() int64
+
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
func munmap(addr unsafe.Pointer, n uintptr)
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index 48d4023b9..45d107b77 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -84,10 +84,13 @@ func symtabinit() {
}
}
- // file table follows ftab.
+ // The ftab ends with a half functab consisting only of
+ // 'entry', followed by a uint32 giving the pcln-relative
+ // offset of the file table.
sp = (*sliceStruct)(unsafe.Pointer(&filetab))
- p = unsafe.Pointer(add(unsafe.Pointer(pcln), ftab[nftab].funcoff))
- sp.array = unsafe.Pointer(add(unsafe.Pointer(pcln), ftab[nftab].funcoff))
+ end := unsafe.Pointer(&ftab[nftab].funcoff) // just beyond ftab
+ fileoffset := *(*uint32)(end)
+ sp.array = unsafe.Pointer(&pclntable[fileoffset])
// length is in first element of array.
// set len to 1 so we can get first element.
sp.len = 1
@@ -224,7 +227,7 @@ func funcline(f *_func, targetpc uintptr, file *string) int32 {
func funcspdelta(f *_func, targetpc uintptr) int32 {
x := pcvalue(f, f.pcsp, targetpc, true)
if x&(ptrSize-1) != 0 {
- print("invalid spdelta ", f.pcsp, " ", x, "\n")
+ print("invalid spdelta ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
}
return x
}
diff --git a/src/runtime/sys_nacl_amd64p32.s b/src/runtime/sys_nacl_amd64p32.s
index c30c2a893..4eb4aacdd 100644
--- a/src/runtime/sys_nacl_amd64p32.s
+++ b/src/runtime/sys_nacl_amd64p32.s
@@ -60,7 +60,7 @@ TEXT syscall·naclWrite(SB), NOSPLIT, $24-20
TEXT runtime·write(SB),NOSPLIT,$16-20
// If using fake time and writing to stdout or stderr,
// emit playback header before actual data.
- MOVQ runtime·timens(SB), AX
+ MOVQ runtime·faketime(SB), AX
CMPQ AX, $0
JEQ write
MOVL fd+0(FP), DI
@@ -242,7 +242,7 @@ TEXT runtime·mmap(SB),NOSPLIT,$8
RET
TEXT time·now(SB),NOSPLIT,$16
- MOVQ runtime·timens(SB), AX
+ MOVQ runtime·faketime(SB), AX
CMPQ AX, $0
JEQ realtime
MOVQ $0, DX
@@ -277,7 +277,7 @@ TEXT runtime·nacl_clock_gettime(SB),NOSPLIT,$0
RET
TEXT runtime·nanotime(SB),NOSPLIT,$16
- MOVQ runtime·timens(SB), AX
+ MOVQ runtime·faketime(SB), AX
CMPQ AX, $0
JEQ 3(PC)
MOVQ AX, ret+0(FP)
diff --git a/src/runtime/time.go b/src/runtime/time.go
index 8cf9eecf8..11862c7e2 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -35,8 +35,8 @@ var timers struct {
t []*timer
}
-// nacl fake time support.
-var timens int64
+// nacl fake time support - time in nanoseconds since 1970
+var faketime int64
// Package time APIs.
// Godoc uses the comments in package time, not these.
@@ -194,7 +194,7 @@ func timerproc() {
f(arg, seq)
lock(&timers.lock)
}
- if delta < 0 {
+ if delta < 0 || faketime > 0 {
// No timers left - put goroutine to sleep.
timers.rescheduling = true
goparkunlock(&timers.lock, "timer goroutine (idle)")
@@ -208,6 +208,29 @@ func timerproc() {
}
}
+func timejump() *g {
+ if faketime == 0 {
+ return nil
+ }
+
+ lock(&timers.lock)
+ if !timers.created || len(timers.t) == 0 {
+ unlock(&timers.lock)
+ return nil
+ }
+
+ var gp *g
+ if faketime < timers.t[0].when {
+ faketime = timers.t[0].when
+ if timers.rescheduling {
+ timers.rescheduling = false
+ gp = timers.gp
+ }
+ }
+ unlock(&timers.lock)
+ return gp
+}
+
// Heap maintenance algorithms.
func siftupTimer(i int) {
diff --git a/src/sync/pool.go b/src/sync/pool.go
index 1f08707cd..0cf063702 100644
--- a/src/sync/pool.go
+++ b/src/sync/pool.go
@@ -200,6 +200,8 @@ func poolCleanup() {
}
l.shared = nil
}
+ p.local = nil
+ p.localSize = 0
}
allPools = []*Pool{}
}
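
Clearing p.local and p.localSize means a GC now releases everything a Pool caches, including the per-P fast path. A sketch of the observable behavior (assuming no other use of the pool in between):

	package main

	import (
		"fmt"
		"runtime"
		"sync"
	)

	func main() {
		p := sync.Pool{New: func() interface{} { return new(int) }}
		v := new(int)
		p.Put(v)
		runtime.GC() // poolCleanup drops all cached items
		fmt.Println(p.Get().(*int) == v) // expected: false, the old item is gone
	}
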
diff --git a/src/sync/pool_test.go b/src/sync/pool_test.go
index cf5c8bd90..fa1a27bea 100644
--- a/src/sync/pool_test.go
+++ b/src/sync/pool_test.go
@@ -69,32 +69,44 @@ func TestPoolNew(t *testing.T) {
}
}
-// Test that Pool does not hold pointers to previously cached
-// resources
+// Test that Pool does not hold pointers to previously cached resources.
func TestPoolGC(t *testing.T) {
+ testPool(t, true)
+}
+
+// Test that Pool releases resources on GC.
+func TestPoolRelease(t *testing.T) {
+ testPool(t, false)
+}
+
+func testPool(t *testing.T, drain bool) {
var p Pool
- var fin uint32
const N = 100
- for i := 0; i < N; i++ {
- v := new(string)
- runtime.SetFinalizer(v, func(vv *string) {
- atomic.AddUint32(&fin, 1)
- })
- p.Put(v)
- }
- for i := 0; i < N; i++ {
- p.Get()
- }
- for i := 0; i < 5; i++ {
- runtime.GC()
- time.Sleep(time.Duration(i*100+10) * time.Millisecond)
- // 1 pointer can remain on stack or elsewhere
- if atomic.LoadUint32(&fin) >= N-1 {
- return
+loop:
+ for try := 0; try < 3; try++ {
+ var fin, fin1 uint32
+ for i := 0; i < N; i++ {
+ v := new(string)
+ runtime.SetFinalizer(v, func(vv *string) {
+ atomic.AddUint32(&fin, 1)
+ })
+ p.Put(v)
+ }
+ if drain {
+ for i := 0; i < N; i++ {
+ p.Get()
+ }
+ }
+ for i := 0; i < 5; i++ {
+ runtime.GC()
+ time.Sleep(time.Duration(i*100+10) * time.Millisecond)
+ // 1 pointer can remain on stack or elsewhere
+ if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 {
+ continue loop
+ }
}
+ t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try)
}
- t.Fatalf("only %v out of %v resources are finalized",
- atomic.LoadUint32(&fin), N)
}
func TestPoolStress(t *testing.T) {
diff --git a/src/syscall/env_plan9.go b/src/syscall/env_plan9.go
index 934507905..9ea36c886 100644
--- a/src/syscall/env_plan9.go
+++ b/src/syscall/env_plan9.go
@@ -8,28 +8,9 @@ package syscall
import (
"errors"
- "sync"
)
var (
- // envOnce guards copyenv, which populates env, envi and envs.
- envOnce sync.Once
-
- // envLock guards env, envi and envs.
- envLock sync.RWMutex
-
- // env maps from an environment variable to its value.
- // TODO: remove this? golang.org/issue/8849
- env = make(map[string]string)
-
- // envi maps from an environment variable to its index in envs.
- // TODO: remove this? golang.org/issue/8849
- envi = make(map[string]int)
-
- // envs contains elements of env in the form "key=value".
- // empty strings mean deleted.
- envs []string
-
errZeroLengthKey = errors.New("zero length key")
errShortWrite = errors.New("i/o count too small")
)
@@ -70,47 +51,14 @@ func writeenv(key, value string) error {
return nil
}
-func copyenv() {
- fd, err := Open("/env", O_RDONLY)
- if err != nil {
- return
- }
- defer Close(fd)
- files, err := readdirnames(fd)
- if err != nil {
- return
- }
- envs = make([]string, len(files))
- i := 0
- for _, key := range files {
- v, err := readenv(key)
- if err != nil {
- continue
- }
- env[key] = v
- envs[i] = key + "=" + v
- envi[key] = i
- i++
- }
-}
-
func Getenv(key string) (value string, found bool) {
if len(key) == 0 {
return "", false
}
-
- envLock.RLock()
- defer envLock.RUnlock()
-
- if v, ok := env[key]; ok {
- return v, true
- }
v, err := readenv(key)
if err != nil {
return "", false
}
- env[key] = v
- envs = append(envs, key+"="+v)
return v, true
}
@@ -118,27 +66,14 @@ func Setenv(key, value string) error {
if len(key) == 0 {
return errZeroLengthKey
}
-
- envLock.Lock()
- defer envLock.Unlock()
-
err := writeenv(key, value)
if err != nil {
return err
}
- env[key] = value
- envs = append(envs, key+"="+value)
- envi[key] = len(envs) - 1
return nil
}
func Clearenv() {
- envLock.Lock()
- defer envLock.Unlock()
-
- env = make(map[string]string)
- envi = make(map[string]int)
- envs = []string{}
RawSyscall(SYS_RFORK, RFCENVG, 0, 0)
}
@@ -146,30 +81,28 @@ func Unsetenv(key string) error {
if len(key) == 0 {
return errZeroLengthKey
}
-
- envLock.Lock()
- defer envLock.Unlock()
-
Remove("/env/" + key)
-
- if i, ok := envi[key]; ok {
- delete(env, key)
- delete(envi, key)
- envs[i] = ""
- }
return nil
}
func Environ() []string {
- envLock.RLock()
- defer envLock.RUnlock()
+ fd, err := Open("/env", O_RDONLY)
+ if err != nil {
+ return nil
+ }
+ defer Close(fd)
+ files, err := readdirnames(fd)
+ if err != nil {
+ return nil
+ }
+ ret := make([]string, 0, len(files))
- envOnce.Do(copyenv)
- ret := make([]string, 0, len(envs))
- for _, pair := range envs {
- if pair != "" {
- ret = append(ret, pair)
+ for _, key := range files {
+ v, err := readenv(key)
+ if err != nil {
+ continue
}
+ ret = append(ret, key+"="+v)
}
return ret
}
diff --git a/src/syscall/fs_nacl.go b/src/syscall/fs_nacl.go
index 4abc9b81c..6e6ce2ab7 100644
--- a/src/syscall/fs_nacl.go
+++ b/src/syscall/fs_nacl.go
@@ -818,6 +818,12 @@ func create(name string, mode uint32, sec int64, data []byte) error {
fs.mu.Unlock()
f, err := fs.open(name, O_CREATE|O_EXCL, mode)
if err != nil {
+ if mode&S_IFMT == S_IFDIR {
+ ip, _, err := fs.namei(name, false)
+ if err == nil && (ip.Mode&S_IFMT) == S_IFDIR {
+ return nil // directory already exists
+ }
+ }
return err
}
ip := f.(*fsysFile).inode
diff --git a/src/syscall/route_bsd.go b/src/syscall/route_bsd.go
index 48af58745..1dabe4253 100644
--- a/src/syscall/route_bsd.go
+++ b/src/syscall/route_bsd.go
@@ -153,7 +153,7 @@ func (m *InterfaceAddrMessage) sockaddr() (sas []Sockaddr) {
// RTAX_NETMASK socket address on the FreeBSD kernel.
preferredFamily := uint8(AF_UNSPEC)
for i := uint(0); i < RTAX_MAX; i++ {
- if m.Header.Addrs&rtaIfaMask&(1<<i) == 0 {
+ if m.Header.Addrs&(1<<i) == 0 {
continue
}
rsa := (*RawSockaddr)(unsafe.Pointer(&b[0]))
diff --git a/src/testing/testing.go b/src/testing/testing.go
index f91d860a9..e54a3b8ce 100644
--- a/src/testing/testing.go
+++ b/src/testing/testing.go
@@ -620,6 +620,7 @@ func after() {
fmt.Fprintf(os.Stderr, "testing: %s\n", err)
os.Exit(2)
}
+ runtime.GC() // materialize all statistics
if err = pprof.WriteHeapProfile(f); err != nil {
fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *memProfile, err)
os.Exit(2)
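
The same pattern is useful outside the testing package: forcing a collection immediately before writing a heap profile makes the latest allocation and free statistics visible in it. A hedged sketch:

	package main

	import (
		"log"
		"os"
		"runtime"
		"runtime/pprof"
	)

	func main() {
		f, err := os.Create("heap.prof")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		runtime.GC() // materialize all statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Fatal(err)
		}
	}
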
diff --git a/src/text/template/exec.go b/src/text/template/exec.go
index f6eed662b..b00e10c7e 100644
--- a/src/text/template/exec.go
+++ b/src/text/template/exec.go
@@ -546,7 +546,7 @@ func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, a
argv := make([]reflect.Value, numIn)
// Args must be evaluated. Fixed args first.
i := 0
- for ; i < numFixed; i++ {
+ for ; i < numFixed && i < len(args); i++ {
argv[i] = s.evalArg(dot, typ.In(i), args[i])
}
// Now the ... args.
diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go
index e2cf2d370..69c213ed2 100644
--- a/src/text/template/exec_test.go
+++ b/src/text/template/exec_test.go
@@ -893,6 +893,18 @@ func TestMessageForExecuteEmpty(t *testing.T) {
}
}
+func TestFinalForPrintf(t *testing.T) {
+ tmpl, err := New("").Parse(`{{"x" | printf}}`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var b bytes.Buffer
+ err = tmpl.Execute(&b, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
type cmpTest struct {
expr string
truth string
diff --git a/src/time/sleep.go b/src/time/sleep.go
index 61660d14f..e7a2ee205 100644
--- a/src/time/sleep.go
+++ b/src/time/sleep.go
@@ -55,6 +55,9 @@ type Timer struct {
// Stop does not close the channel, to prevent a read from the channel succeeding
// incorrectly.
func (t *Timer) Stop() bool {
+ if t.r.f == nil {
+ panic("time: Stop called on uninitialized Timer")
+ }
return stopTimer(&t.r)
}
@@ -78,6 +81,9 @@ func NewTimer(d Duration) *Timer {
// It returns true if the timer had been active, false if the timer had
// expired or been stopped.
func (t *Timer) Reset(d Duration) bool {
+ if t.r.f == nil {
+ panic("time: Reset called on uninitialized Timer")
+ }
w := when(d)
active := stopTimer(&t.r)
t.r.when = w
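
Both methods now fail loudly on a zero-value Timer instead of silently doing nothing; timers must come from NewTimer or AfterFunc. An illustrative sketch:

	package main

	import "time"

	func main() {
		t := time.NewTimer(time.Second)
		t.Stop() // fine: t was initialized by NewTimer

		var zero time.Timer
		zero.Stop() // panics: time: Stop called on uninitialized Timer
	}
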
diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go
index 2cfb6a59c..c9b2956b7 100644
--- a/src/time/sleep_test.go
+++ b/src/time/sleep_test.go
@@ -9,6 +9,7 @@ import (
"fmt"
"runtime"
"sort"
+ "strings"
"sync"
"sync/atomic"
"testing"
@@ -407,3 +408,23 @@ func TestOverflowRuntimeTimer(t *testing.T) {
// the end of CheckRuntimeTimerOverflow in internal_test.go.
CheckRuntimeTimerOverflow()
}
+
+func checkZeroPanicString(t *testing.T) {
+ e := recover()
+ s, _ := e.(string)
+ if want := "called on uninitialized Timer"; !strings.Contains(s, want) {
+ t.Errorf("panic = %v; want substring %q", e, want)
+ }
+}
+
+func TestZeroTimerResetPanics(t *testing.T) {
+ defer checkZeroPanicString(t)
+ var tr Timer
+ tr.Reset(1)
+}
+
+func TestZeroTimerStopPanics(t *testing.T) {
+ defer checkZeroPanicString(t)
+ var tr Timer
+ tr.Stop()
+}
diff --git a/src/unicode/utf8/utf8.go b/src/unicode/utf8/utf8.go
index 253295ad3..9ac37184d 100644
--- a/src/unicode/utf8/utf8.go
+++ b/src/unicode/utf8/utf8.go
@@ -211,8 +211,11 @@ func FullRuneInString(s string) bool {
return !short
}
-// DecodeRune unpacks the first UTF-8 encoding in p and returns the rune and its width in bytes.
-// If the encoding is invalid, it returns (RuneError, 1), an impossible result for correct UTF-8.
+// DecodeRune unpacks the first UTF-8 encoding in p and returns the rune and
+// its width in bytes. If p is empty it returns (RuneError, 0). Otherwise, if
+// the encoding is invalid, it returns (RuneError, 1). Both are impossible
+// results for correct UTF-8.
+//
// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
// out of range, or is not the shortest possible UTF-8 encoding for the
// value. No other validation is performed.
@@ -221,8 +224,10 @@ func DecodeRune(p []byte) (r rune, size int) {
return
}
-// DecodeRuneInString is like DecodeRune but its input is a string.
-// If the encoding is invalid, it returns (RuneError, 1), an impossible result for correct UTF-8.
+// DecodeRuneInString is like DecodeRune but its input is a string. If s is
+// empty it returns (RuneError, 0). Otherwise, if the encoding is invalid, it
+// returns (RuneError, 1). Both are impossible results for correct UTF-8.
+//
// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
// out of range, or is not the shortest possible UTF-8 encoding for the
// value. No other validation is performed.
@@ -231,8 +236,11 @@ func DecodeRuneInString(s string) (r rune, size int) {
return
}
-// DecodeLastRune unpacks the last UTF-8 encoding in p and returns the rune and its width in bytes.
-// If the encoding is invalid, it returns (RuneError, 1), an impossible result for correct UTF-8.
+// DecodeLastRune unpacks the last UTF-8 encoding in p and returns the rune and
+// its width in bytes. If p is empty it returns (RuneError, 0). Otherwise, if
+// the encoding is invalid, it returns (RuneError, 1). Both are impossible
+// results for correct UTF-8.
+//
// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
// out of range, or is not the shortest possible UTF-8 encoding for the
// value. No other validation is performed.
@@ -268,8 +276,10 @@ func DecodeLastRune(p []byte) (r rune, size int) {
return r, size
}
-// DecodeLastRuneInString is like DecodeLastRune but its input is a string.
-// If the encoding is invalid, it returns (RuneError, 1), an impossible result for correct UTF-8.
+// DecodeLastRuneInString is like DecodeLastRune but its input is a string. If
+// s is empty it returns (RuneError, 0). Otherwise, if the encoding is invalid,
+// it returns (RuneError, 1). Both are impossible results for correct UTF-8.
+//
// An encoding is invalid if it is incorrect UTF-8, encodes a rune that is
// out of range, or is not the shortest possible UTF-8 encoding for the
// value. No other validation is performed.
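
The (RuneError, 0) versus (RuneError, 1) distinction lets callers tell empty input apart from a bad byte. A small sketch:

	package main

	import (
		"fmt"
		"unicode/utf8"
	)

	func main() {
		r, size := utf8.DecodeRune(nil)
		fmt.Println(r == utf8.RuneError, size) // true 0: empty input

		r, size = utf8.DecodeRune([]byte{0xff})
		fmt.Println(r == utf8.RuneError, size) // true 1: invalid encoding
	}
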
diff --git a/src/unsafe/unsafe.go b/src/unsafe/unsafe.go
index 83b2e1405..79499b295 100644
--- a/src/unsafe/unsafe.go
+++ b/src/unsafe/unsafe.go
@@ -4,6 +4,9 @@
/*
Package unsafe contains operations that step around the type safety of Go programs.
+
+ Packages that import unsafe may be non-portable and are not protected by the
+ Go 1 compatibility guidelines.
*/
package unsafe