summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/bufio/scan.go13
-rw-r--r--src/bufio/scan_test.go67
-rw-r--r--src/cmd/5g/reg.c2
-rw-r--r--src/cmd/api/goapi.go1
-rw-r--r--src/cmd/dist/build.c9
-rw-r--r--src/cmd/gc/builtin.c31
-rw-r--r--src/cmd/gc/go.h1
-rw-r--r--src/cmd/gc/lex.c4
-rw-r--r--src/cmd/gc/plive.c19
-rw-r--r--src/cmd/gc/popt.c4
-rw-r--r--src/cmd/gc/reflect.c8
-rw-r--r--src/cmd/gc/runtime.go33
-rw-r--r--src/cmd/gc/typecheck.c6
-rw-r--r--src/cmd/gc/walk.c63
-rw-r--r--src/cmd/go/build.go10
-rw-r--r--src/cmd/go/doc.go2
-rw-r--r--src/cmd/go/pkg.go24
-rwxr-xr-xsrc/cmd/go/test.bash18
-rw-r--r--src/cmd/go/tool.go2
-rw-r--r--src/cmd/go/vet.go2
-rw-r--r--src/cmd/internal/objfile/disasm.go248
-rw-r--r--src/cmd/internal/objfile/objfile.go14
-rw-r--r--src/cmd/objdump/main.go242
-rw-r--r--src/cmd/objdump/objdump_test.go102
-rw-r--r--src/cmd/pprof/README8
-rw-r--r--src/cmd/pprof/doc.go12
-rw-r--r--src/cmd/pprof/pprof.go41
-rw-r--r--src/compress/lzw/reader.go2
-rw-r--r--src/crypto/crypto.go12
-rw-r--r--src/debug/goobj/read_test.go2
-rw-r--r--src/net/http/cookiejar/jar.go2
-rw-r--r--src/net/http/serve_test.go1
-rw-r--r--src/os/exec/exec_test.go2
-rw-r--r--src/os/file_plan9.go3
-rw-r--r--src/os/file_unix.go1
-rw-r--r--src/os/file_windows.go1
-rw-r--r--src/reflect/type.go8
-rwxr-xr-xsrc/run.bash3
-rw-r--r--src/runtime/asm_386.s6
-rw-r--r--src/runtime/asm_amd64.s6
-rw-r--r--src/runtime/asm_amd64p32.s6
-rw-r--r--src/runtime/asm_power64x.s6
-rw-r--r--src/runtime/atomic.go38
-rw-r--r--src/runtime/cgo/dragonfly.c2
-rw-r--r--src/runtime/cgo/freebsd.c2
-rw-r--r--src/runtime/cgo/netbsd.c2
-rw-r--r--src/runtime/cgo/openbsd.c2
-rw-r--r--src/runtime/export_test.go2
-rw-r--r--src/runtime/gcinfo_test.go4
-rw-r--r--src/runtime/heapdump.c19
-rw-r--r--src/runtime/lfstack.c14
-rw-r--r--src/runtime/lfstack_test.go2
-rw-r--r--src/runtime/malloc.go74
-rw-r--r--src/runtime/malloc.h5
-rw-r--r--src/runtime/mcache.c2
-rw-r--r--src/runtime/mgc0.c1426
-rw-r--r--src/runtime/mgc0.go131
-rw-r--r--src/runtime/mgc0.h10
-rw-r--r--src/runtime/mprof.go43
-rw-r--r--src/runtime/os_android.c2
-rw-r--r--src/runtime/os_darwin.c3
-rw-r--r--src/runtime/os_dragonfly.c3
-rw-r--r--src/runtime/os_freebsd.c3
-rw-r--r--src/runtime/os_linux.c3
-rw-r--r--src/runtime/os_nacl.c3
-rw-r--r--src/runtime/os_netbsd.c3
-rw-r--r--src/runtime/os_openbsd.c3
-rw-r--r--src/runtime/os_plan9.c6
-rw-r--r--src/runtime/os_solaris.c3
-rw-r--r--src/runtime/print1.go30
-rw-r--r--src/runtime/proc.c47
-rw-r--r--src/runtime/proc.go3
-rw-r--r--src/runtime/runtime.h37
-rw-r--r--src/runtime/select.go7
-rw-r--r--src/runtime/sema.go1
-rw-r--r--src/runtime/stack.c83
-rw-r--r--src/runtime/string.c2
-rw-r--r--src/runtime/stubs.go41
-rw-r--r--src/runtime/sys_x86.c1
-rw-r--r--src/runtime/traceback.go22
-rw-r--r--src/runtime/wbfat.go190
-rw-r--r--src/runtime/wbfat_gen.go41
-rw-r--r--src/sync/atomic/atomic_test.go8
83 files changed, 2389 insertions, 981 deletions
diff --git a/src/bufio/scan.go b/src/bufio/scan.go
index a41451524..364d15961 100644
--- a/src/bufio/scan.go
+++ b/src/bufio/scan.go
@@ -36,6 +36,7 @@ type Scanner struct {
start int // First non-processed byte in buf.
end int // End of data in buf.
err error // Sticky error.
+ empties int // Count of successive empty tokens.
}
// SplitFunc is the signature of the split function used to tokenize the
@@ -108,6 +109,8 @@ func (s *Scanner) Text() string {
// After Scan returns false, the Err method will return any error that
// occurred during scanning, except that if it was io.EOF, Err
// will return nil.
+// Split panics if the split function returns 100 empty tokens without
+// advancing the input. This is a common error mode for scanners.
func (s *Scanner) Scan() bool {
// Loop until we have a token.
for {
@@ -125,6 +128,15 @@ func (s *Scanner) Scan() bool {
}
s.token = token
if token != nil {
+ if s.err == nil || advance > 0 {
+ s.empties = 0
+ } else {
+ // Returning tokens not advancing input at EOF.
+ s.empties++
+ if s.empties > 100 {
+ panic("bufio.Scan: 100 empty tokens without progressing")
+ }
+ }
return true
}
}
@@ -172,6 +184,7 @@ func (s *Scanner) Scan() bool {
break
}
if n > 0 {
+ s.empties = 0
break
}
loop++
diff --git a/src/bufio/scan_test.go b/src/bufio/scan_test.go
index 1454a8113..eea87cbf7 100644
--- a/src/bufio/scan_test.go
+++ b/src/bufio/scan_test.go
@@ -455,3 +455,70 @@ func TestEmptyTokens(t *testing.T) {
t.Fatal(err)
}
}
+
+func loopAtEOFSplit(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if len(data) > 0 {
+ return 1, data[:1], nil
+ }
+ return 0, data, nil
+}
+
+func TestDontLoopForever(t *testing.T) {
+ s := NewScanner(strings.NewReader("abc"))
+ s.Split(loopAtEOFSplit)
+ // Expect a panic
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Fatal("should have panicked")
+ }
+ if msg, ok := err.(string); !ok || !strings.Contains(msg, "empty tokens") {
+ panic(err)
+ }
+ }()
+ for count := 0; s.Scan(); count++ {
+ if count > 1000 {
+ t.Fatal("looping")
+ }
+ }
+ if s.Err() != nil {
+ t.Fatal("after scan:", s.Err())
+ }
+}
+
+func TestBlankLines(t *testing.T) {
+ s := NewScanner(strings.NewReader(strings.Repeat("\n", 1000)))
+ for count := 0; s.Scan(); count++ {
+ if count > 2000 {
+ t.Fatal("looping")
+ }
+ }
+ if s.Err() != nil {
+ t.Fatal("after scan:", s.Err())
+ }
+}
+
+type countdown int
+
+func (c *countdown) split(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if *c > 0 {
+ *c--
+ return 1, data[:1], nil
+ }
+ return 0, nil, nil
+}
+
+// Check that the looping-at-EOF check doesn't trigger for merely empty tokens.
+func TestEmptyLinesOK(t *testing.T) {
+ c := countdown(10000)
+ s := NewScanner(strings.NewReader(strings.Repeat("\n", 10000)))
+ s.Split(c.split)
+ for s.Scan() {
+ }
+ if s.Err() != nil {
+ t.Fatal("after scan:", s.Err())
+ }
+ if c != 0 {
+ t.Fatalf("stopped with %d left to process", c)
+ }
+}
diff --git a/src/cmd/5g/reg.c b/src/cmd/5g/reg.c
index 712841329..441792873 100644
--- a/src/cmd/5g/reg.c
+++ b/src/cmd/5g/reg.c
@@ -199,7 +199,7 @@ regopt(Prog *firstp)
proginfo(&info, p);
// Avoid making variables for direct-called functions.
- if(p->as == ABL && p->to.type == D_EXTERN)
+ if(p->as == ABL && p->to.name == D_EXTERN)
continue;
bit = mkvar(r, &p->from);
diff --git a/src/cmd/api/goapi.go b/src/cmd/api/goapi.go
index 5a8c87603..e49ba33bb 100644
--- a/src/cmd/api/goapi.go
+++ b/src/cmd/api/goapi.go
@@ -405,6 +405,7 @@ func (w *Walker) parseFile(dir, file string) (*ast.File, error) {
" note struct{};" +
" p struct{};" +
" parfor struct{};" +
+ " slice struct{};" +
" slicetype struct{};" +
" stkframe struct{};" +
" sudog struct{};" +
diff --git a/src/cmd/dist/build.c b/src/cmd/dist/build.c
index 8fd2e998a..9c81dd8b2 100644
--- a/src/cmd/dist/build.c
+++ b/src/cmd/dist/build.c
@@ -691,13 +691,6 @@ install(char *dir)
bpathf(&final_path, "%s/src/%s", goroot_final, dir);
name = lastelem(dir);
- // For misc/prof, copy into the tool directory and we're done.
- if(hasprefix(dir, "misc/")) {
- copyfile(bpathf(&b, "%s/%s", tooldir, name),
- bpathf(&b1, "%s/misc/%s", goroot, name), 1);
- goto out;
- }
-
// set up gcc command line on first run.
if(gccargs.len == 0) {
bprintf(&b, "%s %s", defaultcc, defaultcflags);
@@ -1328,8 +1321,6 @@ static char *buildorder[] = {
"libbio",
"liblink",
- "misc/pprof",
-
"cmd/cc", // must be before c
"cmd/gc", // must be before g
"cmd/%sl", // must be before a, c, g
diff --git a/src/cmd/gc/builtin.c b/src/cmd/gc/builtin.c
index fbca4ee5f..aeeadedca 100644
--- a/src/cmd/gc/builtin.c
+++ b/src/cmd/gc/builtin.c
@@ -24,6 +24,8 @@ char *runtimeimport =
"func @\"\".printslice (? any)\n"
"func @\"\".printnl ()\n"
"func @\"\".printsp ()\n"
+ "func @\"\".printlock ()\n"
+ "func @\"\".printunlock ()\n"
"func @\"\".concatstring2 (? string, ? string) (? string)\n"
"func @\"\".concatstring3 (? string, ? string, ? string) (? string)\n"
"func @\"\".concatstring4 (? string, ? string, ? string, ? string) (? string)\n"
@@ -86,10 +88,33 @@ char *runtimeimport =
"func @\"\".writebarrierstring (@\"\".dst·1 *any, @\"\".src·2 any)\n"
"func @\"\".writebarrierslice (@\"\".dst·1 *any, @\"\".src·2 any)\n"
"func @\"\".writebarrieriface (@\"\".dst·1 *any, @\"\".src·2 any)\n"
- "func @\"\".writebarrierfat2 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
- "func @\"\".writebarrierfat3 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
- "func @\"\".writebarrierfat4 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat01 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat10 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat11 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat001 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat010 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat011 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat100 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat101 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat110 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat111 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat0001 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat0010 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat0011 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat0100 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat0101 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat0110 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat0111 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat1000 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat1001 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat1010 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat1011 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat1100 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat1101 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat1110 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
+ "func @\"\".writebarrierfat1111 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n"
"func @\"\".writebarrierfat (@\"\".typ·1 *byte, @\"\".dst·2 *any, @\"\".src·3 *any)\n"
+ "func @\"\".writebarriercopy (@\"\".typ·2 *byte, @\"\".dst·3 any, @\"\".src·4 any) (? int)\n"
"func @\"\".selectnbsend (@\"\".chanType·2 *byte, @\"\".hchan·3 chan<- any, @\"\".elem·4 *any) (? bool)\n"
"func @\"\".selectnbrecv (@\"\".chanType·2 *byte, @\"\".elem·3 *any, @\"\".hchan·4 <-chan any) (? bool)\n"
"func @\"\".selectnbrecv2 (@\"\".chanType·2 *byte, @\"\".elem·3 *any, @\"\".received·4 *bool, @\"\".hchan·5 <-chan any) (? bool)\n"
diff --git a/src/cmd/gc/go.h b/src/cmd/gc/go.h
index d3c4193b5..c695c5bf3 100644
--- a/src/cmd/gc/go.h
+++ b/src/cmd/gc/go.h
@@ -1466,6 +1466,7 @@ void walk(Node *fn);
void walkexpr(Node **np, NodeList **init);
void walkexprlist(NodeList *l, NodeList **init);
void walkexprlistsafe(NodeList *l, NodeList **init);
+void walkexprlistcheap(NodeList *l, NodeList **init);
void walkstmt(Node **np);
void walkstmtlist(NodeList *l);
Node* conv(Node*, Type*);
diff --git a/src/cmd/gc/lex.c b/src/cmd/gc/lex.c
index 2303b442c..523ba37aa 100644
--- a/src/cmd/gc/lex.c
+++ b/src/cmd/gc/lex.c
@@ -344,8 +344,8 @@ main(int argc, char *argv[])
break;
}
}
- if(j == nelem(debugtab))
- fatal("unknown debug information -d '%s'\n", f[i]);
+ if(debugtab[j].name == nil)
+ sysfatal("unknown debug information -d '%s'\n", f[i]);
}
}
diff --git a/src/cmd/gc/plive.c b/src/cmd/gc/plive.c
index 0feb2c710..3bfa69b1f 100644
--- a/src/cmd/gc/plive.c
+++ b/src/cmd/gc/plive.c
@@ -1092,7 +1092,7 @@ twobitwalktype1(Type *t, vlong *xoffset, Bvec *bv)
case TCOMPLEX64:
case TCOMPLEX128:
for(i = 0; i < t->width; i++) {
- bvset(bv, ((*xoffset + i) / widthptr) * BitsPerPointer); // 1 = live scalar
+ bvset(bv, ((*xoffset + i) / widthptr) * BitsPerPointer); // 1 = live scalar (BitsScalar)
}
*xoffset += t->width;
break;
@@ -1105,7 +1105,7 @@ twobitwalktype1(Type *t, vlong *xoffset, Bvec *bv)
case TMAP:
if((*xoffset & (widthptr-1)) != 0)
fatal("twobitwalktype1: invalid alignment, %T", t);
- bvset(bv, (*xoffset / widthptr) * BitsPerPointer + 1); // 2 = live ptr
+ bvset(bv, (*xoffset / widthptr) * BitsPerPointer + 1); // 2 = live ptr (BitsPointer)
*xoffset += t->width;
break;
@@ -1113,7 +1113,7 @@ twobitwalktype1(Type *t, vlong *xoffset, Bvec *bv)
// struct { byte *str; intgo len; }
if((*xoffset & (widthptr-1)) != 0)
fatal("twobitwalktype1: invalid alignment, %T", t);
- bvset(bv, (*xoffset / widthptr) * BitsPerPointer + 1); // 2 = live ptr in first slot
+ bvset(bv, (*xoffset / widthptr) * BitsPerPointer + 1); // 2 = live ptr in first slot (BitsPointer)
*xoffset += t->width;
break;
@@ -1123,15 +1123,8 @@ twobitwalktype1(Type *t, vlong *xoffset, Bvec *bv)
// struct { Type *type; union { void *ptr, uintptr val } data; }
if((*xoffset & (widthptr-1)) != 0)
fatal("twobitwalktype1: invalid alignment, %T", t);
- bvset(bv, ((*xoffset / widthptr) * BitsPerPointer) + 0);
- bvset(bv, ((*xoffset / widthptr) * BitsPerPointer) + 1); // 3 = multiword
- // next word contains 2 = Iface, 3 = Eface
- if(isnilinter(t)) {
- bvset(bv, ((*xoffset / widthptr) * BitsPerPointer) + 2);
- bvset(bv, ((*xoffset / widthptr) * BitsPerPointer) + 3);
- } else {
- bvset(bv, ((*xoffset / widthptr) * BitsPerPointer) + 3);
- }
+ bvset(bv, (*xoffset / widthptr) * BitsPerPointer + 1); // 2 = live ptr in first slot (BitsPointer)
+ bvset(bv, (*xoffset / widthptr) * BitsPerPointer + 3); // 2 = live ptr in second slot (BitsPointer)
*xoffset += t->width;
break;
@@ -1144,7 +1137,7 @@ twobitwalktype1(Type *t, vlong *xoffset, Bvec *bv)
// struct { byte *array; uintgo len; uintgo cap; }
if((*xoffset & (widthptr-1)) != 0)
fatal("twobitwalktype1: invalid TARRAY alignment, %T", t);
- bvset(bv, (*xoffset / widthptr) * BitsPerPointer + 1); // 2 = live ptr in first slot
+ bvset(bv, (*xoffset / widthptr) * BitsPerPointer + 1); // 2 = live ptr in first slot (BitsPointer)
*xoffset += t->width;
} else
for(i = 0; i < t->bound; i++)
diff --git a/src/cmd/gc/popt.c b/src/cmd/gc/popt.c
index 993bb2482..6e6db88ef 100644
--- a/src/cmd/gc/popt.c
+++ b/src/cmd/gc/popt.c
@@ -847,6 +847,10 @@ nilopt(Prog *firstp)
Graph *g;
int ncheck, nkill;
+ // TODO(minux): nilopt on power64 throw away seemly random segment of code.
+ if(thechar == '9')
+ return;
+
g = flowstart(firstp, sizeof(NilFlow));
if(g == nil)
return;
diff --git a/src/cmd/gc/reflect.c b/src/cmd/gc/reflect.c
index b2ff2fbc5..0f8802abc 100644
--- a/src/cmd/gc/reflect.c
+++ b/src/cmd/gc/reflect.c
@@ -1525,11 +1525,9 @@ gengcprog1(ProgGen *g, Type *t, vlong *xoffset)
*xoffset += t->width;
break;
case TINTER:
- proggendata(g, BitsMultiWord);
- if(isnilinter(t))
- proggendata(g, BitsEface);
- else
- proggendata(g, BitsIface);
+ // Assuming IfacePointerOnly=1.
+ proggendata(g, BitsPointer);
+ proggendata(g, BitsPointer);
*xoffset += t->width;
break;
case TARRAY:
diff --git a/src/cmd/gc/runtime.go b/src/cmd/gc/runtime.go
index 0fb15c265..c6007714c 100644
--- a/src/cmd/gc/runtime.go
+++ b/src/cmd/gc/runtime.go
@@ -36,6 +36,8 @@ func printeface(any)
func printslice(any)
func printnl()
func printsp()
+func printlock()
+func printunlock()
func concatstring2(string, string) string
func concatstring3(string, string, string) string
@@ -115,10 +117,35 @@ func writebarrieriface(dst *any, src any)
// The unused *byte argument makes sure that src is 2-pointer-aligned,
// which is the maximum alignment on NaCl amd64p32
// (and possibly on 32-bit systems if we start 64-bit aligning uint64s).
-func writebarrierfat2(dst *any, _ *byte, src any)
-func writebarrierfat3(dst *any, _ *byte, src any)
-func writebarrierfat4(dst *any, _ *byte, src any)
+// The bitmap in the name tells which words being copied are pointers.
+func writebarrierfat01(dst *any, _ *byte, src any)
+func writebarrierfat10(dst *any, _ *byte, src any)
+func writebarrierfat11(dst *any, _ *byte, src any)
+func writebarrierfat001(dst *any, _ *byte, src any)
+func writebarrierfat010(dst *any, _ *byte, src any)
+func writebarrierfat011(dst *any, _ *byte, src any)
+func writebarrierfat100(dst *any, _ *byte, src any)
+func writebarrierfat101(dst *any, _ *byte, src any)
+func writebarrierfat110(dst *any, _ *byte, src any)
+func writebarrierfat111(dst *any, _ *byte, src any)
+func writebarrierfat0001(dst *any, _ *byte, src any)
+func writebarrierfat0010(dst *any, _ *byte, src any)
+func writebarrierfat0011(dst *any, _ *byte, src any)
+func writebarrierfat0100(dst *any, _ *byte, src any)
+func writebarrierfat0101(dst *any, _ *byte, src any)
+func writebarrierfat0110(dst *any, _ *byte, src any)
+func writebarrierfat0111(dst *any, _ *byte, src any)
+func writebarrierfat1000(dst *any, _ *byte, src any)
+func writebarrierfat1001(dst *any, _ *byte, src any)
+func writebarrierfat1010(dst *any, _ *byte, src any)
+func writebarrierfat1011(dst *any, _ *byte, src any)
+func writebarrierfat1100(dst *any, _ *byte, src any)
+func writebarrierfat1101(dst *any, _ *byte, src any)
+func writebarrierfat1110(dst *any, _ *byte, src any)
+func writebarrierfat1111(dst *any, _ *byte, src any)
+
func writebarrierfat(typ *byte, dst *any, src *any)
+func writebarriercopy(typ *byte, dst any, src any) int
func selectnbsend(chanType *byte, hchan chan<- any, elem *any) bool
func selectnbrecv(chanType *byte, elem *any, hchan <-chan any) bool
diff --git a/src/cmd/gc/typecheck.c b/src/cmd/gc/typecheck.c
index 714c66268..f05d8022d 100644
--- a/src/cmd/gc/typecheck.c
+++ b/src/cmd/gc/typecheck.c
@@ -2891,7 +2891,8 @@ typecheckas(Node *n)
case OSLICE3:
case OSLICESTR:
// For x = x[0:y], x can be updated in place, without touching pointer.
- if(samesafeexpr(n->left, n->right->left) && (n->right->right->left == N || iszero(n->right->right->left)))
+ // TODO(rsc): Reenable once it is actually updated in place without touching the pointer.
+ if(0 && samesafeexpr(n->left, n->right->left) && (n->right->right->left == N || iszero(n->right->right->left)))
n->right->reslice = 1;
break;
@@ -2899,7 +2900,8 @@ typecheckas(Node *n)
// For x = append(x, ...), x can be updated in place when there is capacity,
// without touching the pointer; otherwise the emitted code to growslice
// can take care of updating the pointer, and only in that case.
- if(n->right->list != nil && samesafeexpr(n->left, n->right->list->n))
+ // TODO(rsc): Reenable once the emitted code does update the pointer.
+ if(0 && n->right->list != nil && samesafeexpr(n->left, n->right->list->n))
n->right->reslice = 1;
break;
}
diff --git a/src/cmd/gc/walk.c b/src/cmd/gc/walk.c
index d4d0f449c..37bd62dea 100644
--- a/src/cmd/gc/walk.c
+++ b/src/cmd/gc/walk.c
@@ -6,6 +6,7 @@
#include <libc.h>
#include "go.h"
#include "../ld/textflag.h"
+#include "../../runtime/mgc0.h"
static Node* walkprint(Node*, NodeList**);
static Node* writebarrierfn(char*, Type*, Type*);
@@ -363,6 +364,15 @@ walkexprlistsafe(NodeList *l, NodeList **init)
}
void
+walkexprlistcheap(NodeList *l, NodeList **init)
+{
+ for(; l; l=l->next) {
+ l->n = cheapexpr(l->n, init);
+ walkexpr(&l->n, init);
+ }
+}
+
+void
walkexpr(Node **np, NodeList **init)
{
Node *r, *l, *var, *a;
@@ -1772,6 +1782,11 @@ walkprint(Node *nn, NodeList **init)
calls = nil;
notfirst = 0;
+ // Hoist all the argument evaluation up before the lock.
+ walkexprlistcheap(all, init);
+
+ calls = list(calls, mkcall("printlock", T, init));
+
for(l=all; l; l=l->next) {
if(notfirst) {
calls = list(calls, mkcall("printsp", T, init));
@@ -1852,6 +1867,9 @@ walkprint(Node *nn, NodeList **init)
if(op == OPRINTN)
calls = list(calls, mkcall("printnl", T, nil));
+
+ calls = list(calls, mkcall("printunlock", T, init));
+
typechecklist(calls, Etop);
walkexprlist(calls, init);
@@ -1988,6 +2006,9 @@ applywritebarrier(Node *n, NodeList **init)
{
Node *l, *r;
Type *t;
+ vlong x;
+ static Bvec *bv;
+ char name[32];
if(n->left && n->right && needwritebarrier(n->left, n->right)) {
t = n->left->type;
@@ -2005,14 +2026,35 @@ applywritebarrier(Node *n, NodeList **init)
} else if(isinter(t)) {
n = mkcall1(writebarrierfn("writebarrieriface", t, n->right->type), T, init,
l, n->right);
- } else if(t->width == 2*widthptr) {
- n = mkcall1(writebarrierfn("writebarrierfat2", t, n->right->type), T, init,
- l, nodnil(), n->right);
- } else if(t->width == 3*widthptr) {
- n = mkcall1(writebarrierfn("writebarrierfat3", t, n->right->type), T, init,
- l, nodnil(), n->right);
- } else if(t->width == 4*widthptr) {
- n = mkcall1(writebarrierfn("writebarrierfat4", t, n->right->type), T, init,
+ } else if(t->width <= 4*widthptr) {
+ x = 0;
+ if(bv == nil)
+ bv = bvalloc(BitsPerPointer*4);
+ bvresetall(bv);
+ twobitwalktype1(t, &x, bv);
+ // The bvgets are looking for BitsPointer in successive slots.
+ enum {
+ PtrBit = 1,
+ };
+ if(BitsPointer != (1<<PtrBit))
+ fatal("wrong PtrBit");
+ switch(t->width/widthptr) {
+ default:
+ fatal("found writebarrierfat for %d-byte object of type %T", (int)t->width, t);
+ case 2:
+ snprint(name, sizeof name, "writebarrierfat%d%d",
+ bvget(bv, PtrBit), bvget(bv, BitsPerPointer+PtrBit));
+ break;
+ case 3:
+ snprint(name, sizeof name, "writebarrierfat%d%d%d",
+ bvget(bv, PtrBit), bvget(bv, BitsPerPointer+PtrBit), bvget(bv, 2*BitsPerPointer+PtrBit));
+ break;
+ case 4:
+ snprint(name, sizeof name, "writebarrierfat%d%d%d%d",
+ bvget(bv, PtrBit), bvget(bv, BitsPerPointer+PtrBit), bvget(bv, 2*BitsPerPointer+PtrBit), bvget(bv, 3*BitsPerPointer+PtrBit));
+ break;
+ }
+ n = mkcall1(writebarrierfn(name, t, n->right->type), T, init,
l, nodnil(), n->right);
} else {
r = n->right;
@@ -2874,6 +2916,11 @@ copyany(Node *n, NodeList **init, int runtimecall)
{
Node *nl, *nr, *nfrm, *nto, *nif, *nlen, *nwid, *fn;
NodeList *l;
+
+ if(haspointers(n->left->type->type)) {
+ fn = writebarrierfn("writebarriercopy", n->left->type, n->right->type);
+ return mkcall1(fn, n->type, init, typename(n->left->type->type), n->left, n->right);
+ }
if(runtimecall) {
if(n->right->type->etype == TSTRING)
diff --git a/src/cmd/go/build.go b/src/cmd/go/build.go
index 79a27116a..1dd4314da 100644
--- a/src/cmd/go/build.go
+++ b/src/cmd/go/build.go
@@ -1826,7 +1826,15 @@ func (gcToolchain) ld(b *builder, p *Package, out string, allactions []*action,
func (gcToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error {
inc := filepath.Join(goroot, "pkg", fmt.Sprintf("%s_%s", goos, goarch))
cfile = mkAbs(p.Dir, cfile)
- args := stringList(tool(archChar+"c"), "-F", "-V", "-w", "-trimpath", b.work, "-I", objdir, "-I", inc, "-o", ofile, buildCcflags, "-D", "GOOS_"+goos, "-D", "GOARCH_"+goarch, cfile)
+ warn := []string{"-w"}
+ if p.usesSwig() {
+ // When using SWIG, this compiler is only used to
+ // compile the C files generated by SWIG.
+ // We don't want warnings.
+ // See issue 9065 for details.
+ warn = nil
+ }
+ args := stringList(tool(archChar+"c"), "-F", "-V", warn, "-trimpath", b.work, "-I", objdir, "-I", inc, "-o", ofile, buildCcflags, "-D", "GOOS_"+goos, "-D", "GOARCH_"+goarch, cfile)
return b.run(p.Dir, p.ImportPath, nil, args)
}
diff --git a/src/cmd/go/doc.go b/src/cmd/go/doc.go
index cf3a54565..43a315944 100644
--- a/src/cmd/go/doc.go
+++ b/src/cmd/go/doc.go
@@ -590,7 +590,7 @@ Usage:
Vet runs the Go vet command on the packages named by the import paths.
-For more about vet, see 'godoc code.google.com/p/go.tools/cmd/vet'.
+For more about vet, see 'godoc golang.org/x/tools/cmd/vet'.
For more about specifying packages, see 'go help packages'.
To run the vet tool with specific options, run 'go tool vet'.
diff --git a/src/cmd/go/pkg.go b/src/cmd/go/pkg.go
index e17326442..b71feb7a6 100644
--- a/src/cmd/go/pkg.go
+++ b/src/cmd/go/pkg.go
@@ -383,9 +383,10 @@ func findInternal(path string) (index int, ok bool) {
type targetDir int
const (
- toRoot targetDir = iota // to bin dir inside package root (default)
- toTool // GOROOT/pkg/tool
- toBin // GOROOT/bin
+ toRoot targetDir = iota // to bin dir inside package root (default)
+ toTool // GOROOT/pkg/tool
+ toBin // GOROOT/bin
+ stalePath // the old import path; fail to build
)
// goTools is a map of Go program import path to install target directory.
@@ -398,10 +399,14 @@ var goTools = map[string]targetDir{
"cmd/nm": toTool,
"cmd/objdump": toTool,
"cmd/pack": toTool,
+ "cmd/pprof": toTool,
"cmd/yacc": toTool,
- "code.google.com/p/go.tools/cmd/cover": toTool,
- "code.google.com/p/go.tools/cmd/godoc": toBin,
- "code.google.com/p/go.tools/cmd/vet": toTool,
+ "golang.org/x/tools/cmd/cover": toTool,
+ "golang.org/x/tools/cmd/godoc": toBin,
+ "golang.org/x/tools/cmd/vet": toTool,
+ "code.google.com/p/go.tools/cmd/cover": stalePath,
+ "code.google.com/p/go.tools/cmd/godoc": stalePath,
+ "code.google.com/p/go.tools/cmd/vet": stalePath,
}
// expandScanner expands a scanner.List error into all the errors in the list.
@@ -462,6 +467,13 @@ func (p *Package) load(stk *importStack, bp *build.Package, err error) *Package
}
if p.Name == "main" {
+ // Report an error when the old code.google.com/p/go.tools paths are used.
+ if goTools[p.ImportPath] == stalePath {
+ newPath := strings.Replace(p.ImportPath, "code.google.com/p/go.", "golang.org/x/", 1)
+ e := fmt.Sprintf("the %v command has moved; use %v instead.", p.ImportPath, newPath)
+ p.Error = &PackageError{Err: e}
+ return p
+ }
_, elem := filepath.Split(p.Dir)
full := buildContext.GOOS + "_" + buildContext.GOARCH + "/" + elem
if buildContext.GOOS != toolGOOS || buildContext.GOARCH != toolGOARCH {
diff --git a/src/cmd/go/test.bash b/src/cmd/go/test.bash
index 2b5230b1a..e0f066f18 100755
--- a/src/cmd/go/test.bash
+++ b/src/cmd/go/test.bash
@@ -433,20 +433,20 @@ TEST godoc installs into GOBIN
d=$(mktemp -d -t testgoXXX)
export GOPATH=$d
mkdir $d/gobin
-GOBIN=$d/gobin ./testgo get code.google.com/p/go.tools/cmd/godoc || ok=false
+GOBIN=$d/gobin ./testgo get golang.org/x/tools/cmd/godoc || ok=false
if [ ! -x $d/gobin/godoc ]; then
echo did not install godoc to '$GOBIN'
- GOBIN=$d/gobin ./testgo list -f 'Target: {{.Target}}' code.google.com/p/go.tools/cmd/godoc || true
+ GOBIN=$d/gobin ./testgo list -f 'Target: {{.Target}}' golang.org/x/tools/cmd/godoc || true
ok=false
fi
TEST godoc installs into GOROOT
GOROOT=$(./testgo env GOROOT)
rm -f $GOROOT/bin/godoc
-./testgo install code.google.com/p/go.tools/cmd/godoc || ok=false
+./testgo install golang.org/x/tools/cmd/godoc || ok=false
if [ ! -x $GOROOT/bin/godoc ]; then
echo did not install godoc to '$GOROOT/bin'
- ./testgo list -f 'Target: {{.Target}}' code.google.com/p/go.tools/cmd/godoc || true
+ ./testgo list -f 'Target: {{.Target}}' golang.org/x/tools/cmd/godoc || true
ok=false
fi
@@ -561,8 +561,8 @@ fi
TEST without GOPATH, go get fails
d=$(mktemp -d -t testgoXXX)
mkdir -p $d/src
-if GOPATH= GOROOT=$d ./testgo get -d code.google.com/p/go.codereview/cmd/hgpatch ; then
- echo 'go get code.google.com/p/go.codereview/cmd/hgpatch should not succeed with $GOPATH unset'
+if GOPATH= GOROOT=$d ./testgo get -d golang.org/x/codereview/cmd/hgpatch ; then
+ echo 'go get golang.org/x/codereview/cmd/hgpatch should not succeed with $GOPATH unset'
ok=false
fi
rm -rf $d
@@ -571,8 +571,8 @@ rm -rf $d
TEST with GOPATH=GOROOT, go get fails
d=$(mktemp -d -t testgoXXX)
mkdir -p $d/src
-if GOPATH=$d GOROOT=$d ./testgo get -d code.google.com/p/go.codereview/cmd/hgpatch ; then
- echo 'go get code.google.com/p/go.codereview/cmd/hgpatch should not succeed with GOPATH=$GOROOT'
+if GOPATH=$d GOROOT=$d ./testgo get -d golang.org/x/codereview/cmd/hgpatch ; then
+ echo 'go get golang.org/x/codereview/cmd/hgpatch should not succeed with GOPATH=$GOROOT'
ok=false
fi
rm -rf $d
@@ -728,7 +728,7 @@ elif ! grep "case-insensitive file name collision" $d/out >/dev/null; then
fi
TEST go get cover
-./testgo get code.google.com/p/go.tools/cmd/cover || ok=false
+./testgo get golang.org/x/tools/cmd/cover || ok=false
unset GOPATH
rm -rf $d
diff --git a/src/cmd/go/tool.go b/src/cmd/go/tool.go
index 6d26f7a4b..c96161e0f 100644
--- a/src/cmd/go/tool.go
+++ b/src/cmd/go/tool.go
@@ -53,7 +53,7 @@ func tool(toolName string) string {
// Give a nice message if there is no tool with that name.
if _, err := os.Stat(toolPath); err != nil {
if isInGoToolsRepo(toolName) {
- fmt.Fprintf(os.Stderr, "go tool: no such tool %q; to install:\n\tgo get code.google.com/p/go.tools/cmd/%s\n", toolName, toolName)
+ fmt.Fprintf(os.Stderr, "go tool: no such tool %q; to install:\n\tgo get golang.org/x/tools/cmd/%s\n", toolName, toolName)
} else {
fmt.Fprintf(os.Stderr, "go tool: no such tool %q\n", toolName)
}
diff --git a/src/cmd/go/vet.go b/src/cmd/go/vet.go
index de7befc61..02ff54b2a 100644
--- a/src/cmd/go/vet.go
+++ b/src/cmd/go/vet.go
@@ -17,7 +17,7 @@ var cmdVet = &Command{
Long: `
Vet runs the Go vet command on the packages named by the import paths.
-For more about vet, see 'godoc code.google.com/p/go.tools/cmd/vet'.
+For more about vet, see 'godoc golang.org/x/tools/cmd/vet'.
For more about specifying packages, see 'go help packages'.
To run the vet tool with specific options, run 'go tool vet'.
diff --git a/src/cmd/internal/objfile/disasm.go b/src/cmd/internal/objfile/disasm.go
new file mode 100644
index 000000000..1a339c321
--- /dev/null
+++ b/src/cmd/internal/objfile/disasm.go
@@ -0,0 +1,248 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package objfile
+
+import (
+ "bufio"
+ "debug/gosym"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "regexp"
+ "sort"
+ "strings"
+ "text/tabwriter"
+
+ "cmd/internal/rsc.io/arm/armasm"
+ "cmd/internal/rsc.io/x86/x86asm"
+)
+
+// Disasm is a disassembler for a given File.
+type Disasm struct {
+ syms []Sym //symbols in file, sorted by address
+ pcln *gosym.Table // pcln table
+ text []byte // bytes of text segment (actual instructions)
+ textStart uint64 // start PC of text
+ textEnd uint64 // end PC of text
+ goarch string // GOARCH string
+ disasm disasmFunc // disassembler function for goarch
+ byteOrder binary.ByteOrder // byte order for goarch
+}
+
+// Disasm returns a disassembler for the file f.
+func (f *File) Disasm() (*Disasm, error) {
+ syms, err := f.Symbols()
+ if err != nil {
+ return nil, err
+ }
+
+ pcln, err := f.PCLineTable()
+ if err != nil {
+ return nil, err
+ }
+
+ textStart, textBytes, err := f.Text()
+ if err != nil {
+ return nil, err
+ }
+
+ goarch := f.GOARCH()
+ disasm := disasms[goarch]
+ byteOrder := byteOrders[goarch]
+ if disasm == nil || byteOrder == nil {
+ return nil, fmt.Errorf("unsupported architecture")
+ }
+
+ // Filter out section symbols, overwriting syms in place.
+ keep := syms[:0]
+ for _, sym := range syms {
+ switch sym.Name {
+ case "runtime.text", "text", "_text", "runtime.etext", "etext", "_etext":
+ // drop
+ default:
+ keep = append(keep, sym)
+ }
+ }
+ syms = keep
+ d := &Disasm{
+ syms: syms,
+ pcln: pcln,
+ text: textBytes,
+ textStart: textStart,
+ textEnd: textStart + uint64(len(textBytes)),
+ goarch: goarch,
+ disasm: disasm,
+ byteOrder: byteOrder,
+ }
+
+ return d, nil
+}
+
+// lookup finds the symbol name containing addr.
+func (d *Disasm) lookup(addr uint64) (name string, base uint64) {
+ i := sort.Search(len(d.syms), func(i int) bool { return addr < d.syms[i].Addr })
+ if i > 0 {
+ s := d.syms[i-1]
+ if s.Addr != 0 && s.Addr <= addr && addr < s.Addr+uint64(s.Size) {
+ return s.Name, s.Addr
+ }
+ }
+ return "", 0
+}
+
+// base returns the final element in the path.
+// It works on both Windows and Unix paths,
+// regardless of host operating system.
+func base(path string) string {
+ path = path[strings.LastIndex(path, "/")+1:]
+ path = path[strings.LastIndex(path, `\`)+1:]
+ return path
+}
+
+// Print prints a disassembly of the file to w.
+// If filter is non-nil, the disassembly only includes functions with names matching filter.
+// The disassembly only includes functions that overlap the range [start, end).
+func (d *Disasm) Print(w io.Writer, filter *regexp.Regexp, start, end uint64) {
+ if start < d.textStart {
+ start = d.textStart
+ }
+ if end > d.textEnd {
+ end = d.textEnd
+ }
+ printed := false
+ bw := bufio.NewWriter(w)
+ for _, sym := range d.syms {
+ symStart := sym.Addr
+ symEnd := sym.Addr + uint64(sym.Size)
+ if sym.Code != 'T' && sym.Code != 't' ||
+ symStart < d.textStart ||
+ symEnd <= start || end <= symStart ||
+ filter != nil && !filter.MatchString(sym.Name) {
+ continue
+ }
+ if printed {
+ fmt.Fprintf(bw, "\n")
+ }
+ printed = true
+
+ file, _, _ := d.pcln.PCToLine(sym.Addr)
+ fmt.Fprintf(bw, "TEXT %s(SB) %s\n", sym.Name, file)
+
+ tw := tabwriter.NewWriter(bw, 1, 8, 1, '\t', 0)
+ if symEnd > end {
+ symEnd = end
+ }
+ code := d.text[:end-d.textStart]
+ d.Decode(symStart, symEnd, func(pc, size uint64, file string, line int, text string) {
+ i := pc - d.textStart
+ fmt.Fprintf(tw, "\t%s:%d\t%#x\t", base(file), line, pc)
+ if size%4 != 0 || d.goarch == "386" || d.goarch == "amd64" {
+ // Print instruction as bytes.
+ fmt.Fprintf(tw, "%x", code[i:i+size])
+ } else {
+ // Print instruction as 32-bit words.
+ for j := uint64(0); j < size; j += 4 {
+ if j > 0 {
+ fmt.Fprintf(tw, " ")
+ }
+ fmt.Fprintf(tw, "%08x", d.byteOrder.Uint32(code[i+j:]))
+ }
+ }
+ fmt.Fprintf(tw, "\t%s\n", text)
+ })
+ tw.Flush()
+ }
+ bw.Flush()
+}
+
+// Decode disassembles the text segment range [start, end), calling f for each instruction.
+func (d *Disasm) Decode(start, end uint64, f func(pc, size uint64, file string, line int, text string)) {
+ if start < d.textStart {
+ start = d.textStart
+ }
+ if end > d.textEnd {
+ end = d.textEnd
+ }
+ code := d.text[:end-d.textStart]
+ lookup := d.lookup
+ for pc := start; pc < end; {
+ i := pc - d.textStart
+ text, size := d.disasm(code[i:], pc, lookup)
+ file, line, _ := d.pcln.PCToLine(pc)
+ f(pc, uint64(size), file, line, text)
+ pc += uint64(size)
+ }
+}
+
+type lookupFunc func(addr uint64) (sym string, base uint64)
+type disasmFunc func(code []byte, pc uint64, lookup lookupFunc) (text string, size int)
+
+func disasm_386(code []byte, pc uint64, lookup lookupFunc) (string, int) {
+ return disasm_x86(code, pc, lookup, 32)
+}
+
+func disasm_amd64(code []byte, pc uint64, lookup lookupFunc) (string, int) {
+ return disasm_x86(code, pc, lookup, 64)
+}
+
+func disasm_x86(code []byte, pc uint64, lookup lookupFunc, arch int) (string, int) {
+ inst, err := x86asm.Decode(code, 64)
+ var text string
+ size := inst.Len
+ if err != nil || size == 0 || inst.Op == 0 {
+ size = 1
+ text = "?"
+ } else {
+ text = x86asm.Plan9Syntax(inst, pc, lookup)
+ }
+ return text, size
+}
+
+type textReader struct {
+ code []byte
+ pc uint64
+}
+
+func (r textReader) ReadAt(data []byte, off int64) (n int, err error) {
+ if off < 0 || uint64(off) < r.pc {
+ return 0, io.EOF
+ }
+ d := uint64(off) - r.pc
+ if d >= uint64(len(r.code)) {
+ return 0, io.EOF
+ }
+ n = copy(data, r.code[d:])
+ if n < len(data) {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+func disasm_arm(code []byte, pc uint64, lookup lookupFunc) (string, int) {
+ inst, err := armasm.Decode(code, armasm.ModeARM)
+ var text string
+ size := inst.Len
+ if err != nil || size == 0 || inst.Op == 0 {
+ size = 4
+ text = "?"
+ } else {
+ text = armasm.Plan9Syntax(inst, pc, lookup, textReader{code, pc})
+ }
+ return text, size
+}
+
+var disasms = map[string]disasmFunc{
+ "386": disasm_386,
+ "amd64": disasm_amd64,
+ "arm": disasm_arm,
+}
+
+var byteOrders = map[string]binary.ByteOrder{
+ "386": binary.LittleEndian,
+ "amd64": binary.LittleEndian,
+ "arm": binary.LittleEndian,
+ "power64": binary.BigEndian,
+ "power64le": binary.LittleEndian,
+}
diff --git a/src/cmd/internal/objfile/objfile.go b/src/cmd/internal/objfile/objfile.go
index 3d4a5d27c..9227ef387 100644
--- a/src/cmd/internal/objfile/objfile.go
+++ b/src/cmd/internal/objfile/objfile.go
@@ -9,6 +9,7 @@ import (
"debug/gosym"
"fmt"
"os"
+ "sort"
)
type rawFile interface {
@@ -62,9 +63,20 @@ func (f *File) Close() error {
}
func (f *File) Symbols() ([]Sym, error) {
- return f.raw.symbols()
+ syms, err := f.raw.symbols()
+ if err != nil {
+ return nil, err
+ }
+ sort.Sort(byAddr(syms))
+ return syms, nil
}
+type byAddr []Sym
+
+func (x byAddr) Less(i, j int) bool { return x[i].Addr < x[j].Addr }
+func (x byAddr) Len() int { return len(x) }
+func (x byAddr) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
func (f *File) PCLineTable() (*gosym.Table, error) {
textStart, symtab, pclntab, err := f.raw.pcln()
if err != nil {
diff --git a/src/cmd/objdump/main.go b/src/cmd/objdump/main.go
index 0f125c98b..708a85370 100644
--- a/src/cmd/objdump/main.go
+++ b/src/cmd/objdump/main.go
@@ -32,24 +32,15 @@
package main
import (
- "bufio"
- "debug/gosym"
- "encoding/binary"
"flag"
"fmt"
- "io"
"log"
"os"
"regexp"
- "sort"
"strconv"
"strings"
- "text/tabwriter"
"cmd/internal/objfile"
-
- "cmd/internal/rsc.io/arm/armasm"
- "cmd/internal/rsc.io/x86/x86asm"
)
var symregexp = flag.String("s", "", "only dump symbols matching this regexp")
@@ -87,227 +78,30 @@ func main() {
log.Fatal(err)
}
- syms, err := f.Symbols()
+ dis, err := f.Disasm()
if err != nil {
- log.Fatalf("reading %s: %v", flag.Arg(0), err)
+ log.Fatal("disassemble %s: %v", flag.Arg(0), err)
}
- tab, err := f.PCLineTable()
- if err != nil {
- log.Fatalf("reading %s: %v", flag.Arg(0), err)
- }
-
- textStart, textBytes, err := f.Text()
- if err != nil {
- log.Fatalf("reading %s: %v", flag.Arg(0), err)
- }
-
- goarch := f.GOARCH()
-
- disasm := disasms[goarch]
- if disasm == nil {
- log.Fatalf("reading %s: unknown architecture", flag.Arg(0))
- }
-
- // Filter out section symbols, overwriting syms in place.
- keep := syms[:0]
- for _, sym := range syms {
- switch sym.Name {
- case "runtime.text", "text", "_text", "runtime.etext", "etext", "_etext":
- // drop
- default:
- keep = append(keep, sym)
- }
- }
- syms = keep
-
- sort.Sort(ByAddr(syms))
- lookup := func(addr uint64) (string, uint64) {
- i := sort.Search(len(syms), func(i int) bool { return addr < syms[i].Addr })
- if i > 0 {
- s := syms[i-1]
- if s.Addr != 0 && s.Addr <= addr && addr < s.Addr+uint64(s.Size) {
- return s.Name, s.Addr
- }
- }
- return "", 0
- }
-
- if flag.NArg() == 1 {
- // disassembly of entire object - our format
- dump(tab, lookup, disasm, goarch, syms, textBytes, textStart)
+ switch flag.NArg() {
+ default:
+ usage()
+ case 1:
+ // disassembly of entire object
+ dis.Print(os.Stdout, symRE, 0, ^uint64(0))
os.Exit(0)
- }
-
- // disassembly of specific piece of object - gnu objdump format for pprof
- gnuDump(tab, lookup, disasm, textBytes, textStart)
- os.Exit(0)
-}
-
-// base returns the final element in the path.
-// It works on both Windows and Unix paths.
-func base(path string) string {
- path = path[strings.LastIndex(path, "/")+1:]
- path = path[strings.LastIndex(path, `\`)+1:]
- return path
-}
-
-func dump(tab *gosym.Table, lookup lookupFunc, disasm disasmFunc, goarch string, syms []objfile.Sym, textData []byte, textStart uint64) {
- stdout := bufio.NewWriter(os.Stdout)
- defer stdout.Flush()
-
- printed := false
- for _, sym := range syms {
- if (sym.Code != 'T' && sym.Code != 't') || sym.Size == 0 || sym.Addr < textStart || symRE != nil && !symRE.MatchString(sym.Name) {
- continue
- }
- if sym.Addr >= textStart+uint64(len(textData)) || sym.Addr+uint64(sym.Size) > textStart+uint64(len(textData)) {
- break
- }
- if printed {
- fmt.Fprintf(stdout, "\n")
- } else {
- printed = true
- }
- file, _, _ := tab.PCToLine(sym.Addr)
- fmt.Fprintf(stdout, "TEXT %s(SB) %s\n", sym.Name, file)
- tw := tabwriter.NewWriter(stdout, 1, 8, 1, '\t', 0)
- start := sym.Addr
- end := sym.Addr + uint64(sym.Size)
- for pc := start; pc < end; {
- i := pc - textStart
- text, size := disasm(textData[i:end-textStart], pc, lookup)
- file, line, _ := tab.PCToLine(pc)
-
- // ARM is word-based, so show actual word hex, not byte hex.
- // Since ARM is little endian, they're different.
- if goarch == "arm" && size == 4 {
- fmt.Fprintf(tw, "\t%s:%d\t%#x\t%08x\t%s\n", base(file), line, pc, binary.LittleEndian.Uint32(textData[i:i+uint64(size)]), text)
- } else {
- fmt.Fprintf(tw, "\t%s:%d\t%#x\t%x\t%s\n", base(file), line, pc, textData[i:i+uint64(size)], text)
- }
- pc += uint64(size)
- }
- tw.Flush()
- }
-}
-
-func disasm_386(code []byte, pc uint64, lookup lookupFunc) (string, int) {
- return disasm_x86(code, pc, lookup, 32)
-}
-
-func disasm_amd64(code []byte, pc uint64, lookup lookupFunc) (string, int) {
- return disasm_x86(code, pc, lookup, 64)
-}
-
-func disasm_x86(code []byte, pc uint64, lookup lookupFunc, arch int) (string, int) {
- inst, err := x86asm.Decode(code, 64)
- var text string
- size := inst.Len
- if err != nil || size == 0 || inst.Op == 0 {
- size = 1
- text = "?"
- } else {
- text = x86asm.Plan9Syntax(inst, pc, lookup)
- }
- return text, size
-}
-
-type textReader struct {
- code []byte
- pc uint64
-}
-
-func (r textReader) ReadAt(data []byte, off int64) (n int, err error) {
- if off < 0 || uint64(off) < r.pc {
- return 0, io.EOF
- }
- d := uint64(off) - r.pc
- if d >= uint64(len(r.code)) {
- return 0, io.EOF
- }
- n = copy(data, r.code[d:])
- if n < len(data) {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-func disasm_arm(code []byte, pc uint64, lookup lookupFunc) (string, int) {
- inst, err := armasm.Decode(code, armasm.ModeARM)
- var text string
- size := inst.Len
- if err != nil || size == 0 || inst.Op == 0 {
- size = 4
- text = "?"
- } else {
- text = armasm.Plan9Syntax(inst, pc, lookup, textReader{code, pc})
- }
- return text, size
-}
-
-var disasms = map[string]disasmFunc{
- "386": disasm_386,
- "amd64": disasm_amd64,
- "arm": disasm_arm,
-}
-
-func gnuDump(tab *gosym.Table, lookup lookupFunc, disasm disasmFunc, textData []byte, textStart uint64) {
- start, err := strconv.ParseUint(strings.TrimPrefix(flag.Arg(1), "0x"), 16, 64)
- if err != nil {
- log.Fatalf("invalid start PC: %v", err)
- }
- end, err := strconv.ParseUint(strings.TrimPrefix(flag.Arg(2), "0x"), 16, 64)
- if err != nil {
- log.Fatalf("invalid end PC: %v", err)
- }
- if start < textStart {
- start = textStart
- }
- if end < start {
- end = start
- }
- if end > textStart+uint64(len(textData)) {
- end = textStart + uint64(len(textData))
- }
-
- stdout := bufio.NewWriter(os.Stdout)
- defer stdout.Flush()
-
- // For now, find spans of same PC/line/fn and
- // emit them as having dummy instructions.
- var (
- spanPC uint64
- spanFile string
- spanLine int
- spanFn *gosym.Func
- )
- flush := func(endPC uint64) {
- if spanPC == 0 {
- return
- }
- fmt.Fprintf(stdout, "%s:%d\n", spanFile, spanLine)
- for pc := spanPC; pc < endPC; {
- text, size := disasm(textData[pc-textStart:], pc, lookup)
- fmt.Fprintf(stdout, " %x: %s\n", pc, text)
- pc += uint64(size)
+ case 3:
+ // disassembly of PC range
+ start, err := strconv.ParseUint(strings.TrimPrefix(flag.Arg(1), "0x"), 16, 64)
+ if err != nil {
+ log.Fatalf("invalid start PC: %v", err)
}
- spanPC = 0
- }
-
- for pc := start; pc < end; pc++ {
- file, line, fn := tab.PCToLine(pc)
- if file != spanFile || line != spanLine || fn != spanFn {
- flush(pc)
- spanPC, spanFile, spanLine, spanFn = pc, file, line, fn
+ end, err := strconv.ParseUint(strings.TrimPrefix(flag.Arg(2), "0x"), 16, 64)
+ if err != nil {
+ log.Fatalf("invalid end PC: %v", err)
}
+ dis.Print(os.Stdout, symRE, start, end)
+ os.Exit(0)
}
- flush(end)
}
-
-type ByAddr []objfile.Sym
-
-func (x ByAddr) Less(i, j int) bool { return x[i].Addr < x[j].Addr }
-func (x ByAddr) Len() int { return len(x) }
-func (x ByAddr) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
diff --git a/src/cmd/objdump/objdump_test.go b/src/cmd/objdump/objdump_test.go
index 5047f9aa8..bd09ae9f9 100644
--- a/src/cmd/objdump/objdump_test.go
+++ b/src/cmd/objdump/objdump_test.go
@@ -5,117 +5,15 @@
package main
import (
- "bufio"
- "bytes"
- "fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
- "strconv"
"strings"
"testing"
)
-func loadSyms(t *testing.T) map[string]string {
- switch runtime.GOOS {
- case "android", "nacl":
- t.Skipf("skipping on %s", runtime.GOOS)
- }
-
- cmd := exec.Command("go", "tool", "nm", os.Args[0])
- out, err := cmd.CombinedOutput()
- if err != nil {
- t.Fatalf("go tool nm %v: %v\n%s", os.Args[0], err, string(out))
- }
- syms := make(map[string]string)
- scanner := bufio.NewScanner(bytes.NewReader(out))
- for scanner.Scan() {
- f := strings.Fields(scanner.Text())
- if len(f) < 3 {
- continue
- }
- syms[f[2]] = f[0]
- }
- if err := scanner.Err(); err != nil {
- t.Fatalf("error reading symbols: %v", err)
- }
- return syms
-}
-
-func runObjDump(t *testing.T, exe, startaddr, endaddr string) (path, lineno string) {
- switch runtime.GOOS {
- case "android", "nacl":
- t.Skipf("skipping on %s", runtime.GOOS)
- }
- switch runtime.GOARCH {
- case "power64", "power64le":
- t.Skipf("skipping on %s, issue 9039", runtime.GOARCH)
- }
-
- cmd := exec.Command(exe, os.Args[0], startaddr, endaddr)
- out, err := cmd.CombinedOutput()
- if err != nil {
- t.Fatalf("go tool objdump %v: %v\n%s", os.Args[0], err, string(out))
- }
- f := strings.Split(string(out), "\n")
- if len(f) < 1 {
- t.Fatal("objdump output must have at least one line")
- }
- pathAndLineNo := f[0]
- f = strings.Split(pathAndLineNo, ":")
- if runtime.GOOS == "windows" {
- switch len(f) {
- case 2:
- return f[0], f[1]
- case 3:
- return f[0] + ":" + f[1], f[2]
- default:
- t.Fatalf("no line number found in %q", pathAndLineNo)
- }
- }
- if len(f) != 2 {
- t.Fatalf("no line number found in %q", pathAndLineNo)
- }
- return f[0], f[1]
-}
-
-func testObjDump(t *testing.T, exe, startaddr, endaddr string, line int) {
- srcPath, srcLineNo := runObjDump(t, exe, startaddr, endaddr)
- fi1, err := os.Stat("objdump_test.go")
- if err != nil {
- t.Fatalf("Stat failed: %v", err)
- }
- fi2, err := os.Stat(srcPath)
- if err != nil {
- t.Fatalf("Stat failed: %v", err)
- }
- if !os.SameFile(fi1, fi2) {
- t.Fatalf("objdump_test.go and %s are not same file", srcPath)
- }
- if srcLineNo != fmt.Sprint(line) {
- t.Fatalf("line number = %v; want %d", srcLineNo, line)
- }
-}
-
-func TestObjDump(t *testing.T) {
- _, _, line, _ := runtime.Caller(0)
- syms := loadSyms(t)
-
- tmp, exe := buildObjdump(t)
- defer os.RemoveAll(tmp)
-
- startaddr := syms["cmd/objdump.TestObjDump"]
- addr, err := strconv.ParseUint(startaddr, 16, 64)
- if err != nil {
- t.Fatalf("invalid start address %v: %v", startaddr, err)
- }
- endaddr := fmt.Sprintf("%x", addr+10)
- testObjDump(t, exe, startaddr, endaddr, line-1)
- testObjDump(t, exe, "0x"+startaddr, "0x"+endaddr, line-1)
-}
-
func buildObjdump(t *testing.T) (tmp, exe string) {
switch runtime.GOOS {
case "android", "nacl":
diff --git a/src/cmd/pprof/README b/src/cmd/pprof/README
new file mode 100644
index 000000000..a728ef235
--- /dev/null
+++ b/src/cmd/pprof/README
@@ -0,0 +1,8 @@
+The pprof in this directory is adapted from the pprof used inside Google
+for C++, Java, and Go programs. Because it was developed for that broader
+context, it is overgeneralized when used here for the specific use case
+of profiling standard Go programs. However, we've left the abstractions
+intact in order to share updates between this copy and Google's internal one.
+
+Please do not take the level of abstraction in this program as an example
+to follow in your own.
diff --git a/src/cmd/pprof/doc.go b/src/cmd/pprof/doc.go
new file mode 100644
index 000000000..c6ff11d10
--- /dev/null
+++ b/src/cmd/pprof/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Pprof interprets and displays profiles of Go programs.
+//
+// Usage:
+//
+// go tool pprof binary profile
+//
+// For more information, see http://blog.golang.org/profiling-go-programs.
+package main
diff --git a/src/cmd/pprof/pprof.go b/src/cmd/pprof/pprof.go
index 89a5bb7d2..44f4f6cb7 100644
--- a/src/cmd/pprof/pprof.go
+++ b/src/cmd/pprof/pprof.go
@@ -11,6 +11,7 @@ import (
"os"
"regexp"
"strings"
+ "sync"
"cmd/internal/objfile"
"cmd/pprof/internal/commands"
@@ -100,7 +101,10 @@ func (flags) ExtraUsage() string {
// objTool implements plugin.ObjTool using Go libraries
// (instead of invoking GNU binutils).
-type objTool struct{}
+type objTool struct {
+ mu sync.Mutex
+ disasmCache map[string]*objfile.Disasm
+}
func (*objTool) Open(name string, start uint64) (plugin.ObjFile, error) {
of, err := objfile.Open(name)
@@ -119,8 +123,39 @@ func (*objTool) Demangle(names []string) (map[string]string, error) {
return make(map[string]string), nil
}
-func (*objTool) Disasm(file string, start, end uint64) ([]plugin.Inst, error) {
- return nil, fmt.Errorf("disassembly not supported")
+func (t *objTool) Disasm(file string, start, end uint64) ([]plugin.Inst, error) {
+ d, err := t.cachedDisasm(file)
+ if err != nil {
+ return nil, err
+ }
+ var asm []plugin.Inst
+ d.Decode(start, end, func(pc, size uint64, file string, line int, text string) {
+ asm = append(asm, plugin.Inst{Addr: pc, File: file, Line: line, Text: text})
+ })
+ return asm, nil
+}
+
+func (t *objTool) cachedDisasm(file string) (*objfile.Disasm, error) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.disasmCache == nil {
+ t.disasmCache = make(map[string]*objfile.Disasm)
+ }
+ d := t.disasmCache[file]
+ if d != nil {
+ return d, nil
+ }
+ f, err := objfile.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ d, err = f.Disasm()
+ f.Close()
+ if err != nil {
+ return nil, err
+ }
+ t.disasmCache[file] = d
+ return d, nil
}
func (*objTool) SetConfig(config string) {
diff --git a/src/compress/lzw/reader.go b/src/compress/lzw/reader.go
index 0835bd8b9..526620c82 100644
--- a/src/compress/lzw/reader.go
+++ b/src/compress/lzw/reader.go
@@ -11,7 +11,7 @@
// two non-literal codes are a clear code and an EOF code.
//
// The TIFF file format uses a similar but incompatible version of the LZW
-// algorithm. See the code.google.com/p/go.image/tiff/lzw package for an
+// algorithm. See the golang.org/x/image/tiff/lzw package for an
// implementation.
package lzw
diff --git a/src/crypto/crypto.go b/src/crypto/crypto.go
index 5a91baca0..59b23e93f 100644
--- a/src/crypto/crypto.go
+++ b/src/crypto/crypto.go
@@ -21,7 +21,7 @@ func (h Hash) HashFunc() Hash {
}
const (
- MD4 Hash = 1 + iota // import code.google.com/p/go.crypto/md4
+ MD4 Hash = 1 + iota // import golang.org/x/crypto/md4
MD5 // import crypto/md5
SHA1 // import crypto/sha1
SHA224 // import crypto/sha256
@@ -29,11 +29,11 @@ const (
SHA384 // import crypto/sha512
SHA512 // import crypto/sha512
MD5SHA1 // no implementation; MD5+SHA1 used for TLS RSA
- RIPEMD160 // import code.google.com/p/go.crypto/ripemd160
- SHA3_224 // import code.google.com/p/go.crypto/sha3
- SHA3_256 // import code.google.com/p/go.crypto/sha3
- SHA3_384 // import code.google.com/p/go.crypto/sha3
- SHA3_512 // import code.google.com/p/go.crypto/sha3
+ RIPEMD160 // import golang.org/x/crypto/ripemd160
+ SHA3_224 // import golang.org/x/crypto/sha3
+ SHA3_256 // import golang.org/x/crypto/sha3
+ SHA3_384 // import golang.org/x/crypto/sha3
+ SHA3_512 // import golang.org/x/crypto/sha3
maxHash
)
diff --git a/src/debug/goobj/read_test.go b/src/debug/goobj/read_test.go
index dee140533..cc991e5d9 100644
--- a/src/debug/goobj/read_test.go
+++ b/src/debug/goobj/read_test.go
@@ -12,7 +12,7 @@ var importPathToPrefixTests = []struct {
}{
{"runtime", "runtime"},
{"sync/atomic", "sync/atomic"},
- {"code.google.com/p/go.tools/godoc", "code.google.com/p/go.tools/godoc"},
+ {"golang.org/x/tools/godoc", "golang.org/x/tools/godoc"},
{"foo.bar/baz.quux", "foo.bar/baz%2equux"},
{"", ""},
{"%foo%bar", "%25foo%25bar"},
diff --git a/src/net/http/cookiejar/jar.go b/src/net/http/cookiejar/jar.go
index 389ab58e4..0e0fac928 100644
--- a/src/net/http/cookiejar/jar.go
+++ b/src/net/http/cookiejar/jar.go
@@ -30,7 +30,7 @@ import (
// set a cookie for bar.com.
//
// A public suffix list implementation is in the package
-// code.google.com/p/go.net/publicsuffix.
+// golang.org/x/net/publicsuffix.
type PublicSuffixList interface {
// PublicSuffix returns the public suffix of domain.
//
diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go
index bb44ac853..5e0a0053c 100644
--- a/src/net/http/serve_test.go
+++ b/src/net/http/serve_test.go
@@ -2819,6 +2819,7 @@ func benchmarkClientServerParallel(b *testing.B, parallelism int, useTLS bool) {
InsecureSkipVerify: true,
},
}
+ defer noVerifyTransport.CloseIdleConnections()
client := &Client{Transport: noVerifyTransport}
for pb.Next() {
res, err := client.Get(ts.URL)
diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go
index bc9c00eff..197d3e8b4 100644
--- a/src/os/exec/exec_test.go
+++ b/src/os/exec/exec_test.go
@@ -246,7 +246,7 @@ func TestPipeLookPathLeak(t *testing.T) {
}
func numOpenFDS(t *testing.T) (n int, lsof []byte) {
- lsof, err := exec.Command("lsof", "-n", "-p", strconv.Itoa(os.Getpid())).Output()
+ lsof, err := exec.Command("lsof", "-b", "-n", "-p", strconv.Itoa(os.Getpid())).Output()
if err != nil {
t.Skip("skipping test; error finding or running lsof")
}
diff --git a/src/os/file_plan9.go b/src/os/file_plan9.go
index 5efc2a4f1..132594eed 100644
--- a/src/os/file_plan9.go
+++ b/src/os/file_plan9.go
@@ -25,7 +25,8 @@ type file struct {
dirinfo *dirInfo // nil unless directory being read
}
-// Fd returns the integer Unix file descriptor referencing the open file.
+// Fd returns the integer Plan 9 file descriptor referencing the open file.
+// The file descriptor is valid only until f.Close is called or f is garbage collected.
func (f *File) Fd() uintptr {
if f == nil {
return ^(uintptr(0))
diff --git a/src/os/file_unix.go b/src/os/file_unix.go
index f59d563e6..ff4fc7d12 100644
--- a/src/os/file_unix.go
+++ b/src/os/file_unix.go
@@ -29,6 +29,7 @@ type file struct {
}
// Fd returns the integer Unix file descriptor referencing the open file.
+// The file descriptor is valid only until f.Close is called or f is garbage collected.
func (f *File) Fd() uintptr {
if f == nil {
return ^(uintptr(0))
diff --git a/src/os/file_windows.go b/src/os/file_windows.go
index 3b5519390..2a90a5055 100644
--- a/src/os/file_windows.go
+++ b/src/os/file_windows.go
@@ -36,6 +36,7 @@ type file struct {
}
// Fd returns the Windows handle referencing the open file.
+// The handle is valid only until f.Close is called or f is garbage collected.
func (file *File) Fd() uintptr {
if file == nil {
return uintptr(syscall.InvalidHandle)
diff --git a/src/reflect/type.go b/src/reflect/type.go
index 572e611fa..2064922f6 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -1533,12 +1533,8 @@ func (gc *gcProg) appendProg(t *rtype) {
gc.appendProg(e)
}
case Interface:
- gc.appendWord(bitsMultiWord)
- if t.NumMethod() == 0 {
- gc.appendWord(bitsEface)
- } else {
- gc.appendWord(bitsIface)
- }
+ gc.appendWord(bitsPointer)
+ gc.appendWord(bitsPointer)
case Struct:
c := t.NumField()
for i := 0; i < c; i++ {
diff --git a/src/run.bash b/src/run.bash
index 3c9430c87..91f12a174 100755
--- a/src/run.bash
+++ b/src/run.bash
@@ -66,7 +66,8 @@ go test sync -short -timeout=$(expr 120 \* $timeout_scale)s -cpu=10
# Race detector only supported on Linux, FreeBSD and OS X,
# and only on amd64, and only when cgo is enabled.
-case "$GOHOSTOS-$GOOS-$GOARCH-$CGO_ENABLED" in
+# DISABLED until we get garbage collection working.
+case "$GOHOSTOS-$GOOS-$GOARCH-$CGO_ENABLED-XXX-DISABLED" in
linux-linux-amd64-1 | freebsd-freebsd-amd64-1 | darwin-darwin-amd64-1)
echo
echo '# Testing race detector.'
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index 8cbabfed2..501e64b09 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -502,7 +502,7 @@ fail:
// return 1;
// }else
// return 0;
-TEXT runtime·casp(SB), NOSPLIT, $0-13
+TEXT runtime·casp1(SB), NOSPLIT, $0-13
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
@@ -537,7 +537,7 @@ TEXT runtime·xchg(SB), NOSPLIT, $0-12
MOVL AX, ret+8(FP)
RET
-TEXT runtime·xchgp(SB), NOSPLIT, $0-12
+TEXT runtime·xchgp1(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
XCHGL AX, 0(BX)
@@ -555,7 +555,7 @@ again:
JNZ again
RET
-TEXT runtime·atomicstorep(SB), NOSPLIT, $0-8
+TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 2871a172a..1aa2d71a8 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -489,7 +489,7 @@ TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-16
// return 1;
// } else
// return 0;
-TEXT runtime·casp(SB), NOSPLIT, $0-25
+TEXT runtime·casp1(SB), NOSPLIT, $0-25
MOVQ ptr+0(FP), BX
MOVQ old+8(FP), AX
MOVQ new+16(FP), CX
@@ -541,7 +541,7 @@ TEXT runtime·xchg64(SB), NOSPLIT, $0-24
MOVQ AX, ret+16(FP)
RET
-TEXT runtime·xchgp(SB), NOSPLIT, $0-24
+TEXT runtime·xchgp1(SB), NOSPLIT, $0-24
MOVQ ptr+0(FP), BX
MOVQ new+8(FP), AX
XCHGQ AX, 0(BX)
@@ -559,7 +559,7 @@ again:
JNZ again
RET
-TEXT runtime·atomicstorep(SB), NOSPLIT, $0-16
+TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-16
MOVQ ptr+0(FP), BX
MOVQ val+8(FP), AX
XCHGQ AX, 0(BX)
diff --git a/src/runtime/asm_amd64p32.s b/src/runtime/asm_amd64p32.s
index 0d62320de..153564b14 100644
--- a/src/runtime/asm_amd64p32.s
+++ b/src/runtime/asm_amd64p32.s
@@ -460,7 +460,7 @@ fail:
// return 1;
// } else
// return 0;
-TEXT runtime·casp(SB), NOSPLIT, $0-17
+TEXT runtime·casp1(SB), NOSPLIT, $0-17
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
@@ -512,7 +512,7 @@ TEXT runtime·xchg64(SB), NOSPLIT, $0-24
MOVQ AX, ret+16(FP)
RET
-TEXT runtime·xchgp(SB), NOSPLIT, $0-12
+TEXT runtime·xchgp1(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
XCHGL AX, 0(BX)
@@ -530,7 +530,7 @@ again:
JNZ again
RET
-TEXT runtime·atomicstorep(SB), NOSPLIT, $0-8
+TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
diff --git a/src/runtime/asm_power64x.s b/src/runtime/asm_power64x.s
index a75bb8ce1..ba900c2b3 100644
--- a/src/runtime/asm_power64x.s
+++ b/src/runtime/asm_power64x.s
@@ -472,7 +472,7 @@ TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-16
// return 1;
// } else
// return 0;
-TEXT runtime·casp(SB), NOSPLIT, $0-25
+TEXT runtime·casp1(SB), NOSPLIT, $0-25
BR runtime·cas64(SB)
// uint32 xadd(uint32 volatile *ptr, int32 delta)
@@ -529,7 +529,7 @@ TEXT runtime·xchg64(SB), NOSPLIT, $0-24
MOVD R3, ret+16(FP)
RETURN
-TEXT runtime·xchgp(SB), NOSPLIT, $0-24
+TEXT runtime·xchgp1(SB), NOSPLIT, $0-24
BR runtime·xchg64(SB)
TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
@@ -538,7 +538,7 @@ TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
TEXT runtime·procyield(SB),NOSPLIT,$0-0
RETURN
-TEXT runtime·atomicstorep(SB), NOSPLIT, $0-16
+TEXT runtime·atomicstorep1(SB), NOSPLIT, $0-16
BR runtime·atomicstore64(SB)
TEXT runtime·atomicstore(SB), NOSPLIT, $0-12
diff --git a/src/runtime/atomic.go b/src/runtime/atomic.go
index 7e9d9b3aa..a0e4d84e9 100644
--- a/src/runtime/atomic.go
+++ b/src/runtime/atomic.go
@@ -20,8 +20,16 @@ func xchg(ptr *uint32, new uint32) uint32
//go:noescape
func xchg64(ptr *uint64, new uint64) uint64
-//go:noescape
-func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
+// Cannot use noescape here: ptr does not but new does escape.
+// Instead use noescape(ptr) in wrapper below.
+func xchgp1(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
+
+//go:nosplit
+func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
+ old := xchgp1(noescape(ptr), new)
+ writebarrierptr_nostore((*uintptr)(ptr), uintptr(new))
+ return old
+}
//go:noescape
func xchguintptr(ptr *uintptr, new uintptr) uintptr
@@ -47,5 +55,27 @@ func atomicstore(ptr *uint32, val uint32)
//go:noescape
func atomicstore64(ptr *uint64, val uint64)
-//go:noescape
-func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer)
+// Cannot use noescape here: ptr does not but val does escape.
+// Instead use noescape(ptr) in wrapper below.
+func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
+
+//go:nosplit
+func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer) {
+ atomicstorep1(noescape(ptr), val)
+ // TODO(rsc): Why does the compiler think writebarrierptr_nostore's dst argument escapes?
+ writebarrierptr_nostore((*uintptr)(noescape(ptr)), uintptr(val))
+}
+
+// Cannot use noescape here: ptr does not but new does escape.
+// Instead use noescape(ptr) in wrapper below.
+func casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+
+//go:nosplit
+func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
+ ok := casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), old, new)
+ if !ok {
+ return false
+ }
+ writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
+ return true
+}
diff --git a/src/runtime/cgo/dragonfly.c b/src/runtime/cgo/dragonfly.c
index 3c95ff354..c233c8ba9 100644
--- a/src/runtime/cgo/dragonfly.c
+++ b/src/runtime/cgo/dragonfly.c
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build dragonfly
+
#include "textflag.h"
// Supply environ and __progname, because we don't
diff --git a/src/runtime/cgo/freebsd.c b/src/runtime/cgo/freebsd.c
index aefc481e6..4876b2abe 100644
--- a/src/runtime/cgo/freebsd.c
+++ b/src/runtime/cgo/freebsd.c
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build freebsd
+
#include "textflag.h"
// Supply environ and __progname, because we don't
diff --git a/src/runtime/cgo/netbsd.c b/src/runtime/cgo/netbsd.c
index de38bb770..076cc87f1 100644
--- a/src/runtime/cgo/netbsd.c
+++ b/src/runtime/cgo/netbsd.c
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build netbsd
+
#include "textflag.h"
// Supply environ and __progname, because we don't
diff --git a/src/runtime/cgo/openbsd.c b/src/runtime/cgo/openbsd.c
index 7c2b6c173..476649544 100644
--- a/src/runtime/cgo/openbsd.c
+++ b/src/runtime/cgo/openbsd.c
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build openbsd
+
#include "textflag.h"
// Supply environ, __progname and __guard_local, because
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index be352557f..65e918e84 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -26,7 +26,7 @@ var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
type LFNode struct {
- Next *LFNode
+ Next uint64
Pushcnt uintptr
}
diff --git a/src/runtime/gcinfo_test.go b/src/runtime/gcinfo_test.go
index 2c6d4d662..662b7546d 100644
--- a/src/runtime/gcinfo_test.go
+++ b/src/runtime/gcinfo_test.go
@@ -188,6 +188,6 @@ var (
infoString = []byte{BitsPointer, BitsDead}
infoSlice = []byte{BitsPointer, BitsDead, BitsDead}
- infoEface = []byte{BitsMultiWord, BitsEface}
- infoIface = []byte{BitsMultiWord, BitsIface}
+ infoEface = []byte{BitsPointer, BitsPointer}
+ infoIface = []byte{BitsPointer, BitsPointer}
)
diff --git a/src/runtime/heapdump.c b/src/runtime/heapdump.c
index 94a4bd2be..da14f2d24 100644
--- a/src/runtime/heapdump.c
+++ b/src/runtime/heapdump.c
@@ -251,7 +251,9 @@ dumpbv(BitVector *bv, uintptr offset)
for(i = 0; i < bv->n; i += BitsPerPointer) {
switch(bv->bytedata[i/8] >> i%8 & 3) {
case BitsDead:
- return;
+ // BitsDead has already been processed in makeheapobjbv.
+ // We should only see it in stack maps, in which case we should continue processing.
+ break;
case BitsScalar:
break;
case BitsPointer:
@@ -259,20 +261,7 @@ dumpbv(BitVector *bv, uintptr offset)
dumpint(offset + i / BitsPerPointer * PtrSize);
break;
case BitsMultiWord:
- switch(bv->bytedata[(i+BitsPerPointer)/8] >> (i+BitsPerPointer)%8 & 3) {
- default:
- runtime·throw("unexpected garbage collection bits");
- case BitsIface:
- dumpint(FieldKindIface);
- dumpint(offset + i / BitsPerPointer * PtrSize);
- i += BitsPerPointer;
- break;
- case BitsEface:
- dumpint(FieldKindEface);
- dumpint(offset + i / BitsPerPointer * PtrSize);
- i += BitsPerPointer;
- break;
- }
+ runtime·throw("bumpbv unexpected garbage collection bits");
}
}
}
diff --git a/src/runtime/lfstack.c b/src/runtime/lfstack.c
index 57e0af282..0ced839c2 100644
--- a/src/runtime/lfstack.c
+++ b/src/runtime/lfstack.c
@@ -46,7 +46,7 @@ runtime·lfstackpush(uint64 *head, LFNode *node)
new = (uint64)(uintptr)node|(((uint64)node->pushcnt&CNT_MASK)<<PTR_BITS);
for(;;) {
old = runtime·atomicload64(head);
- node->next = (LFNode*)(uintptr)(old&PTR_MASK);
+ node->next = old;
if(runtime·cas64(head, old, new))
break;
}
@@ -55,19 +55,17 @@ runtime·lfstackpush(uint64 *head, LFNode *node)
LFNode*
runtime·lfstackpop(uint64 *head)
{
- LFNode *node, *node2;
- uint64 old, new;
+ LFNode *node;
+ uint64 old, next;
for(;;) {
old = runtime·atomicload64(head);
if(old == 0)
return nil;
node = (LFNode*)(uintptr)(old&PTR_MASK);
- node2 = runtime·atomicloadp(&node->next);
- new = 0;
- if(node2 != nil)
- new = (uint64)(uintptr)node2|(((uint64)node2->pushcnt&CNT_MASK)<<PTR_BITS);
- if(runtime·cas64(head, old, new))
+ next = runtime·atomicload64(&node->next);
+
+ if(runtime·cas64(head, old, next))
return node;
}
}
diff --git a/src/runtime/lfstack_test.go b/src/runtime/lfstack_test.go
index e51877704..68f221d6e 100644
--- a/src/runtime/lfstack_test.go
+++ b/src/runtime/lfstack_test.go
@@ -121,7 +121,7 @@ func TestLFStackStress(t *testing.T) {
}
cnt++
sum2 += node.data
- node.Next = nil
+ node.Next = 0
}
}
if cnt != K {
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 294bc4870..fab8cf269 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -41,7 +41,7 @@ var zerobase uintptr
// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
-func mallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
+func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
if size == 0 {
return unsafe.Pointer(&zerobase)
}
@@ -245,6 +245,8 @@ func mallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
masksize = masksize * pointersPerByte / 8 // 4 bits per word
masksize++ // unroll flag in the beginning
if masksize > maxGCMask && typ.gc[1] != 0 {
+ // write barriers have not been updated to deal with this case yet.
+ gothrow("maxGCMask too small for now")
// If the mask is too large, unroll the program directly
// into the GC bitmap. It's 7 times slower than copying
// from the pre-unrolled mask, but saves 1/16 of type size
@@ -304,6 +306,18 @@ func mallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
}
}
marked:
+
+ // GCmarkterminate allocates black
+ // All slots hold nil so no scanning is needed.
+ // This may be racing with GC so do it atomically if there can be
+ // a race marking the bit.
+ if gcphase == _GCmarktermination {
+ mp := acquirem()
+ mp.ptrarg[0] = x
+ onM(gcmarknewobject_m)
+ releasem(mp)
+ }
+
if raceenabled {
racemalloc(x, size)
}
@@ -344,9 +358,40 @@ marked:
return x
}
+func loadPtrMask(typ *_type) []uint8 {
+ var ptrmask *uint8
+ nptr := (uintptr(typ.size) + ptrSize - 1) / ptrSize
+ if typ.kind&kindGCProg != 0 {
+ masksize := nptr
+ if masksize%2 != 0 {
+ masksize *= 2 // repeated
+ }
+ masksize = masksize * pointersPerByte / 8 // 4 bits per word
+ masksize++ // unroll flag in the beginning
+ if masksize > maxGCMask && typ.gc[1] != 0 {
+ // write barriers have not been updated to deal with this case yet.
+ gothrow("maxGCMask too small for now")
+ }
+ ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
+ // Check whether the program is already unrolled
+ // by checking if the unroll flag byte is set
+ maskword := uintptr(atomicloadp(unsafe.Pointer(ptrmask)))
+ if *(*uint8)(unsafe.Pointer(&maskword)) == 0 {
+ mp := acquirem()
+ mp.ptrarg[0] = unsafe.Pointer(typ)
+ onM(unrollgcprog_m)
+ releasem(mp)
+ }
+ ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
+ } else {
+ ptrmask = (*uint8)(unsafe.Pointer(typ.gc[0])) // pointer to unrolled mask
+ }
+ return (*[1 << 30]byte)(unsafe.Pointer(ptrmask))[:(nptr+1)/2]
+}
+
// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
- flags := 0
+ flags := uint32(0)
if typ.kind&kindNoPointers != 0 {
flags |= flagNoScan
}
@@ -355,7 +400,7 @@ func newobject(typ *_type) unsafe.Pointer {
// implementation of make builtin for slices
func newarray(typ *_type, n uintptr) unsafe.Pointer {
- flags := 0
+ flags := uint32(0)
if typ.kind&kindNoPointers != 0 {
flags |= flagNoScan
}
@@ -438,7 +483,20 @@ func gogc(force int32) {
mp = acquirem()
mp.gcing = 1
releasem(mp)
+
onM(stoptheworld)
+ onM(finishsweep_m) // finish sweep before we start concurrent scan.
+ if false { // To turn on concurrent scan and mark set to true...
+ onM(starttheworld)
+ // Do a concurrent heap scan before we stop the world.
+ onM(gcscan_m)
+ onM(stoptheworld)
+ onM(gcinstallmarkwb_m)
+ onM(starttheworld)
+ onM(gcmark_m)
+ onM(stoptheworld)
+ onM(gcinstalloffwb_m)
+ }
if mp != acquirem() {
gothrow("gogc: rescheduled")
}
@@ -469,6 +527,8 @@ func gogc(force int32) {
onM(gc_m)
}
+ onM(gccheckmark_m)
+
// all done
mp.gcing = 0
semrelease(&worldsema)
@@ -483,6 +543,14 @@ func gogc(force int32) {
}
}
+func GCcheckmarkenable() {
+ onM(gccheckmarkenable_m)
+}
+
+func GCcheckmarkdisable() {
+ onM(gccheckmarkdisable_m)
+}
+
// GC runs a garbage collection.
func GC() {
gogc(2)
diff --git a/src/runtime/malloc.h b/src/runtime/malloc.h
index adb8d3d67..522b11bba 100644
--- a/src/runtime/malloc.h
+++ b/src/runtime/malloc.h
@@ -86,6 +86,7 @@ typedef struct MSpan MSpan;
typedef struct MStats MStats;
typedef struct MLink MLink;
typedef struct GCStats GCStats;
+typedef struct Workbuf Workbuf;
enum
{
@@ -344,8 +345,6 @@ struct MCache
SudoG* sudogcache;
- void* gcworkbuf;
-
// Local allocator stats, flushed during GC.
uintptr local_nlookup; // number of pointer lookups
uintptr local_largefree; // bytes freed for large objects (>MaxSmallSize)
@@ -356,7 +355,7 @@ struct MCache
MSpan* runtime·MCache_Refill(MCache *c, int32 sizeclass);
void runtime·MCache_ReleaseAll(MCache *c);
void runtime·stackcache_clear(MCache *c);
-void runtime·gcworkbuffree(void *b);
+void runtime·gcworkbuffree(Workbuf *b);
enum
{
diff --git a/src/runtime/mcache.c b/src/runtime/mcache.c
index 5fdbe3266..95ddced3e 100644
--- a/src/runtime/mcache.c
+++ b/src/runtime/mcache.c
@@ -39,12 +39,12 @@ runtime·allocmcache(void)
return c;
}
+// mheap.lock needs to be held to release the gcworkbuf.
static void
freemcache(MCache *c)
{
runtime·MCache_ReleaseAll(c);
runtime·stackcache_clear(c);
- runtime·gcworkbuffree(c->gcworkbuf);
runtime·lock(&runtime·mheap.lock);
runtime·purgecachedstats(c);
runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
diff --git a/src/runtime/mgc0.c b/src/runtime/mgc0.c
index 897dc1415..f37c01af0 100644
--- a/src/runtime/mgc0.c
+++ b/src/runtime/mgc0.c
@@ -4,22 +4,72 @@
// Garbage collector (GC).
//
-// GC is:
-// - mark&sweep
-// - mostly precise (with the exception of some C-allocated objects, assembly frames/arguments, etc)
-// - parallel (up to MaxGcproc threads)
-// - partially concurrent (mark is stop-the-world, while sweep is concurrent)
-// - non-moving/non-compacting
-// - full (non-partial)
+// The GC runs concurrently with mutator threads, is type accurate (aka precise), allows multiple GC
+// thread to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
+// non-generational and non-compacting. Allocation is done using size segregated per P allocation
+// areas to minimize fragmentation while eliminating locks in the common case.
//
-// GC rate.
-// Next GC is after we've allocated an extra amount of memory proportional to
-// the amount already in use. The proportion is controlled by GOGC environment variable
-// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
-// (this mark is tracked in next_gc variable). This keeps the GC cost in linear
-// proportion to the allocation cost. Adjusting GOGC just changes the linear constant
-// (and also the amount of extra memory used).
+// The algorithm decomposes into several steps.
+// This is a high level description of the algorithm being used. For an overview of GC a good
+// place to start is Richard Jones' gchandbook.org.
+//
+// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
+// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
+// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978), 966-975.
+// For journal quality proofs that these steps are complete, correct, and terminate see
+// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
+// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
//
+// 0. Set phase = GCscan from GCoff.
+// 1. Wait for all P's to acknowledge phase change.
+// At this point all goroutines have passed through a GC safepoint and
+// know we are in the GCscan phase.
+// 2. GC scans all goroutine stacks, mark and enqueues all encountered pointers
+// (marking avoids most duplicate enqueuing but races may produce duplication which is benign).
+// Preempted goroutines are scanned before P schedules next goroutine.
+// 3. Set phase = GCmark.
+// 4. Wait for all P's to acknowledge phase change.
+// 5. Now write barrier marks and enqueues black, grey, or white to white pointers.
+// Malloc still allocates white (non-marked) objects.
+// 6. Meanwhile GC transitively walks the heap marking reachable objects.
+// 7. When GC finishes marking heap, it preempts P's one-by-one and
+// retakes partial wbufs (filled by write barrier or during a stack scan of the goroutine
+// currently scheduled on the P).
+// 8. Once the GC has exhausted all available marking work it sets phase = marktermination.
+// 9. Wait for all P's to acknowledge phase change.
+// 10. Malloc now allocates black objects, so number of unmarked reachable objects
+// monotonically decreases.
+// 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet reachable objects.
+// 12. When GC completes a full cycle over P's and discovers no new grey
+// objects, (which means all reachable objects are marked) set phase = GCsweep.
+// 13. Wait for all P's to acknowledge phase change.
+// 14. Now malloc allocates white (but sweeps spans before use).
+// Write barrier becomes nop.
+// 15. GC does background sweeping, see description below.
+// 16. When sweeping is complete set phase to GCoff.
+// 17. When sufficient allocation has taken place replay the sequence starting at 0 above,
+// see discussion of GC rate below.
+
+// Changing phases.
+// Phases are changed by setting the gcphase to the next phase and possibly calling ackgcphase.
+// All phase action must be benign in the presence of a change.
+// Starting with GCoff
+// GCoff to GCscan
+// GSscan scans stacks and globals greying them and never marks an object black.
+// Once all the P's are aware of the new phase they will scan gs on preemption.
+// This means that the scanning of preempted gs can't start until all the Ps
+// have acknowledged.
+// GCscan to GCmark
+// GCMark turns on the write barrier which also only greys objects. No scanning
+// of objects (making them black) can happen until all the Ps have acknowledged
+// the phase change.
+// GCmark to GCmarktermination
+// The only change here is that we start allocating black so the Ps must acknowledge
+// the change before we begin the termination algorithm
+// GCmarktermination to GSsweep
+// Object currently on the freelist must be marked black for this to work.
+// Are things on the free lists black or white? How does the sweep phase work?
+
// Concurrent sweep.
// The sweep phase proceeds concurrently with normal program execution.
// The heap is swept span-by-span both lazily (when a goroutine needs another span)
@@ -50,6 +100,14 @@
// The finalizer goroutine is kicked off only when all spans are swept.
// When the next GC starts, it sweeps all not-yet-swept spans (if any).
+// GC rate.
+// Next GC is after we've allocated an extra amount of memory proportional to
+// the amount already in use. The proportion is controlled by GOGC environment variable
+// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
+// (this mark is tracked in next_gc variable). This keeps the GC cost in linear
+// proportion to the allocation cost. Adjusting GOGC just changes the linear constant
+// (and also the amount of extra memory used).
+
#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
@@ -65,9 +123,8 @@
enum {
Debug = 0,
DebugPtrs = 0, // if 1, print trace of every pointer load during GC
- ConcurrentSweep = 0,
+ ConcurrentSweep = 1,
- WorkbufSize = 4*1024,
FinBlockSize = 4*1024,
RootData = 0,
RootBss = 1,
@@ -80,7 +137,7 @@ enum {
// ptrmask for an allocation containing a single pointer.
static byte oneptr[] = {BitsPointer};
-// Initialized from $GOGC. GOGC=off means no gc.
+// Initialized from $GOGC. GOGC=off means no GC.
extern int32 runtime·gcpercent;
// Holding worldsema grants an M the right to try to stop the world.
@@ -98,12 +155,16 @@ extern int32 runtime·gcpercent;
//
uint32 runtime·worldsema = 1;
-typedef struct Workbuf Workbuf;
-struct Workbuf
-{
- LFNode node; // must be first
- uintptr nobj;
- byte* obj[(WorkbufSize-sizeof(LFNode)-sizeof(uintptr))/PtrSize];
+// It is a bug if bits does not have bitBoundary set but
+// there are still some cases where this happens related
+// to stack spans.
+typedef struct Markbits Markbits;
+struct Markbits {
+ byte *bitp; // pointer to the byte holding xbits
+ byte shift; // bits xbits needs to be shifted to get bits
+ byte xbits; // byte holding all the bits from *bitp
+ byte bits; // mark and boundary bits relevant to corresponding slot.
+ byte tbits; // pointer||scalar bits relevant to corresponding slot.
};
extern byte runtime·data[];
@@ -128,26 +189,40 @@ BitVector runtime·gcbssmask;
Mutex runtime·gclock;
-static uintptr badblock[1024];
-static int32 nbadblock;
-
+static Workbuf* getpartialorempty(void);
+static void putpartial(Workbuf*);
static Workbuf* getempty(Workbuf*);
static Workbuf* getfull(Workbuf*);
static void putempty(Workbuf*);
+static void putfull(Workbuf*);
static Workbuf* handoff(Workbuf*);
static void gchelperstart(void);
static void flushallmcaches(void);
-static bool scanframe(Stkframe *frame, void *unused);
-static void scanstack(G *gp);
-static BitVector unrollglobgcprog(byte *prog, uintptr size);
+static bool scanframe(Stkframe*, void*);
+static void scanstack(G*);
+static BitVector unrollglobgcprog(byte*, uintptr);
+static void scanblock(byte*, uintptr, byte*);
+static byte* objectstart(byte*, Markbits*);
+static Workbuf* greyobject(byte*, Markbits*, Workbuf*);
+static bool inheap(byte*);
+static bool shaded(byte*);
+static void shade(byte*);
+static void slottombits(byte*, Markbits*);
+static void atomicxor8(byte*, byte);
+static bool ischeckmarked(Markbits*);
+static bool ismarked(Markbits*);
+static void clearcheckmarkbits(void);
+static void clearcheckmarkbitsspan(MSpan*);
void runtime·bgsweep(void);
+void runtime·finishsweep_m(void);
static FuncVal bgsweepv = {runtime·bgsweep};
typedef struct WorkData WorkData;
struct WorkData {
- uint64 full; // lock-free list of full blocks
- uint64 empty; // lock-free list of empty blocks
+ uint64 full; // lock-free list of full blocks
+ uint64 empty; // lock-free list of empty blocks
+ uint64 partial; // lock-free list of partially filled blocks
byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
uint32 nproc;
int64 tstart;
@@ -162,315 +237,422 @@ struct WorkData {
};
WorkData runtime·work;
-// Is _cgo_allocate linked into the binary?
+// To help debug the concurrent GC we remark with the world
+// stopped ensuring that any object encountered has their normal
+// mark bit set. To do this we use an orthogonal bit
+// pattern to indicate the object is marked. The following pattern
+// uses the upper two bits in the object's bounday nibble.
+// 01: scalar not marked
+// 10: pointer not marked
+// 11: pointer marked
+// 00: scalar marked
+// Xoring with 01 will flip the pattern from marked to unmarked and vica versa.
+// The higher bit is 1 for pointers and 0 for scalars, whether the object
+// is marked or not.
+// The first nibble no longer holds the bitsDead pattern indicating that the
+// there are no more pointers in the object. This information is held
+// in the second nibble.
+
+// When marking an object if the bool checkmark is true one uses the above
+// encoding, otherwise one uses the bitMarked bit in the lower two bits
+// of the nibble.
+static bool checkmark = false;
+static bool gccheckmarkenable = true;
+
+// Is address b in the known heap. If it doesn't have a valid gcmap
+// returns false. For example pointers into stacks will return false.
static bool
-have_cgo_allocate(void)
+inheap(byte *b)
{
- extern byte go·weak·runtime·_cgo_allocate_internal[1];
- return go·weak·runtime·_cgo_allocate_internal != nil;
+ MSpan *s;
+ pageID k;
+ uintptr x;
+
+ if(b == nil || b < runtime·mheap.arena_start || b >= runtime·mheap.arena_used)
+ return false;
+ // Not a beginning of a block, consult span table to find the block beginning.
+ k = (uintptr)b>>PageShift;
+ x = k;
+ x -= (uintptr)runtime·mheap.arena_start>>PageShift;
+ s = runtime·mheap.spans[x];
+ if(s == nil || k < s->start || b >= s->limit || s->state != MSpanInUse)
+ return false;
+ return true;
}
-// scanblock scans a block of n bytes starting at pointer b for references
-// to other objects, scanning any it finds recursively until there are no
-// unscanned objects left. Instead of using an explicit recursion, it keeps
-// a work list in the Workbuf* structures and loops in the main function
-// body. Keeping an explicit work list is easier on the stack allocator and
-// more efficient.
+// Given an address in the heap return the relevant byte from the gcmap. This routine
+// can be used on addresses to the start of an object or to the interior of the an object.
static void
-scanblock(byte *b, uintptr n, byte *ptrmask)
+slottombits(byte *obj, Markbits *mbits)
{
- byte *obj, *obj0, *p, *arena_start, *arena_used, **wp, *scanbuf[8], *ptrbitp, *bitp;
- uintptr i, j, nobj, size, idx, x, off, scanbufpos, bits, xbits, shift;
- Workbuf *wbuf;
- Iface *iface;
- Eface *eface;
- Type *typ;
+ uintptr off;
+
+ off = (uintptr*)((uintptr)obj&~(PtrSize-1)) - (uintptr*)runtime·mheap.arena_start;
+ mbits->bitp = runtime·mheap.arena_start - off/wordsPerBitmapByte - 1;
+ mbits->shift = (off % wordsPerBitmapByte) * gcBits;
+ mbits->xbits = *mbits->bitp;
+ mbits->bits = (mbits->xbits >> mbits->shift) & bitMask;
+ mbits->tbits = ((mbits->xbits >> mbits->shift) & bitPtrMask) >> 2;
+}
+
+// b is a pointer into the heap.
+// Find the start of the object refered to by b.
+// Set mbits to the associated bits from the bit map.
+// If b is not a valid heap object return nil and
+// undefined values in mbits.
+static byte*
+objectstart(byte *b, Markbits *mbits)
+{
+ byte *obj, *p;
MSpan *s;
pageID k;
- bool keepworking;
+ uintptr x, size, idx;
- // Cache memory arena parameters in local vars.
- arena_start = runtime·mheap.arena_start;
- arena_used = runtime·mheap.arena_used;
+ obj = (byte*)((uintptr)b&~(PtrSize-1));
+ for(;;) {
+ slottombits(obj, mbits);
+ if((mbits->bits&bitBoundary) == bitBoundary)
+ break;
- wbuf = getempty(nil);
- nobj = wbuf->nobj;
- wp = &wbuf->obj[nobj];
- keepworking = b == nil;
- scanbufpos = 0;
- for(i = 0; i < nelem(scanbuf); i++)
- scanbuf[i] = nil;
+ // Not a beginning of a block, consult span table to find the block beginning.
+ k = (uintptr)obj>>PageShift;
+ x = k;
+ x -= (uintptr)runtime·mheap.arena_start>>PageShift;
+ s = runtime·mheap.spans[x];
+ if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse){
+ if(s != nil && s->state == MSpanStack) {
+ return nil; // This is legit.
+ }
+ // The following ensures that we are rigorous about what data
+ // structures hold valid pointers
+ if(0) {
+ // Still happens sometimes. We don't know why.
+ runtime·printf("runtime:objectstart Span weird: obj=%p, k=%p", obj, k);
+ if (s == nil)
+ runtime·printf(" s=nil\n");
+ else
+ runtime·printf(" s->start=%p s->limit=%p, s->state=%d\n", s->start*PageSize, s->limit, s->state);
+ runtime·throw("objectstart: bad pointer in unexpected span");
+ }
+ return nil;
+ }
+ p = (byte*)((uintptr)s->start<<PageShift);
+ if(s->sizeclass != 0) {
+ size = s->elemsize;
+ idx = ((byte*)obj - p)/size;
+ p = p+idx*size;
+ }
+ if(p == obj) {
+ runtime·printf("runtime: failed to find block beginning for %p s=%p s->limit=%p\n",
+ p, s->start*PageSize, s->limit);
+ runtime·throw("failed to find block beginning");
+ }
+ obj = p;
+ }
+ // if size(obj.firstfield) < PtrSize, the &obj.secondfield could map to the boundary bit
+ // Clear any low bits to get to the start of the object.
+ // greyobject depends on this.
+ return obj;
+}
+
+// Slow for now as we serialize this, since this is on a debug path
+// speed is not critical at this point.
+static Mutex andlock;
+static void
+atomicand8(byte *src, byte val)
+{
+ runtime·lock(&andlock);
+ *src = *src&val;
+ runtime·unlock(&andlock);
+}
+
+// Mark using the checkmark scheme.
+void
+docheckmark(Markbits *mbits)
+{
+ // xor 01 moves 01(scalar unmarked) to 00(scalar marked)
+ // and 10(pointer unmarked) to 11(pointer marked)
+ if(mbits->tbits == BitsScalar)
+ atomicand8(mbits->bitp, ~(byte)(BitsCheckMarkXor<<mbits->shift<<2));
+ else if(mbits->tbits == BitsPointer)
+ runtime·atomicor8(mbits->bitp, BitsCheckMarkXor<<mbits->shift<<2);
+
+ // reload bits for ischeckmarked
+ mbits->xbits = *mbits->bitp;
+ mbits->bits = (mbits->xbits >> mbits->shift) & bitMask;
+ mbits->tbits = ((mbits->xbits >> mbits->shift) & bitPtrMask) >> 2;
+
+ return;
+}
+
+// In the default scheme does mbits refer to a marked object.
+static bool
+ismarked(Markbits *mbits)
+{
+ if((mbits->bits&bitBoundary) != bitBoundary)
+ runtime·throw("ismarked: bits should have boundary bit set");
+ return (mbits->bits&bitMarked) == bitMarked;
+}
+
+// In the checkmark scheme does mbits refer to a marked object.
+static bool
+ischeckmarked(Markbits *mbits)
+{
+ if((mbits->bits&bitBoundary) != bitBoundary)
+ runtime·printf("runtime:ischeckmarked: bits should have boundary bit set\n");
+ return mbits->tbits==BitsScalarMarked || mbits->tbits==BitsPointerMarked;
+}
+
+// When in GCmarkterminate phase we allocate black.
+void
+runtime·gcmarknewobject_m(void)
+{
+ Markbits mbits;
+ byte *obj;
+
+ if(runtime·gcphase != GCmarktermination)
+ runtime·throw("marking new object while not in mark termination phase");
+ if(checkmark) // The world should be stopped so this should not happen.
+ runtime·throw("gcmarknewobject called while doing checkmark");
+
+ obj = g->m->ptrarg[0];
+ slottombits((byte*)((uintptr)obj & (PtrSize-1)), &mbits);
+
+ if((mbits.bits&bitMarked) != 0)
+ return;
+
+ // Each byte of GC bitmap holds info for two words.
+ // If the current object is larger than two words, or if the object is one word
+ // but the object it shares the byte with is already marked,
+ // then all the possible concurrent updates are trying to set the same bit,
+ // so we can use a non-atomic update.
+ if((mbits.xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) || runtime·work.nproc == 1)
+ *mbits.bitp = mbits.xbits | (bitMarked<<mbits.shift);
+ else
+ runtime·atomicor8(mbits.bitp, bitMarked<<mbits.shift);
+ return;
+}
+
+// obj is the start of an object with mark mbits.
+// If it isn't already marked, mark it and enqueue into workbuf.
+// Return possibly new workbuf to use.
+static Workbuf*
+greyobject(byte *obj, Markbits *mbits, Workbuf *wbuf)
+{
+ // obj should be start of allocation, and so must be at least pointer-aligned.
+ if(((uintptr)obj & (PtrSize-1)) != 0)
+ runtime·throw("greyobject: obj not pointer-aligned");
+
+ if(checkmark) {
+ if(!ismarked(mbits)) {
+ MSpan *s;
+ pageID k;
+ uintptr x, i;
+
+ runtime·printf("runtime:greyobject: checkmarks finds unexpected unmarked object obj=%p, mbits->bits=%x, *mbits->bitp=%x\n", obj, mbits->bits, *mbits->bitp);
+
+ k = (uintptr)obj>>PageShift;
+ x = k;
+ x -= (uintptr)runtime·mheap.arena_start>>PageShift;
+ s = runtime·mheap.spans[x];
+ runtime·printf("runtime:greyobject Span: obj=%p, k=%p", obj, k);
+ if (s == nil) {
+ runtime·printf(" s=nil\n");
+ } else {
+ runtime·printf(" s->start=%p s->limit=%p, s->state=%d, s->sizeclass=%d, s->elemsize=%D \n", s->start*PageSize, s->limit, s->state, s->sizeclass, s->elemsize);
+ for(i=0; i<s->sizeclass; i++) {
+ runtime·printf(" ((uintptr*)obj)[%D]=%p\n", i, ((uintptr*)obj)[i]);
+ }
+ }
+ runtime·throw("checkmark found unmarked object");
+ }
+ if(ischeckmarked(mbits))
+ return wbuf;
+ docheckmark(mbits);
+ if(!ischeckmarked(mbits)) {
+ runtime·printf("mbits xbits=%x bits=%x tbits=%x shift=%d\n", mbits->xbits, mbits->bits, mbits->tbits, mbits->shift);
+ runtime·throw("docheckmark and ischeckmarked disagree");
+ }
+ } else {
+ // If marked we have nothing to do.
+ if((mbits->bits&bitMarked) != 0)
+ return wbuf;
+
+ // Each byte of GC bitmap holds info for two words.
+ // If the current object is larger than two words, or if the object is one word
+ // but the object it shares the byte with is already marked,
+ // then all the possible concurrent updates are trying to set the same bit,
+ // so we can use a non-atomic update.
+ if((mbits->xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) || runtime·work.nproc == 1)
+ *mbits->bitp = mbits->xbits | (bitMarked<<mbits->shift);
+ else
+ runtime·atomicor8(mbits->bitp, bitMarked<<mbits->shift);
+ }
+
+ if (!checkmark && (((mbits->xbits>>(mbits->shift+2))&BitsMask) == BitsDead))
+ return wbuf; // noscan object
+
+ // Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
+ // seems like a nice optimization that can be added back in.
+ // There needs to be time between the PREFETCH and the use.
+ // Previously we put the obj in an 8 element buffer that is drained at a rate
+ // to give the PREFETCH time to do its work.
+ // Use of PREFETCHNTA might be more appropriate than PREFETCH
+
+ // If workbuf is full, obtain an empty one.
+ if(wbuf->nobj >= nelem(wbuf->obj)) {
+ wbuf = getempty(wbuf);
+ }
+
+ wbuf->obj[wbuf->nobj] = obj;
+ wbuf->nobj++;
+ return wbuf;
+}
+
+// Scan the object b of size n, adding pointers to wbuf.
+// Return possibly new wbuf to use.
+// If ptrmask != nil, it specifies where pointers are in b.
+// If ptrmask == nil, the GC bitmap should be consulted.
+// In this case, n may be an overestimate of the size; the GC bitmap
+// must also be used to make sure the scan stops at the end of b.
+static Workbuf*
+scanobject(byte *b, uintptr n, byte *ptrmask, Workbuf *wbuf)
+{
+ byte *obj, *arena_start, *arena_used, *ptrbitp;
+ uintptr i, j;
+ int32 bits;
+ Markbits mbits;
+
+ arena_start = (byte*)runtime·mheap.arena_start;
+ arena_used = runtime·mheap.arena_used;
ptrbitp = nil;
+ // Find bits of the beginning of the object.
+ if(ptrmask == nil) {
+ b = objectstart(b, &mbits);
+ if(b == nil)
+ return wbuf;
+ ptrbitp = mbits.bitp; //arena_start - off/wordsPerBitmapByte - 1;
+ }
+ for(i = 0; i < n; i += PtrSize) {
+ // Find bits for this word.
+ if(ptrmask != nil) {
+ // dense mask (stack or data)
+ bits = (ptrmask[(i/PtrSize)/4]>>(((i/PtrSize)%4)*BitsPerPointer))&BitsMask;
+ } else {
+ // Check if we have reached end of span.
+ // n is an overestimate of the size of the object.
+ if((((uintptr)b+i)%PageSize) == 0 &&
+ runtime·mheap.spans[(b-arena_start)>>PageShift] != runtime·mheap.spans[(b+i-arena_start)>>PageShift])
+ break;
+ // Consult GC bitmap.
+ bits = *ptrbitp;
+ if(wordsPerBitmapByte != 2)
+ runtime·throw("alg doesn't work for wordsPerBitmapByte != 2");
+ j = ((uintptr)b+i)/PtrSize & 1; // j indicates upper nibble or lower nibble
+ bits >>= gcBits*j;
+ if(i == 0)
+ bits &= ~bitBoundary;
+ ptrbitp -= j;
+
+ if((bits&bitBoundary) != 0 && i != 0)
+ break; // reached beginning of the next object
+ bits = (bits&bitPtrMask)>>2; // bits refer to the type bits.
+
+ if(i != 0 && bits == BitsDead) // BitsDead in first nibble not valid during checkmark
+ break; // reached no-scan part of the object
+ }
+
+ if(bits <= BitsScalar) // Bits Scalar ||
+ // BitsDead || // default encoding
+ // BitsScalarMarked // checkmark encoding
+ continue;
+
+ if((bits&BitsPointer) != BitsPointer) {
+ runtime·printf("gc checkmark=%d, b=%p ptrmask=%p, mbits.bitp=%p, mbits.xbits=%x, bits=%x\n", checkmark, b, ptrmask, mbits.bitp, mbits.xbits, bits);
+ runtime·throw("unexpected garbage collection bits");
+ }
+
+ obj = *(byte**)(b+i);
+ // At this point we have extracted the next potential pointer.
+ // Check if it points into heap.
+ if(obj == nil || obj < arena_start || obj >= arena_used)
+ continue;
+ // Mark the object. return some important bits.
+ // We we combine the following two rotines we don't have to pass mbits or obj around.
+ obj = objectstart(obj, &mbits);
+ // In the case of the span being MSpan_Stack mbits is useless and will not have
+ // the boundary bit set. It does not need to be greyed since it will be
+ // scanned using the scan stack mechanism.
+ if(obj == nil)
+ continue;
+ wbuf = greyobject(obj, &mbits, wbuf);
+ }
+ return wbuf;
+}
+
+// scanblock starts by scanning b as scanobject would.
+// If the gcphase is GCscan, that's all scanblock does.
+// Otherwise it traverses some fraction of the pointers it found in b, recursively.
+// As a special case, scanblock(nil, 0, nil) means to scan previously queued work,
+// stopping only when no work is left in the system.
+static void
+scanblock(byte *b, uintptr n, byte *ptrmask)
+{
+ Workbuf *wbuf;
+ bool keepworking;
+
+ wbuf = getpartialorempty();
+ if(b != nil) {
+ wbuf = scanobject(b, n, ptrmask, wbuf);
+ if(runtime·gcphase == GCscan) {
+ if(inheap(b) && !ptrmask)
+ // b is in heap, we are in GCscan so there should be a ptrmask.
+ runtime·throw("scanblock: In GCscan phase and inheap is true.");
+ // GCscan only goes one level deep since mark wb not turned on.
+ putpartial(wbuf);
+ return;
+ }
+ }
+ if(runtime·gcphase == GCscan) {
+ runtime·throw("scanblock: In GCscan phase but no b passed in.");
+ }
+
+ keepworking = b == nil;
+
// ptrmask can have 2 possible values:
// 1. nil - obtain pointer mask from GC bitmap.
// 2. pointer to a compact mask (for stacks and data).
- if(b != nil)
- goto scanobj;
for(;;) {
- if(nobj == 0) {
- // Out of work in workbuf.
- // First, see is there is any work in scanbuf.
- for(i = 0; i < nelem(scanbuf); i++) {
- b = scanbuf[scanbufpos];
- scanbuf[scanbufpos++] = nil;
- scanbufpos %= nelem(scanbuf);
- if(b != nil) {
- n = arena_used - b; // scan until bitBoundary or BitsDead
- ptrmask = nil; // use GC bitmap for pointer info
- goto scanobj;
- }
- }
+ if(wbuf->nobj == 0) {
if(!keepworking) {
putempty(wbuf);
return;
}
// Refill workbuf from global queue.
wbuf = getfull(wbuf);
- if(wbuf == nil)
+ if(wbuf == nil) // nil means out of work barrier reached
return;
- nobj = wbuf->nobj;
- wp = &wbuf->obj[nobj];
+
+ if(wbuf->nobj<=0) {
+ runtime·throw("runtime:scanblock getfull returns empty buffer");
+ }
+
}
// If another proc wants a pointer, give it some.
- if(runtime·work.nwait > 0 && nobj > 4 && runtime·work.full == 0) {
- wbuf->nobj = nobj;
+ if(runtime·work.nwait > 0 && wbuf->nobj > 4 && runtime·work.full == 0) {
wbuf = handoff(wbuf);
- nobj = wbuf->nobj;
- wp = &wbuf->obj[nobj];
- }
-
- wp--;
- nobj--;
- b = *wp;
- n = arena_used - b; // scan until next bitBoundary or BitsDead
- ptrmask = nil; // use GC bitmap for pointer info
-
- scanobj:
- if(DebugPtrs)
- runtime·printf("scanblock %p +%p %p\n", b, n, ptrmask);
- // Find bits of the beginning of the object.
- if(ptrmask == nil) {
- off = (uintptr*)b - (uintptr*)arena_start;
- ptrbitp = arena_start - off/wordsPerBitmapByte - 1;
}
- for(i = 0; i < n; i += PtrSize) {
- obj = nil;
- // Find bits for this word.
- if(ptrmask == nil) {
- // Check is we have reached end of span.
- if((((uintptr)b+i)%PageSize) == 0 &&
- runtime·mheap.spans[(b-arena_start)>>PageShift] != runtime·mheap.spans[(b+i-arena_start)>>PageShift])
- break;
- // Consult GC bitmap.
- bits = *ptrbitp;
-
- if(wordsPerBitmapByte != 2)
- runtime·throw("alg doesn't work for wordsPerBitmapByte != 2");
- j = ((uintptr)b+i)/PtrSize & 1;
- ptrbitp -= j;
- bits >>= gcBits*j;
-
- if((bits&bitBoundary) != 0 && i != 0)
- break; // reached beginning of the next object
- bits = (bits>>2)&BitsMask;
- if(bits == BitsDead)
- break; // reached no-scan part of the object
- } else // dense mask (stack or data)
- bits = (ptrmask[(i/PtrSize)/4]>>(((i/PtrSize)%4)*BitsPerPointer))&BitsMask;
-
- if(bits <= BitsScalar) // BitsScalar || BitsDead
- continue;
- if(bits == BitsPointer) {
- obj = *(byte**)(b+i);
- obj0 = obj;
- goto markobj;
- }
-
- // With those three out of the way, must be multi-word.
- if(Debug && bits != BitsMultiWord)
- runtime·throw("unexpected garbage collection bits");
- // Find the next pair of bits.
- if(ptrmask == nil) {
- bits = *ptrbitp;
- j = ((uintptr)b+i+PtrSize)/PtrSize & 1;
- ptrbitp -= j;
- bits >>= gcBits*j;
- bits = (bits>>2)&BitsMask;
- } else
- bits = (ptrmask[((i+PtrSize)/PtrSize)/4]>>((((i+PtrSize)/PtrSize)%4)*BitsPerPointer))&BitsMask;
-
- if(Debug && bits != BitsIface && bits != BitsEface)
- runtime·throw("unexpected garbage collection bits");
-
- if(bits == BitsIface) {
- iface = (Iface*)(b+i);
- if(iface->tab != nil) {
- typ = iface->tab->type;
- if(!(typ->kind&KindDirectIface) || !(typ->kind&KindNoPointers))
- obj = iface->data;
- }
- } else {
- eface = (Eface*)(b+i);
- typ = eface->type;
- if(typ != nil) {
- if(!(typ->kind&KindDirectIface) || !(typ->kind&KindNoPointers))
- obj = eface->data;
- }
- }
- i += PtrSize;
-
- obj0 = obj;
- markobj:
- // At this point we have extracted the next potential pointer.
- // Check if it points into heap.
- if(obj == nil)
- continue;
- if(obj < arena_start || obj >= arena_used) {
- if((uintptr)obj < PhysPageSize && runtime·invalidptr) {
- s = nil;
- goto badobj;
- }
- continue;
- }
- // Mark the object.
- obj = (byte*)((uintptr)obj & ~(PtrSize-1));
- off = (uintptr*)obj - (uintptr*)arena_start;
- bitp = arena_start - off/wordsPerBitmapByte - 1;
- shift = (off % wordsPerBitmapByte) * gcBits;
- xbits = *bitp;
- bits = (xbits >> shift) & bitMask;
- if((bits&bitBoundary) == 0) {
- // Not a beginning of a block, consult span table to find the block beginning.
- k = (uintptr)obj>>PageShift;
- x = k;
- x -= (uintptr)arena_start>>PageShift;
- s = runtime·mheap.spans[x];
- if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse) {
- // Stack pointers lie within the arena bounds but are not part of the GC heap.
- // Ignore them.
- if(s != nil && s->state == MSpanStack)
- continue;
-
- badobj:
- // If cgo_allocate is linked into the binary, it can allocate
- // memory as []unsafe.Pointer that may not contain actual
- // pointers and must be scanned conservatively.
- // In this case alone, allow the bad pointer.
- if(have_cgo_allocate() && ptrmask == nil)
- continue;
-
- // Anything else indicates a bug somewhere.
- // If we're in the middle of chasing down a different bad pointer,
- // don't confuse the trace by printing about this one.
- if(nbadblock > 0)
- continue;
-
- runtime·printf("runtime: garbage collector found invalid heap pointer *(%p+%p)=%p", b, i, obj);
- if(s == nil)
- runtime·printf(" s=nil\n");
- else
- runtime·printf(" span=%p-%p-%p state=%d\n", (uintptr)s->start<<PageShift, s->limit, (uintptr)(s->start+s->npages)<<PageShift, s->state);
- if(ptrmask != nil)
- runtime·throw("invalid heap pointer");
- // Add to badblock list, which will cause the garbage collection
- // to keep repeating until it has traced the chain of pointers
- // leading to obj all the way back to a root.
- if(nbadblock == 0)
- badblock[nbadblock++] = (uintptr)b;
- continue;
- }
- p = (byte*)((uintptr)s->start<<PageShift);
- if(s->sizeclass != 0) {
- size = s->elemsize;
- idx = ((byte*)obj - p)/size;
- p = p+idx*size;
- }
- if(p == obj) {
- runtime·printf("runtime: failed to find block beginning for %p s=%p s->limit=%p\n",
- p, s->start*PageSize, s->limit);
- runtime·throw("failed to find block beginning");
- }
- obj = p;
- goto markobj;
- }
- if(DebugPtrs)
- runtime·printf("scan *%p = %p => base %p\n", b+i, obj0, obj);
-
- if(nbadblock > 0 && (uintptr)obj == badblock[nbadblock-1]) {
- // Running garbage collection again because
- // we want to find the path from a root to a bad pointer.
- // Found possible next step; extend or finish path.
- for(j=0; j<nbadblock; j++)
- if(badblock[j] == (uintptr)b)
- goto AlreadyBad;
- runtime·printf("runtime: found *(%p+%p) = %p+%p\n", b, i, obj0, (uintptr)(obj-obj0));
- if(ptrmask != nil)
- runtime·throw("bad pointer");
- if(nbadblock >= nelem(badblock))
- runtime·throw("badblock trace too long");
- badblock[nbadblock++] = (uintptr)b;
- AlreadyBad:;
- }
-
- // Now we have bits, bitp, and shift correct for
- // obj pointing at the base of the object.
- // Only care about not marked objects.
- if((bits&bitMarked) != 0)
- continue;
- // If obj size is greater than 8, then each byte of GC bitmap
- // contains info for at most one object. In such case we use
- // non-atomic byte store to mark the object. This can lead
- // to double enqueue of the object for scanning, but scanning
- // is an idempotent operation, so it is OK. This cannot lead
- // to bitmap corruption because the single marked bit is the
- // only thing that can change in the byte.
- // For 8-byte objects we use non-atomic store, if the other
- // quadruple is already marked. Otherwise we resort to CAS
- // loop for marking.
- if((xbits&(bitMask|(bitMask<<gcBits))) != (bitBoundary|(bitBoundary<<gcBits)) ||
- runtime·work.nproc == 1)
- *bitp = xbits | (bitMarked<<shift);
- else
- runtime·atomicor8(bitp, bitMarked<<shift);
-
- if(((xbits>>(shift+2))&BitsMask) == BitsDead)
- continue; // noscan object
-
- // Queue the obj for scanning.
- PREFETCH(obj);
- p = scanbuf[scanbufpos];
- scanbuf[scanbufpos++] = obj;
- scanbufpos %= nelem(scanbuf);
- if(p == nil)
- continue;
-
- // If workbuf is full, obtain an empty one.
- if(nobj >= nelem(wbuf->obj)) {
- wbuf->nobj = nobj;
- wbuf = getempty(wbuf);
- nobj = wbuf->nobj;
- wp = &wbuf->obj[nobj];
- }
- *wp = p;
- wp++;
- nobj++;
- }
- if(DebugPtrs)
- runtime·printf("end scanblock %p +%p %p\n", b, n, ptrmask);
-
- if(Debug && ptrmask == nil) {
- // For heap objects ensure that we did not overscan.
- n = 0;
- p = nil;
- if(!runtime·mlookup(b, &p, &n, nil) || b != p || i > n) {
- runtime·printf("runtime: scanned (%p,%p), heap object (%p,%p)\n", b, i, p, n);
- runtime·throw("scanblock: scanned invalid object");
- }
- }
+ // This might be a good place to add prefetch code...
+ // if(wbuf->nobj > 4) {
+ // PREFETCH(wbuf->obj[wbuf->nobj - 3];
+ // }
+ --wbuf->nobj;
+ b = wbuf->obj[wbuf->nobj];
+ wbuf = scanobject(b, runtime·mheap.arena_used - b, nil, wbuf);
}
}
@@ -484,7 +666,7 @@ markroot(ParFor *desc, uint32 i)
void *p;
uint32 status;
bool restart;
-
+
USED(&desc);
// Note: if you add a case here, please also update heapdump.c:dumproots.
switch(i) {
@@ -511,7 +693,8 @@ markroot(ParFor *desc, uint32 i)
s = runtime·work.spans[spanidx];
if(s->state != MSpanInUse)
continue;
- if(s->sweepgen != sg) {
+ if(!checkmark && s->sweepgen != sg) {
+ // sweepgen was updated (+2) during non-checkmark GC pass
runtime·printf("sweep %d %d\n", s->sweepgen, sg);
runtime·throw("gc: unswept span");
}
@@ -523,14 +706,16 @@ markroot(ParFor *desc, uint32 i)
spf = (SpecialFinalizer*)sp;
// A finalizer can be set for an inner byte of an object, find object beginning.
p = (void*)((s->start << PageShift) + spf->special.offset/s->elemsize*s->elemsize);
- scanblock(p, s->elemsize, nil);
+ if(runtime·gcphase != GCscan)
+ scanblock(p, s->elemsize, nil); // Scanned during mark phase
scanblock((void*)&spf->fn, PtrSize, oneptr);
}
}
break;
case RootFlushCaches:
- flushallmcaches();
+ if (runtime·gcphase != GCscan) // Do not flush mcaches during GCscan phase.
+ flushallmcaches();
break;
default:
@@ -540,17 +725,37 @@ markroot(ParFor *desc, uint32 i)
gp = runtime·allg[i - RootCount];
// remember when we've first observed the G blocked
// needed only to output in traceback
- status = runtime·readgstatus(gp);
+ status = runtime·readgstatus(gp); // We are not in a scan state
if((status == Gwaiting || status == Gsyscall) && gp->waitsince == 0)
gp->waitsince = runtime·work.tstart;
- // Shrink a stack if not much of it is being used.
- runtime·shrinkstack(gp);
- if(runtime·readgstatus(gp) == Gdead)
+ // Shrink a stack if not much of it is being used but not in the scan phase.
+ if (runtime·gcphase != GCscan) // Do not shrink during GCscan phase.
+ runtime·shrinkstack(gp);
+ if(runtime·readgstatus(gp) == Gdead)
gp->gcworkdone = true;
else
gp->gcworkdone = false;
restart = runtime·stopg(gp);
- scanstack(gp);
+
+ // goroutine will scan its own stack when it stops running.
+ // Wait until it has.
+ while(runtime·readgstatus(gp) == Grunning && !gp->gcworkdone) {
+ }
+
+ // scanstack(gp) is done as part of gcphasework
+ // But to make sure we finished we need to make sure that
+ // the stack traps have all responded so drop into
+ // this while loop until they respond.
+ while(!gp->gcworkdone){
+ status = runtime·readgstatus(gp);
+ if(status == Gdead) {
+ gp->gcworkdone = true; // scan is a noop
+ break;
+ //do nothing, scan not needed.
+ }
+ if(status == Gwaiting || status == Grunnable)
+ restart = runtime·stopg(gp);
+ }
if(restart)
runtime·restartg(gp);
break;
@@ -562,53 +767,95 @@ markroot(ParFor *desc, uint32 i)
static Workbuf*
getempty(Workbuf *b)
{
- MCache *c;
-
- if(b != nil)
- runtime·lfstackpush(&runtime·work.full, &b->node);
- b = nil;
- c = g->m->mcache;
- if(c->gcworkbuf != nil) {
- b = c->gcworkbuf;
- c->gcworkbuf = nil;
+ if(b != nil) {
+ putfull(b);
+ b = nil;
}
- if(b == nil)
+ if(runtime·work.empty)
b = (Workbuf*)runtime·lfstackpop(&runtime·work.empty);
- if(b == nil)
+
+ if(b && b->nobj != 0) {
+ runtime·printf("m%d: getempty: popped b=%p with non-zero b->nobj=%d\n", g->m->id, b, (uint32)b->nobj);
+ runtime·throw("getempty: workbuffer not empty, b->nobj not 0");
+ }
+ if(b == nil) {
b = runtime·persistentalloc(sizeof(*b), CacheLineSize, &mstats.gc_sys);
- b->nobj = 0;
+ b->nobj = 0;
+ }
return b;
}
static void
putempty(Workbuf *b)
{
- MCache *c;
-
- c = g->m->mcache;
- if(c->gcworkbuf == nil) {
- c->gcworkbuf = b;
- return;
+ if(b->nobj != 0) {
+ runtime·throw("putempty: b->nobj not 0\n");
}
runtime·lfstackpush(&runtime·work.empty, &b->node);
}
-void
-runtime·gcworkbuffree(void *b)
+// Put a full or partially full workbuf on the full list.
+static void
+putfull(Workbuf *b)
{
- if(b != nil)
- putempty(b);
+ if(b->nobj <= 0) {
+ runtime·throw("putfull: b->nobj <= 0\n");
+ }
+ runtime·lfstackpush(&runtime·work.full, &b->node);
}
-// Get a full work buffer off the work.full list, or return nil.
+// Get an partially empty work buffer
+// if none are available get an empty one.
+static Workbuf*
+getpartialorempty(void)
+{
+ Workbuf *b;
+
+ b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
+ if(b == nil)
+ b = getempty(nil);
+ return b;
+}
+
+static void
+putpartial(Workbuf *b)
+{
+
+ if(b->nobj == 0)
+ runtime·lfstackpush(&runtime·work.empty, &b->node);
+ else if (b->nobj < nelem(b->obj))
+ runtime·lfstackpush(&runtime·work.partial, &b->node);
+ else if (b->nobj == nelem(b->obj))
+ runtime·lfstackpush(&runtime·work.full, &b->node);
+ else {
+ runtime·printf("b=%p, b->nobj=%d, nelem(b->obj)=%d\n", b, (uint32)b->nobj, (uint32)nelem(b->obj));
+ runtime·throw("putpartial: bad Workbuf b->nobj");
+ }
+}
+
+// Get a full work buffer off the work.full or a partially
+// filled one off the work.partial list. If nothing is available
+// wait until all the other gc helpers have finished and then
+// return nil.
+// getfull acts as a barrier for work.nproc helpers. As long as one
+// gchelper is actively marking objects it
+// may create a workbuffer that the other helpers can work on.
+// The for loop either exits when a work buffer is found
+// or when _all_ of the work.nproc GC helpers are in the loop
+// looking for work and thus not capable of creating new work.
+// This is in fact the termination condition for the STW mark
+// phase.
static Workbuf*
getfull(Workbuf *b)
{
int32 i;
if(b != nil)
- runtime·lfstackpush(&runtime·work.empty, &b->node);
+ putempty(b);
+
b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
+ if(b==nil)
+ b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
if(b != nil || runtime·work.nproc == 1)
return b;
@@ -617,7 +864,9 @@ getfull(Workbuf *b)
if(runtime·work.full != 0) {
runtime·xadd(&runtime·work.nwait, -1);
b = (Workbuf*)runtime·lfstackpop(&runtime·work.full);
- if(b != nil)
+ if(b==nil)
+ b = (Workbuf*)runtime·lfstackpop(&runtime·work.partial);
+ if(b != nil)
return b;
runtime·xadd(&runtime·work.nwait, +1);
}
@@ -737,7 +986,7 @@ scanframe(Stkframe *frame, void *unused)
}
bv = runtime·stackmapdata(stackmap, pcdata);
}
- scanblock((byte*)frame->argp, bv.n/BitsPerPointer*PtrSize, bv.bytedata);
+ scanblock((byte*)frame->argp, bv.n/BitsPerPointer*PtrSize, bv.bytedata);
}
return true;
}
@@ -760,8 +1009,7 @@ scanstack(G *gp)
case Gdead:
return;
case Grunning:
- runtime·printf("runtime: gp=%p, goid=%D, gp->atomicstatus=%d\n", gp, gp->goid, runtime·readgstatus(gp));
- runtime·throw("mark - world not stopped");
+ runtime·throw("scanstack: - goroutine not stopped");
case Grunnable:
case Gsyscall:
case Gwaiting:
@@ -778,8 +1026,117 @@ scanstack(G *gp)
runtime·tracebackdefers(gp, &fn, nil);
}
-// The gp has been moved to a gc safepoint. If there is gcphase specific
-// work it is done here.
+// If the slot is grey or black return true, if white return false.
+// If the slot is not in the known heap and thus does not have a valid GC bitmap then
+// it is considered grey. Globals and stacks can hold such slots.
+// The slot is grey if its mark bit is set and it is enqueued to be scanned.
+// The slot is black if it has already been scanned.
+// It is white if it has a valid mark bit and the bit is not set.
+static bool
+shaded(byte *slot)
+{
+ Markbits mbits;
+ byte *valid;
+
+ if(!inheap(slot)) // non-heap slots considered grey
+ return true;
+
+ valid = objectstart(slot, &mbits);
+ if(valid == nil)
+ return true;
+
+ if(checkmark)
+ return ischeckmarked(&mbits);
+
+ return (mbits.bits&bitMarked) != 0;
+}
+
+// Shade the object if it isn't already.
+// The object is not nil and known to be in the heap.
+static void
+shade(byte *b)
+{
+ byte *obj;
+ Workbuf *wbuf;
+ Markbits mbits;
+
+ if(!inheap(b))
+ runtime·throw("shade: passed an address not in the heap");
+
+ wbuf = getpartialorempty();
+ // Mark the object, return some important bits.
+ // If we combine the following two rotines we don't have to pass mbits or obj around.
+ obj = objectstart(b, &mbits);
+ if(obj != nil)
+ wbuf = greyobject(obj, &mbits, wbuf); // augments the wbuf
+
+ putpartial(wbuf);
+ return;
+}
+
+// This is the Dijkstra barrier coarsened to always shade the ptr (dst) object.
+// The original Dijkstra barrier only shaded ptrs being placed in black slots.
+//
+// Shade indicates that it has seen a white pointer by adding the referent
+// to wbuf as well as marking it.
+//
+// slot is the destination (dst) in go code
+// ptr is the value that goes into the slot (src) in the go code
+//
+// Dijkstra pointed out that maintaining the no black to white
+// pointers means that white to white pointers not need
+// to be noted by the write barrier. Furthermore if either
+// white object dies before it is reached by the
+// GC then the object can be collected during this GC cycle
+// instead of waiting for the next cycle. Unfortunately the cost of
+// ensure that the object holding the slot doesn't concurrently
+// change to black without the mutator noticing seems prohibitive.
+//
+// Consider the following example where the mutator writes into
+// a slot and then loads the slot's mark bit while the GC thread
+// writes to the slot's mark bit and then as part of scanning reads
+// the slot.
+//
+// Initially both [slot] and [slotmark] are 0 (nil)
+// Mutator thread GC thread
+// st [slot], ptr st [slotmark], 1
+//
+// ld r1, [slotmark] ld r2, [slot]
+//
+// This is a classic example of independent reads of independent writes,
+// aka IRIW. The question is if r1==r2==0 is allowed and for most HW the
+// answer is yes without inserting a memory barriers between the st and the ld.
+// These barriers are expensive so we have decided that we will
+// always grey the ptr object regardless of the slot's color.
+//
+void
+runtime·gcmarkwb_m()
+{
+ byte *ptr;
+ ptr = (byte*)g->m->scalararg[1];
+
+ switch(runtime·gcphase) {
+ default:
+ runtime·throw("gcphasework in bad gcphase");
+ case GCoff:
+ case GCquiesce:
+ case GCstw:
+ case GCsweep:
+ case GCscan:
+ break;
+ case GCmark:
+ if(ptr != nil && inheap(ptr))
+ shade(ptr);
+ break;
+ case GCmarktermination:
+ if(ptr != nil && inheap(ptr))
+ shade(ptr);
+ break;
+ }
+}
+
+// The gp has been moved to a GC safepoint. GC phase specific
+// work is done here.
void
runtime·gcphasework(G *gp)
{
@@ -790,12 +1147,18 @@ runtime·gcphasework(G *gp)
case GCquiesce:
case GCstw:
case GCsweep:
- // No work for now.
+ // No work.
+ break;
+ case GCscan:
+ // scan the stack, mark the objects, put pointers in work buffers
+ // hanging off the P where this is being run.
+ scanstack(gp);
break;
case GCmark:
- // Disabled until concurrent GC is implemented
- // but indicate the scan has been done.
- // scanstack(gp);
+ break;
+ case GCmarktermination:
+ scanstack(gp);
+ // All available mark work will be emptied before returning.
break;
}
gp->gcworkdone = true;
@@ -885,6 +1248,7 @@ runtime·iterate_finq(void (*callback)(FuncVal*, byte*, uintptr, Type*, PtrType*
}
}
+// Returns only when span s has been swept.
void
runtime·MSpan_EnsureSwept(MSpan *s)
{
@@ -899,6 +1263,7 @@ runtime·MSpan_EnsureSwept(MSpan *s)
sg = runtime·mheap.sweepgen;
if(runtime·atomicload(&s->sweepgen) == sg)
return;
+ // The caller must be sure that the span is a MSpanInUse span.
if(runtime·cas(&s->sweepgen, sg-2, sg-1)) {
runtime·MSpan_Sweep(s, false);
return;
@@ -926,6 +1291,9 @@ runtime·MSpan_Sweep(MSpan *s, bool preserve)
Special *special, **specialp, *y;
bool res, sweepgenset;
+ if(checkmark)
+ runtime·throw("MSpan_Sweep: checkmark only runs in STW and after the sweep.");
+
// It's critical that we enter this function with preemption disabled,
// GC must not start while we are in the middle of this function.
if(g->m->locks == 0 && g->m->mallocing == 0 && g != g->m->g0)
@@ -1173,6 +1541,7 @@ runtime·gosweepdone(void)
return runtime·mheap.sweepdone;
}
+
void
runtime·gchelper(void)
{
@@ -1181,13 +1550,11 @@ runtime·gchelper(void)
g->m->traceback = 2;
gchelperstart();
- // parallel mark for over gc roots
+ // parallel mark for over GC roots
runtime·parfordo(runtime·work.markfor);
-
- // help other threads scan secondary blocks
- scanblock(nil, 0, nil);
-
- nproc = runtime·work.nproc; // runtime·work.nproc can change right after we increment runtime·work.ndone
+ if(runtime·gcphase != GCscan)
+ scanblock(nil, 0, nil); // blocks in getfull
+ nproc = runtime·work.nproc; // work.nproc can change right after we increment work.ndone
if(runtime·xadd(&runtime·work.ndone, +1) == nproc-1)
runtime·notewakeup(&runtime·work.alldone);
g->m->traceback = 0;
@@ -1353,6 +1720,7 @@ runtime·gcinit(void)
runtime·gcbssmask = unrollglobgcprog(runtime·gcbss, runtime·ebss - runtime·bss);
}
+// Called from malloc.go using onM, stopping and starting the world handled in caller.
void
runtime·gc_m(void)
{
@@ -1366,17 +1734,296 @@ runtime·gc_m(void)
a.start_time = (uint64)(g->m->scalararg[0]) | ((uint64)(g->m->scalararg[1]) << 32);
a.eagersweep = g->m->scalararg[2];
gc(&a);
+ runtime·casgstatus(gp, Gwaiting, Grunning);
+}
+
+// Similar to clearcheckmarkbits but works on a single span.
+// It preforms two tasks.
+// 1. When used before the checkmark phase it converts BitsDead (00) to bitsScalar (01)
+// for nibbles with the BoundaryBit set.
+// 2. When used after the checkmark phase it converts BitsPointerMark (11) to BitsPointer 10 and
+// BitsScalarMark (00) to BitsScalar (01), thus clearing the checkmark mark encoding.
+// For the second case it is possible to restore the BitsDead pattern but since
+// clearmark is a debug tool performance has a lower priority than simplicity.
+// The span is MSpanInUse and the world is stopped.
+static void
+clearcheckmarkbitsspan(MSpan *s)
+{
+ int32 cl, n, npages, i;
+ uintptr size, off, step;
+ byte *p, *bitp, *arena_start, b;
+
+ if(s->state != MSpanInUse) {
+ runtime·printf("runtime:clearcheckmarkbitsspan: state=%d\n",
+ s->state);
+ runtime·throw("clearcheckmarkbitsspan: bad span state");
+ }
+ arena_start = runtime·mheap.arena_start;
+ cl = s->sizeclass;
+ size = s->elemsize;
+ if(cl == 0) {
+ n = 1;
+ } else {
+ // Chunk full of small blocks.
+ npages = runtime·class_to_allocnpages[cl];
+ n = (npages << PageShift) / size;
+ }
+
+ // MSpan_Sweep has similar code but instead of overloading and
+ // complicating that routine we do a simpler walk here.
+ // Sweep through n objects of given size starting at p.
+ // This thread owns the span now, so it can manipulate
+ // the block bitmap without atomic operations.
+ p = (byte*)(s->start << PageShift);
+ // Find bits for the beginning of the span.
+ off = (uintptr*)p - (uintptr*)arena_start;
+ bitp = arena_start - off/wordsPerBitmapByte - 1;
+ step = size/(PtrSize*wordsPerBitmapByte);
+
+ // The type bit values are:
+ // 00 - BitsDead, for us BitsScalarMarked
+ // 01 - BitsScalar
+ // 10 - BitsPointer
+ // 11 - unused, for us BitsPointerMarked
+ //
+ // When called to prepare for the checkmark phase (checkmark==1),
+ // we change BitsDead to BitsScalar, so that there are no BitsScalarMarked
+ // type bits anywhere.
+ //
+ // The checkmark phase marks by changing BitsScalar to BitsScalarMarked
+ // and BitsPointer to BitsPointerMarked.
+ //
+ // When called to clean up after the checkmark phase (checkmark==0),
+ // we unmark by changing BitsScalarMarked back to BitsScalar and
+ // BitsPointerMarked back to BitsPointer.
+ //
+ // There are two problems with the scheme as just described.
+ // First, the setup rewrites BitsDead to BitsScalar, but the type bits
+ // following a BitsDead are uninitialized and must not be used.
+ // Second, objects that are free are expected to have their type
+ // bits zeroed (BitsDead), so in the cleanup we need to restore
+ // any BitsDeads that were there originally.
+ //
+ // In a one-word object (8-byte allocation on 64-bit system),
+ // there is no difference between BitsScalar and BitsDead, because
+ // neither is a pointer and there are no more words in the object,
+ // so using BitsScalar during the checkmark is safe and mapping
+ // both back to BitsDead during cleanup is also safe.
+ //
+ // In a larger object, we need to be more careful. During setup,
+ // if the type of the first word is BitsDead, we change it to BitsScalar
+ // (as we must) but also initialize the type of the second
+ // word to BitsDead, so that a scan during the checkmark phase
+ // will still stop before seeing the uninitialized type bits in the
+ // rest of the object. The sequence 'BitsScalar BitsDead' never
+ // happens in real type bitmaps - BitsDead is always as early
+ // as possible, so immediately after the last BitsPointer.
+ // During cleanup, if we see a BitsScalar, we can check to see if it
+ // is followed by BitsDead. If so, it was originally BitsDead and
+ // we can change it back.
- if(nbadblock > 0) {
- // Work out path from root to bad block.
- for(;;) {
- gc(&a);
- if(nbadblock >= nelem(badblock))
- runtime·throw("cannot find path to bad pointer");
+ if(step == 0) {
+ // updating top and bottom nibbles, all boundaries
+ for(i=0; i<n/2; i++, bitp--) {
+ if((*bitp & bitBoundary) != bitBoundary)
+ runtime·throw("missing bitBoundary");
+ b = (*bitp & bitPtrMask)>>2;
+ if(!checkmark && (b == BitsScalar || b == BitsScalarMarked))
+ *bitp &= ~0x0c; // convert to BitsDead
+ else if(b == BitsScalarMarked || b == BitsPointerMarked)
+ *bitp ^= BitsCheckMarkXor<<2;
+
+ if(((*bitp>>gcBits) & bitBoundary) != bitBoundary)
+ runtime·throw("missing bitBoundary");
+ b = ((*bitp>>gcBits) & bitPtrMask)>>2;
+ if(!checkmark && (b == BitsScalar || b == BitsScalarMarked))
+ *bitp &= ~0xc0; // convert to BitsDead
+ else if(b == BitsScalarMarked || b == BitsPointerMarked)
+ *bitp ^= BitsCheckMarkXor<<(2+gcBits);
+ }
+ } else {
+ // updating bottom nibble for first word of each object
+ for(i=0; i<n; i++, bitp -= step) {
+ if((*bitp & bitBoundary) != bitBoundary)
+ runtime·throw("missing bitBoundary");
+ b = (*bitp & bitPtrMask)>>2;
+
+ if(checkmark && b == BitsDead) {
+ // move BitsDead into second word.
+ // set bits to BitsScalar in preparation for checkmark phase.
+ *bitp &= ~0xc0;
+ *bitp |= BitsScalar<<2;
+ } else if(!checkmark && (b == BitsScalar || b == BitsScalarMarked) && (*bitp & 0xc0) == 0) {
+ // Cleaning up after checkmark phase.
+ // First word is scalar or dead (we forgot)
+ // and second word is dead.
+ // First word might as well be dead too.
+ *bitp &= ~0x0c;
+ } else if(b == BitsScalarMarked || b == BitsPointerMarked)
+ *bitp ^= BitsCheckMarkXor<<2;
}
}
+}
- runtime·casgstatus(gp, Gwaiting, Grunning);
+// clearcheckmarkbits preforms two tasks.
+// 1. When used before the checkmark phase it converts BitsDead (00) to bitsScalar (01)
+// for nibbles with the BoundaryBit set.
+// 2. When used after the checkmark phase it converts BitsPointerMark (11) to BitsPointer 10 and
+// BitsScalarMark (00) to BitsScalar (01), thus clearing the checkmark mark encoding.
+// This is a bit expensive but preserves the BitsDead encoding during the normal marking.
+// BitsDead remains valid for every nibble except the ones with BitsBoundary set.
+static void
+clearcheckmarkbits(void)
+{
+ uint32 idx;
+ MSpan *s;
+ for(idx=0; idx<runtime·work.nspan; idx++) {
+ s = runtime·work.spans[idx];
+ if(s->state == MSpanInUse) {
+ clearcheckmarkbitsspan(s);
+ }
+ }
+}
+
+// Called from malloc.go using onM.
+// The world is stopped. Rerun the scan and mark phases
+// using the bitMarkedCheck bit instead of the
+// bitMarked bit. If the marking encounters an
+// bitMarked bit that is not set then we throw.
+void
+runtime·gccheckmark_m(void)
+{
+ if(!gccheckmarkenable)
+ return;
+
+ if(checkmark)
+ runtime·throw("gccheckmark_m, entered with checkmark already true.");
+
+ checkmark = true;
+ clearcheckmarkbits(); // Converts BitsDead to BitsScalar.
+ runtime·gc_m(); // turns off checkmark
+ // Work done, fixed up the GC bitmap to remove the checkmark bits.
+ clearcheckmarkbits();
+}
+
+// checkmarkenable is initially false
+void
+runtime·gccheckmarkenable_m(void)
+{
+ gccheckmarkenable = true;
+}
+
+void
+runtime·gccheckmarkdisable_m(void)
+{
+ gccheckmarkenable = false;
+}
+
+void
+runtime·finishsweep_m(void)
+{
+ uint32 i, sg;
+ MSpan *s;
+
+ // The world is stopped so we should be able to complete the sweeps
+ // quickly.
+ while(runtime·sweepone() != -1)
+ runtime·sweep.npausesweep++;
+
+ // There may be some other spans being swept concurrently that
+ // we need to wait for. If finishsweep_m is done with the world stopped
+ // this code is not required.
+ sg = runtime·mheap.sweepgen;
+ for(i=0; i<runtime·work.nspan; i++) {
+ s = runtime·work.spans[i];
+ if(s->sweepgen == sg) {
+ continue;
+ }
+ if(s->state != MSpanInUse) // Span is not part of the GCed heap so no need to ensure it is swept.
+ continue;
+ runtime·MSpan_EnsureSwept(s);
+ }
+}
+
+// Scan all of the stacks, greying (or graying if in America) the referents
+// but not blackening them since the mark write barrier isn't installed.
+void
+runtime·gcscan_m(void)
+{
+ uint32 i, allglen, oldphase;
+ G *gp, *mastergp, **allg;
+
+ // Grab the g that called us and potentially allow rescheduling.
+ // This allows it to be scanned like other goroutines.
+ mastergp = g->m->curg;
+
+ runtime·casgstatus(mastergp, Grunning, Gwaiting);
+ mastergp->waitreason = runtime·gostringnocopy((byte*)"garbage collection scan");
+
+ // Span sweeping has been done by finishsweep_m.
+ // Long term we will want to make this goroutine runnable
+ // by placing it onto a scanenqueue state and then calling
+ // runtime·restartg(mastergp) to make it Grunnable.
+ // At the bottom we will want to return this p back to the scheduler.
+
+ oldphase = runtime·gcphase;
+
+ runtime·lock(&runtime·allglock);
+ allglen = runtime·allglen;
+ allg = runtime·allg;
+ // Prepare flag indicating that the scan has not been completed.
+ for(i = 0; i < allglen; i++) {
+ gp = allg[i];
+ gp->gcworkdone = false; // set to true in gcphasework
+ }
+ runtime·unlock(&runtime·allglock);
+
+ runtime·work.nwait = 0;
+ runtime·work.ndone = 0;
+ runtime·work.nproc = 1; // For now do not do this in parallel.
+ runtime·gcphase = GCscan;
+ // ackgcphase is not needed since we are not scanning running goroutines.
+ runtime·parforsetup(runtime·work.markfor, runtime·work.nproc, RootCount + allglen, nil, false, markroot);
+ runtime·parfordo(runtime·work.markfor);
+
+ runtime·lock(&runtime·allglock);
+
+ allg = runtime·allg;
+ // Check that gc work is done.
+ for(i = 0; i < allglen; i++) {
+ gp = allg[i];
+ if(!gp->gcworkdone) {
+ runtime·throw("scan missed a g");
+ }
+ }
+ runtime·unlock(&runtime·allglock);
+
+ runtime·gcphase = oldphase;
+ runtime·casgstatus(mastergp, Gwaiting, Grunning);
+ // Let the g that called us continue to run.
+}
+
+// Mark all objects that are known about.
+void
+runtime·gcmark_m(void)
+{
+ scanblock(nil, 0, nil);
+}
+
+// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
+// all go routines see the new barrier.
+void
+runtime·gcinstallmarkwb_m(void)
+{
+ runtime·gcphase = GCmark;
+}
+
+// For now this must be bracketed with a stoptheworld and a starttheworld to ensure
+// all go routines see the new barrier.
+void
+runtime·gcinstalloffwb_m(void)
+{
+ runtime·gcphase = GCoff;
}
static void
@@ -1385,9 +2032,9 @@ gc(struct gc_args *args)
int64 t0, t1, t2, t3, t4;
uint64 heap0, heap1, obj;
GCStats stats;
-
- if(DebugPtrs)
- runtime·printf("GC start\n");
+ uint32 oldphase;
+ uint32 i;
+ G *gp;
if(runtime·debug.allocfreetrace)
runtime·tracegc();
@@ -1400,11 +2047,10 @@ gc(struct gc_args *args)
if(runtime·debug.gctrace)
t1 = runtime·nanotime();
- // Sweep what is not sweeped by bgsweep.
- while(runtime·sweepone() != -1)
- runtime·sweep.npausesweep++;
+ if(!checkmark)
+ runtime·finishsweep_m(); // skip during checkmark debug phase.
- // Cache runtime.mheap.allspans in work.spans to avoid conflicts with
+ // Cache runtime·mheap.allspans in work.spans to avoid conflicts with
// resizing/freeing allspans.
// New spans can be created while GC progresses, but they are not garbage for
// this round:
@@ -1421,10 +2067,19 @@ gc(struct gc_args *args)
runtime·work.spans = runtime·mheap.allspans;
runtime·work.nspan = runtime·mheap.nspan;
runtime·unlock(&runtime·mheap.lock);
+ oldphase = runtime·gcphase;
runtime·work.nwait = 0;
runtime·work.ndone = 0;
- runtime·work.nproc = runtime·gcprocs();
+ runtime·work.nproc = runtime·gcprocs();
+ runtime·gcphase = GCmarktermination;
+
+ // World is stopped so allglen will not change.
+ for(i = 0; i < runtime·allglen; i++) {
+ gp = runtime·allg[i];
+ gp->gcworkdone = false; // set to true in gcphasework
+ }
+
runtime·parforsetup(runtime·work.markfor, runtime·work.nproc, RootCount + runtime·allglen, nil, false, markroot);
if(runtime·work.nproc > 1) {
runtime·noteclear(&runtime·work.alldone);
@@ -1437,8 +2092,15 @@ gc(struct gc_args *args)
gchelperstart();
runtime·parfordo(runtime·work.markfor);
+
scanblock(nil, 0, nil);
+ if(runtime·work.full)
+ runtime·throw("runtime·work.full != nil");
+ if(runtime·work.partial)
+ runtime·throw("runtime·work.partial != nil");
+
+ runtime·gcphase = oldphase;
t3 = 0;
if(runtime·debug.gctrace)
t3 = runtime·nanotime();
@@ -1499,6 +2161,16 @@ gc(struct gc_args *args)
// Free the old cached mark array if necessary.
if(runtime·work.spans != nil && runtime·work.spans != runtime·mheap.allspans)
runtime·SysFree(runtime·work.spans, runtime·work.nspan*sizeof(runtime·work.spans[0]), &mstats.other_sys);
+
+ if(gccheckmarkenable) {
+ if(!checkmark) {
+ // first half of two-pass; don't set up sweep
+ runtime·unlock(&runtime·mheap.lock);
+ return;
+ }
+ checkmark = false; // done checking marks
+ }
+
// Cache the current array for sweeping.
runtime·mheap.gcspans = runtime·mheap.allspans;
runtime·mheap.sweepgen += 2;
@@ -1508,6 +2180,7 @@ gc(struct gc_args *args)
runtime·sweep.spanidx = 0;
runtime·unlock(&runtime·mheap.lock);
+
if(ConcurrentSweep && !args->eagersweep) {
runtime·lock(&runtime·gclock);
if(runtime·sweep.g == nil)
@@ -1527,9 +2200,6 @@ gc(struct gc_args *args)
runtime·mProf_GC();
g->m->traceback = 0;
-
- if(DebugPtrs)
- runtime·printf("GC end\n");
}
extern uintptr runtime·sizeof_C_MStats;
@@ -1802,7 +2472,7 @@ runtime·unrollgcprog_m(void)
prog = (byte*)typ->gc[1];
unrollgcprog1(mask, prog, &pos, false, true);
}
-
+
// atomic way to say mask[0] = 1
x = *(uintptr*)mask;
((byte*)&x)[0] = 1;
diff --git a/src/runtime/mgc0.go b/src/runtime/mgc0.go
index 3a7204b54..dc4eec519 100644
--- a/src/runtime/mgc0.go
+++ b/src/runtime/mgc0.go
@@ -83,54 +83,139 @@ func bgsweep() {
}
}
+const (
+ _PoisonGC = 0xf969696969696969 & (1<<(8*ptrSize) - 1)
+ _PoisonStack = 0x6868686868686868 & (1<<(8*ptrSize) - 1)
+)
+
// NOTE: Really dst *unsafe.Pointer, src unsafe.Pointer,
// but if we do that, Go inserts a write barrier on *dst = src.
//go:nosplit
func writebarrierptr(dst *uintptr, src uintptr) {
*dst = src
+ writebarrierptr_nostore(dst, src)
+}
+
+// Like writebarrierptr, but the store has already been applied.
+// Do not reapply.
+//go:nosplit
+func writebarrierptr_nostore(dst *uintptr, src uintptr) {
+ if getg() == nil { // very low-level startup
+ return
+ }
+
+ if src != 0 && (src < _PageSize || src == _PoisonGC || src == _PoisonStack) {
+ onM(func() { gothrow("bad pointer in write barrier") })
+ }
+
+ mp := acquirem()
+ if mp.inwb || mp.dying > 0 {
+ releasem(mp)
+ return
+ }
+ mp.inwb = true
+ oldscalar0 := mp.scalararg[0]
+ oldscalar1 := mp.scalararg[1]
+ mp.scalararg[0] = uintptr(unsafe.Pointer(dst))
+ mp.scalararg[1] = src
+ onM_signalok(gcmarkwb_m)
+ mp.scalararg[0] = oldscalar0
+ mp.scalararg[1] = oldscalar1
+ mp.inwb = false
+ releasem(mp)
}
//go:nosplit
func writebarrierstring(dst *[2]uintptr, src [2]uintptr) {
- dst[0] = src[0]
+ writebarrierptr(&dst[0], src[0])
dst[1] = src[1]
}
//go:nosplit
func writebarrierslice(dst *[3]uintptr, src [3]uintptr) {
- dst[0] = src[0]
+ writebarrierptr(&dst[0], src[0])
dst[1] = src[1]
dst[2] = src[2]
}
//go:nosplit
func writebarrieriface(dst *[2]uintptr, src [2]uintptr) {
- dst[0] = src[0]
- dst[1] = src[1]
-}
-
-//go:nosplit
-func writebarrierfat2(dst *[2]uintptr, _ *byte, src [2]uintptr) {
- dst[0] = src[0]
- dst[1] = src[1]
+ writebarrierptr(&dst[0], src[0])
+ writebarrierptr(&dst[1], src[1])
}
-//go:nosplit
-func writebarrierfat3(dst *[3]uintptr, _ *byte, src [3]uintptr) {
- dst[0] = src[0]
- dst[1] = src[1]
- dst[2] = src[2]
-}
+//go:generate go run wbfat_gen.go -- wbfat.go
+//
+// The above line generates multiword write barriers for
+// all the combinations of ptr+scalar up to four words.
+// The implementations are written to wbfat.go.
//go:nosplit
-func writebarrierfat4(dst *[4]uintptr, _ *byte, src [4]uintptr) {
- dst[0] = src[0]
- dst[1] = src[1]
- dst[2] = src[2]
- dst[3] = src[3]
+func writebarrierfat(typ *_type, dst, src unsafe.Pointer) {
+ mask := loadPtrMask(typ)
+ nptr := typ.size / ptrSize
+ for i := uintptr(0); i < nptr; i += 2 {
+ bits := mask[i/2]
+ if (bits>>2)&_BitsMask == _BitsPointer {
+ writebarrierptr((*uintptr)(dst), *(*uintptr)(src))
+ } else {
+ *(*uintptr)(dst) = *(*uintptr)(src)
+ }
+ dst = add(dst, ptrSize)
+ src = add(src, ptrSize)
+ if i+1 == nptr {
+ break
+ }
+ bits >>= 4
+ if (bits>>2)&_BitsMask == _BitsPointer {
+ writebarrierptr((*uintptr)(dst), *(*uintptr)(src))
+ } else {
+ *(*uintptr)(dst) = *(*uintptr)(src)
+ }
+ dst = add(dst, ptrSize)
+ src = add(src, ptrSize)
+ }
}
//go:nosplit
-func writebarrierfat(typ *_type, dst, src unsafe.Pointer) {
- memmove(dst, src, typ.size)
+func writebarriercopy(typ *_type, dst, src slice) int {
+ n := dst.len
+ if n > src.len {
+ n = src.len
+ }
+ if n == 0 {
+ return 0
+ }
+ dstp := unsafe.Pointer(dst.array)
+ srcp := unsafe.Pointer(src.array)
+
+ if uintptr(srcp) < uintptr(dstp) && uintptr(srcp)+uintptr(n)*typ.size > uintptr(dstp) {
+ // Overlap with src before dst.
+ // Copy backward, being careful not to move dstp/srcp
+ // out of the array they point into.
+ dstp = add(dstp, uintptr(n-1)*typ.size)
+ srcp = add(srcp, uintptr(n-1)*typ.size)
+ i := uint(0)
+ for {
+ writebarrierfat(typ, dstp, srcp)
+ if i++; i >= n {
+ break
+ }
+ dstp = add(dstp, -typ.size)
+ srcp = add(srcp, -typ.size)
+ }
+ } else {
+ // Copy forward, being careful not to move dstp/srcp
+ // out of the array they point into.
+ i := uint(0)
+ for {
+ writebarrierfat(typ, dstp, srcp)
+ if i++; i >= n {
+ break
+ }
+ dstp = add(dstp, typ.size)
+ srcp = add(srcp, typ.size)
+ }
+ }
+ return int(n)
}
diff --git a/src/runtime/mgc0.h b/src/runtime/mgc0.h
index 64f818914..519d7206e 100644
--- a/src/runtime/mgc0.h
+++ b/src/runtime/mgc0.h
@@ -45,8 +45,12 @@ enum {
// If you change these, also change scanblock.
// scanblock does "if(bits == BitsScalar || bits == BitsDead)" as "if(bits <= BitsScalar)".
BitsDead = 0,
- BitsScalar = 1,
- BitsPointer = 2,
+ BitsScalar = 1, // 01
+ BitsPointer = 2, // 10
+ BitsCheckMarkXor = 1, // 10
+ BitsScalarMarked = BitsScalar ^ BitsCheckMarkXor, // 00
+ BitsPointerMarked = BitsPointer ^ BitsCheckMarkXor, // 11
+
BitsMultiWord = 3,
// BitsMultiWord will be set for the first word of a multi-word item.
// When it is set, one of the following will be set for the second word.
@@ -56,7 +60,7 @@ enum {
BitsEface = 3,
// 64 bytes cover objects of size 1024/512 on 64/32 bits, respectively.
- MaxGCMask = 64,
+ MaxGCMask = 65536, // TODO(rsc): change back to 64
};
// Bits in per-word bitmap.
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index d64e3be69..d409c6c30 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -528,8 +528,6 @@ var allgs []*g // proc.c
// Most clients should use the runtime/pprof package instead
// of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
- sp := getcallersp(unsafe.Pointer(&p))
- pc := getcallerpc(unsafe.Pointer(&p))
n = NumGoroutine()
if n <= len(p) {
@@ -542,7 +540,11 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
if n <= len(p) {
ok = true
r := p
- saveg(pc, sp, gp, &r[0])
+ sp := getcallersp(unsafe.Pointer(&p))
+ pc := getcallerpc(unsafe.Pointer(&p))
+ onM(func() {
+ saveg(pc, sp, gp, &r[0])
+ })
r = r[1:]
for _, gp1 := range allgs {
if gp1 == gp || readgstatus(gp1) == _Gdead {
@@ -573,8 +575,6 @@ func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
- sp := getcallersp(unsafe.Pointer(&buf))
- pc := getcallerpc(unsafe.Pointer(&buf))
mp := acquirem()
gp := mp.curg
if all {
@@ -589,14 +589,19 @@ func Stack(buf []byte, all bool) int {
n := 0
if len(buf) > 0 {
- gp.writebuf = buf[0:0:len(buf)]
- goroutineheader(gp)
- traceback(pc, sp, 0, gp)
- if all {
- tracebackothers(gp)
- }
- n = len(gp.writebuf)
- gp.writebuf = nil
+ sp := getcallersp(unsafe.Pointer(&buf))
+ pc := getcallerpc(unsafe.Pointer(&buf))
+ onM(func() {
+ g0 := getg()
+ g0.writebuf = buf[0:0:len(buf)]
+ goroutineheader(gp)
+ traceback(pc, sp, 0, gp)
+ if all {
+ tracebackothers(gp)
+ }
+ n = len(g0.writebuf)
+ g0.writebuf = nil
+ })
}
if all {
@@ -623,7 +628,11 @@ func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
}
if gp.m.curg == nil || gp == gp.m.curg {
goroutineheader(gp)
- traceback(getcallerpc(unsafe.Pointer(&p)), getcallersp(unsafe.Pointer(&p)), 0, gp)
+ pc := getcallerpc(unsafe.Pointer(&p))
+ sp := getcallersp(unsafe.Pointer(&p))
+ onM(func() {
+ traceback(pc, sp, 0, gp)
+ })
} else {
goroutineheader(gp.m.curg)
traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
@@ -639,7 +648,11 @@ func tracefree(p unsafe.Pointer, size uintptr) {
gp.m.traceback = 2
print("tracefree(", p, ", ", hex(size), ")\n")
goroutineheader(gp)
- traceback(getcallerpc(unsafe.Pointer(&p)), getcallersp(unsafe.Pointer(&p)), 0, gp)
+ pc := getcallerpc(unsafe.Pointer(&p))
+ sp := getcallersp(unsafe.Pointer(&p))
+ onM(func() {
+ traceback(pc, sp, 0, gp)
+ })
print("\n")
gp.m.traceback = 0
unlock(&tracelock)
diff --git a/src/runtime/os_android.c b/src/runtime/os_android.c
index 58e0dac93..5805f6871 100644
--- a/src/runtime/os_android.c
+++ b/src/runtime/os_android.c
@@ -9,7 +9,7 @@
// Export the runtime entry point symbol.
//
// Used by the app package to start the Go runtime after loading
-// a shared library via JNI. See code.google.com/p/go.mobile/app.
+// a shared library via JNI. See golang.org/x/mobile/app.
void _rt0_arm_linux1();
#pragma cgo_export_static _rt0_arm_linux1
diff --git a/src/runtime/os_darwin.c b/src/runtime/os_darwin.c
index bbd29282b..b866863d0 100644
--- a/src/runtime/os_darwin.c
+++ b/src/runtime/os_darwin.c
@@ -135,7 +135,10 @@ void
runtime·mpreinit(M *mp)
{
mp->gsignal = runtime·malg(32*1024); // OS X wants >=8K, Linux >=2K
+ runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
+
mp->gsignal->m = mp;
+ runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
}
// Called to initialize a new m (including the bootstrap m).
diff --git a/src/runtime/os_dragonfly.c b/src/runtime/os_dragonfly.c
index e372205ec..051192ad3 100644
--- a/src/runtime/os_dragonfly.c
+++ b/src/runtime/os_dragonfly.c
@@ -195,7 +195,10 @@ void
runtime·mpreinit(M *mp)
{
mp->gsignal = runtime·malg(32*1024);
+ runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
+
mp->gsignal->m = mp;
+ runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
}
// Called to initialize a new m (including the bootstrap m).
diff --git a/src/runtime/os_freebsd.c b/src/runtime/os_freebsd.c
index a513cb604..1c126547a 100644
--- a/src/runtime/os_freebsd.c
+++ b/src/runtime/os_freebsd.c
@@ -203,7 +203,10 @@ void
runtime·mpreinit(M *mp)
{
mp->gsignal = runtime·malg(32*1024);
+ runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
+
mp->gsignal->m = mp;
+ runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
}
// Called to initialize a new m (including the bootstrap m).
diff --git a/src/runtime/os_linux.c b/src/runtime/os_linux.c
index 9bd123d59..cc23774e3 100644
--- a/src/runtime/os_linux.c
+++ b/src/runtime/os_linux.c
@@ -233,7 +233,10 @@ void
runtime·mpreinit(M *mp)
{
mp->gsignal = runtime·malg(32*1024); // OS X wants >=8K, Linux >=2K
+ runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
+
mp->gsignal->m = mp;
+ runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
}
// Called to initialize a new m (including the bootstrap m).
diff --git a/src/runtime/os_nacl.c b/src/runtime/os_nacl.c
index 14b558303..ad72cc7c6 100644
--- a/src/runtime/os_nacl.c
+++ b/src/runtime/os_nacl.c
@@ -20,7 +20,10 @@ void
runtime·mpreinit(M *mp)
{
mp->gsignal = runtime·malg(32*1024); // OS X wants >=8K, Linux >=2K
+ runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
+
mp->gsignal->m = mp;
+ runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
}
// Called to initialize a new m (including the bootstrap m).
diff --git a/src/runtime/os_netbsd.c b/src/runtime/os_netbsd.c
index 58e5bedf2..28929ea57 100644
--- a/src/runtime/os_netbsd.c
+++ b/src/runtime/os_netbsd.c
@@ -271,7 +271,10 @@ void
runtime·mpreinit(M *mp)
{
mp->gsignal = runtime·malg(32*1024);
+ runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
+
mp->gsignal->m = mp;
+ runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
}
// Called to initialize a new m (including the bootstrap m).
diff --git a/src/runtime/os_openbsd.c b/src/runtime/os_openbsd.c
index eebaa13ee..960aaffff 100644
--- a/src/runtime/os_openbsd.c
+++ b/src/runtime/os_openbsd.c
@@ -217,7 +217,10 @@ void
runtime·mpreinit(M *mp)
{
mp->gsignal = runtime·malg(32*1024);
+ runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
+
mp->gsignal->m = mp;
+ runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
}
// Called to initialize a new m (including the bootstrap m).
diff --git a/src/runtime/os_plan9.c b/src/runtime/os_plan9.c
index f8c543f6f..18460fc12 100644
--- a/src/runtime/os_plan9.c
+++ b/src/runtime/os_plan9.c
@@ -20,12 +20,18 @@ runtime·mpreinit(M *mp)
{
// Initialize stack and goroutine for note handling.
mp->gsignal = runtime·malg(32*1024);
+ runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
+
mp->gsignal->m = mp;
+ runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
+
mp->notesig = (int8*)runtime·mallocgc(ERRMAX*sizeof(int8), nil, FlagNoScan);
+ runtime·writebarrierptr_nostore(&mp->notesig, mp->notesig);
// Initialize stack for handling strings from the
// errstr system call, as used in package syscall.
mp->errstr = (byte*)runtime·mallocgc(ERRMAX*sizeof(byte), nil, FlagNoScan);
+ runtime·writebarrierptr_nostore(&mp->errstr, mp->errstr);
}
// Called to initialize a new m (including the bootstrap m).
diff --git a/src/runtime/os_solaris.c b/src/runtime/os_solaris.c
index e16b8e637..bee91d8e6 100644
--- a/src/runtime/os_solaris.c
+++ b/src/runtime/os_solaris.c
@@ -176,7 +176,10 @@ void
runtime·mpreinit(M *mp)
{
mp->gsignal = runtime·malg(32*1024);
+ runtime·writebarrierptr_nostore(&mp->gsignal, mp->gsignal);
+
mp->gsignal->m = mp;
+ runtime·writebarrierptr_nostore(&mp->gsignal->m, mp->gsignal->m);
}
// Called to initialize a new m (including the bootstrap m).
diff --git a/src/runtime/print1.go b/src/runtime/print1.go
index 8f8268873..3d812bd04 100644
--- a/src/runtime/print1.go
+++ b/src/runtime/print1.go
@@ -41,7 +41,31 @@ func snprintf(dst *byte, n int32, s *byte) {
gp.writebuf = nil
}
-//var debuglock mutex
+var debuglock mutex
+
+// The compiler emits calls to printlock and printunlock around
+// the multiple calls that implement a single Go print or println
+// statement. Some of the print helpers (printsp, for example)
+// call print recursively. There is also the problem of a crash
+// happening during the print routines and needing to acquire
+// the print lock to print information about the crash.
+// For both these reasons, let a thread acquire the printlock 'recursively'.
+
+func printlock() {
+ mp := getg().m
+ mp.printlock++
+ if mp.printlock == 1 {
+ lock(&debuglock)
+ }
+}
+
+func printunlock() {
+ mp := getg().m
+ mp.printlock--
+ if mp.printlock == 0 {
+ unlock(&debuglock)
+ }
+}
// write to goroutine-local buffer if diverting output,
// or else standard error.
@@ -80,7 +104,7 @@ func printnl() {
// Very simple printf. Only for debugging prints.
// Do not add to this without checking with Rob.
func vprintf(str string, arg unsafe.Pointer) {
- //lock(&debuglock);
+ printlock()
s := bytes(str)
start := 0
@@ -160,7 +184,7 @@ func vprintf(str string, arg unsafe.Pointer) {
gwrite(s[start:i])
}
- //unlock(&debuglock);
+ printunlock()
}
func printpc(p unsafe.Pointer) {
diff --git a/src/runtime/proc.c b/src/runtime/proc.c
index feee8ea19..ce39db4ab 100644
--- a/src/runtime/proc.c
+++ b/src/runtime/proc.c
@@ -423,13 +423,7 @@ runtime·casgstatus(G *gp, uint32 oldval, uint32 newval)
// loop if gp->atomicstatus is in a scan state giving
// GC time to finish and change the state to oldval.
while(!runtime·cas(&gp->atomicstatus, oldval, newval)) {
- // Help GC if needed.
- if(gp->preemptscan && !gp->gcworkdone && (oldval == Grunning || oldval == Gsyscall)) {
- gp->preemptscan = false;
- g->m->ptrarg[0] = gp;
- fn = helpcasgstatus;
- runtime·onM(&fn);
- }
+
}
}
@@ -504,6 +498,13 @@ runtime·stopg(G *gp)
return false;
case Grunning:
+ if(runtime·gcphase == GCscan) {
+ gp->gcworkdone = true;
+ return false;
+ // Running routines not scanned during
+ // GCscan phase, we only scan non-running routines.
+ }
+
// Claim goroutine, so we aren't racing with a status
// transition away from Grunning.
if(!runtime·castogscanstatus(gp, Grunning, Gscanrunning))
@@ -581,9 +582,10 @@ mquiesce(G *gpmaster)
uint32 status;
uint32 activeglen;
- activeglen = runtime·allglen;
// enqueue the calling goroutine.
runtime·restartg(gpmaster);
+
+ activeglen = runtime·allglen;
for(i = 0; i < activeglen; i++) {
gp = runtime·allg[i];
if(runtime·readgstatus(gp) == Gdead)
@@ -874,7 +876,9 @@ runtime·allocm(P *p)
mp->g0 = runtime·malg(-1);
else
mp->g0 = runtime·malg(8192);
+ runtime·writebarrierptr_nostore(&mp->g0, mp->g0);
mp->g0->m = mp;
+ runtime·writebarrierptr_nostore(&mp->g0->m, mp->g0->m);
if(p == g->m->p)
releasep();
@@ -990,7 +994,7 @@ runtime·newextram(void)
// the goroutine stack ends.
mp = runtime·allocm(nil);
gp = runtime·malg(4096);
- gp->sched.pc = (uintptr)runtime·goexit;
+ gp->sched.pc = (uintptr)runtime·goexit + PCQuantum;
gp->sched.sp = gp->stack.hi;
gp->sched.sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
gp->sched.lr = 0;
@@ -1058,7 +1062,7 @@ runtime·dropm(void)
unlockextra(mp);
}
-#define MLOCKED ((M*)1)
+#define MLOCKED 1
// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
@@ -1069,28 +1073,28 @@ runtime·dropm(void)
static M*
lockextra(bool nilokay)
{
- M *mp;
+ uintptr mpx;
void (*yield)(void);
for(;;) {
- mp = runtime·atomicloadp(&runtime·extram);
- if(mp == MLOCKED) {
+ mpx = runtime·atomicloaduintptr((uintptr*)&runtime·extram);
+ if(mpx == MLOCKED) {
yield = runtime·osyield;
yield();
continue;
}
- if(mp == nil && !nilokay) {
+ if(mpx == 0 && !nilokay) {
runtime·usleep(1);
continue;
}
- if(!runtime·casp(&runtime·extram, mp, MLOCKED)) {
+ if(!runtime·casuintptr((uintptr*)&runtime·extram, mpx, MLOCKED)) {
yield = runtime·osyield;
yield();
continue;
}
break;
}
- return mp;
+ return (M*)mpx;
}
#pragma textflag NOSPLIT
@@ -1915,6 +1919,7 @@ exitsyscallfast(void)
// Freezetheworld sets stopwait but does not retake P's.
if(runtime·sched.stopwait) {
+ g->m->mcache = nil;
g->m->p = nil;
return false;
}
@@ -1927,6 +1932,7 @@ exitsyscallfast(void)
return true;
}
// Try to get any other idle P.
+ g->m->mcache = nil;
g->m->p = nil;
if(runtime·sched.pidle) {
fn = exitsyscallfast_pidle;
@@ -2424,9 +2430,10 @@ static struct ProfState {
int32 hz;
} prof;
-static void System(void) {}
-static void ExternalCode(void) {}
-static void GC(void) {}
+static void System(void) { System(); }
+static void ExternalCode(void) { ExternalCode(); }
+static void GC(void) { GC(); }
+
extern void runtime·cpuproftick(uintptr*, int32);
extern byte runtime·etext[];
@@ -2614,6 +2621,8 @@ runtime·setcpuprofilerate_m(void)
P *runtime·newP(void);
// Change number of processors. The world is stopped, sched is locked.
+// gcworkbufs are not being modified by either the GC or
+// the write barrier code.
static void
procresize(int32 new)
{
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 5b8c7d8ae..f41ffbff3 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -165,6 +165,9 @@ func acquireSudog() *sudog {
// which keeps the garbage collector from being invoked.
mp := acquirem()
p := new(sudog)
+ if p.elem != nil {
+ gothrow("acquireSudog: found p.elem != nil after new")
+ }
releasem(mp)
return p
}
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index 977c4547d..330ed429b 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -94,6 +94,7 @@ typedef struct PollDesc PollDesc;
typedef struct DebugVars DebugVars;
typedef struct ForceGCState ForceGCState;
typedef struct Stack Stack;
+typedef struct Workbuf Workbuf;
/*
* Per-CPU declaration.
@@ -304,7 +305,7 @@ struct G
bool paniconfault; // panic (instead of crash) on unexpected fault address
bool preemptscan; // preempted g does scan for GC
bool gcworkdone; // debug: cleared at begining of gc work phase cycle, set by gcphasework, tested at end of cycle
- bool throwsplit; // must not split stack
+ bool throwsplit; // must not split stack
int8 raceignore; // ignore race detection events
M* m; // for debuggers, but offset not hard-coded
M* lockedm;
@@ -344,6 +345,8 @@ struct M
int32 helpgc;
bool spinning; // M is out of work and is actively looking for work
bool blocked; // M is blocked on a Note
+ bool inwb; // M is executing a write barrier
+ int8 printlock;
uint32 fastrand;
uint64 ncgocall; // number of cgo calls in total
int32 ncgo; // number of cgo calls currently in progress
@@ -570,9 +573,10 @@ enum {
#endif
// Lock-free stack node.
+// Also known to export_test.go.
struct LFNode
{
- LFNode *next;
+ uint64 next;
uintptr pushcnt;
};
@@ -598,6 +602,16 @@ struct ParFor
uint64 nsleep;
};
+enum {
+ WorkbufSize = 4*1024,
+};
+struct Workbuf
+{
+ LFNode node; // must be first
+ uintptr nobj;
+ byte* obj[(WorkbufSize-sizeof(LFNode)-sizeof(uintptr))/PtrSize];
+};
+
// Track memory allocated by code not written in Go during a cgo call,
// so that the garbage collector can see them.
struct CgoMal
@@ -620,12 +634,14 @@ struct DebugVars
// Indicates to write barrier and sychronization task to preform.
enum
-{ // Synchronization Write barrier
- GCoff, // stop and start nop
- GCquiesce, // stop and start nop
- GCstw, // stop the ps nop
- GCmark, // scan the stacks and start no white to black
- GCsweep, // stop and start nop
+{ // Action WB installation
+ GCoff = 0, // stop and start no wb
+ GCquiesce, // stop and start no wb
+ GCstw, // stop the ps nop
+ GCscan, // scan the stacks prior to marking
+ GCmark, // mark use wbufs from GCscan and globals, scan the stacks, then go to GCtermination
+ GCmarktermination, // mark termination detection. Allocate black, Ps help out GC
+ GCsweep, // stop and start nop
};
struct ForceGCState
@@ -636,6 +652,7 @@ struct ForceGCState
};
extern uint32 runtime·gcphase;
+extern Mutex runtime·allglock;
/*
* defined macros
@@ -666,6 +683,7 @@ enum {
uint32 runtime·readgstatus(G*);
void runtime·casgstatus(G*, uint32, uint32);
+bool runtime·castogscanstatus(G*, uint32, uint32);
void runtime·quiesce(G*);
bool runtime·stopg(G*);
void runtime·restartg(G*);
@@ -882,6 +900,7 @@ int32 runtime·round2(int32 x); // round x up to a power of 2.
bool runtime·cas(uint32*, uint32, uint32);
bool runtime·cas64(uint64*, uint64, uint64);
bool runtime·casp(void**, void*, void*);
+bool runtime·casuintptr(uintptr*, uintptr, uintptr);
// Don't confuse with XADD x86 instruction,
// this one is actually 'addx', that is, add-and-fetch.
uint32 runtime·xadd(uint32 volatile*, int32);
@@ -1108,6 +1127,8 @@ void runtime·osyield(void);
void runtime·lockOSThread(void);
void runtime·unlockOSThread(void);
+void runtime·writebarrierptr_nostore(void*, void*);
+
bool runtime·showframe(Func*, G*);
void runtime·printcreatedby(G*);
diff --git a/src/runtime/select.go b/src/runtime/select.go
index efe68c1f5..d703e1d79 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -377,12 +377,7 @@ loop:
// iterating through the linked list they are in reverse order.
cas = nil
sglist = gp.waiting
- // Clear all selectdone and elem before unlinking from gp.waiting.
- // They must be cleared before being put back into the sudog cache.
- // Clear before unlinking, because if a stack copy happens after the unlink,
- // they will not be updated, they will be left pointing to the old stack,
- // which creates dangling pointers, which may be detected by the
- // garbage collector.
+ // Clear all elem before unlinking from gp.waiting.
for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
sg1.selectdone = nil
sg1.elem = nil
diff --git a/src/runtime/sema.go b/src/runtime/sema.go
index a42a29988..d2a028c01 100644
--- a/src/runtime/sema.go
+++ b/src/runtime/sema.go
@@ -259,6 +259,7 @@ func syncsemrelease(s *syncSema, n uint32) {
}
s.tail = w
goparkunlock(&s.lock, "semarelease")
+ releaseSudog(w)
} else {
unlock(&s.lock)
}
diff --git a/src/runtime/stack.c b/src/runtime/stack.c
index 072bc242b..ffae73a2a 100644
--- a/src/runtime/stack.c
+++ b/src/runtime/stack.c
@@ -382,8 +382,6 @@ adjustpointers(byte **scanp, BitVector *bv, AdjustInfo *adjinfo, Func *f)
uintptr delta;
int32 num, i;
byte *p, *minp, *maxp;
- Type *t;
- Itab *tab;
minp = (byte*)adjinfo->old.lo;
maxp = (byte*)adjinfo->old.hi;
@@ -415,43 +413,7 @@ adjustpointers(byte **scanp, BitVector *bv, AdjustInfo *adjinfo, Func *f)
}
break;
case BitsMultiWord:
- switch(bv->bytedata[(i+1) / (8 / BitsPerPointer)] >> ((i+1) * BitsPerPointer & 7) & 3) {
- default:
- runtime·throw("unexpected garbage collection bits");
- case BitsEface:
- t = (Type*)scanp[i];
- if(t != nil && ((t->kind & KindDirectIface) == 0 || (t->kind & KindNoPointers) == 0)) {
- p = scanp[i+1];
- if(minp <= p && p < maxp) {
- if(StackDebug >= 3)
- runtime·printf("adjust eface %p\n", p);
- if(t->size > PtrSize) // currently we always allocate such objects on the heap
- runtime·throw("large interface value found on stack");
- scanp[i+1] = p + delta;
- }
- }
- i++;
- break;
- case BitsIface:
- tab = (Itab*)scanp[i];
- if(tab != nil) {
- t = tab->type;
- //runtime·printf(" type=%p\n", t);
- if((t->kind & KindDirectIface) == 0 || (t->kind & KindNoPointers) == 0) {
- p = scanp[i+1];
- if(minp <= p && p < maxp) {
- if(StackDebug >= 3)
- runtime·printf("adjust iface %p\n", p);
- if(t->size > PtrSize) // currently we always allocate such objects on the heap
- runtime·throw("large interface value found on stack");
- scanp[i+1] = p + delta;
- }
- }
- }
- i++;
- break;
- }
- break;
+ runtime·throw("adjustpointers: unexpected garbage collection bits");
}
}
}
@@ -587,13 +549,13 @@ adjustsudogs(G *gp, AdjustInfo *adjinfo)
}
// Copies gp's stack to a new stack of a different size.
+// Caller must have changed gp status to Gcopystack.
static void
copystack(G *gp, uintptr newsize)
{
Stack old, new;
uintptr used;
AdjustInfo adjinfo;
- uint32 oldstatus;
bool (*cb)(Stkframe*, void*);
byte *p, *ep;
@@ -637,20 +599,11 @@ copystack(G *gp, uintptr newsize)
}
runtime·memmove((byte*)new.hi - used, (byte*)old.hi - used, used);
- oldstatus = runtime·readgstatus(gp);
- oldstatus &= ~Gscan;
- if(oldstatus == Gwaiting || oldstatus == Grunnable)
- runtime·casgstatus(gp, oldstatus, Gcopystack); // oldstatus is Gwaiting or Grunnable
- else
- runtime·throw("copystack: bad status, not Gwaiting or Grunnable");
-
// Swap out old stack for new one
gp->stack = new;
gp->stackguard0 = new.lo + StackGuard; // NOTE: might clobber a preempt request
gp->sched.sp = new.hi - used;
- runtime·casgstatus(gp, Gcopystack, oldstatus); // oldstatus is Gwaiting or Grunnable
-
// free old stack
if(StackPoisonCopy) {
p = (byte*)old.lo;
@@ -700,6 +653,7 @@ void
runtime·newstack(void)
{
int32 oldsize, newsize;
+ uint32 oldstatus;
uintptr sp;
G *gp;
Gobuf morebuf;
@@ -752,6 +706,14 @@ runtime·newstack(void)
runtime·printf("runtime: split stack overflow: %p < %p\n", sp, gp->stack.lo);
runtime·throw("runtime: split stack overflow");
}
+
+ if(gp->sched.ctxt != nil) {
+ // morestack wrote sched.ctxt on its way in here,
+ // without a write barrier. Run the write barrier now.
+ // It is not possible to be preempted between then
+ // and now, so it's okay.
+ runtime·writebarrierptr_nostore(&gp->sched.ctxt, gp->sched.ctxt);
+ }
if(gp->stackguard0 == (uintptr)StackPreempt) {
if(gp == g->m->g0)
@@ -789,12 +751,15 @@ runtime·newstack(void)
runtime·throw("stack overflow");
}
- // Note that the concurrent GC might be scanning the stack as we try to replace it.
- // copystack takes care of the appropriate coordination with the stack scanner.
+ oldstatus = runtime·readgstatus(gp);
+ oldstatus &= ~Gscan;
+ runtime·casgstatus(gp, oldstatus, Gcopystack); // oldstatus is Gwaiting or Grunnable
+ // The concurrent GC will not scan the stack while we are doing the copy since
+ // the gp is in a Gcopystack status.
copystack(gp, newsize);
if(StackDebug >= 1)
runtime·printf("stack grow done\n");
- runtime·casgstatus(gp, Gwaiting, Grunning);
+ runtime·casgstatus(gp, Gcopystack, Grunning);
runtime·gogo(&gp->sched);
}
@@ -825,6 +790,7 @@ void
runtime·shrinkstack(G *gp)
{
uintptr used, oldsize, newsize;
+ uint32 oldstatus;
if(runtime·readgstatus(gp) == Gdead) {
if(gp->stack.lo != 0) {
@@ -858,8 +824,19 @@ runtime·shrinkstack(G *gp)
#endif
if(StackDebug > 0)
runtime·printf("shrinking stack %D->%D\n", (uint64)oldsize, (uint64)newsize);
+ // This is being done in a Gscan state and was initiated by the GC so no need to move to
+ // the Gcopystate.
+ // The world is stopped, so the goroutine must be Gwaiting or Grunnable,
+ // and what it is is not changing underfoot.
+
+ oldstatus = runtime·readgstatus(gp);
+ oldstatus &= ~Gscan;
+ if(oldstatus != Gwaiting && oldstatus != Grunnable)
+ runtime·throw("status is not Gwaiting or Grunnable");
+ runtime·casgstatus(gp, oldstatus, Gcopystack);
copystack(gp, newsize);
-}
+ runtime·casgstatus(gp, Gcopystack, oldstatus);
+ }
// Do any delayed stack freeing that was queued up during GC.
void
diff --git a/src/runtime/string.c b/src/runtime/string.c
index ed5debc33..475ea2de6 100644
--- a/src/runtime/string.c
+++ b/src/runtime/string.c
@@ -48,7 +48,7 @@ runtime·gostringnocopy(byte *str)
s.len = runtime·findnull(str);
while(true) {
ms = runtime·maxstring;
- if(s.len <= ms || runtime·casp((void**)&runtime·maxstring, (void*)ms, (void*)s.len))
+ if(s.len <= ms || runtime·casuintptr(&runtime·maxstring, ms, s.len))
return s;
}
}
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index 341904719..9889567d6 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -106,6 +106,16 @@ func recovery_m(*g)
func mcacheRefill_m()
func largeAlloc_m()
func gc_m()
+func gcscan_m()
+func gcmark_m()
+func gccheckmark_m()
+func gccheckmarkenable_m()
+func gccheckmarkdisable_m()
+func gcinstallmarkwb_m()
+func gcinstalloffwb_m()
+func gcmarknewobject_m()
+func gcmarkwb_m()
+func finishsweep_m()
func scavenge_m()
func setFinalizer_m()
func removeFinalizer_m()
@@ -204,9 +214,6 @@ func write(fd uintptr, p unsafe.Pointer, n int32) int32
func cas(ptr *uint32, old, new uint32) bool
//go:noescape
-func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
-
-//go:noescape
func casuintptr(ptr *uintptr, old, new uintptr) bool
//go:noescape
@@ -221,6 +228,34 @@ func atomicloaduint(ptr *uint) uint
//go:noescape
func setcallerpc(argp unsafe.Pointer, pc uintptr)
+// getcallerpc returns the program counter (PC) of its caller's caller.
+// getcallersp returns the stack pointer (SP) of its caller's caller.
+// For both, the argp must be a pointer to the caller's first function argument.
+// The implementation may or may not use argp, depending on
+// the architecture.
+//
+// For example:
+//
+// func f(arg1, arg2, arg3 int) {
+// pc := getcallerpc(unsafe.Pointer(&arg1))
+// sp := getcallerpc(unsafe.Pointer(&arg2))
+// }
+//
+// These two lines find the PC and SP immediately following
+// the call to f (where f will return).
+//
+// The call to getcallerpc and getcallersp must be done in the
+// frame being asked about. It would not be correct for f to pass &arg1
+// to another function g and let g call getcallerpc/getcallersp.
+// The call inside g might return information about g's caller or
+// information about f's caller or complete garbage.
+//
+// The result of getcallersp is correct at the time of the return,
+// but it may be invalidated by any subsequent call to a function
+// that might relocate the stack in order to grow or shrink it.
+// A general rule is that the result of getcallersp should be used
+// immediately and can only be passed to nosplit functions.
+
//go:noescape
func getcallerpc(argp unsafe.Pointer) uintptr
diff --git a/src/runtime/sys_x86.c b/src/runtime/sys_x86.c
index a450b3e58..edbe47ff4 100644
--- a/src/runtime/sys_x86.c
+++ b/src/runtime/sys_x86.c
@@ -20,6 +20,7 @@ runtime·gostartcall(Gobuf *gobuf, void (*fn)(void), void *ctxt)
gobuf->sp = (uintptr)sp;
gobuf->pc = (uintptr)fn;
gobuf->ctxt = ctxt;
+ runtime·writebarrierptr_nostore(&gobuf->ctxt, ctxt);
}
// Called to rewind context saved during morestack back to beginning of function.
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 834435b40..1c6ce6e64 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -101,6 +101,22 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
gothrow("gentraceback before goexitPC initialization")
}
g := getg()
+ if g == gp && g == g.m.curg {
+ // The starting sp has been passed in as a uintptr, and the caller may
+ // have other uintptr-typed stack references as well.
+ // If during one of the calls that got us here or during one of the
+ // callbacks below the stack must be grown, all these uintptr references
+ // to the stack will not be updated, and gentraceback will continue
+ // to inspect the old stack memory, which may no longer be valid.
+ // Even if all the variables were updated correctly, it is not clear that
+ // we want to expose a traceback that begins on one stack and ends
+ // on another stack. That could confuse callers quite a bit.
+ // Instead, we require that gentraceback and any other function that
+ // accepts an sp for the current goroutine (typically obtained by
+ // calling getcallersp) must not run on that goroutine's stack but
+ // instead on the g0 stack.
+ gothrow("gentraceback cannot trace user goroutine on its own stack")
+ }
gotraceback := gotraceback(nil)
if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
if gp.syscallsp != 0 {
@@ -511,7 +527,11 @@ func traceback1(pc uintptr, sp uintptr, lr uintptr, gp *g, flags uint) {
func callers(skip int, pcbuf *uintptr, m int) int {
sp := getcallersp(unsafe.Pointer(&skip))
pc := uintptr(getcallerpc(unsafe.Pointer(&skip)))
- return gentraceback(pc, sp, 0, getg(), skip, pcbuf, m, nil, nil, 0)
+ var n int
+ onM(func() {
+ n = gentraceback(pc, sp, 0, getg(), skip, pcbuf, m, nil, nil, 0)
+ })
+ return n
}
func gcallers(gp *g, skip int, pcbuf *uintptr, m int) int {
diff --git a/src/runtime/wbfat.go b/src/runtime/wbfat.go
new file mode 100644
index 000000000..75c58b26b
--- /dev/null
+++ b/src/runtime/wbfat.go
@@ -0,0 +1,190 @@
+// generated by wbfat_gen.go; use go generate
+
+package runtime
+
+//go:nosplit
+func writebarrierfat01(dst *[2]uintptr, _ *byte, src [2]uintptr) {
+ dst[0] = src[0]
+ writebarrierptr(&dst[1], src[1])
+}
+
+//go:nosplit
+func writebarrierfat10(dst *[2]uintptr, _ *byte, src [2]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ dst[1] = src[1]
+}
+
+//go:nosplit
+func writebarrierfat11(dst *[2]uintptr, _ *byte, src [2]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ writebarrierptr(&dst[1], src[1])
+}
+
+//go:nosplit
+func writebarrierfat001(dst *[3]uintptr, _ *byte, src [3]uintptr) {
+ dst[0] = src[0]
+ dst[1] = src[1]
+ writebarrierptr(&dst[2], src[2])
+}
+
+//go:nosplit
+func writebarrierfat010(dst *[3]uintptr, _ *byte, src [3]uintptr) {
+ dst[0] = src[0]
+ writebarrierptr(&dst[1], src[1])
+ dst[2] = src[2]
+}
+
+//go:nosplit
+func writebarrierfat011(dst *[3]uintptr, _ *byte, src [3]uintptr) {
+ dst[0] = src[0]
+ writebarrierptr(&dst[1], src[1])
+ writebarrierptr(&dst[2], src[2])
+}
+
+//go:nosplit
+func writebarrierfat100(dst *[3]uintptr, _ *byte, src [3]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ dst[1] = src[1]
+ dst[2] = src[2]
+}
+
+//go:nosplit
+func writebarrierfat101(dst *[3]uintptr, _ *byte, src [3]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ dst[1] = src[1]
+ writebarrierptr(&dst[2], src[2])
+}
+
+//go:nosplit
+func writebarrierfat110(dst *[3]uintptr, _ *byte, src [3]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ writebarrierptr(&dst[1], src[1])
+ dst[2] = src[2]
+}
+
+//go:nosplit
+func writebarrierfat111(dst *[3]uintptr, _ *byte, src [3]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ writebarrierptr(&dst[1], src[1])
+ writebarrierptr(&dst[2], src[2])
+}
+
+//go:nosplit
+func writebarrierfat0001(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ dst[0] = src[0]
+ dst[1] = src[1]
+ dst[2] = src[2]
+ writebarrierptr(&dst[3], src[3])
+}
+
+//go:nosplit
+func writebarrierfat0010(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ dst[0] = src[0]
+ dst[1] = src[1]
+ writebarrierptr(&dst[2], src[2])
+ dst[3] = src[3]
+}
+
+//go:nosplit
+func writebarrierfat0011(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ dst[0] = src[0]
+ dst[1] = src[1]
+ writebarrierptr(&dst[2], src[2])
+ writebarrierptr(&dst[3], src[3])
+}
+
+//go:nosplit
+func writebarrierfat0100(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ dst[0] = src[0]
+ writebarrierptr(&dst[1], src[1])
+ dst[2] = src[2]
+ dst[3] = src[3]
+}
+
+//go:nosplit
+func writebarrierfat0101(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ dst[0] = src[0]
+ writebarrierptr(&dst[1], src[1])
+ dst[2] = src[2]
+ writebarrierptr(&dst[3], src[3])
+}
+
+//go:nosplit
+func writebarrierfat0110(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ dst[0] = src[0]
+ writebarrierptr(&dst[1], src[1])
+ writebarrierptr(&dst[2], src[2])
+ dst[3] = src[3]
+}
+
+//go:nosplit
+func writebarrierfat0111(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ dst[0] = src[0]
+ writebarrierptr(&dst[1], src[1])
+ writebarrierptr(&dst[2], src[2])
+ writebarrierptr(&dst[3], src[3])
+}
+
+//go:nosplit
+func writebarrierfat1000(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ dst[1] = src[1]
+ dst[2] = src[2]
+ dst[3] = src[3]
+}
+
+//go:nosplit
+func writebarrierfat1001(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ dst[1] = src[1]
+ dst[2] = src[2]
+ writebarrierptr(&dst[3], src[3])
+}
+
+//go:nosplit
+func writebarrierfat1010(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ dst[1] = src[1]
+ writebarrierptr(&dst[2], src[2])
+ dst[3] = src[3]
+}
+
+//go:nosplit
+func writebarrierfat1011(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ dst[1] = src[1]
+ writebarrierptr(&dst[2], src[2])
+ writebarrierptr(&dst[3], src[3])
+}
+
+//go:nosplit
+func writebarrierfat1100(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ writebarrierptr(&dst[1], src[1])
+ dst[2] = src[2]
+ dst[3] = src[3]
+}
+
+//go:nosplit
+func writebarrierfat1101(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ writebarrierptr(&dst[1], src[1])
+ dst[2] = src[2]
+ writebarrierptr(&dst[3], src[3])
+}
+
+//go:nosplit
+func writebarrierfat1110(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ writebarrierptr(&dst[1], src[1])
+ writebarrierptr(&dst[2], src[2])
+ dst[3] = src[3]
+}
+
+//go:nosplit
+func writebarrierfat1111(dst *[4]uintptr, _ *byte, src [4]uintptr) {
+ writebarrierptr(&dst[0], src[0])
+ writebarrierptr(&dst[1], src[1])
+ writebarrierptr(&dst[2], src[2])
+ writebarrierptr(&dst[3], src[3])
+}
diff --git a/src/runtime/wbfat_gen.go b/src/runtime/wbfat_gen.go
new file mode 100644
index 000000000..78d5b6271
--- /dev/null
+++ b/src/runtime/wbfat_gen.go
@@ -0,0 +1,41 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+)
+
+func main() {
+ flag.Parse()
+ if flag.NArg() > 0 {
+ f, err := os.Create(flag.Arg(0))
+ if err != nil {
+ log.Fatal(err)
+ }
+ os.Stdout = f
+ }
+ fmt.Printf("// generated by wbfat_gen.go; use go generate\n\n")
+ fmt.Printf("package runtime\n")
+ for i := uint(2); i <= 4; i++ {
+ for j := 1; j < 1<<i; j++ {
+ fmt.Printf("\n//go:nosplit\n")
+ fmt.Printf("func writebarrierfat%0*b(dst *[%d]uintptr, _ *byte, src [%d]uintptr) {\n", int(i), j, i, i)
+ for k := uint(0); k < i; k++ {
+ if j&(1<<(i-1-k)) != 0 {
+ fmt.Printf("\twritebarrierptr(&dst[%d], src[%d])\n", k, k)
+ } else {
+ fmt.Printf("\tdst[%d] = src[%d]\n", k, k)
+ }
+ }
+ fmt.Printf("}\n")
+ }
+ }
+}
diff --git a/src/sync/atomic/atomic_test.go b/src/sync/atomic/atomic_test.go
index 9f13af48b..ec573aa8c 100644
--- a/src/sync/atomic/atomic_test.go
+++ b/src/sync/atomic/atomic_test.go
@@ -164,7 +164,7 @@ func TestSwapPointer(t *testing.T) {
x.before = magicptr
x.after = magicptr
var j uintptr
- for delta := uintptr(1); delta+delta > delta; delta += delta {
+ for delta := uintptr(1 << 16); delta+delta > delta; delta += delta {
k := SwapPointer(&x.i, unsafe.Pointer(delta))
if uintptr(x.i) != delta || uintptr(k) != j {
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
@@ -456,7 +456,7 @@ func TestCompareAndSwapPointer(t *testing.T) {
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
- for val := uintptr(1); val+val > val; val += val {
+ for val := uintptr(1 << 16); val+val > val; val += val {
x.i = unsafe.Pointer(val)
if !CompareAndSwapPointer(&x.i, unsafe.Pointer(val), unsafe.Pointer(val+1)) {
t.Fatalf("should have swapped %#x %#x", val, val+1)
@@ -595,7 +595,7 @@ func TestLoadPointer(t *testing.T) {
magicptr := uintptr(m)
x.before = magicptr
x.after = magicptr
- for delta := uintptr(1); delta+delta > delta; delta += delta {
+ for delta := uintptr(1 << 16); delta+delta > delta; delta += delta {
k := LoadPointer(&x.i)
if k != x.i {
t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
@@ -731,7 +731,7 @@ func TestStorePointer(t *testing.T) {
x.before = magicptr
x.after = magicptr
v := unsafe.Pointer(uintptr(0))
- for delta := uintptr(1); delta+delta > delta; delta += delta {
+ for delta := uintptr(1 << 16); delta+delta > delta; delta += delta {
StorePointer(&x.i, unsafe.Pointer(v))
if x.i != v {
t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)