path: root/src/runtime/stack1.go
author    Rick Hudson <rlh@golang.org>  2014-11-20 12:08:13 -0500
committer Rick Hudson <rlh@golang.org>  2014-11-20 12:08:13 -0500
commit    1e210ecf449d3b518aa000891559321ce61a62da (patch)
tree      9b75ebdb8149e1586a00a77655aba21d9d97a333 /src/runtime/stack1.go
parent    fb002ba0ea389cd183fbee72a2a34fbbd6a89279 (diff)
download  go-1e210ecf449d3b518aa000891559321ce61a62da.tar.gz
[dev.garbage] runtime: Turn concurrent GC on by default. Avoid write barriers for GC internal structures such as free lists.

LGTM=rsc
R=rsc
CC=golang-codereviews, rsc
https://codereview.appspot.com/179000043
Diffstat (limited to 'src/runtime/stack1.go')
-rw-r--r--  src/runtime/stack1.go  44
1 file changed, 22 insertions(+), 22 deletions(-)
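
Why this conversion avoids write barriers: gclinkptr is declared with uintptr as its underlying type, so the compiler treats an assignment of one as a plain integer store and emits no write barrier, whereas a store through a *mlink field would get one. A minimal sketch of the type involved (this matches the shape of the runtime's definition at the time, but is written out here for illustration):

	// A gclinkptr refers to a node in a GC-internal linked list but is
	// opaque to the garbage collector: because its underlying type is
	// uintptr, assigning a gclinkptr never emits a write barrier.
	type gclinkptr uintptr

	// A gclink is the node header stored at the start of a free block.
	type gclink struct {
		next gclinkptr
	}

	// ptr recovers the dereferenceable form, used only transiently to
	// read or write the next field (as in x.ptr().next below).
	func (p gclinkptr) ptr() *gclink {
		return (*gclink)(unsafe.Pointer(p))
	}

The cost is that the collector can no longer see these links at all; that is acceptable for stack free lists because the spans they live in are managed manually and are never scanned as ordinary heap objects.
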
diff --git a/src/runtime/stack1.go b/src/runtime/stack1.go
index 963f4fa73..78bcccc41 100644
--- a/src/runtime/stack1.go
+++ b/src/runtime/stack1.go
@@ -58,7 +58,7 @@ func stackinit() {
 
 // Allocates a stack from the free pool.  Must be called with
 // stackpoolmu held.
-func stackpoolalloc(order uint8) *mlink {
+func stackpoolalloc(order uint8) gclinkptr {
 	list := &stackpool[order]
 	s := list.next
 	if s == list {
@@ -70,23 +70,23 @@ func stackpoolalloc(order uint8) *mlink {
 		if s.ref != 0 {
 			gothrow("bad ref")
 		}
-		if s.freelist != nil {
+		if s.freelist.ptr() != nil {
 			gothrow("bad freelist")
 		}
 		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
-			x := (*mlink)(unsafe.Pointer(uintptr(s.start)<<_PageShift + i))
-			x.next = s.freelist
+			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
+			x.ptr().next = s.freelist
 			s.freelist = x
 		}
 		mSpanList_Insert(list, s)
 	}
 	x := s.freelist
-	if x == nil {
+	if x.ptr() == nil {
 		gothrow("span has no free stacks")
 	}
-	s.freelist = x.next
+	s.freelist = x.ptr().next
 	s.ref++
-	if s.freelist == nil {
+	if s.freelist.ptr() == nil {
 		// all stacks in s are allocated.
 		mSpanList_Remove(s)
 	}
@@ -94,22 +94,22 @@ func stackpoolalloc(order uint8) *mlink {
 }
 
 // Adds stack x to the free pool.  Must be called with stackpoolmu held.
-func stackpoolfree(x *mlink, order uint8) {
+func stackpoolfree(x gclinkptr, order uint8) {
 	s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
 	if s.state != _MSpanStack {
 		gothrow("freeing stack not in a stack span")
 	}
-	if s.freelist == nil {
+	if s.freelist.ptr() == nil {
 		// s will now have a free stack
 		mSpanList_Insert(&stackpool[order], s)
 	}
-	x.next = s.freelist
+	x.ptr().next = s.freelist
 	s.freelist = x
 	s.ref--
 	if s.ref == 0 {
 		// span is completely free - return to heap
 		mSpanList_Remove(s)
-		s.freelist = nil
+		s.freelist = 0
 		mHeap_FreeStack(&mheap_, s)
 	}
 }
@@ -123,12 +123,12 @@ func stackcacherefill(c *mcache, order uint8) {
 
 	// Grab some stacks from the global cache.
 	// Grab half of the allowed capacity (to prevent thrashing).
-	var list *mlink
+	var list gclinkptr
 	var size uintptr
 	lock(&stackpoolmu)
 	for size < _StackCacheSize/2 {
 		x := stackpoolalloc(order)
-		x.next = list
+		x.ptr().next = list
 		list = x
 		size += _FixedStack << order
 	}
@@ -145,7 +145,7 @@ func stackcacherelease(c *mcache, order uint8) {
 	size := c.stackcache[order].size
 	lock(&stackpoolmu)
 	for size > _StackCacheSize/2 {
-		y := x.next
+		y := x.ptr().next
 		stackpoolfree(x, order)
 		x = y
 		size -= _FixedStack << order
@@ -162,12 +162,12 @@ func stackcache_clear(c *mcache) {
 	lock(&stackpoolmu)
 	for order := uint8(0); order < _NumStackOrders; order++ {
 		x := c.stackcache[order].list
-		for x != nil {
-			y := x.next
+		for x.ptr() != nil {
+			y := x.ptr().next
 			stackpoolfree(x, order)
 			x = y
 		}
-		c.stackcache[order].list = nil
+		c.stackcache[order].list = 0
 		c.stackcache[order].size = 0
 	}
 	unlock(&stackpoolmu)
@@ -207,7 +207,7 @@ func stackalloc(n uint32) stack {
 			order++
 			n2 >>= 1
 		}
-		var x *mlink
+		var x gclinkptr
 		c := thisg.m.mcache
 		if c == nil || thisg.m.gcing != 0 || thisg.m.helpgc != 0 {
 			// c == nil can happen in the guts of exitsyscall or
@@ -219,11 +219,11 @@ func stackalloc(n uint32) stack {
 			unlock(&stackpoolmu)
 		} else {
 			x = c.stackcache[order].list
-			if x == nil {
+			if x.ptr() == nil {
 				stackcacherefill(c, order)
 				x = c.stackcache[order].list
 			}
-			c.stackcache[order].list = x.next
+			c.stackcache[order].list = x.ptr().next
 			c.stackcache[order].size -= uintptr(n)
 		}
 		v = (unsafe.Pointer)(x)
@@ -270,7 +270,7 @@ func stackfree(stk stack) {
 			order++
 			n2 >>= 1
 		}
-		x := (*mlink)(v)
+		x := gclinkptr(v)
 		c := gp.m.mcache
 		if c == nil || gp.m.gcing != 0 || gp.m.helpgc != 0 {
 			lock(&stackpoolmu)
@@ -280,7 +280,7 @@ func stackfree(stk stack) {
 			if c.stackcache[order].size >= _StackCacheSize {
 				stackcacherelease(c, order)
 			}
-			x.next = c.stackcache[order].list
+			x.ptr().next = c.stackcache[order].list
 			c.stackcache[order].list = x
 			c.stackcache[order].size += n
 		}