Diffstat (limited to 'libgo/go/runtime/mcentral.go')
-rw-r--r--  libgo/go/runtime/mcentral.go | 74
1 file changed, 49 insertions(+), 25 deletions(-)
diff --git a/libgo/go/runtime/mcentral.go b/libgo/go/runtime/mcentral.go
index 50a4791e8e6..0196ba44c5d 100644
--- a/libgo/go/runtime/mcentral.go
+++ b/libgo/go/runtime/mcentral.go
@@ -6,8 +6,8 @@
//
// See malloc.go for an overview.
//
-// The MCentral doesn't actually contain the list of free objects; the MSpan does.
-// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
+// The mcentral doesn't actually contain the list of free objects; the mspan does.
+// Each mcentral is two lists of mspans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
package runtime
@@ -36,7 +36,7 @@ func (c *mcentral) init(spc spanClass) {
c.empty.init()
}
-// Allocate a span to use in an MCache.
+// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
// Deduct credit for this span allocation and sweep if necessary.
spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
@@ -126,8 +126,7 @@ havespan:
if trace.enabled && !traceDone {
traceGCSweepDone()
}
- cap := int32((s.npages << _PageShift) / s.elemsize)
- n := cap - int32(s.allocCount)
+ n := int(s.nelems) - int(s.allocCount)
if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
throw("span has no free objects")
}
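
This hunk swaps the recomputed capacity for s.nelems, the object count the runtime records once when a span is initialized, so cacheSpan no longer re-derives it from npages and elemsize on every call. A rough, self-contained sketch of the relationship, using illustrative names that mirror mspan fields but are not the runtime's code:

package main

import "fmt"

// Illustrative only; not the runtime's types.
type span struct {
	npages     uintptr
	elemsize   uintptr
	nelems     uintptr // capacity, computed once when the span is set up
	allocCount uint16
}

const pageShift = 13 // assumes 8 KiB pages, as on most platforms

func newSpan(npages, elemsize uintptr) *span {
	return &span{
		npages:   npages,
		elemsize: elemsize,
		// What the deleted lines recomputed on every cacheSpan call:
		nelems: (npages << pageShift) / elemsize,
	}
}

func main() {
	s := newSpan(1, 64)
	s.allocCount = 100
	// The new expression: free slots = cached capacity - allocated objects.
	n := int(s.nelems) - int(s.allocCount)
	fmt.Println(n) // 28
}
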
@@ -144,7 +143,6 @@ havespan:
// heap_live changed.
gcController.revise()
}
- s.incache = true
freeByteBase := s.freeindex &^ (64 - 1)
whichByte := freeByteBase / 8
// Init alloc bits cache.
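
The deleted s.incache store sits next to the alloc-bits cache setup, whose bit arithmetic is easy to misread: freeindex is rounded down to a 64-object boundary (the allocCache covers 64 objects) and then converted into a byte offset into allocBits, at 8 objects per byte. A tiny standalone illustration with a hypothetical freeindex value:

package main

import "fmt"

func main() {
	// Hypothetical free index into a span's objects.
	freeindex := uintptr(200)
	freeByteBase := freeindex &^ (64 - 1) // 192: start of the 64-object window
	whichByte := freeByteBase / 8         // 24: byte offset into allocBits
	fmt.Println(freeByteBase, whichByte)
}
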
@@ -157,30 +155,56 @@ havespan:
return s
}
-// Return span from an MCache.
+// Return span from an mcache.
func (c *mcentral) uncacheSpan(s *mspan) {
- lock(&c.lock)
-
- s.incache = false
-
if s.allocCount == 0 {
throw("uncaching span but s.allocCount == 0")
}
- cap := int32((s.npages << _PageShift) / s.elemsize)
- n := cap - int32(s.allocCount)
+ sg := mheap_.sweepgen
+ stale := s.sweepgen == sg+1
+ if stale {
+ // Span was cached before sweep began. It's our
+ // responsibility to sweep it.
+ //
+ // Set sweepgen to indicate it's not cached but needs
+ // sweeping and can't be allocated from. sweep will
+ // set s.sweepgen to indicate s is swept.
+ atomic.Store(&s.sweepgen, sg-1)
+ } else {
+ // Indicate that s is no longer cached.
+ atomic.Store(&s.sweepgen, sg)
+ }
+
+ n := int(s.nelems) - int(s.allocCount)
if n > 0 {
- c.empty.remove(s)
- c.nonempty.insert(s)
- // mCentral_CacheSpan conservatively counted
- // unallocated slots in heap_live. Undo this.
- atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
// cacheSpan updated alloc assuming all objects on s
// were going to be allocated. Adjust for any that
- // weren't.
+ // weren't. We must do this before potentially
+ // sweeping the span.
atomic.Xadd64(&c.nmalloc, -int64(n))
+
+ lock(&c.lock)
+ c.empty.remove(s)
+ c.nonempty.insert(s)
+ if !stale {
+ // mCentral_CacheSpan conservatively counted
+ // unallocated slots in heap_live. Undo this.
+ //
+ // If this span was cached before sweep, then
+ // heap_live was totally recomputed since
+ // caching this span, so we don't do this for
+ // stale spans.
+ atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
+ }
+ unlock(&c.lock)
+ }
+
+ if stale {
+ // Now that s is in the right mcentral list, we can
+ // sweep it.
+ s.sweep(false)
}
- unlock(&c.lock)
}
// freeSpan updates c and s after sweeping s.
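
This hunk is where the removed s.incache flag is effectively folded into the span's sweepgen: relative to the heap's current sweepgen (sg), sg+1 means the span was cached before the current sweep cycle began and still needs sweeping, while sg+3 means it was swept and then cached, following the state table documented in the runtime's mheap.go. A minimal sketch of the decision uncacheSpan now makes, with illustrative names and simplified types (not the runtime's code):

package main

import "fmt"

// uncache mirrors the branch added to uncacheSpan: a stale cached span
// (sg+1) is marked as being swept (sg-1) and must be swept by the caller;
// any other cached span simply drops back to "swept and ready" (sg).
func uncache(spanGen, sg uint32) (newGen uint32, mustSweep bool) {
	if spanGen == sg+1 {
		// Cached before this sweep cycle began.
		return sg - 1, true
	}
	return sg, false
}

func main() {
	sg := uint32(100) // hypothetical current mheap_.sweepgen
	fmt.Println(uncache(sg+1, sg)) // 99 true: caller sweeps the span itself
	fmt.Println(uncache(sg+3, sg)) // 100 false: already swept this cycle
}

Demoting the stale span to sg-1 lets the caller sweep it after the list manipulation, outside c.lock, which is why the diff moves the lock/unlock pair inside the n > 0 branch and calls s.sweep(false) only once the span sits on the right mcentral list.
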
@@ -188,17 +212,17 @@ func (c *mcentral) uncacheSpan(s *mspan) {
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
-// freeSpan returns true if s was returned to the heap.
+// freeSpan reports whether s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
- if s.incache {
+ if sg := mheap_.sweepgen; s.sweepgen == sg+1 || s.sweepgen == sg+3 {
throw("freeSpan given cached span")
}
s.needzero = 1
if preserve {
- // preserve is set only when called from MCentral_CacheSpan above,
+ // preserve is set only when called from (un)cacheSpan above,
// the span must be in the empty list.
if !s.inList() {
throw("can't preserve unlinked span")
@@ -216,7 +240,7 @@ func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
}
// delay updating sweepgen until here. This is the signal that
- // the span may be used in an MCache, so it must come after the
+ // the span may be used in an mcache, so it must come after the
// linked list operations above (actually, just after the
// lock of c above.)
atomic.Store(&s.sweepgen, mheap_.sweepgen)
@@ -228,7 +252,7 @@ func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
c.nonempty.remove(s)
unlock(&c.lock)
- mheap_.freeSpan(s, 0)
+ mheap_.freeSpan(s, false)
return true
}
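
The freeSpan guard in the later hunks replaces the old s.incache check with the same sweepgen encoding: a span still owned by an mcache sits at sg+1 or sg+3 relative to mheap_.sweepgen, so either value means freeSpan was handed a cached span and should throw. A small standalone sketch of that predicate (spanIsCached is an illustrative name, not a runtime function):

package main

import "fmt"

// spanIsCached mirrors the condition added to freeSpan: under the
// sweepgen encoding, both sg+1 (cached before sweep began) and sg+3
// (swept, then cached) mean the span currently belongs to an mcache.
func spanIsCached(spanGen, sg uint32) bool {
	return spanGen == sg+1 || spanGen == sg+3
}

func main() {
	sg := uint32(100) // hypothetical mheap_.sweepgen
	fmt.Println(spanIsCached(sg+1, sg)) // true: freeSpan would throw
	fmt.Println(spanIsCached(sg, sg))   // false: swept, uncached span
}
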