summary | refs | log | tree | commit | diff
path: root/src/runtime/malloc.go
diff options
context:
space:
mode:
author: Russ Cox <rsc@golang.org> 2014-11-20 11:48:08 -0500
committer: Russ Cox <rsc@golang.org> 2014-11-20 11:48:08 -0500
commit: 4a674f45d109048fcef7cd9f2b876e270625c8ed (patch)
tree: 3015eab80ef343dda34fb968843c8b2d28987f05 /src/runtime/malloc.go
parent: fbd764146d71d8929edadc4abd51e89932742000 (diff)
parent: 1ac4b754e58ba42944de03808fb96210b065e4c4 (diff)
download: go-4a674f45d109048fcef7cd9f2b876e270625c8ed.tar.gz
[dev.cc] all: merge default (e4ab8f908aac) into dev.cc
TBR=austin CC=golang-codereviews https://codereview.appspot.com/179040044
Diffstat (limited to 'src/runtime/malloc.go')
-rw-r--r-- src/runtime/malloc.go | 101
1 file changed, 40 insertions, 61 deletions
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 117044944..d73d1ba6a 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -4,9 +4,7 @@
package runtime
-import (
- "unsafe"
-)
+import "unsafe"
const (
debugMalloc = false
@@ -28,10 +26,11 @@ const (
maxGCMask = _MaxGCMask
bitsDead = _BitsDead
bitsPointer = _BitsPointer
+ bitsScalar = _BitsScalar
mSpanInUse = _MSpanInUse
- concurrentSweep = _ConcurrentSweep != 0
+ concurrentSweep = _ConcurrentSweep
)
// Page number (address>>pageShift)
@@ -56,7 +55,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
// This function must be atomic wrt GC, but for performance reasons
// we don't acquirem/releasem on fast path. The code below does not have
// split stack checks, so it can't be preempted by GC.
- // Functions like roundup/add are inlined. And onM/racemalloc are nosplit.
+ // Functions like roundup/add are inlined. And systemstack/racemalloc are nosplit.
// If debugMalloc = true, these assumptions are checked below.
if debugMalloc {
mp := acquirem()
@@ -142,10 +141,9 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
s = c.alloc[tinySizeClass]
v := s.freelist
if v == nil {
- mp := acquirem()
- mp.scalararg[0] = tinySizeClass
- onM(mcacheRefill_m)
- releasem(mp)
+ systemstack(func() {
+ mCache_Refill(c, tinySizeClass)
+ })
s = c.alloc[tinySizeClass]
v = s.freelist
}
@@ -173,10 +171,9 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
s = c.alloc[sizeclass]
v := s.freelist
if v == nil {
- mp := acquirem()
- mp.scalararg[0] = uintptr(sizeclass)
- onM(mcacheRefill_m)
- releasem(mp)
+ systemstack(func() {
+ mCache_Refill(c, int32(sizeclass))
+ })
s = c.alloc[sizeclass]
v = s.freelist
}
@@ -193,13 +190,10 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
}
c.local_cachealloc += intptr(size)
} else {
- mp := acquirem()
- mp.scalararg[0] = uintptr(size)
- mp.scalararg[1] = uintptr(flags)
- onM(largeAlloc_m)
- s = (*mspan)(mp.ptrarg[0])
- mp.ptrarg[0] = nil
- releasem(mp)
+ var s *mspan
+ systemstack(func() {
+ s = largeAlloc(size, uint32(flags))
+ })
x = unsafe.Pointer(uintptr(s.start << pageShift))
size = uintptr(s.elemsize)
}
@@ -251,22 +245,19 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
// into the GC bitmap. It's 7 times slower than copying
// from the pre-unrolled mask, but saves 1/16 of type size
// memory for the mask.
- mp := acquirem()
- mp.ptrarg[0] = x
- mp.ptrarg[1] = unsafe.Pointer(typ)
- mp.scalararg[0] = uintptr(size)
- mp.scalararg[1] = uintptr(size0)
- onM(unrollgcproginplace_m)
- releasem(mp)
+ systemstack(func() {
+ unrollgcproginplace_m(x, typ, size, size0)
+ })
goto marked
}
ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
- // Check whether the program is already unrolled.
- if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(typ)
- onM(unrollgcprog_m)
- releasem(mp)
+ // Check whether the program is already unrolled
+ // by checking if the unroll flag byte is set
+ maskword := uintptr(atomicloadp(unsafe.Pointer(ptrmask)))
+ if *(*uint8)(unsafe.Pointer(&maskword)) == 0 {
+ systemstack(func() {
+ unrollgcprog_m(typ)
+ })
}
ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
} else {
@@ -359,7 +350,7 @@ func newarray(typ *_type, n uintptr) unsafe.Pointer {
if typ.kind&kindNoPointers != 0 {
flags |= flagNoScan
}
- if int(n) < 0 || (typ.size > 0 && n > maxmem/uintptr(typ.size)) {
+ if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
panic("runtime: allocation size out of range")
}
return mallocgc(uintptr(typ.size)*n, typ, flags)
@@ -438,7 +429,7 @@ func gogc(force int32) {
mp = acquirem()
mp.gcing = 1
releasem(mp)
- onM(stoptheworld)
+ systemstack(stoptheworld)
if mp != acquirem() {
gothrow("gogc: rescheduled")
}
@@ -459,20 +450,16 @@ func gogc(force int32) {
startTime = nanotime()
}
// switch to g0, call gc, then switch back
- mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits
- mp.scalararg[1] = uintptr(startTime >> 32) // high 32 bits
- if force >= 2 {
- mp.scalararg[2] = 1 // eagersweep
- } else {
- mp.scalararg[2] = 0
- }
- onM(gc_m)
+ eagersweep := force >= 2
+ systemstack(func() {
+ gc_m(startTime, eagersweep)
+ })
}
// all done
mp.gcing = 0
semrelease(&worldsema)
- onM(starttheworld)
+ systemstack(starttheworld)
releasem(mp)
mp = nil
@@ -591,11 +578,10 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
f := (*eface)(unsafe.Pointer(&finalizer))
ftyp := f._type
if ftyp == nil {
- // switch to M stack and remove finalizer
- mp := acquirem()
- mp.ptrarg[0] = e.data
- onM(removeFinalizer_m)
- releasem(mp)
+ // switch to system stack and remove finalizer
+ systemstack(func() {
+ removefinalizer(e.data)
+ })
return
}
@@ -640,18 +626,11 @@ okarg:
// make sure we have a finalizer goroutine
createfing()
- // switch to M stack to add finalizer record
- mp := acquirem()
- mp.ptrarg[0] = f.data
- mp.ptrarg[1] = e.data
- mp.scalararg[0] = nret
- mp.ptrarg[2] = unsafe.Pointer(fint)
- mp.ptrarg[3] = unsafe.Pointer(ot)
- onM(setFinalizer_m)
- if mp.scalararg[0] != 1 {
- gothrow("runtime.SetFinalizer: finalizer already set")
- }
- releasem(mp)
+ systemstack(func() {
+ if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
+ gothrow("runtime.SetFinalizer: finalizer already set")
+ }
+ })
}
// round n up to a multiple of a. a must be a power of 2.