Diffstat (limited to 'libgo/go/runtime/malloc.go')
-rw-r--r--  libgo/go/runtime/malloc.go | 130
1 file changed, 96 insertions(+), 34 deletions(-)
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index 7005e53a2f3..e5ab8dedafa 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -101,6 +101,8 @@
package runtime
import (
+ "internal/goarch"
+ "internal/goos"
"runtime/internal/atomic"
"runtime/internal/math"
"runtime/internal/sys"
@@ -160,7 +162,7 @@ const (
// windows/32 | 4KB | 3
// windows/64 | 8KB | 2
// plan9 | 4KB | 3
- _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
+ _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
// heapAddrBits is the number of bits in a heap address. On
// amd64, addresses are sign-extended beyond heapAddrBits. On
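For reference, a quick sketch (not part of the patch) of how the new _NumStackOrders expression evaluates on a few platforms, with plain integers standing in for the internal goarch/goos constants, which are always 0 or 1:

func numStackOrders(ptrSize, isWindows, isPlan9 int) int {
	// Mirrors: 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
	return 4 - ptrSize/4*isWindows - 1*isPlan9
}

// numStackOrders(8, 0, 0) == 4 // linux/amd64 and most other targets
// numStackOrders(4, 1, 0) == 3 // windows/386
// numStackOrders(8, 1, 0) == 2 // windows/amd64
// numStackOrders(4, 0, 1) == 3 // plan9/386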
@@ -209,15 +211,21 @@ const (
// we further limit it to 31 bits.
//
// On ios/arm64, although 64-bit pointers are presumably
- // available, pointers are truncated to 33 bits. Furthermore,
- // only the top 4 GiB of the address space are actually available
- // to the application, but we allow the whole 33 bits anyway for
- // simplicity.
- // TODO(mknyszek): Consider limiting it to 32 bits and using
- // arenaBaseOffset to offset into the top 4 GiB.
+ // available, pointers are truncated to 33 bits in iOS <14.
+ // Furthermore, only the top 4 GiB of the address space are
+ // actually available to the application. In iOS >=14, more
+ // of the address space is available, and the OS can now
+ // provide addresses outside of those 33 bits. Pick 40 bits
+ // as a reasonable balance between address space usage by the
+ // page allocator, and flexibility for what mmap'd regions
+ // we'll accept for the heap. We can't just move to the full
+ // 48 bits because this uses too much address space for older
+ // iOS versions.
+ // TODO(mknyszek): Once iOS <14 is deprecated, promote ios/arm64
+ // to a 48-bit address space like every other arm64 platform.
//
// WebAssembly currently has a limit of 4GB linear memory.
- heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosIos*sys.GoarchArm64
+ heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 40*goos.IsIos*goarch.IsArm64
// maxAlloc is the maximum size of an allocation. On 64-bit,
// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
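A similar sketch (illustration only, plain ints in place of the goarch/goos flags) showing how the new heapAddrBits expression works out, including the ios/arm64 move from 33 to 40 bits:

func heapAddrBits(is64bit, isWasm, isIos, isArm64, isMips, isMipsle int) int {
	return (is64bit*(1-isWasm)*(1-isIos*isArm64))*48 +
		(1-is64bit+isWasm)*(32-(isMips+isMipsle)) +
		40*isIos*isArm64
}

// heapAddrBits(1, 0, 0, 0, 0, 0) == 48 // linux/amd64 and other 64-bit targets
// heapAddrBits(1, 0, 1, 1, 0, 0) == 40 // ios/arm64 (previously 33)
// heapAddrBits(1, 1, 0, 0, 0, 0) == 32 // js/wasm
// heapAddrBits(0, 0, 0, 0, 1, 0) == 31 // linux/mips (32-bit)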
@@ -258,10 +266,10 @@ const (
// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
// prefer using heapArenaBytes where possible (we need the
// constant to compute some other constants).
- logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm + (2+20)*sys.GoosIos*sys.GoarchArm64
+ logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
// heapArenaBitmapBytes is the size of each heap arena's bitmap.
- heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
+ heapArenaBitmapBytes = heapArenaBytes / (goarch.PtrSize * 8 / 2)
pagesPerArena = heapArenaBytes / pageSize
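Worked numbers for these constants, assuming linux/amd64 (logHeapArenaBytes = 6+20, 8 KiB pages, 8-byte pointers); the bitmap spends 2 bits per pointer-sized word, so one bitmap byte covers 32 heap bytes:

const (
	exampleHeapArenaBytes       = 1 << (6 + 20)                       // 64 MiB
	exampleHeapArenaBitmapBytes = exampleHeapArenaBytes / (8 * 8 / 2) // 2 MiB
	examplePagesPerArena        = exampleHeapArenaBytes / (8 << 10)   // 8192 pages
)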
@@ -278,7 +286,7 @@ const (
// We use the L1 map on 64-bit Windows because the arena size
// is small, but the address space is still 48 bits, and
// there's a high cost to having a large L2.
- arenaL1Bits = 6 * (_64bit * sys.GoosWindows)
+ arenaL1Bits = 6 * (_64bit * goos.IsWindows)
// arenaL2Bits is the number of bits of the arena number
// covered by the second level arena index.
@@ -313,7 +321,7 @@ const (
//
// On other platforms, the user address space is contiguous
// and starts at 0, so no offset is necessary.
- arenaBaseOffset = 0xffff800000000000*sys.GoarchAmd64 + 0x0a00000000000000*sys.GoosAix*sys.GoarchPpc64
+ arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
// A typed version of this constant that will make it into DWARF (for viewcore).
arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
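A rough sketch (not the runtime's actual indexing code) of what the amd64 offset accomplishes: subtracting it, with unsigned wraparound, folds the two sign-extended canonical halves of the address space into one contiguous [0, 1<<48) range:

func linearize(p uint64) uint64 {
	const arenaBaseOffset = 0xffff800000000000
	return p - arenaBaseOffset // wraps modulo 2^64
}

// linearize(0xffff800000000000) == 0x0000000000000000 // start of upper half
// linearize(0xffffffffffffffff) == 0x00007fffffffffff // end of upper half
// linearize(0x0000000000000000) == 0x0000800000000000 // start of lower half
// linearize(0x00007fffffffffff) == 0x0000ffffffffffff // end of lower half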
@@ -430,9 +438,6 @@ func mallocinit() {
throw("bad TinySizeClass")
}
- // Not used for gccgo.
- // testdefersizes()
-
if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
// heapBits expects modular arithmetic on bitmap
// addresses to work.
@@ -496,7 +501,7 @@ func mallocinit() {
lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
// Create initial arena growth hints.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
// On a 64-bit machine, we pick the following hints
// because:
//
@@ -743,7 +748,7 @@ mapped:
l2 := h.arenas[ri.l1()]
if l2 == nil {
// Allocate an L2 arena map.
- l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
+ l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), goarch.PtrSize, nil))
if l2 == nil {
throw("out of memory allocating heap arena map")
}
@@ -754,9 +759,9 @@ mapped:
throw("arena already initialized")
}
var r *heapArena
- r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
+ r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
if r == nil {
- r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
+ r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
if r == nil {
throw("out of memory allocating heap arena metadata")
}
@@ -764,16 +769,16 @@ mapped:
// Add the arena to the arenas list.
if len(h.allArenas) == cap(h.allArenas) {
- size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
+ size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
if size == 0 {
size = physPageSize
}
- newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
+ newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
if newArray == nil {
throw("out of memory allocating allArenas")
}
oldSlice := h.allArenas
- *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
+ *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
copy(h.allArenas, oldSlice)
// Do not free the old backing array because
// there may be concurrent readers. Since we
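The growth step above follows the usual double-and-copy pattern, except that the old backing array is deliberately leaked. A minimal sketch with ordinary heap slices in place of persistentalloc/notInHeap memory (names are illustrative):

func appendArenaIdx(all []uint, idx uint) []uint {
	if len(all) == cap(all) {
		newCap := 2 * cap(all)
		if newCap == 0 {
			newCap = 8 // stand-in for "one physical page worth of entries"
		}
		grown := make([]uint, len(all), newCap)
		copy(grown, all)
		// The runtime does not free the old backing array: concurrent
		// readers may still hold a slice header pointing at it.
		all = grown
	}
	return append(all, idx)
}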
@@ -919,6 +924,14 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if size == 0 {
return unsafe.Pointer(&zerobase)
}
+ userSize := size
+ if asanenabled {
+ // As in the ASan runtime library, where malloc() allocates extra memory,
+ // the redzone, around the user-requested memory region and marks the
+ // redzones as unaddressable, we perform the same operation in Go to
+ // detect overflows and underflows.
+ size += computeRZlog(size)
+ }
if debug.malloc {
if debug.sbrk != 0 {
@@ -993,8 +1006,8 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
mp.mallocing = 1
shouldhelpgc := false
- dataSize := size
- c := getMCache()
+ dataSize := userSize
+ c := getMCache(mp)
if c == nil {
throw("mallocgc called without a P or outside bootstrapping")
}
@@ -1002,8 +1015,8 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
var x unsafe.Pointer
noscan := typ == nil || typ.ptrdata == 0
// In some cases block zeroing can profitably (for latency reduction purposes)
- // be delayed till preemption is possible; isZeroed tracks that state.
- isZeroed := true
+ // be delayed till preemption is possible; delayedZeroing tracks that state.
+ delayedZeroing := false
if size <= maxSmallSize {
if noscan && size < maxTinySize {
// Tiny allocator.
@@ -1039,7 +1052,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Align tiny pointer for required (conservative) alignment.
if size&7 == 0 {
off = alignUp(off, 8)
- } else if sys.PtrSize == 4 && size == 12 {
+ } else if goarch.PtrSize == 4 && size == 12 {
// Conservatively align 12-byte objects to 8 bytes on 32-bit
// systems so that objects whose first field is a 64-bit
// value is aligned to 8 bytes and does not cause a fault on
@@ -1104,11 +1117,23 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
shouldhelpgc = true
// For large allocations, keep track of zeroed state so that
// bulk zeroing can happen later in a preemptible context.
- span, isZeroed = c.allocLarge(size, needzero && !noscan, noscan)
+ span = c.allocLarge(size, noscan)
span.freeindex = 1
span.allocCount = 1
- x = unsafe.Pointer(span.base())
size = span.elemsize
+ x = unsafe.Pointer(span.base())
+ if needzero && span.needzero != 0 {
+ if noscan {
+ delayedZeroing = true
+ } else {
+ memclrNoHeapPointers(x, size)
+ // We've in theory cleared almost the whole span here,
+ // and could take the extra step of actually clearing
+ // the whole thing. However, don't. Any GC bits for the
+ // uncleared parts will be zero, and it's just going to
+ // be needzero = 1 once freed anyway.
+ }
+ }
}
var scanSize uintptr
@@ -1151,6 +1176,17 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
msanmalloc(x, size)
}
+ if asanenabled {
+ // We should only read/write the memory up to the size asked for by the user.
+ // The rest of the allocated memory should be poisoned, so that we can report
+ // errors when accessing poisoned memory.
+ // The allocated memory is larger than the requested userSize: it also
+ // includes the redzone and some other padding bytes.
+ rzBeg := unsafe.Add(x, userSize)
+ asanpoison(rzBeg, size-userSize)
+ asanunpoison(x, userSize)
+ }
+
if rate := MemProfileRate; rate > 0 {
// Note cache c only valid while m acquired; see #47302
if rate != 1 && size < c.nextSample {
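For clarity, the memory layout the poison/unpoison pair above establishes (illustration only):

//   x                     x+userSize                x+size
//   |<---- user data ---->|<-- redzone + padding -->|
//          unpoisoned              poisoned
//
// asanunpoison(x, userSize) makes exactly the requested bytes accessible;
// asanpoison(x+userSize, size-userSize) flags the tail, so any access past
// the user's requested length is reported by ASan.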
@@ -1164,7 +1200,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Pointerfree data can be zeroed late in a context where preemption can occur.
// x will keep the memory alive.
- if !isZeroed && needzero {
+ if delayedZeroing {
+ if !noscan {
+ throw("delayed zeroing on data that may contain pointers")
+ }
memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
}
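A conceptual sketch of a chunked, preemptible clear (the chunk size and the preemption remark are illustrative; this is not the runtime's memclrNoHeapPointersChunked):

func clearChunked(b []byte) {
	const chunkBytes = 256 << 10 // clear 256 KiB at a time
	for len(b) > 0 {
		n := chunkBytes
		if n > len(b) {
			n = len(b)
		}
		for i := range b[:n] {
			b[i] = 0
		}
		b = b[n:]
		// In the runtime, a preemption point sits between chunks so that a
		// very large clear cannot delay stop-the-world or goroutine preemption.
	}
}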
@@ -1281,7 +1320,7 @@ func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
}
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
- c := getMCache()
+ c := getMCache(mp)
if c == nil {
throw("profilealloc called without a P or outside bootstrapping")
}
@@ -1335,7 +1374,7 @@ func fastexprand(mean int) int32 {
// x = -log_e(q) * mean
// x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency
const randomBitCount = 26
- q := fastrand()%(1<<randomBitCount) + 1
+ q := fastrandn(1<<randomBitCount) + 1
qlog := fastlog2(float64(q)) - randomBitCount
if qlog > 0 {
qlog = 0
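A hedged sketch of the sampling math in user-level Go (math/rand and math.Log2 stand in for the runtime's fastrandn and fastlog2; the package name is illustrative, and this is not the runtime implementation):

package profsample

import (
	"math"
	"math/rand"
)

// nextSample returns an exponentially distributed value with the given mean,
// mirroring x = log_2(q) * (-log_e(2)) * mean for q/2^26 uniform in (0, 1].
func nextSample(mean float64) float64 {
	const randomBitCount = 26
	q := float64(rand.Intn(1<<randomBitCount) + 1) // uniform in [1, 2^26]
	qlog := math.Log2(q) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -log_e(2)
	return qlog * minusLog2 * mean
}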
@@ -1353,7 +1392,7 @@ func nextSampleNoFP() uintptr {
rate = 0x3fffffff
}
if rate != 0 {
- return uintptr(fastrand() % uint32(2*rate))
+ return uintptr(fastrandn(uint32(2 * rate)))
}
return 0
}
@@ -1444,7 +1483,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
break
}
}
- persistent.off = alignUp(sys.PtrSize, align)
+ persistent.off = alignUp(goarch.PtrSize, align)
}
p := persistent.base.add(persistent.off)
persistent.off += size
@@ -1533,3 +1572,26 @@ type notInHeap struct{}
func (p *notInHeap) add(bytes uintptr) *notInHeap {
return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}
+
+// computeRZlog computes the size of the redzone for a given allocation size.
+// It mirrors the redzone sizing used by the compiler-rt ASan implementation.
+func computeRZlog(userSize uintptr) uintptr {
+ switch {
+ case userSize <= (64 - 16):
+ return 16 << 0
+ case userSize <= (128 - 32):
+ return 16 << 1
+ case userSize <= (512 - 64):
+ return 16 << 2
+ case userSize <= (4096 - 128):
+ return 16 << 3
+ case userSize <= (1<<14)-256:
+ return 16 << 4
+ case userSize <= (1<<15)-512:
+ return 16 << 5
+ case userSize <= (1<<16)-1024:
+ return 16 << 6
+ default:
+ return 16 << 7
+ }
+}
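A hypothetical usage note (not part of the patch) tying this back to the size adjustment at the top of mallocgc:

//   userSize := uintptr(100)
//   rz := computeRZlog(userSize) // 64: 100 > 128-32 but <= 512-64
//   size := userSize + rz        // 164; the allocator then rounds this up to a size class
//
// Other sample values: computeRZlog(32) == 16, computeRZlog(8192) == 256.
// After allocation, bytes [userSize, size) are poisoned and [0, userSize) is
// unpoisoned, so any access past the requested length trips ASan.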