Diffstat (limited to 'libgo/go/runtime/export_test.go')
-rw-r--r--  libgo/go/runtime/export_test.go  41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index 7f4811c5a0c..d919e0486b2 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -292,6 +292,7 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		slow.TotalAlloc = 0
 		slow.Mallocs = 0
 		slow.Frees = 0
+		slow.HeapReleased = 0
 		var bySize [_NumSizeClasses]struct {
 			Mallocs, Frees uint64
 		}
@@ -331,6 +332,10 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 			slow.BySize[i].Frees = bySize[i].Frees
 		}
 
+		for i := mheap_.scav.start(); i.valid(); i = i.next() {
+			slow.HeapReleased += uint64(i.span().released())
+		}
+
 		getg().m.mallocing--
 	})
 
@@ -454,3 +459,39 @@ func stackOverflow(x *byte) {
 	var buf [256]byte
 	stackOverflow(&buf[0])
 }
+
+func MapTombstoneCheck(m map[int]int) {
+	// Make sure emptyOne and emptyRest are distributed correctly.
+	// We should have a series of filled and emptyOne cells, followed by
+	// a series of emptyRest cells.
+	h := *(**hmap)(unsafe.Pointer(&m))
+	i := interface{}(m)
+	t := *(**maptype)(unsafe.Pointer(&i))
+
+	for x := 0; x < 1<<h.B; x++ {
+		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize)))
+		n := 0
+		for b := b0; b != nil; b = b.overflow(t) {
+			for i := 0; i < bucketCnt; i++ {
+				if b.tophash[i] != emptyRest {
+					n++
+				}
+			}
+		}
+		k := 0
+		for b := b0; b != nil; b = b.overflow(t) {
+			for i := 0; i < bucketCnt; i++ {
+				if k < n && b.tophash[i] == emptyRest {
+					panic("early emptyRest")
+				}
+				if k >= n && b.tophash[i] != emptyRest {
+					panic("late non-emptyRest")
+				}
+				if k == n-1 && b.tophash[i] == emptyOne {
+					panic("last non-emptyRest entry is emptyOne")
+				}
+				k++
+			}
+		}
+	}
+}
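
The first two hunks make ReadMemStatsSlow recompute HeapReleased from scratch: the field is zeroed along with the other recomputed statistics, then rebuilt by walking the mheap_.scav treap of scavenged spans and summing each span's released bytes. The point of the export is to let the runtime's test suite compare this recomputed value against the runtime's own accounting. The sketch below shows the shape of such a cross-check; the test name is hypothetical, and the code only builds as part of the runtime's own tests, where export_test.go is compiled into package runtime:

package runtime_test

import (
	"reflect"
	"runtime"
	"testing"
)

// Hypothetical consistency check: "base" is the runtime's live MemStats
// accounting, "slow" is the same data recomputed from heap structures by
// ReadMemStatsSlow, which now includes HeapReleased. Any field mismatch,
// including HeapReleased, fails the test.
func TestMemStatsConsistencySketch(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if !reflect.DeepEqual(base, slow) {
		t.Errorf("MemStats mismatch:\nbase: %+v\nslow: %+v", base, slow)
	}
}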
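
The third hunk adds MapTombstoneCheck, which walks every bucket and overflow bucket of a map[int]int and panics unless all emptyRest markers come after the filled and emptyOne cells, and unless the last cell before the emptyRest run is not itself left as emptyOne. A caller only needs to build a map, delete keys in some pattern so tombstones appear, and invoke the check. A minimal sketch (hypothetical test name, again only buildable inside the runtime's own test suite):

package runtime_test

import (
	"runtime"
	"testing"
)

// Hypothetical exercise of the new export: create tombstones by deleting
// keys in different patterns and let MapTombstoneCheck panic if the
// emptyOne/emptyRest layout in the buckets is ever wrong.
func TestMapTombstonesSketch(t *testing.T) {
	m := map[int]int{}
	const n = 1000

	// Fill the map so it grows to multiple buckets.
	for i := 0; i < n; i++ {
		m[i] = i
	}
	runtime.MapTombstoneCheck(m)

	// Delete every other key, scattering tombstones through the buckets.
	for i := 0; i < n; i += 2 {
		delete(m, i)
	}
	runtime.MapTombstoneCheck(m)

	// Delete the rest; cells should collapse to emptyRest.
	for i := 1; i < n; i += 2 {
		delete(m, i)
	}
	runtime.MapTombstoneCheck(m)
}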