| author | Russ Cox <rsc@golang.org> | 2014-08-27 21:59:49 -0400 |
|---|---|---|
| committer | Russ Cox <rsc@golang.org> | 2014-08-27 21:59:49 -0400 |
| commit | 3ed5cd831cec9e6a4c8004cbd65a3ba0b40be26c (patch) | |
| tree | 54ab4c93040b82f88859c09f1135c85c2e224d95 /src/pkg/runtime | |
| parent | 6cf8ddd13fefdb19ee1cd9ee2e57cda6ac22f4d9 (diff) | |
| download | go-3ed5cd831cec9e6a4c8004cbd65a3ba0b40be26c.tar.gz | |
cmd/cc, runtime: preserve C runtime type names in generated Go
uintptr or uint64 in the runtime C were turning into uint in the Go,
bool was turning into uint8, and so on. Fix that.
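For context, a minimal standalone sketch (not part of this CL) of why the generated type name matters: `uint` and `uintptr` are distinct Go types even when they have the same width, so a field that the generated Go declared as `uint` could not take a `uintptr` without an explicit conversion (compare the `mp.scalararg[0] = uint(...)` → `uintptr(...)` changes in the diff below).

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var p uintptr = 0x1000

	// var u uint = p // does not compile: cannot use p (type uintptr) as type uint

	u := uint(p) // explicit conversion is required even though the sizes match
	fmt.Println(u, unsafe.Sizeof(u) == unsafe.Sizeof(p))
}
```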
Also delete Go wrappers for C functions.
The C functions can be called directly now
(but still eventually need to be converted to Go).
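A rough sketch of the wrapper removal, with assumed stand-ins (the real runtime `cas` is implemented in assembly; `sync/atomic` only mimics it here): the Go-named shims such as `gocas` merely forwarded to the underlying C/assembly functions, so once those functions are declared directly in Go, callers can use them and the shims can be deleted.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// gocas mimics the kind of forwarding wrapper this CL deletes; in the real
// runtime the underlying cas is assembly, so CompareAndSwapUint32 is only a
// stand-in in this sketch.
func gocas(p *uint32, old, new uint32) bool {
	return atomic.CompareAndSwapUint32(p, old, new)
}

func main() {
	var flags uint32

	// Before: callers went through the wrapper.
	fmt.Println(gocas(&flags, 0, 1))

	// After: callers invoke the implementation directly (cf. gocas -> cas
	// in hashmap.go and chan.go below), and the wrapper is deleted.
	fmt.Println(atomic.CompareAndSwapUint32(&flags, 1, 2))
}
```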
LGTM=bradfitz, minux, iant
R=golang-codereviews, bradfitz, iant, minux
CC=golang-codereviews, khr, r
https://codereview.appspot.com/138740043
Diffstat (limited to 'src/pkg/runtime')
32 files changed, 233 insertions, 297 deletions
diff --git a/src/pkg/runtime/arch_386.go b/src/pkg/runtime/arch_386.go index 32eafb549..287b67e27 100644 --- a/src/pkg/runtime/arch_386.go +++ b/src/pkg/runtime/arch_386.go @@ -7,3 +7,6 @@ package runtime const ( cacheLineSize = 64 ) + +type uintreg uint32 +type intptr int32 // TODO(rsc): remove diff --git a/src/pkg/runtime/arch_amd64.go b/src/pkg/runtime/arch_amd64.go index 32eafb549..fe60c7066 100644 --- a/src/pkg/runtime/arch_amd64.go +++ b/src/pkg/runtime/arch_amd64.go @@ -7,3 +7,6 @@ package runtime const ( cacheLineSize = 64 ) + +type uintreg uint64 +type intptr int64 // TODO(rsc): remove diff --git a/src/pkg/runtime/arch_amd64p32.go b/src/pkg/runtime/arch_amd64p32.go index 32eafb549..90766b404 100644 --- a/src/pkg/runtime/arch_amd64p32.go +++ b/src/pkg/runtime/arch_amd64p32.go @@ -7,3 +7,6 @@ package runtime const ( cacheLineSize = 64 ) + +type uintreg uint64 +type intptr int32 // TODO(rsc): remove diff --git a/src/pkg/runtime/arch_arm.go b/src/pkg/runtime/arch_arm.go index 7faeb9404..23f2711f6 100644 --- a/src/pkg/runtime/arch_arm.go +++ b/src/pkg/runtime/arch_arm.go @@ -7,3 +7,6 @@ package runtime const ( cacheLineSize = 32 ) + +type uintreg uint32 +type intptr int32 // TODO(rsc): remove diff --git a/src/pkg/runtime/asm_386.s b/src/pkg/runtime/asm_386.s index d52eca386..25f92d454 100644 --- a/src/pkg/runtime/asm_386.s +++ b/src/pkg/runtime/asm_386.s @@ -502,6 +502,9 @@ TEXT runtime·cas(SB), NOSPLIT, $0-13 MOVB AX, ret+12(FP) RET +TEXT runtime·casuintptr(SB), NOSPLIT, $0-13 + JMP runtime·cas(SB) + // bool runtime·cas64(uint64 *val, uint64 old, uint64 new) // Atomically: // if(*val == *old){ diff --git a/src/pkg/runtime/asm_amd64.s b/src/pkg/runtime/asm_amd64.s index 70e222521..7e3ff1c55 100644 --- a/src/pkg/runtime/asm_amd64.s +++ b/src/pkg/runtime/asm_amd64.s @@ -620,6 +620,9 @@ cas64_fail: MOVL $0, AX MOVB AX, ret+24(FP) RET + +TEXT runtime·casuintptr(SB), NOSPLIT, $0-25 + JMP runtime·cas64(SB) // bool casp(void **val, void *old, void *new) // Atomically: diff --git a/src/pkg/runtime/asm_amd64p32.s b/src/pkg/runtime/asm_amd64p32.s index 83faff281..e08df377e 100644 --- a/src/pkg/runtime/asm_amd64p32.s +++ b/src/pkg/runtime/asm_amd64p32.s @@ -275,7 +275,7 @@ TEXT runtime·newstackcall(SB), NOSPLIT, $0-12 // restore when returning from f. 
MOVL 0(SP), AX // our caller's PC MOVL AX, (m_morebuf+gobuf_pc)(BX) - LEAL addr+4(FP), AX // our caller's SP + LEAL fn+0(FP), AX // our caller's SP MOVL AX, (m_morebuf+gobuf_sp)(BX) MOVL g(CX), AX MOVL AX, (m_morebuf+gobuf_g)(BX) @@ -562,6 +562,9 @@ TEXT runtime·cas(SB), NOSPLIT, $0-17 MOVB AX, ret+16(FP) RET +TEXT runtime·casuintptr(SB), NOSPLIT, $0-17 + JMP runtime·cas(SB) + // bool runtime·cas64(uint64 *val, uint64 old, uint64 new) // Atomically: // if(*val == *old){ diff --git a/src/pkg/runtime/asm_arm.s b/src/pkg/runtime/asm_arm.s index 3ced211f8..93eb08d84 100644 --- a/src/pkg/runtime/asm_arm.s +++ b/src/pkg/runtime/asm_arm.s @@ -671,7 +671,7 @@ TEXT runtime·abort(SB),NOSPLIT,$-4-0 // TEXT runtime·cas(SB),NOSPLIT,$0 // B runtime·armcas(SB) // -TEXT runtime·armcas(SB),NOSPLIT,$0-12 +TEXT runtime·armcas(SB),NOSPLIT,$0-13 MOVW valptr+0(FP), R1 MOVW old+4(FP), R2 MOVW new+8(FP), R3 @@ -683,11 +683,16 @@ casl: CMP $0, R0 BNE casl MOVW $1, R0 + MOVB R0, ret+12(FP) RET casfail: MOVW $0, R0 + MOVB R0, ret+12(FP) RET +TEXT runtime·casuintptr(SB), NOSPLIT, $0-13 + JMP runtime·cas(SB) + TEXT runtime·stackguard(SB),NOSPLIT,$0-8 MOVW R13, R1 MOVW g_stackguard(g), R2 diff --git a/src/pkg/runtime/chan.go b/src/pkg/runtime/chan.go index 7a44aface..e2d5bc180 100644 --- a/src/pkg/runtime/chan.go +++ b/src/pkg/runtime/chan.go @@ -66,7 +66,7 @@ func chanbuf(c *hchan, i uint) unsafe.Pointer { // entry point for c <- x from compiled code //go:nosplit func chansend1(t *chantype, c *hchan, elem unsafe.Pointer) { - chansend(t, c, elem, true, gogetcallerpc(unsafe.Pointer(&t))) + chansend(t, c, elem, true, getcallerpc(unsafe.Pointer(&t))) } /* @@ -127,7 +127,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin var t0 int64 if blockprofilerate > 0 { - t0 = gocputicks() + t0 = cputicks() } golock(&c.lock) @@ -155,7 +155,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin // to assign to both types in Go. At some point we'll // write the Go types directly instead of generating them // via the C types. At that point, this nastiness goes away. 
- *(*int64)(unsafe.Pointer(&sg.releasetime)) = gocputicks() + *(*int64)(unsafe.Pointer(&sg.releasetime)) = cputicks() } goready(recvg) return true @@ -189,7 +189,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin panic("send on closed channel") } if mysg.releasetime > 0 { - goblockevent(int64(mysg.releasetime)-t0, 3) + blockevent(int64(mysg.releasetime)-t0, 2) } if mysg != gp.waiting { gothrow("G waiting list is corrupted!") @@ -248,14 +248,14 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin recvg := sg.g gounlock(&c.lock) if sg.releasetime != 0 { - *(*int64)(unsafe.Pointer(&sg.releasetime)) = gocputicks() + *(*int64)(unsafe.Pointer(&sg.releasetime)) = cputicks() } goready(recvg) } else { gounlock(&c.lock) } if t1 > 0 { - goblockevent(t1-t0, 3) + blockevent(t1-t0, 2) } return true } @@ -285,7 +285,7 @@ func (q *waitq) dequeue() *sudog { // if sgp participates in a select and is already signaled, ignore it if sgp.selectdone != nil { // claim the right to signal - if *sgp.selectdone != 0 || !gocas(sgp.selectdone, 0, 1) { + if *sgp.selectdone != 0 || !cas(sgp.selectdone, 0, 1) { continue } } diff --git a/src/pkg/runtime/export_test.go b/src/pkg/runtime/export_test.go index f75b742b6..df6f11d67 100644 --- a/src/pkg/runtime/export_test.go +++ b/src/pkg/runtime/export_test.go @@ -75,7 +75,7 @@ var ( func NewParFor(nthrmax uint32) *ParFor { mp := acquirem() - mp.scalararg[0] = uint(nthrmax) + mp.scalararg[0] = uintptr(nthrmax) onM(&newparfor_m) desc := (*ParFor)(mp.ptrarg[0]) mp.ptrarg[0] = nil @@ -88,8 +88,8 @@ func ParForSetup(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(* mp.ptrarg[0] = unsafe.Pointer(desc) mp.ptrarg[1] = unsafe.Pointer(ctx) mp.ptrarg[2] = **(**unsafe.Pointer)(unsafe.Pointer(&body)) - mp.scalararg[0] = uint(nthr) - mp.scalararg[1] = uint(n) + mp.scalararg[0] = uintptr(nthr) + mp.scalararg[1] = uintptr(n) mp.scalararg[2] = 0 if wait { mp.scalararg[2] = 1 @@ -108,7 +108,7 @@ func ParForDo(desc *ParFor) { func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) { mp := acquirem() mp.ptrarg[0] = unsafe.Pointer(desc) - mp.scalararg[0] = uint(tid) + mp.scalararg[0] = uintptr(tid) onM(&parforiters_m) begin := uint32(mp.scalararg[0]) end := uint32(mp.scalararg[1]) diff --git a/src/pkg/runtime/hashmap.go b/src/pkg/runtime/hashmap.go index 9dcf48242..1d1e70848 100644 --- a/src/pkg/runtime/hashmap.go +++ b/src/pkg/runtime/hashmap.go @@ -162,12 +162,12 @@ func makemap(t *maptype, hint int64) *hmap { } // check compiler's and reflect's math - if t.key.size > maxKeySize && (t.indirectkey == 0 || t.keysize != uint8(ptrSize)) || - t.key.size <= maxKeySize && (t.indirectkey == 1 || t.keysize != uint8(t.key.size)) { + if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) || + t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) { gothrow("key size wrong") } - if t.elem.size > maxValueSize && (t.indirectvalue == 0 || t.valuesize != uint8(ptrSize)) || - t.elem.size <= maxValueSize && (t.indirectvalue == 1 || t.valuesize != uint8(t.elem.size)) { + if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) || + t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) { gothrow("value size wrong") } @@ -234,7 +234,7 @@ func makemap(t *maptype, hint int64) *hmap { // hold onto it for very long. 
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { if raceenabled && h != nil { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := getcallerpc(unsafe.Pointer(&t)) fn := mapaccess1 pc := **(**uintptr)(unsafe.Pointer(&fn)) racereadpc(unsafe.Pointer(h), callerpc, pc) @@ -263,12 +263,12 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { continue } k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) - if t.indirectkey != 0 { + if t.indirectkey { k = *((*unsafe.Pointer)(k)) } if alg.equal(key, k, uintptr(t.key.size)) { v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) - if t.indirectvalue != 0 { + if t.indirectvalue { v = *((*unsafe.Pointer)(v)) } return v @@ -283,7 +283,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) { if raceenabled && h != nil { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := getcallerpc(unsafe.Pointer(&t)) fn := mapaccess2 pc := **(**uintptr)(unsafe.Pointer(&fn)) racereadpc(unsafe.Pointer(h), callerpc, pc) @@ -312,12 +312,12 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) continue } k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) - if t.indirectkey != 0 { + if t.indirectkey { k = *((*unsafe.Pointer)(k)) } if alg.equal(key, k, uintptr(t.key.size)) { v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) - if t.indirectvalue != 0 { + if t.indirectvalue { v = *((*unsafe.Pointer)(v)) } return v, true @@ -355,12 +355,12 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe continue } k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) - if t.indirectkey != 0 { + if t.indirectkey { k = *((*unsafe.Pointer)(k)) } if alg.equal(key, k, uintptr(t.key.size)) { v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) - if t.indirectvalue != 0 { + if t.indirectvalue { v = *((*unsafe.Pointer)(v)) } return k, v @@ -378,7 +378,7 @@ func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) { panic("assignment to entry in nil map") } if raceenabled { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := getcallerpc(unsafe.Pointer(&t)) fn := mapassign1 pc := **(**uintptr)(unsafe.Pointer(&fn)) racewritepc(unsafe.Pointer(h), callerpc, pc) @@ -422,7 +422,7 @@ again: } k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) k2 := k - if t.indirectkey != 0 { + if t.indirectkey { k2 = *((*unsafe.Pointer)(k2)) } if !alg.equal(key, k2, uintptr(t.key.size)) { @@ -432,7 +432,7 @@ again: memmove(k2, key, uintptr(t.key.size)) v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) v2 := v - if t.indirectvalue != 0 { + if t.indirectvalue { v2 = *((*unsafe.Pointer)(v2)) } memmove(v2, val, uintptr(t.elem.size)) @@ -463,7 +463,7 @@ again: } // store new key/value at insert position - if t.indirectkey != 0 { + if t.indirectkey { if checkgc { memstats.next_gc = memstats.heap_alloc } @@ -471,7 +471,7 @@ again: *(*unsafe.Pointer)(insertk) = kmem insertk = kmem } - if t.indirectvalue != 0 { + if t.indirectvalue { if checkgc { memstats.next_gc = memstats.heap_alloc } @@ -487,7 +487,7 @@ again: func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { if raceenabled && h != nil { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := 
getcallerpc(unsafe.Pointer(&t)) fn := mapdelete pc := **(**uintptr)(unsafe.Pointer(&fn)) racewritepc(unsafe.Pointer(h), callerpc, pc) @@ -514,7 +514,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { } k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) k2 := k - if t.indirectkey != 0 { + if t.indirectkey { k2 = *((*unsafe.Pointer)(k2)) } if !alg.equal(key, k2, uintptr(t.key.size)) { @@ -544,7 +544,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) { it.bptr = nil if raceenabled && h != nil { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := getcallerpc(unsafe.Pointer(&t)) fn := mapiterinit pc := **(**uintptr)(unsafe.Pointer(&fn)) racereadpc(unsafe.Pointer(h), callerpc, pc) @@ -579,7 +579,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) { if old == old|iterator|oldIterator { break } - if gocas(&h.flags, old, old|iterator|oldIterator) { + if cas(&h.flags, old, old|iterator|oldIterator) { break } } @@ -590,7 +590,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) { func mapiternext(it *hiter) { h := it.h if raceenabled { - callerpc := gogetcallerpc(unsafe.Pointer(&it)) + callerpc := getcallerpc(unsafe.Pointer(&it)) fn := mapiternext pc := **(**uintptr)(unsafe.Pointer(&fn)) racereadpc(unsafe.Pointer(h), callerpc, pc) @@ -648,7 +648,7 @@ next: // to the other new bucket (each oldbucket expands to two // buckets during a grow). k2 := k - if t.indirectkey != 0 { + if t.indirectkey { k2 = *((*unsafe.Pointer)(k2)) } if alg.equal(k2, k2, uintptr(t.key.size)) { @@ -673,11 +673,11 @@ next: } if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY { // this is the golden data, we can return it. - if t.indirectkey != 0 { + if t.indirectkey { k = *((*unsafe.Pointer)(k)) } it.key = k - if t.indirectvalue != 0 { + if t.indirectvalue { v = *((*unsafe.Pointer)(v)) } it.value = v @@ -685,7 +685,7 @@ next: // The hash table has grown since the iterator was started. // The golden data for this key is now somewhere else. k2 := k - if t.indirectkey != 0 { + if t.indirectkey { k2 = *((*unsafe.Pointer)(k2)) } if alg.equal(k2, k2, uintptr(t.key.size)) { @@ -706,7 +706,7 @@ next: // us because when key!=key we can't look it up // successfully in the current table. 
it.key = k2 - if t.indirectvalue != 0 { + if t.indirectvalue { v = *((*unsafe.Pointer)(v)) } it.value = v @@ -790,7 +790,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { gothrow("bad map state") } k2 := k - if t.indirectkey != 0 { + if t.indirectkey { k2 = *((*unsafe.Pointer)(k2)) } // Compute hash to make our evacuation decision (whether we need @@ -834,12 +834,12 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { xv = add(xk, bucketCnt*uintptr(t.keysize)) } x.tophash[xi] = top - if t.indirectkey != 0 { + if t.indirectkey { *(*unsafe.Pointer)(xk) = k2 // copy pointer } else { memmove(xk, k, uintptr(t.key.size)) // copy value } - if t.indirectvalue != 0 { + if t.indirectvalue { *(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v) } else { memmove(xv, v, uintptr(t.elem.size)) @@ -861,12 +861,12 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { yv = add(yk, bucketCnt*uintptr(t.keysize)) } y.tophash[yi] = top - if t.indirectkey != 0 { + if t.indirectkey { *(*unsafe.Pointer)(yk) = k2 } else { memmove(yk, k, uintptr(t.key.size)) } - if t.indirectvalue != 0 { + if t.indirectvalue { *(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v) } else { memmove(yv, v, uintptr(t.elem.size)) @@ -941,7 +941,7 @@ func reflect_maplen(h *hmap) int { return 0 } if raceenabled { - callerpc := gogetcallerpc(unsafe.Pointer(&h)) + callerpc := getcallerpc(unsafe.Pointer(&h)) fn := reflect_maplen pc := **(**uintptr)(unsafe.Pointer(&fn)) racereadpc(unsafe.Pointer(h), callerpc, pc) diff --git a/src/pkg/runtime/hashmap_fast.go b/src/pkg/runtime/hashmap_fast.go index c1b71d6d8..7059e22a0 100644 --- a/src/pkg/runtime/hashmap_fast.go +++ b/src/pkg/runtime/hashmap_fast.go @@ -10,7 +10,7 @@ import ( func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { if raceenabled && h != nil { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := getcallerpc(unsafe.Pointer(&t)) fn := mapaccess1_fast32 pc := **(**uintptr)(unsafe.Pointer(&fn)) racereadpc(unsafe.Pointer(h), callerpc, pc) @@ -54,7 +54,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { if raceenabled && h != nil { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := getcallerpc(unsafe.Pointer(&t)) fn := mapaccess2_fast32 pc := **(**uintptr)(unsafe.Pointer(&fn)) racereadpc(unsafe.Pointer(h), callerpc, pc) @@ -98,7 +98,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { if raceenabled && h != nil { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := getcallerpc(unsafe.Pointer(&t)) fn := mapaccess1_fast64 pc := **(**uintptr)(unsafe.Pointer(&fn)) racereadpc(unsafe.Pointer(h), callerpc, pc) @@ -142,7 +142,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { if raceenabled && h != nil { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := getcallerpc(unsafe.Pointer(&t)) fn := mapaccess2_fast64 pc := **(**uintptr)(unsafe.Pointer(&fn)) racereadpc(unsafe.Pointer(h), callerpc, pc) @@ -186,7 +186,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { if raceenabled && h != nil { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := getcallerpc(unsafe.Pointer(&t)) fn := mapaccess1_faststr pc := 
**(**uintptr)(unsafe.Pointer(&fn)) racereadpc(unsafe.Pointer(h), callerpc, pc) @@ -290,7 +290,7 @@ dohash: func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { if raceenabled && h != nil { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := getcallerpc(unsafe.Pointer(&t)) fn := mapaccess2_faststr pc := **(**uintptr)(unsafe.Pointer(&fn)) racereadpc(unsafe.Pointer(h), callerpc, pc) diff --git a/src/pkg/runtime/iface.go b/src/pkg/runtime/iface.go index 60dfb49db..1421efe3c 100644 --- a/src/pkg/runtime/iface.go +++ b/src/pkg/runtime/iface.go @@ -53,7 +53,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab { if locked != 0 { golock(&ifaceLock) } - for m = (*itab)(goatomicloadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link { + for m = (*itab)(atomicloadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link { if m.inter == inter && m._type == typ { if m.bad != 0 { m = nil @@ -76,7 +76,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab { } } - m = (*itab)(gopersistentalloc(unsafe.Sizeof(itab{}) + uintptr(len(inter.mhdr))*ptrSize)) + m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr))*ptrSize, 0, &memstats.other_sys)) m.inter = inter m._type = typ @@ -118,7 +118,7 @@ search: gothrow("invalid itab locking") } m.link = hash[h] - goatomicstorep(unsafe.Pointer(&hash[h]), unsafe.Pointer(m)) + atomicstorep(unsafe.Pointer(&hash[h]), unsafe.Pointer(m)) gounlock(&ifaceLock) if m.bad != 0 { return nil @@ -128,7 +128,7 @@ search: func typ2Itab(t *_type, inter *interfacetype, cache **itab) *itab { tab := getitab(inter, t, false) - goatomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab)) + atomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab)) return tab } @@ -150,10 +150,10 @@ func convT2E(t *_type, elem unsafe.Pointer) (e interface{}) { } func convT2I(t *_type, inter *interfacetype, cache **itab, elem unsafe.Pointer) (i fInterface) { - tab := (*itab)(goatomicloadp(unsafe.Pointer(cache))) + tab := (*itab)(atomicloadp(unsafe.Pointer(cache))) if tab == nil { tab = getitab(inter, t, false) - goatomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab)) + atomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab)) } size := uintptr(t.size) pi := (*iface)(unsafe.Pointer(&i)) diff --git a/src/pkg/runtime/malloc.go b/src/pkg/runtime/malloc.go index ffe571a18..fb2c037ac 100644 --- a/src/pkg/runtime/malloc.go +++ b/src/pkg/runtime/malloc.go @@ -37,6 +37,9 @@ const ( bitMask = bitBoundary | bitMarked ) +// Page number (address>>pageShift) +type pageID uintptr + // All zero-sized allocations return a pointer to this byte. var zeroObject byte @@ -64,7 +67,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer { } mp.mallocing = 1 if mp.curg != nil { - mp.curg.stackguard0 = ^uint(0xfff) | 0xbad + mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad } } @@ -119,7 +122,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer { // The object fits into existing tiny block. x = tiny c.tiny = (*byte)(add(x, size)) - c.tinysize -= uint(size1) + c.tinysize -= uintptr(size1) if debugMalloc { mp := acquirem() if mp.mallocing == 0 { @@ -156,7 +159,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer { // based on amount of remaining free space. 
if maxTinySize-size > tinysize { c.tiny = (*byte)(add(x, size)) - c.tinysize = uint(maxTinySize - size) + c.tinysize = uintptr(maxTinySize - size) } size = maxTinySize } else { @@ -171,7 +174,7 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer { v := s.freelist if v == nil { mp := acquirem() - mp.scalararg[0] = uint(sizeclass) + mp.scalararg[0] = uintptr(sizeclass) onM(&mcacheRefill_m) releasem(mp) s = c.alloc[sizeclass] @@ -188,11 +191,11 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer { } } } - c.local_cachealloc += int(size) + c.local_cachealloc += intptr(size) } else { mp := acquirem() - mp.scalararg[0] = uint(size) - mp.scalararg[1] = uint(flags) + mp.scalararg[0] = uintptr(size) + mp.scalararg[1] = uintptr(flags) onM(&largeAlloc_m) s = (*mspan)(mp.ptrarg[0]) mp.ptrarg[0] = nil @@ -241,15 +244,15 @@ func gomallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer { mp := acquirem() mp.ptrarg[0] = x mp.ptrarg[1] = unsafe.Pointer(typ) - mp.scalararg[0] = uint(size) - mp.scalararg[1] = uint(size0) + mp.scalararg[0] = uintptr(size) + mp.scalararg[1] = uintptr(size0) onM(&unrollgcproginplace_m) releasem(mp) goto marked } ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0]))) // Check whether the program is already unrolled. - if uintptr(goatomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 { + if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 { mp := acquirem() mp.ptrarg[0] = unsafe.Pointer(typ) onM(&unrollgcprog_m) @@ -394,7 +397,7 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { } c.next_sample = next } - mp.scalararg[0] = uint(size) + mp.scalararg[0] = uintptr(size) mp.ptrarg[0] = x onM(&mprofMalloc_m) } @@ -402,7 +405,7 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { // force = 1 - do GC regardless of current heap usage // force = 2 - go GC and eager sweep func gogc(force int32) { - if memstats.enablegc == 0 { + if !memstats.enablegc { return } @@ -421,7 +424,7 @@ func gogc(force int32) { if gcpercent == gcpercentUnknown { golock(&mheap_.lock) if gcpercent == gcpercentUnknown { - gcpercent = goreadgogc() + gcpercent = readgogc() } gounlock(&mheap_.lock) } @@ -439,7 +442,7 @@ func gogc(force int32) { } // Ok, we're doing it! 
Stop everybody else - startTime := gonanotime() + startTime := nanotime() mp = acquirem() mp.gcing = 1 releasem(mp) @@ -461,11 +464,11 @@ func gogc(force int32) { } for i := 0; i < n; i++ { if i > 0 { - startTime = gonanotime() + startTime = nanotime() } // switch to g0, call gc, then switch back - mp.scalararg[0] = uint(uint32(startTime)) // low 32 bits - mp.scalararg[1] = uint(startTime >> 32) // high 32 bits + mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits + mp.scalararg[1] = uintptr(startTime >> 32) // high 32 bits if force >= 2 { mp.scalararg[2] = 1 // eagersweep } else { diff --git a/src/pkg/runtime/malloc.h b/src/pkg/runtime/malloc.h index eafabb364..7a6d0c71d 100644 --- a/src/pkg/runtime/malloc.h +++ b/src/pkg/runtime/malloc.h @@ -93,7 +93,7 @@ enum PageSize = 1<<PageShift, PageMask = PageSize - 1, }; -typedef uintptr PageID; // address >> PageShift +typedef uintptr pageID; // address >> PageShift enum { @@ -403,7 +403,7 @@ struct MSpan { MSpan *next; // in a span linked list MSpan *prev; // in a span linked list - PageID start; // starting page number + pageID start; // starting page number uintptr npages; // number of pages in span MLink *freelist; // list of free objects // sweep generation: @@ -425,7 +425,7 @@ struct MSpan Special *specials; // linked list of special records sorted by offset. }; -void runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages); +void runtime·MSpan_Init(MSpan *span, pageID start, uintptr npages); void runtime·MSpan_EnsureSwept(MSpan *span); bool runtime·MSpan_Sweep(MSpan *span, bool preserve); diff --git a/src/pkg/runtime/mgc0.c b/src/pkg/runtime/mgc0.c index 68e45a316..d70a6373e 100644 --- a/src/pkg/runtime/mgc0.c +++ b/src/pkg/runtime/mgc0.c @@ -225,7 +225,7 @@ scanblock(byte *b, uintptr n, byte *ptrmask) Eface *eface; Type *typ; MSpan *s; - PageID k; + pageID k; bool keepworking; // Cache memory arena parameters in local vars. diff --git a/src/pkg/runtime/mheap.c b/src/pkg/runtime/mheap.c index a447bbc97..8bfb41ac6 100644 --- a/src/pkg/runtime/mheap.c +++ b/src/pkg/runtime/mheap.c @@ -279,7 +279,7 @@ MHeap_AllocSpanLocked(MHeap *h, uintptr npage) { uintptr n; MSpan *s, *t; - PageID p; + pageID p; // Try in fixed-size lists up to max. for(n=npage; n < nelem(h->free); n++) { @@ -380,7 +380,7 @@ MHeap_Grow(MHeap *h, uintptr npage) uintptr ask; void *v; MSpan *s; - PageID p; + pageID p; // Ask for a big chunk, to reduce the number of mappings // the operating system needs to track; also amortizes @@ -441,7 +441,7 @@ MSpan* runtime·MHeap_LookupMaybe(MHeap *h, void *v) { MSpan *s; - PageID p, q; + pageID p, q; if((byte*)v < h->arena_start || (byte*)v >= h->arena_used) return nil; @@ -514,7 +514,7 @@ static void MHeap_FreeSpanLocked(MHeap *h, MSpan *s, bool acctinuse, bool acctidle) { MSpan *t; - PageID p; + pageID p; switch(s->state) { case MSpanStack: @@ -639,7 +639,7 @@ runtime∕debug·freeOSMemory(void) // Initialize a new span with the given start and npages. 
void -runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages) +runtime·MSpan_Init(MSpan *span, pageID start, uintptr npages) { span->next = nil; span->prev = nil; diff --git a/src/pkg/runtime/mprof.go b/src/pkg/runtime/mprof.go index 5d77c5629..d20bf2371 100644 --- a/src/pkg/runtime/mprof.go +++ b/src/pkg/runtime/mprof.go @@ -98,10 +98,10 @@ func record(r *MemProfileRecord, b *bucket) { r.FreeBytes = int64(b.data.mp.free_bytes) r.AllocObjects = int64(b.data.mp.allocs) r.FreeObjects = int64(b.data.mp.frees) - for i := 0; uint(i) < b.nstk && i < len(r.Stack0); i++ { + for i := 0; uintptr(i) < b.nstk && i < len(r.Stack0); i++ { r.Stack0[i] = *(*uintptr)(add(unsafe.Pointer(&b.stk), uintptr(i)*ptrSize)) } - for i := b.nstk; i < uint(len(r.Stack0)); i++ { + for i := b.nstk; i < uintptr(len(r.Stack0)); i++ { r.Stack0[i] = 0 } } @@ -126,7 +126,7 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) { p[idx].Count = int64(bp.count) p[idx].Cycles = int64(bp.cycles) i := 0 - for uint(i) < b.nstk && i < len(p[idx].Stack0) { + for uintptr(i) < b.nstk && i < len(p[idx].Stack0) { p[idx].Stack0[i] = *(*uintptr)(add(unsafe.Pointer(&b.stk), uintptr(i)*ptrSize)) i++ } @@ -146,8 +146,8 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) { // If all is true, Stack formats stack traces of all other goroutines // into buf after the trace for the current goroutine. func Stack(buf []byte, all bool) int { - sp := gogetcallersp(unsafe.Pointer(&buf)) - pc := gogetcallerpc(unsafe.Pointer(&buf)) + sp := getcallersp(unsafe.Pointer(&buf)) + pc := getcallerpc(unsafe.Pointer(&buf)) mp := acquirem() gp := mp.curg if all { @@ -190,7 +190,7 @@ func Stack(buf []byte, all bool) int { // Most clients should use the runtime/pprof package instead // of calling ThreadCreateProfile directly. 
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) { - first := (*m)(goatomicloadp(unsafe.Pointer(&allm))) + first := (*m)(atomicloadp(unsafe.Pointer(&allm))) for mp := first; mp != nil; mp = mp.alllink { n++ } diff --git a/src/pkg/runtime/mprof.goc b/src/pkg/runtime/mprof.goc index a96edee21..f76aae48c 100644 --- a/src/pkg/runtime/mprof.goc +++ b/src/pkg/runtime/mprof.goc @@ -230,12 +230,6 @@ runtime·blockevent(int64 cycles, int32 skip) } void -runtime·blockevent_m(void) -{ - runtime·blockevent(g->m->scalararg[0] + ((int64)g->m->scalararg[1]<<32), g->m->scalararg[2]); -} - -void runtime·iterate_memprof(void (*callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr)) { Bucket *b; diff --git a/src/pkg/runtime/print.go b/src/pkg/runtime/print.go index 4b94417c6..fd79bc8dd 100644 --- a/src/pkg/runtime/print.go +++ b/src/pkg/runtime/print.go @@ -19,7 +19,7 @@ var ( func printstring(s string) { mp := acquirem() - mp.scalararg[0] = uint(len(s)) + mp.scalararg[0] = uintptr(len(s)) mp.ptrarg[0] = (*stringStruct)(unsafe.Pointer(&s)).str onM(&printstring_m) releasem(mp) @@ -34,7 +34,7 @@ func printuint(x uint64) { func printhex(x uintptr) { mp := acquirem() - mp.scalararg[0] = uint(x) + mp.scalararg[0] = uintptr(x) onM(&printhex_m) releasem(mp) } diff --git a/src/pkg/runtime/proc.go b/src/pkg/runtime/proc.go index de58daa13..a201dc6c5 100644 --- a/src/pkg/runtime/proc.go +++ b/src/pkg/runtime/proc.go @@ -50,7 +50,7 @@ func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) { gothrow("gopark: bad g status") } mp.waitlock = lock - mp.waitunlockf = *(*func(*g, unsafe.Pointer) uint8)(unsafe.Pointer(&unlockf)) + mp.waitunlockf = *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&unlockf)) gp.waitreason = reason releasem(mp) // can't do anything that might move the G between Ms here. 
@@ -70,16 +70,6 @@ func goready(gp *g) { releasem(mp) } -func goblockevent(cycles int64, skip int32) { - // TODO: convert to Go when we do mprof.goc - mp := acquirem() - mp.scalararg[0] = uint(uint32(cycles)) - mp.scalararg[1] = uint(cycles >> 32) - mp.scalararg[2] = uint(skip) - onM(&blockevent_m) - releasem(mp) -} - //go:nosplit func acquireSudog() *sudog { c := gomcache() diff --git a/src/pkg/runtime/rdebug.go b/src/pkg/runtime/rdebug.go index 3df73ce66..eef0f281f 100644 --- a/src/pkg/runtime/rdebug.go +++ b/src/pkg/runtime/rdebug.go @@ -6,27 +6,22 @@ package runtime func setMaxStack(in int) (out int) { out = int(maxstacksize) - maxstacksize = uint(in) + maxstacksize = uintptr(in) return out } func setGCPercent(in int32) (out int32) { mp := acquirem() - mp.scalararg[0] = uint(int(in)) + mp.scalararg[0] = uintptr(int(in)) onM(&setgcpercent_m) out = int32(int(mp.scalararg[0])) releasem(mp) return out } -func setPanicOnFault(newb bool) (old bool) { - new := uint8(0) - if newb { - new = 1 - } - +func setPanicOnFault(new bool) (old bool) { mp := acquirem() - old = mp.curg.paniconfault == 1 + old = mp.curg.paniconfault mp.curg.paniconfault = new releasem(mp) return old @@ -34,7 +29,7 @@ func setPanicOnFault(newb bool) (old bool) { func setMaxThreads(in int) (out int) { mp := acquirem() - mp.scalararg[0] = uint(in) + mp.scalararg[0] = uintptr(in) onM(&setmaxthreads_m) out = int(mp.scalararg[0]) releasem(mp) diff --git a/src/pkg/runtime/runtime.h b/src/pkg/runtime/runtime.h index ae098459f..bb9d10855 100644 --- a/src/pkg/runtime/runtime.h +++ b/src/pkg/runtime/runtime.h @@ -22,17 +22,10 @@ typedef int64 intptr; typedef int64 intgo; // Go's int typedef uint64 uintgo; // Go's uint #else -// Normally, "int" == "long int" == 32 bits. -// However, the C compiler uses this distinction -// to disambiguate true 32 bit ints (e.g. int32) -// from 32/64 bit ints (e.g. uintptr) so that it -// can generate the corresponding go type correctly. -typedef signed long int int32_x; -typedef unsigned long int uint32_x; -typedef uint32_x uintptr; -typedef int32_x intptr; -typedef int32_x intgo; // Go's int -typedef uint32_x uintgo; // Go's uint +typedef uint32 uintptr; +typedef int32 intptr; +typedef int32 intgo; // Go's int +typedef uint32 uintgo; // Go's uint #endif #ifdef _64BITREG diff --git a/src/pkg/runtime/sema.go b/src/pkg/runtime/sema.go index a9ed7150d..4674a843e 100644 --- a/src/pkg/runtime/sema.go +++ b/src/pkg/runtime/sema.go @@ -65,16 +65,16 @@ func semacquire(addr *uint32, profile bool) { t0 := int64(0) s.releasetime = 0 if profile && blockprofilerate > 0 { - t0 = gocputicks() + t0 = cputicks() s.releasetime = -1 } for { golock(&root.lock) // Add ourselves to nwait to disable "easy case" in semrelease. - goxadd(&root.nwait, 1) + xadd(&root.nwait, 1) // Check cansemacquire to avoid missed wakeup. if cansemacquire(addr) { - goxadd(&root.nwait, ^uint32(0)) + xadd(&root.nwait, -1) gounlock(&root.lock) break } @@ -87,25 +87,25 @@ func semacquire(addr *uint32, profile bool) { } } if s.releasetime > 0 { - goblockevent(int64(s.releasetime)-t0, 4) + blockevent(int64(s.releasetime)-t0, 3) } releaseSudog(s) } func semrelease(addr *uint32) { root := semroot(addr) - goxadd(addr, 1) + xadd(addr, 1) // Easy case: no waiters? // This check must happen after the xadd, to avoid a missed wakeup // (see loop in semacquire). - if goatomicload(&root.nwait) == 0 { + if atomicload(&root.nwait) == 0 { return } // Harder case: search for a waiter and wake it. 
golock(&root.lock) - if goatomicload(&root.nwait) == 0 { + if atomicload(&root.nwait) == 0 { // The count is already consumed by another goroutine, // so no need to wake up another goroutine. gounlock(&root.lock) @@ -114,7 +114,7 @@ func semrelease(addr *uint32) { s := root.head for ; s != nil; s = s.next { if s.elem == unsafe.Pointer(addr) { - goxadd(&root.nwait, ^uint32(0)) + xadd(&root.nwait, -1) root.dequeue(s) break } @@ -122,9 +122,7 @@ func semrelease(addr *uint32) { gounlock(&root.lock) if s != nil { if s.releasetime != 0 { - // TODO: Remove use of unsafe here. - releasetimep := (*int64)(unsafe.Pointer(&s.releasetime)) - *releasetimep = gocputicks() + s.releasetime = cputicks() } goready(s.g) } @@ -136,11 +134,11 @@ func semroot(addr *uint32) *semaRoot { func cansemacquire(addr *uint32) bool { for { - v := goatomicload(addr) + v := atomicload(addr) if v == 0 { return false } - if gocas(addr, v, v-1) { + if cas(addr, v, v-1) { return true } } @@ -208,7 +206,7 @@ func syncsemacquire(s *syncSema) { w.releasetime = 0 t0 := int64(0) if blockprofilerate > 0 { - t0 = gocputicks() + t0 = cputicks() w.releasetime = -1 } if s.tail == nil { @@ -219,7 +217,7 @@ func syncsemacquire(s *syncSema) { s.tail = w goparkunlock(&s.lock, "semacquire") if t0 != 0 { - goblockevent(int64(w.releasetime)-t0, 3) + blockevent(int64(w.releasetime)-t0, 2) } releaseSudog(w) } @@ -236,9 +234,7 @@ func syncsemrelease(s *syncSema, n uint32) { s.tail = nil } if wake.releasetime != 0 { - // TODO: Remove use of unsafe here. - releasetimep := (*int64)(unsafe.Pointer(&wake.releasetime)) - *releasetimep = gocputicks() + wake.releasetime = cputicks() } goready(wake.g) n-- diff --git a/src/pkg/runtime/sigqueue.go b/src/pkg/runtime/sigqueue.go index 5976e5735..c51ede026 100644 --- a/src/pkg/runtime/sigqueue.go +++ b/src/pkg/runtime/sigqueue.go @@ -16,21 +16,21 @@ func signal_recv() (m uint32) { if ok { return } - gonotetsleepg(&signote, -1) - gonoteclear(&signote) + notetsleepg(&signote, -1) + noteclear(&signote) } } func signal_enable(s uint32) { mp := acquirem() - mp.scalararg[0] = uint(s) + mp.scalararg[0] = uintptr(s) onM(&signal_enable_m) releasem(mp) } func signal_disable(s uint32) { mp := acquirem() - mp.scalararg[0] = uint(s) + mp.scalararg[0] = uintptr(s) onM(&signal_disable_m) releasem(mp) } diff --git a/src/pkg/runtime/slice.go b/src/pkg/runtime/slice.go index e01ea2d7f..c282125b4 100644 --- a/src/pkg/runtime/slice.go +++ b/src/pkg/runtime/slice.go @@ -47,7 +47,7 @@ func growslice(t *slicetype, old sliceStruct, n int64) sliceStruct { } if raceenabled { - callerpc := gogetcallerpc(unsafe.Pointer(&t)) + callerpc := getcallerpc(unsafe.Pointer(&t)) fn := growslice pc := **(**uintptr)(unsafe.Pointer(&fn)) racereadrangepc(old.array, old.len*int(t.elem.size), callerpc, pc) @@ -104,7 +104,7 @@ func slicecopy(to sliceStruct, fm sliceStruct, width uintptr) int { } if raceenabled { - callerpc := gogetcallerpc(unsafe.Pointer(&to)) + callerpc := getcallerpc(unsafe.Pointer(&to)) fn := slicecopy pc := **(**uintptr)(unsafe.Pointer(&fn)) racewriterangepc(to.array, n*int(width), callerpc, pc) @@ -132,7 +132,7 @@ func slicestringcopy(to []byte, fm string) int { } if raceenabled { - callerpc := gogetcallerpc(unsafe.Pointer(&to)) + callerpc := getcallerpc(unsafe.Pointer(&to)) fn := slicestringcopy pc := **(**uintptr)(unsafe.Pointer(&fn)) racewriterangepc(unsafe.Pointer(&to[0]), n, callerpc, pc) diff --git a/src/pkg/runtime/string.go b/src/pkg/runtime/string.go index 983125f0c..72b732f84 100644 --- a/src/pkg/runtime/string.go +++ 
b/src/pkg/runtime/string.go @@ -64,7 +64,7 @@ func slicebytetostring(b []byte) string { fn := slicebytetostring racereadrangepc(unsafe.Pointer(&b[0]), len(b), - gogetcallerpc(unsafe.Pointer(&b)), + getcallerpc(unsafe.Pointer(&b)), **(**uintptr)(unsafe.Pointer(&fn))) } s, c := rawstring(len(b)) @@ -85,7 +85,7 @@ func slicebytetostringtmp(b []byte) string { fn := slicebytetostringtmp racereadrangepc(unsafe.Pointer(&b[0]), len(b), - gogetcallerpc(unsafe.Pointer(&b)), + getcallerpc(unsafe.Pointer(&b)), **(**uintptr)(unsafe.Pointer(&fn))) } return *(*string)(unsafe.Pointer(&b)) @@ -123,7 +123,7 @@ func slicerunetostring(a []rune) string { fn := slicerunetostring racereadrangepc(unsafe.Pointer(&a[0]), len(a)*int(unsafe.Sizeof(a[0])), - gogetcallerpc(unsafe.Pointer(&a)), + getcallerpc(unsafe.Pointer(&a)), **(**uintptr)(unsafe.Pointer(&fn))) } var dum [4]byte @@ -219,7 +219,7 @@ func rawstring(size int) (s string, b []byte) { for { ms := maxstring - if uintptr(size) <= uintptr(ms) || gocasx((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) { + if uintptr(size) <= uintptr(ms) || casuintptr((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) { return } } diff --git a/src/pkg/runtime/stubs.go b/src/pkg/runtime/stubs.go index 52d0c0e3a..1f3cc16e6 100644 --- a/src/pkg/runtime/stubs.go +++ b/src/pkg/runtime/stubs.go @@ -16,12 +16,6 @@ const ( ) //go:noescape -func gogetcallerpc(p unsafe.Pointer) uintptr - -//go:noescape -func gogetcallersp(p unsafe.Pointer) uintptr - -//go:noescape func racereadpc(addr unsafe.Pointer, callpc, pc uintptr) //go:noescape @@ -91,11 +85,12 @@ var ( setmaxthreads_m, ready_m, park_m, - blockevent_m, notewakeup_m, notetsleepg_m mFunction ) +func blockevent(int64, int32) + // memclr clears n bytes starting at ptr. // in memclr_*.s //go:noescape @@ -117,26 +112,6 @@ const ( concurrentSweep = true ) -// Atomic operations to read/write a pointer. -// in stubs.goc -func goatomicload(p *uint32) uint32 // return *p -func goatomicloadp(p unsafe.Pointer) unsafe.Pointer // return *p -func goatomicstore(p *uint32, v uint32) // *p = v -func goatomicstorep(p unsafe.Pointer, v unsafe.Pointer) // *p = v - -// in stubs.goc -// if *p == x { *p = y; return true } else { return false }, atomically -//go:noescape -func gocas(p *uint32, x uint32, y uint32) bool - -//go:noescape -func goxadd(p *uint32, x uint32) uint32 - -//go:noescape -func gocasx(p *uintptr, x uintptr, y uintptr) bool - -func goreadgogc() int32 -func gonanotime() int64 func gosched() func starttheworld() func stoptheworld() @@ -187,33 +162,6 @@ func noescape(p unsafe.Pointer) unsafe.Pointer { return unsafe.Pointer(x ^ 0) } -// gopersistentalloc allocates a permanent (not garbage collected) -// memory region of size n. Use wisely! 
-func gopersistentalloc(n uintptr) unsafe.Pointer - -func gocputicks() int64 - -func gonoteclear(n *note) { - n.key = 0 -} - -func gonotewakeup(n *note) { - mp := acquirem() - mp.ptrarg[0] = unsafe.Pointer(n) - onM(¬ewakeup_m) - releasem(mp) -} - -func gonotetsleepg(n *note, t int64) { - mp := acquirem() - mp.ptrarg[0] = unsafe.Pointer(n) - mp.scalararg[0] = uint(uint32(t)) // low 32 bits - mp.scalararg[1] = uint(t >> 32) // high 32 bits - releasem(mp) - mcall(¬etsleepg_m) - exitsyscall() -} - func exitsyscall() func goroutineheader(gp *g) @@ -231,22 +179,6 @@ func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32 func jmpdefer(fv *funcval, argp unsafe.Pointer) func exit1(code int32) func asminit() -func getcallersp(argp unsafe.Pointer) uintptr -func cas(ptr *uint32, old, new uint32) bool -func cas64(ptr *uint64, old, new uint64) bool -func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool -func xadd(ptr *uint32, delta int32) uint32 -func xadd64(ptr *uint64, delta int64) uint64 -func xchg(ptr *uint32, new uint32) uint32 -func xchg64(ptr *uint64, new uint64) uint64 -func xchgp(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer -func atomicstore(ptr *uint32, val uint32) -func atomicstore64(ptr *uint64, val uint64) -func atomicstorep(ptr *unsafe.Pointer, val unsafe.Pointer) -func atomicload(ptr *uint32) uint32 -func atomicload64(ptr *uint64) uint64 -func atomicloadp(ptr *unsafe.Pointer) unsafe.Pointer -func atomicor8(ptr *uint8, val uint8) func setg(gg *g) func exit(code int32) func breakpoint() @@ -257,10 +189,72 @@ func cputicks() int64 func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer func munmap(addr unsafe.Pointer, n uintptr) func madvise(addr unsafe.Pointer, n uintptr, flags int32) -func setcallerpc(argp unsafe.Pointer, pc uintptr) -func getcallerpc(argp unsafe.Pointer) uintptr func newstackcall(fv *funcval, addr unsafe.Pointer, size uint32) func procyield(cycles uint32) func osyield() func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr) func cmpstring(s1, s2 string) int +func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer +func readgogc() int32 +func notetsleepg(n *note, ns int64) +func notetsleep(n *note, ns int64) +func notewakeup(n *note) +func notesleep(n *note) +func noteclear(n *note) + +//go:noescape +func cas(ptr *uint32, old, new uint32) bool + +//go:noescape +func cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool + +//go:noescape +func casuintptr(ptr *uintptr, old, new uintptr) bool + +//go:noescape +func xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer + +//go:noescape +func atomicstore(ptr *uint32, val uint32) + +//go:noescape +func atomicstore64(ptr *uint64, val uint64) + +//go:noescape +func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func atomicload(ptr *uint32) uint32 + +//go:noescape +func atomicload64(ptr *uint64) uint64 + +//go:noescape +func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func atomicor8(ptr *uint8, val uint8) + +//go:noescape +func setcallerpc(argp unsafe.Pointer, pc uintptr) + +//go:noescape +func getcallerpc(argp unsafe.Pointer) uintptr + +//go:noescape +func getcallersp(argp 
unsafe.Pointer) uintptr diff --git a/src/pkg/runtime/stubs.goc b/src/pkg/runtime/stubs.goc index 1cda75e39..ebf9cc105 100644 --- a/src/pkg/runtime/stubs.goc +++ b/src/pkg/runtime/stubs.goc @@ -32,11 +32,6 @@ func gounlock(p *Lock) { runtime·unlock(p); } -#pragma textflag NOSPLIT -func goreadgogc() (r int32) { - r = runtime·readgogc(); -} - // entry point for testing // TODO: mcall and run on M stack func gostringW(str Slice) (s String) { @@ -44,46 +39,6 @@ func gostringW(str Slice) (s String) { } #pragma textflag NOSPLIT -func gonanotime() (r int64) { - r = runtime·nanotime(); -} - -#pragma textflag NOSPLIT -func goatomicload(p *uint32) (v uint32) { - v = runtime·atomicload(p); -} - -#pragma textflag NOSPLIT -func goatomicloadp(p **byte) (v *byte) { - v = runtime·atomicloadp(p); -} - -#pragma textflag NOSPLIT -func goatomicstore(p *uint32, v uint32) { - runtime·atomicstore(p, v); -} - -#pragma textflag NOSPLIT -func goatomicstorep(p **byte, v *byte) { - runtime·atomicstorep(p, v); -} - -#pragma textflag NOSPLIT -func runtime·goxadd(p *uint32, x uint32) (ret uint32) { - ret = runtime·xadd(p, x); -} - -#pragma textflag NOSPLIT -func runtime·gocas(p *uint32, x uint32, y uint32) (ret bool) { - ret = runtime·cas(p, x, y); -} - -#pragma textflag NOSPLIT -func runtime·gocasx(p *uintptr, x uintptr, y uintptr) (ret bool) { - ret = runtime·casp((void**)p, (void*)x, (void*)y); -} - -#pragma textflag NOSPLIT func runtime·getg() (ret *G) { ret = g; } @@ -116,12 +71,6 @@ func GCMask(x Eface) (mask Slice) { } #pragma textflag NOSPLIT -func gopersistentalloc(size uintptr) (x *void) { - // TODO: used only for itabs for now. Need to make &mstats.other_sys arg parameterized. - x = runtime·persistentalloc(size, 0, &mstats.other_sys); -} - -#pragma textflag NOSPLIT func reflect·typelinks() (ret Slice) { extern Type *runtime·typelink[], *runtime·etypelink[]; ret.array = (byte*)runtime·typelink; diff --git a/src/pkg/runtime/syscall_windows.go b/src/pkg/runtime/syscall_windows.go index 272db6241..5ca9735ac 100644 --- a/src/pkg/runtime/syscall_windows.go +++ b/src/pkg/runtime/syscall_windows.go @@ -15,15 +15,11 @@ type callbacks struct { } func (c *wincallbackcontext) isCleanstack() bool { - return c.cleanstack == 1 + return c.cleanstack } func (c *wincallbackcontext) setCleanstack(cleanstack bool) { - if cleanstack { - c.cleanstack = 1 - } else { - c.cleanstack = 0 - } + c.cleanstack = cleanstack } var ( @@ -51,11 +47,11 @@ func compileCallback(fn eface, cleanstack bool) (code uintptr) { if len(ft.out) != 1 { panic("compilecallback: function must have one output parameter") } - uintptrSize := uint(unsafe.Sizeof(uintptr(0))) + uintptrSize := unsafe.Sizeof(uintptr(0)) if t := (**_type)(unsafe.Pointer(&ft.out[0])); (*t).size != uintptrSize { panic("compilecallback: output parameter size is wrong") } - argsize := uint(0) + argsize := uintptr(0) for _, t := range (*[1024](*_type))(unsafe.Pointer(&ft.in[0]))[:len(ft.in)] { if (*t).size != uintptrSize { panic("compilecallback: input parameter size is wrong") diff --git a/src/pkg/runtime/thunk.s b/src/pkg/runtime/thunk.s index 1f83438ef..7e7aa8433 100644 --- a/src/pkg/runtime/thunk.s +++ b/src/pkg/runtime/thunk.s @@ -12,7 +12,7 @@ #endif TEXT time·runtimeNano(SB),NOSPLIT,$0-0 - JMP runtime·gonanotime(SB) + JMP runtime·nanotime(SB) TEXT time·Sleep(SB),NOSPLIT,$0-0 JMP runtime·timeSleep(SB) diff --git a/src/pkg/runtime/time.go b/src/pkg/runtime/time.go index c9df3a364..b40952ebc 100644 --- a/src/pkg/runtime/time.go +++ b/src/pkg/runtime/time.go @@ -49,7 +49,7 @@ func 
timeSleep(ns int64) { } t := new(timer) - t.when = gonanotime() + ns + t.when = nanotime() + ns t.f = goroutineReady t.arg = getg() golock(&timers.lock) @@ -100,7 +100,7 @@ func addtimerLocked(t *timer) { // siftup moved to top: new earliest deadline. if timers.sleeping { timers.sleeping = false - gonotewakeup(&timers.waitnote) + notewakeup(&timers.waitnote) } if timers.rescheduling { timers.rescheduling = false @@ -149,11 +149,11 @@ func deltimer(t *timer) bool { // If addtimer inserts a new earlier event, addtimer1 wakes timerproc early. func timerproc() { timers.gp = getg() - timers.gp.issystem = 1 + timers.gp.issystem = true for { golock(&timers.lock) timers.sleeping = false - now := gonanotime() + now := nanotime() delta := int64(-1) for { if len(timers.t) == 0 { @@ -200,9 +200,9 @@ func timerproc() { } // At least one timer pending. Sleep until then. timers.sleeping = true - gonoteclear(&timers.waitnote) + noteclear(&timers.waitnote) gounlock(&timers.lock) - gonotetsleepg(&timers.waitnote, delta) + notetsleepg(&timers.waitnote, delta) } } |