author     Russ Cox <rsc@golang.org>  2014-10-29 12:25:24 -0400
committer  Russ Cox <rsc@golang.org>  2014-10-29 12:25:24 -0400
commit     710efc5a3085f55968c120abb25206fb72c05d46 (patch)
tree       433262b4bdac6643d923971ea21bb39cb69822a7 /src/runtime
parent     40520a21d8b050635a417666db959f75d757fff5 (diff)
parent     cc517ca5f7183e7f5d91bf75897b23cc0f4ed04f (diff)
[dev.garbage] all: merge dev.power64 (5ad5e85cfb99) into dev.garbage
The goal here is to get the big-endian fixes so that in some upcoming
code movement for write barriers I don't make them unmergeable.

LGTM=rlh
R=rlh
CC=golang-codereviews
https://codereview.appspot.com/166890043
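
One of the big-endian fixes pulled in here is the flag-byte check in malloc.go
(see the hunk below), which stops masking a loaded word with 0xff and instead
reads the word's first in-memory byte through a *uint8 pointer. A minimal Go
sketch of why those two reads differ on big-endian targets such as power64
(the names below are illustrative, not taken from the patch):

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	// A word whose low-order byte and first in-memory byte differ on
    	// big-endian machines.
    	word := uintptr(1)

    	// Arithmetic view: the numerically low byte, which is what the old
    	// `maskword & 0xff` style of test inspected.
    	lowByte := word & 0xff

    	// Memory view: the byte the word starts with in memory, which is what
    	// the new `*(*uint8)(unsafe.Pointer(&maskword))` style of test inspects.
    	firstByte := *(*uint8)(unsafe.Pointer(&word))

    	// little-endian: 1 1    big-endian (e.g. power64): 1 0
    	fmt.Println(lowByte, firstByte)
    }

On little-endian machines both forms see the first byte of the mask; on
big-endian only the pointer form does, which is why this check needed to land
before the write-barrier code starts moving around.
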
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/arch_power64.go  8
-rw-r--r--  src/runtime/arch_power64.h  14
-rw-r--r--  src/runtime/arch_power64le.go  8
-rw-r--r--  src/runtime/arch_power64le.h  14
-rw-r--r--  src/runtime/asm_386.s  58
-rw-r--r--  src/runtime/asm_amd64.s  78
-rw-r--r--  src/runtime/asm_amd64p32.s  58
-rw-r--r--  src/runtime/asm_arm.s  24
-rw-r--r--  src/runtime/asm_power64x.s  981
-rw-r--r--  src/runtime/atomic_power64x.s  40
-rw-r--r--  src/runtime/debug/stubs.s  6
-rw-r--r--  src/runtime/defs1_linux.go  6
-rw-r--r--  src/runtime/defs3_linux.go  43
-rw-r--r--  src/runtime/defs_linux.go  11
-rw-r--r--  src/runtime/defs_linux_power64.h  204
-rw-r--r--  src/runtime/defs_linux_power64le.h  204
-rw-r--r--  src/runtime/gcinfo_test.go  6
-rw-r--r--  src/runtime/malloc.go  10
-rw-r--r--  src/runtime/mem_linux.c  16
-rw-r--r--  src/runtime/memclr_386.s  46
-rw-r--r--  src/runtime/memclr_amd64.s  44
-rw-r--r--  src/runtime/memclr_plan9_386.s  24
-rw-r--r--  src/runtime/memclr_power64x.s  20
-rw-r--r--  src/runtime/memmove_power64x.s  40
-rw-r--r--  src/runtime/mgc0.c  1
-rw-r--r--  src/runtime/noasm.go (renamed from src/runtime/noasm_arm.go)  2
-rw-r--r--  src/runtime/os_linux.c  37
-rw-r--r--  src/runtime/panic.c  2
-rw-r--r--  src/runtime/panic.go  4
-rw-r--r--  src/runtime/proc.c  4
-rw-r--r--  src/runtime/race_amd64.s  18
-rw-r--r--  src/runtime/rt0_linux_power64.s  17
-rw-r--r--  src/runtime/rt0_linux_power64le.s  14
-rw-r--r--  src/runtime/runtime.c  6
-rw-r--r--  src/runtime/signal_linux_power64.h  49
-rw-r--r--  src/runtime/signal_linux_power64le.h  49
-rw-r--r--  src/runtime/signal_power64x.c  137
-rw-r--r--  src/runtime/string.go  4
-rw-r--r--  src/runtime/sys_darwin_386.s  4
-rw-r--r--  src/runtime/sys_darwin_amd64.s  4
-rw-r--r--  src/runtime/sys_dragonfly_386.s  4
-rw-r--r--  src/runtime/sys_freebsd_386.s  4
-rw-r--r--  src/runtime/sys_linux_amd64.s  8
-rw-r--r--  src/runtime/sys_linux_arm.s  8
-rw-r--r--  src/runtime/sys_linux_power64x.s  383
-rw-r--r--  src/runtime/sys_nacl_386.s  4
-rw-r--r--  src/runtime/sys_nacl_amd64p32.s  1
-rw-r--r--  src/runtime/sys_nacl_arm.s  1
-rw-r--r--  src/runtime/sys_openbsd_386.s  4
-rw-r--r--  src/runtime/sys_power64x.c  38
-rw-r--r--  src/runtime/sys_solaris_amd64.s  12
-rw-r--r--  src/runtime/sys_windows_386.s  12
-rw-r--r--  src/runtime/sys_windows_amd64.s  12
-rw-r--r--  src/runtime/thunk.s  6
54 files changed, 2560 insertions, 252 deletions
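
Much of the new power64 assembly below (asm_power64x.s, atomic_power64x.s)
implements the runtime's atomics as SYNC/LWAR/STWCCC/ISYNC loops, and the
patch documents runtime·cas with C-style pseudocode ("if(*val == old){ *val =
new; return 1; }"). As a reference for that contract only (ordinary Go code
reaches it through sync/atomic rather than by calling runtime·cas), a small
hedged sketch:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    func main() {
    	var val uint32 = 5

    	// Succeeds: val holds the expected old value 5, so it is swapped to 9.
    	fmt.Println(atomic.CompareAndSwapUint32(&val, 5, 9)) // true

    	// Fails: val no longer holds 5, so it is left unchanged at 9.
    	fmt.Println(atomic.CompareAndSwapUint32(&val, 5, 7)) // false
    }
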
diff --git a/src/runtime/arch_power64.go b/src/runtime/arch_power64.go
new file mode 100644
index 000000000..270cd7b95
--- /dev/null
+++ b/src/runtime/arch_power64.go
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+type uintreg uint64
+type intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/arch_power64.h b/src/runtime/arch_power64.h
new file mode 100644
index 000000000..7cfb9da2f
--- /dev/null
+++ b/src/runtime/arch_power64.h
@@ -0,0 +1,14 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+enum {
+ thechar = '9',
+ BigEndian = 1,
+ CacheLineSize = 64,
+ RuntimeGogoBytes = 64,
+ PhysPageSize = 65536,
+ PCQuantum = 4,
+ Int64Align = 8
+};
+
diff --git a/src/runtime/arch_power64le.go b/src/runtime/arch_power64le.go
new file mode 100644
index 000000000..270cd7b95
--- /dev/null
+++ b/src/runtime/arch_power64le.go
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+type uintreg uint64
+type intptr int64 // TODO(rsc): remove
diff --git a/src/runtime/arch_power64le.h b/src/runtime/arch_power64le.h
new file mode 100644
index 000000000..684ac9953
--- /dev/null
+++ b/src/runtime/arch_power64le.h
@@ -0,0 +1,14 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+enum {
+ thechar = '9',
+ BigEndian = 0,
+ CacheLineSize = 64,
+ RuntimeGogoBytes = 64,
+ PhysPageSize = 65536,
+ PCQuantum = 4,
+ Int64Align = 8
+};
+
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index 0d46a9eff..2d102b273 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -486,11 +486,11 @@ TEXT runtime·cas64(SB), NOSPLIT, $0-21
MOVL new_hi+16(FP), CX
LOCK
CMPXCHG8B 0(BP)
- JNZ cas64_fail
+ JNZ fail
MOVL $1, AX
MOVB AX, ret+20(FP)
RET
-cas64_fail:
+fail:
MOVL $0, AX
MOVB AX, ret+20(FP)
RET
@@ -1356,29 +1356,29 @@ TEXT strings·IndexByte(SB),NOSPLIT,$0
// AX = 1/0/-1
TEXT runtime·cmpbody(SB),NOSPLIT,$0-0
CMPL SI, DI
- JEQ cmp_allsame
+ JEQ allsame
CMPL BX, DX
MOVL DX, BP
CMOVLLT BX, BP // BP = min(alen, blen)
CMPL BP, $4
- JB cmp_small
+ JB small
TESTL $0x4000000, runtime·cpuid_edx(SB) // check for sse2
- JE cmp_mediumloop
-cmp_largeloop:
+ JE mediumloop
+largeloop:
CMPL BP, $16
- JB cmp_mediumloop
+ JB mediumloop
MOVOU (SI), X0
MOVOU (DI), X1
PCMPEQB X0, X1
PMOVMSKB X1, AX
XORL $0xffff, AX // convert EQ to NE
- JNE cmp_diff16 // branch if at least one byte is not equal
+ JNE diff16 // branch if at least one byte is not equal
ADDL $16, SI
ADDL $16, DI
SUBL $16, BP
- JMP cmp_largeloop
+ JMP largeloop
-cmp_diff16:
+diff16:
BSFL AX, BX // index of first byte that differs
XORL AX, AX
MOVB (SI)(BX*1), CX
@@ -1387,25 +1387,25 @@ cmp_diff16:
LEAL -1(AX*2), AX // convert 1/0 to +1/-1
RET
-cmp_mediumloop:
+mediumloop:
CMPL BP, $4
- JBE cmp_0through4
+ JBE _0through4
MOVL (SI), AX
MOVL (DI), CX
CMPL AX, CX
- JNE cmp_diff4
+ JNE diff4
ADDL $4, SI
ADDL $4, DI
SUBL $4, BP
- JMP cmp_mediumloop
+ JMP mediumloop
-cmp_0through4:
+_0through4:
MOVL -4(SI)(BP*1), AX
MOVL -4(DI)(BP*1), CX
CMPL AX, CX
- JEQ cmp_allsame
+ JEQ allsame
-cmp_diff4:
+diff4:
BSWAPL AX // reverse order of bytes
BSWAPL CX
XORL AX, CX // find bit differences
@@ -1416,37 +1416,37 @@ cmp_diff4:
RET
// 0-3 bytes in common
-cmp_small:
+small:
LEAL (BP*8), CX
NEGL CX
- JEQ cmp_allsame
+ JEQ allsame
// load si
CMPB SI, $0xfc
- JA cmp_si_high
+ JA si_high
MOVL (SI), SI
- JMP cmp_si_finish
-cmp_si_high:
+ JMP si_finish
+si_high:
MOVL -4(SI)(BP*1), SI
SHRL CX, SI
-cmp_si_finish:
+si_finish:
SHLL CX, SI
// same for di
CMPB DI, $0xfc
- JA cmp_di_high
+ JA di_high
MOVL (DI), DI
- JMP cmp_di_finish
-cmp_di_high:
+ JMP di_finish
+di_high:
MOVL -4(DI)(BP*1), DI
SHRL CX, DI
-cmp_di_finish:
+di_finish:
SHLL CX, DI
BSWAPL SI // reverse order of bytes
BSWAPL DI
XORL SI, DI // find bit differences
- JEQ cmp_allsame
+ JEQ allsame
BSRL DI, CX // index of highest bit difference
SHRL CX, SI // move a's bit to bottom
ANDL $1, SI // mask bit
@@ -1455,7 +1455,7 @@ cmp_di_finish:
// all the bytes in common are the same, so we just need
// to compare the lengths.
-cmp_allsame:
+allsame:
XORL AX, AX
XORL CX, CX
CMPL BX, DX
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index a9b082beb..ac9c58cf3 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -461,11 +461,11 @@ TEXT runtime·cas64(SB), NOSPLIT, $0-25
MOVQ new+16(FP), CX
LOCK
CMPXCHGQ CX, 0(BX)
- JNZ cas64_fail
+ JNZ fail
MOVL $1, AX
MOVB AX, ret+24(FP)
RET
-cas64_fail:
+fail:
MOVL $0, AX
MOVB AX, ret+24(FP)
RET
@@ -890,24 +890,24 @@ TEXT runtime·aeshashbody(SB),NOSPLIT,$0-32
MOVO runtime·aeskeysched+0(SB), X2
MOVO runtime·aeskeysched+16(SB), X3
CMPQ CX, $16
- JB aessmall
-aesloop:
+ JB small
+loop:
CMPQ CX, $16
- JBE aesloopend
+ JBE loopend
MOVOU (AX), X1
AESENC X2, X0
AESENC X1, X0
SUBQ $16, CX
ADDQ $16, AX
- JMP aesloop
+ JMP loop
// 1-16 bytes remaining
-aesloopend:
+loopend:
// This load may overlap with the previous load above.
// We'll hash some bytes twice, but that's ok.
MOVOU -16(AX)(CX*1), X1
JMP partial
// 0-15 bytes
-aessmall:
+small:
TESTQ CX, CX
JE finalize // 0 bytes
@@ -1050,18 +1050,18 @@ TEXT runtime·eqstring(SB),NOSPLIT,$0-33
MOVQ s1len+8(FP), AX
MOVQ s2len+24(FP), BX
CMPQ AX, BX
- JNE different
+ JNE noteq
MOVQ s1str+0(FP), SI
MOVQ s2str+16(FP), DI
CMPQ SI, DI
- JEQ same
+ JEQ eq
CALL runtime·memeqbody(SB)
MOVB AX, v+32(FP)
RET
-same:
+eq:
MOVB $1, v+32(FP)
RET
-different:
+noteq:
MOVB $0, v+32(FP)
RET
@@ -1184,29 +1184,29 @@ TEXT runtime·cmpbytes(SB),NOSPLIT,$0-56
// AX = 1/0/-1
TEXT runtime·cmpbody(SB),NOSPLIT,$0-0
CMPQ SI, DI
- JEQ cmp_allsame
+ JEQ allsame
CMPQ BX, DX
MOVQ DX, BP
CMOVQLT BX, BP // BP = min(alen, blen) = # of bytes to compare
CMPQ BP, $8
- JB cmp_small
+ JB small
-cmp_loop:
+loop:
CMPQ BP, $16
- JBE cmp_0through16
+ JBE _0through16
MOVOU (SI), X0
MOVOU (DI), X1
PCMPEQB X0, X1
PMOVMSKB X1, AX
XORQ $0xffff, AX // convert EQ to NE
- JNE cmp_diff16 // branch if at least one byte is not equal
+ JNE diff16 // branch if at least one byte is not equal
ADDQ $16, SI
ADDQ $16, DI
SUBQ $16, BP
- JMP cmp_loop
+ JMP loop
// AX = bit mask of differences
-cmp_diff16:
+diff16:
BSFQ AX, BX // index of first byte that differs
XORQ AX, AX
MOVB (SI)(BX*1), CX
@@ -1216,21 +1216,21 @@ cmp_diff16:
RET
// 0 through 16 bytes left, alen>=8, blen>=8
-cmp_0through16:
+_0through16:
CMPQ BP, $8
- JBE cmp_0through8
+ JBE _0through8
MOVQ (SI), AX
MOVQ (DI), CX
CMPQ AX, CX
- JNE cmp_diff8
-cmp_0through8:
+ JNE diff8
+_0through8:
MOVQ -8(SI)(BP*1), AX
MOVQ -8(DI)(BP*1), CX
CMPQ AX, CX
- JEQ cmp_allsame
+ JEQ allsame
// AX and CX contain parts of a and b that differ.
-cmp_diff8:
+diff8:
BSWAPQ AX // reverse order of bytes
BSWAPQ CX
XORQ AX, CX
@@ -1241,44 +1241,44 @@ cmp_diff8:
RET
// 0-7 bytes in common
-cmp_small:
+small:
LEAQ (BP*8), CX // bytes left -> bits left
NEGQ CX // - bits lift (== 64 - bits left mod 64)
- JEQ cmp_allsame
+ JEQ allsame
// load bytes of a into high bytes of AX
CMPB SI, $0xf8
- JA cmp_si_high
+ JA si_high
MOVQ (SI), SI
- JMP cmp_si_finish
-cmp_si_high:
+ JMP si_finish
+si_high:
MOVQ -8(SI)(BP*1), SI
SHRQ CX, SI
-cmp_si_finish:
+si_finish:
SHLQ CX, SI
// load bytes of b in to high bytes of BX
CMPB DI, $0xf8
- JA cmp_di_high
+ JA di_high
MOVQ (DI), DI
- JMP cmp_di_finish
-cmp_di_high:
+ JMP di_finish
+di_high:
MOVQ -8(DI)(BP*1), DI
SHRQ CX, DI
-cmp_di_finish:
+di_finish:
SHLQ CX, DI
BSWAPQ SI // reverse order of bytes
BSWAPQ DI
XORQ SI, DI // find bit differences
- JEQ cmp_allsame
+ JEQ allsame
BSRQ DI, CX // index of highest bit difference
SHRQ CX, SI // move a's bit to bottom
ANDQ $1, SI // mask bit
LEAQ -1(SI*2), AX // 1/0 => +1/-1
RET
-cmp_allsame:
+allsame:
XORQ AX, AX
XORQ CX, CX
CMPQ BX, DX
@@ -1313,7 +1313,7 @@ TEXT runtime·indexbytebody(SB),NOSPLIT,$0
MOVQ SI, DI
CMPQ BX, $16
- JLT indexbyte_small
+ JLT small
// round up to first 16-byte boundary
TESTQ $15, SI
@@ -1371,7 +1371,7 @@ failure:
RET
// handle for lengths < 16
-indexbyte_small:
+small:
MOVQ BX, CX
REPN; SCASB
JZ success
diff --git a/src/runtime/asm_amd64p32.s b/src/runtime/asm_amd64p32.s
index 28875bc55..de3ef3a23 100644
--- a/src/runtime/asm_amd64p32.s
+++ b/src/runtime/asm_amd64p32.s
@@ -444,11 +444,11 @@ TEXT runtime·cas64(SB), NOSPLIT, $0-25
MOVQ new+16(FP), CX
LOCK
CMPXCHGQ CX, 0(BX)
- JNZ cas64_fail
+ JNZ fail
MOVL $1, AX
MOVB AX, ret+24(FP)
RET
-cas64_fail:
+fail:
MOVL $0, AX
MOVB AX, ret+24(FP)
RET
@@ -834,29 +834,29 @@ TEXT runtime·cmpbytes(SB),NOSPLIT,$0-28
// AX = 1/0/-1
TEXT runtime·cmpbody(SB),NOSPLIT,$0-0
CMPQ SI, DI
- JEQ cmp_allsame
+ JEQ allsame
CMPQ BX, DX
MOVQ DX, R8
CMOVQLT BX, R8 // R8 = min(alen, blen) = # of bytes to compare
CMPQ R8, $8
- JB cmp_small
+ JB small
-cmp_loop:
+loop:
CMPQ R8, $16
- JBE cmp_0through16
+ JBE _0through16
MOVOU (SI), X0
MOVOU (DI), X1
PCMPEQB X0, X1
PMOVMSKB X1, AX
XORQ $0xffff, AX // convert EQ to NE
- JNE cmp_diff16 // branch if at least one byte is not equal
+ JNE diff16 // branch if at least one byte is not equal
ADDQ $16, SI
ADDQ $16, DI
SUBQ $16, R8
- JMP cmp_loop
+ JMP loop
// AX = bit mask of differences
-cmp_diff16:
+diff16:
BSFQ AX, BX // index of first byte that differs
XORQ AX, AX
ADDQ BX, SI
@@ -868,23 +868,23 @@ cmp_diff16:
RET
// 0 through 16 bytes left, alen>=8, blen>=8
-cmp_0through16:
+_0through16:
CMPQ R8, $8
- JBE cmp_0through8
+ JBE _0through8
MOVQ (SI), AX
MOVQ (DI), CX
CMPQ AX, CX
- JNE cmp_diff8
-cmp_0through8:
+ JNE diff8
+_0through8:
ADDQ R8, SI
ADDQ R8, DI
MOVQ -8(SI), AX
MOVQ -8(DI), CX
CMPQ AX, CX
- JEQ cmp_allsame
+ JEQ allsame
// AX and CX contain parts of a and b that differ.
-cmp_diff8:
+diff8:
BSWAPQ AX // reverse order of bytes
BSWAPQ CX
XORQ AX, CX
@@ -895,46 +895,46 @@ cmp_diff8:
RET
// 0-7 bytes in common
-cmp_small:
+small:
LEAQ (R8*8), CX // bytes left -> bits left
NEGQ CX // - bits lift (== 64 - bits left mod 64)
- JEQ cmp_allsame
+ JEQ allsame
// load bytes of a into high bytes of AX
CMPB SI, $0xf8
- JA cmp_si_high
+ JA si_high
MOVQ (SI), SI
- JMP cmp_si_finish
-cmp_si_high:
+ JMP si_finish
+si_high:
ADDQ R8, SI
MOVQ -8(SI), SI
SHRQ CX, SI
-cmp_si_finish:
+si_finish:
SHLQ CX, SI
// load bytes of b in to high bytes of BX
CMPB DI, $0xf8
- JA cmp_di_high
+ JA di_high
MOVQ (DI), DI
- JMP cmp_di_finish
-cmp_di_high:
+ JMP di_finish
+di_high:
ADDQ R8, DI
MOVQ -8(DI), DI
SHRQ CX, DI
-cmp_di_finish:
+di_finish:
SHLQ CX, DI
BSWAPQ SI // reverse order of bytes
BSWAPQ DI
XORQ SI, DI // find bit differences
- JEQ cmp_allsame
+ JEQ allsame
BSRQ DI, CX // index of highest bit difference
SHRQ CX, SI // move a's bit to bottom
ANDQ $1, SI // mask bit
LEAQ -1(SI*2), AX // 1/0 => +1/-1
RET
-cmp_allsame:
+allsame:
XORQ AX, AX
XORQ CX, CX
CMPQ BX, DX
@@ -969,7 +969,7 @@ TEXT runtime·indexbytebody(SB),NOSPLIT,$0
MOVL SI, DI
CMPL BX, $16
- JLT indexbyte_small
+ JLT small
// round up to first 16-byte boundary
TESTL $15, SI
@@ -1027,7 +1027,7 @@ failure:
RET
// handle for lengths < 16
-indexbyte_small:
+small:
MOVL BX, CX
REPN; SCASB
JZ success
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index e94b4c1ff..9a58fdc51 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -492,7 +492,7 @@ TEXT asmcgocall<>(SB),NOSPLIT,$0-0
MOVW g_m(g), R8
MOVW m_g0(R8), R3
CMP R3, g
- BEQ asmcgocall_g0
+ BEQ g0
BL gosave<>(SB)
MOVW R0, R5
MOVW R3, R0
@@ -501,7 +501,7 @@ TEXT asmcgocall<>(SB),NOSPLIT,$0-0
MOVW (g_sched+gobuf_sp)(g), R13
// Now on a scheduling stack (a pthread-created stack).
-asmcgocall_g0:
+g0:
SUB $24, R13
BIC $0x7, R13 // alignment for gcc ABI
MOVW R4, 20(R13) // save old g
@@ -751,13 +751,13 @@ TEXT runtime·memeq(SB),NOSPLIT,$-4-13
ADD R1, R3, R6
MOVW $1, R0
MOVB R0, ret+12(FP)
-_next2:
+loop:
CMP R1, R6
RET.EQ
MOVBU.P 1(R1), R4
MOVBU.P 1(R2), R5
CMP R4, R5
- BEQ _next2
+ BEQ loop
MOVW $0, R0
MOVB R0, ret+12(FP)
@@ -780,13 +780,13 @@ TEXT runtime·eqstring(SB),NOSPLIT,$-4-17
CMP R2, R3
RET.EQ
ADD R2, R0, R6
-_eqnext:
+loop:
CMP R2, R6
RET.EQ
MOVBU.P 1(R2), R4
MOVBU.P 1(R3), R5
CMP R4, R5
- BEQ _eqnext
+ BEQ loop
MOVB R7, v+16(FP)
RET
@@ -801,26 +801,26 @@ TEXT bytes·Equal(SB),NOSPLIT,$0
MOVW b_len+16(FP), R3
CMP R1, R3 // unequal lengths are not equal
- B.NE _notequal
+ B.NE notequal
MOVW a+0(FP), R0
MOVW b+12(FP), R2
ADD R0, R1 // end
-_byteseq_next:
+loop:
CMP R0, R1
- B.EQ _equal // reached the end
+ B.EQ equal // reached the end
MOVBU.P 1(R0), R4
MOVBU.P 1(R2), R5
CMP R4, R5
- B.EQ _byteseq_next
+ B.EQ loop
-_notequal:
+notequal:
MOVW $0, R0
MOVBU R0, ret+24(FP)
RET
-_equal:
+equal:
MOVW $1, R0
MOVBU R0, ret+24(FP)
RET
diff --git a/src/runtime/asm_power64x.s b/src/runtime/asm_power64x.s
new file mode 100644
index 000000000..f77658032
--- /dev/null
+++ b/src/runtime/asm_power64x.s
@@ -0,0 +1,981 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build power64 power64le
+
+#include "zasm_GOOS_GOARCH.h"
+#include "funcdata.h"
+#include "textflag.h"
+
+TEXT runtime·rt0_go(SB),NOSPLIT,$0
+ // initialize essential registers
+ BL runtime·reginit(SB)
+
+ SUB $24, R1
+ MOVW R3, 8(R1) // argc
+ MOVD R4, 16(R1) // argv
+
+ // create istack out of the given (operating system) stack.
+ // _cgo_init may update stackguard.
+ MOVD $runtime·g0(SB), g
+ MOVD $(-64*1024), R31
+ ADD R31, R1, R3
+ MOVD R3, g_stackguard0(g)
+ MOVD R3, g_stackguard1(g)
+ MOVD R3, (g_stack+stack_lo)(g)
+ MOVD R1, (g_stack+stack_hi)(g)
+
+ // TODO: if there is a _cgo_init, call it.
+ // TODO: add TLS
+
+ // set the per-goroutine and per-mach "registers"
+ MOVD $runtime·m0(SB), R3
+
+ // save m->g0 = g0
+ MOVD g, m_g0(R3)
+ // save m0 to g0->m
+ MOVD R3, g_m(g)
+
+ BL runtime·check(SB)
+
+ // args are already prepared
+ BL runtime·args(SB)
+ BL runtime·osinit(SB)
+ BL runtime·schedinit(SB)
+
+ // create a new goroutine to start program
+ MOVD $runtime·main·f(SB), R3 // entry
+ MOVDU R3, -8(R1)
+ MOVDU R0, -8(R1)
+ MOVDU R0, -8(R1)
+ BL runtime·newproc(SB)
+ ADD $24, R1
+
+ // start this M
+ BL runtime·mstart(SB)
+
+ MOVD R0, 1(R0)
+ RETURN
+
+DATA runtime·main·f+0(SB)/8,$runtime·main(SB)
+GLOBL runtime·main·f(SB),RODATA,$8
+
+TEXT runtime·breakpoint(SB),NOSPLIT,$-8-0
+ MOVD R0, 2(R0) // TODO: TD
+ RETURN
+
+TEXT runtime·asminit(SB),NOSPLIT,$-8-0
+ RETURN
+
+TEXT runtime·reginit(SB),NOSPLIT,$-8-0
+ // set R0 to zero, it's expected by the toolchain
+ XOR R0, R0
+ // initialize essential FP registers
+ FMOVD $4503601774854144.0, F27
+ FMOVD $0.5, F29
+ FSUB F29, F29, F28
+ FADD F29, F29, F30
+ FADD F30, F30, F31
+ RETURN
+
+/*
+ * go-routine
+ */
+
+// void gosave(Gobuf*)
+// save state in Gobuf; setjmp
+TEXT runtime·gosave(SB), NOSPLIT, $-8-8
+ MOVD gobuf+0(FP), R3
+ MOVD R1, gobuf_sp(R3)
+ MOVD LR, R31
+ MOVD R31, gobuf_pc(R3)
+ MOVD g, gobuf_g(R3)
+ MOVD R0, gobuf_lr(R3)
+ MOVD R0, gobuf_ret(R3)
+ MOVD R0, gobuf_ctxt(R3)
+ RETURN
+
+// void gogo(Gobuf*)
+// restore state from Gobuf; longjmp
+TEXT runtime·gogo(SB), NOSPLIT, $-8-8
+ MOVD gobuf+0(FP), R5
+ MOVD gobuf_g(R5), g // make sure g is not nil
+ MOVD 0(g), R4
+ MOVD gobuf_sp(R5), R1
+ MOVD gobuf_lr(R5), R31
+ MOVD R31, LR
+ MOVD gobuf_ret(R5), R3
+ MOVD gobuf_ctxt(R5), R11
+ MOVD R0, gobuf_sp(R5)
+ MOVD R0, gobuf_ret(R5)
+ MOVD R0, gobuf_lr(R5)
+ MOVD R0, gobuf_ctxt(R5)
+ CMP R0, R0 // set condition codes for == test, needed by stack split
+ MOVD gobuf_pc(R5), R31
+ MOVD R31, CTR
+ BR (CTR)
+
+// void mcall(fn func(*g))
+// Switch to m->g0's stack, call fn(g).
+// Fn must never return. It should gogo(&g->sched)
+// to keep running g.
+TEXT runtime·mcall(SB), NOSPLIT, $-8-8
+ // Save caller state in g->sched
+ MOVD R1, (g_sched+gobuf_sp)(g)
+ MOVD LR, R31
+ MOVD R31, (g_sched+gobuf_pc)(g)
+ MOVD R0, (g_sched+gobuf_lr)(g)
+ MOVD g, (g_sched+gobuf_g)(g)
+
+ // Switch to m->g0 & its stack, call fn.
+ MOVD g, R3
+ MOVD g_m(g), R8
+ MOVD m_g0(R8), g
+ CMP g, R3
+ BNE 2(PC)
+ BR runtime·badmcall(SB)
+ MOVD fn+0(FP), R11 // context
+ MOVD 0(R11), R4 // code pointer
+ MOVD R4, CTR
+ MOVD (g_sched+gobuf_sp)(g), R1 // sp = m->g0->sched.sp
+ MOVDU R3, -8(R1)
+ MOVDU R0, -8(R1)
+ BL (CTR)
+ BR runtime·badmcall2(SB)
+
+// switchtoM is a dummy routine that onM leaves at the bottom
+// of the G stack. We need to distinguish the routine that
+// lives at the bottom of the G stack from the one that lives
+// at the top of the M stack because the one at the top of
+// the M stack terminates the stack walk (see topofstack()).
+TEXT runtime·switchtoM(SB), NOSPLIT, $0-0
+ UNDEF
+ BL (LR) // make sure this function is not leaf
+ RETURN
+
+// func onM_signalok(fn func())
+TEXT runtime·onM_signalok(SB), NOSPLIT, $8-8
+ MOVD g, R3 // R3 = g
+ MOVD g_m(R3), R4 // R4 = g->m
+ MOVD m_gsignal(R4), R4 // R4 = g->m->gsignal
+ MOVD fn+0(FP), R11 // context for call below
+ CMP R3, R4
+ BEQ onsignal
+ MOVD R11, 8(R1)
+ BL runtime·onM(SB)
+ RETURN
+
+onsignal:
+ MOVD 0(R11), R3 // code pointer
+ MOVD R3, CTR
+ BL (CTR)
+ RETURN
+
+// void onM(fn func())
+TEXT runtime·onM(SB), NOSPLIT, $0-8
+ MOVD fn+0(FP), R3 // R3 = fn
+ MOVD R3, R11 // context
+ MOVD g_m(g), R4 // R4 = m
+
+ MOVD m_g0(R4), R5 // R5 = g0
+ CMP g, R5
+ BEQ onm
+
+ MOVD m_curg(R4), R6
+ CMP g, R6
+ BEQ oncurg
+
+ // Not g0, not curg. Must be gsignal, but that's not allowed.
+ // Hide call from linker nosplit analysis.
+ MOVD $runtime·badonm(SB), R3
+ MOVD R3, CTR
+ BL (CTR)
+
+oncurg:
+ // save our state in g->sched. Pretend to
+ // be switchtoM if the G stack is scanned.
+ MOVD $runtime·switchtoM(SB), R6
+ ADD $8, R6 // get past prologue
+ MOVD R6, (g_sched+gobuf_pc)(g)
+ MOVD R1, (g_sched+gobuf_sp)(g)
+ MOVD R0, (g_sched+gobuf_lr)(g)
+ MOVD g, (g_sched+gobuf_g)(g)
+
+ // switch to g0
+ MOVD R5, g
+ MOVD (g_sched+gobuf_sp)(g), R3
+ // make it look like mstart called onM on g0, to stop traceback
+ SUB $8, R3
+ MOVD $runtime·mstart(SB), R4
+ MOVD R4, 0(R3)
+ MOVD R3, R1
+
+ // call target function
+ MOVD 0(R11), R3 // code pointer
+ MOVD R3, CTR
+ BL (CTR)
+
+ // switch back to g
+ MOVD g_m(g), R3
+ MOVD m_curg(R3), g
+ MOVD (g_sched+gobuf_sp)(g), R1
+ MOVD R0, (g_sched+gobuf_sp)(g)
+ RETURN
+
+onm:
+ // already on m stack, just call directly
+ MOVD 0(R11), R3 // code pointer
+ MOVD R3, CTR
+ BL (CTR)
+ RETURN
+
+/*
+ * support for morestack
+ */
+
+// Called during function prolog when more stack is needed.
+// Caller has already loaded:
+// R3: framesize, R4: argsize, R5: LR
+//
+// The traceback routines see morestack on a g0 as being
+// the top of a stack (for example, morestack calling newstack
+// calling the scheduler calling newm calling gc), so we must
+// record an argument size. For that purpose, it has no arguments.
+TEXT runtime·morestack(SB),NOSPLIT,$-8-0
+ // Cannot grow scheduler stack (m->g0).
+ MOVD g_m(g), R7
+ MOVD m_g0(R7), R8
+ CMP g, R8
+ BNE 2(PC)
+ BL runtime·abort(SB)
+
+ // Cannot grow signal stack (m->gsignal).
+ MOVD m_gsignal(R7), R8
+ CMP g, R8
+ BNE 2(PC)
+ BL runtime·abort(SB)
+
+ // Called from f.
+ // Set g->sched to context in f.
+ MOVD R11, (g_sched+gobuf_ctxt)(g)
+ MOVD R1, (g_sched+gobuf_sp)(g)
+ MOVD LR, R8
+ MOVD R8, (g_sched+gobuf_pc)(g)
+ MOVD R5, (g_sched+gobuf_lr)(g)
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ MOVD R5, (m_morebuf+gobuf_pc)(R7) // f's caller's PC
+ MOVD R1, (m_morebuf+gobuf_sp)(R7) // f's caller's SP
+ MOVD g, (m_morebuf+gobuf_g)(R7)
+
+ // Call newstack on m->g0's stack.
+ MOVD m_g0(R7), g
+ MOVD (g_sched+gobuf_sp)(g), R1
+ BL runtime·newstack(SB)
+
+ // Not reached, but make sure the return PC from the call to newstack
+ // is still in this function, and not the beginning of the next.
+ UNDEF
+
+TEXT runtime·morestack_noctxt(SB),NOSPLIT,$-8-0
+ MOVD R0, R11
+ BR runtime·morestack(SB)
+
+// reflectcall: call a function with the given argument list
+// func call(f *FuncVal, arg *byte, argsize, retoffset uint32).
+// we don't have variable-sized frames, so we use a small number
+// of constant-sized-frame functions to encode a few bits of size in the pc.
+// Caution: ugly multiline assembly macros in your future!
+
+#define DISPATCH(NAME,MAXSIZE) \
+ MOVD $MAXSIZE, R31; \
+ CMP R3, R31; \
+ BGT 4(PC); \
+ MOVD $NAME(SB), R31; \
+ MOVD R31, CTR; \
+ BR (CTR)
+// Note: can't just "BR NAME(SB)" - bad inlining results.
+
+TEXT ·reflectcall(SB), NOSPLIT, $-8-24
+ MOVW argsize+16(FP), R3
+ DISPATCH(runtime·call16, 16)
+ DISPATCH(runtime·call32, 32)
+ DISPATCH(runtime·call64, 64)
+ DISPATCH(runtime·call128, 128)
+ DISPATCH(runtime·call256, 256)
+ DISPATCH(runtime·call512, 512)
+ DISPATCH(runtime·call1024, 1024)
+ DISPATCH(runtime·call2048, 2048)
+ DISPATCH(runtime·call4096, 4096)
+ DISPATCH(runtime·call8192, 8192)
+ DISPATCH(runtime·call16384, 16384)
+ DISPATCH(runtime·call32768, 32768)
+ DISPATCH(runtime·call65536, 65536)
+ DISPATCH(runtime·call131072, 131072)
+ DISPATCH(runtime·call262144, 262144)
+ DISPATCH(runtime·call524288, 524288)
+ DISPATCH(runtime·call1048576, 1048576)
+ DISPATCH(runtime·call2097152, 2097152)
+ DISPATCH(runtime·call4194304, 4194304)
+ DISPATCH(runtime·call8388608, 8388608)
+ DISPATCH(runtime·call16777216, 16777216)
+ DISPATCH(runtime·call33554432, 33554432)
+ DISPATCH(runtime·call67108864, 67108864)
+ DISPATCH(runtime·call134217728, 134217728)
+ DISPATCH(runtime·call268435456, 268435456)
+ DISPATCH(runtime·call536870912, 536870912)
+ DISPATCH(runtime·call1073741824, 1073741824)
+ MOVD $runtime·badreflectcall(SB), R31
+ MOVD R31, CTR
+ BR (CTR)
+
+#define CALLFN(NAME,MAXSIZE) \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+ NO_LOCAL_POINTERS; \
+ /* copy arguments to stack */ \
+ MOVD argptr+8(FP), R3; \
+ MOVW argsize+16(FP), R4; \
+ MOVD R1, R5; \
+ ADD $(8-1), R5; \
+ SUB $1, R3; \
+ ADD R5, R4; \
+ CMP R5, R4; \
+ BEQ 4(PC); \
+ MOVBZU 1(R3), R6; \
+ MOVBZU R6, 1(R5); \
+ BR -4(PC); \
+ /* call function */ \
+ MOVD f+0(FP), R11; \
+ MOVD (R11), R31; \
+ MOVD R31, CTR; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
+ BL (CTR); \
+ /* copy return values back */ \
+ MOVD argptr+8(FP), R3; \
+ MOVW argsize+16(FP), R4; \
+ MOVW retoffset+20(FP), R6; \
+ MOVD R1, R5; \
+ ADD R6, R5; \
+ ADD R6, R3; \
+ SUB R6, R4; \
+ ADD $(8-1), R5; \
+ SUB $1, R3; \
+ ADD R5, R4; \
+ CMP R5, R4; \
+ BEQ 4(PC); \
+ MOVBZU 1(R5), R6; \
+ MOVBZU R6, 1(R3); \
+ BR -4(PC); \
+ RETURN
+
+CALLFN(·call16, 16)
+CALLFN(·call32, 32)
+CALLFN(·call64, 64)
+CALLFN(·call128, 128)
+CALLFN(·call256, 256)
+CALLFN(·call512, 512)
+CALLFN(·call1024, 1024)
+CALLFN(·call2048, 2048)
+CALLFN(·call4096, 4096)
+CALLFN(·call8192, 8192)
+CALLFN(·call16384, 16384)
+CALLFN(·call32768, 32768)
+CALLFN(·call65536, 65536)
+CALLFN(·call131072, 131072)
+CALLFN(·call262144, 262144)
+CALLFN(·call524288, 524288)
+CALLFN(·call1048576, 1048576)
+CALLFN(·call2097152, 2097152)
+CALLFN(·call4194304, 4194304)
+CALLFN(·call8388608, 8388608)
+CALLFN(·call16777216, 16777216)
+CALLFN(·call33554432, 33554432)
+CALLFN(·call67108864, 67108864)
+CALLFN(·call134217728, 134217728)
+CALLFN(·call268435456, 268435456)
+CALLFN(·call536870912, 536870912)
+CALLFN(·call1073741824, 1073741824)
+
+// bool cas(int32 *val, int32 old, int32 new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime·cas(SB), NOSPLIT, $0-17
+ MOVD p+0(FP), R3
+ MOVW old+8(FP), R4
+ MOVW new+12(FP), R5
+cas_again:
+ SYNC
+ LWAR (R3), R6
+ CMPW R6, R4
+ BNE cas_fail
+ STWCCC R5, (R3)
+ BNE cas_again
+ MOVD $1, R3
+ SYNC
+ ISYNC
+ MOVB R3, ret+16(FP)
+ RETURN
+cas_fail:
+ MOVD $0, R3
+ BR -5(PC)
+
+// bool runtime·cas64(uint64 *val, uint64 old, uint64 new)
+// Atomically:
+// if(*val == *old){
+// *val = new;
+// return 1;
+// } else {
+// return 0;
+// }
+TEXT runtime·cas64(SB), NOSPLIT, $0-25
+ MOVD p+0(FP), R3
+ MOVD old+8(FP), R4
+ MOVD new+16(FP), R5
+cas64_again:
+ SYNC
+ LDAR (R3), R6
+ CMP R6, R4
+ BNE cas64_fail
+ STDCCC R5, (R3)
+ BNE cas64_again
+ MOVD $1, R3
+ SYNC
+ ISYNC
+ MOVB R3, ret+24(FP)
+ RETURN
+cas64_fail:
+ MOVD $0, R3
+ BR -5(PC)
+
+TEXT runtime·casuintptr(SB), NOSPLIT, $0-25
+ BR runtime·cas64(SB)
+
+TEXT runtime·atomicloaduintptr(SB), NOSPLIT, $-8-16
+ BR runtime·atomicload64(SB)
+
+TEXT runtime·atomicloaduint(SB), NOSPLIT, $-8-16
+ BR runtime·atomicload64(SB)
+
+TEXT runtime·atomicstoreuintptr(SB), NOSPLIT, $0-16
+ BR runtime·atomicstore64(SB)
+
+// bool casp(void **val, void *old, void *new)
+// Atomically:
+// if(*val == old){
+// *val = new;
+// return 1;
+// } else
+// return 0;
+TEXT runtime·casp(SB), NOSPLIT, $0-25
+ BR runtime·cas64(SB)
+
+// uint32 xadd(uint32 volatile *val, int32 delta)
+// Atomically:
+// *val += delta;
+// return *val;
+TEXT runtime·xadd(SB), NOSPLIT, $0-20
+ MOVD p+0(FP), R4
+ MOVW delta+8(FP), R5
+ SYNC
+ LWAR (R4), R3
+ ADD R5, R3
+ STWCCC R3, (R4)
+ BNE -4(PC)
+ SYNC
+ ISYNC
+ MOVW R3, ret+16(FP)
+ RETURN
+
+TEXT runtime·xadd64(SB), NOSPLIT, $0-24
+ MOVD p+0(FP), R4
+ MOVD delta+8(FP), R5
+ SYNC
+ LDAR (R4), R3
+ ADD R5, R3
+ STDCCC R3, (R4)
+ BNE -4(PC)
+ SYNC
+ ISYNC
+ MOVD R3, ret+16(FP)
+ RETURN
+
+TEXT runtime·xchg(SB), NOSPLIT, $0-20
+ MOVD p+0(FP), R4
+ MOVW new+8(FP), R5
+ SYNC
+ LWAR (R4), R3
+ STWCCC R5, (R4)
+ BNE -3(PC)
+ SYNC
+ ISYNC
+ MOVW R3, ret+16(FP)
+ RETURN
+
+TEXT runtime·xchg64(SB), NOSPLIT, $0-24
+ MOVD p+0(FP), R4
+ MOVD new+8(FP), R5
+ SYNC
+ LDAR (R4), R3
+ STDCCC R5, (R4)
+ BNE -3(PC)
+ SYNC
+ ISYNC
+ MOVD R3, ret+16(FP)
+ RETURN
+
+TEXT runtime·xchgp(SB), NOSPLIT, $0-24
+ BR runtime·xchg64(SB)
+
+TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
+ BR runtime·xchg64(SB)
+
+TEXT runtime·procyield(SB),NOSPLIT,$0-0
+ RETURN
+
+TEXT runtime·atomicstorep(SB), NOSPLIT, $0-16
+ BR runtime·atomicstore64(SB)
+
+TEXT runtime·atomicstore(SB), NOSPLIT, $0-12
+ MOVD ptr+0(FP), R3
+ MOVW val+8(FP), R4
+ SYNC
+ MOVW R4, 0(R3)
+ RETURN
+
+TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
+ MOVD ptr+0(FP), R3
+ MOVD val+8(FP), R4
+ SYNC
+ MOVD R4, 0(R3)
+ RETURN
+
+// void runtime·atomicor8(byte volatile*, byte);
+TEXT runtime·atomicor8(SB), NOSPLIT, $0-9
+ MOVD ptr+0(FP), R3
+ MOVBZ val+8(FP), R4
+ // Align ptr down to 4 bytes so we can use 32-bit load/store.
+ // R5 = (R3 << 0) & ~3
+ RLDCR $0, R3, $~3, R5
+ // Compute val shift.
+#ifdef GOARCH_power64
+ // Big endian. ptr = ptr ^ 3
+ XOR $3, R3
+#endif
+ // R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
+ RLDC $3, R3, $(3*8), R6
+ // Shift val for aligned ptr. R4 = val << R6
+ SLD R6, R4, R4
+
+atomicor8_again:
+ SYNC
+ LWAR (R5), R6
+ OR R4, R6
+ STWCCC R6, (R5)
+ BNE atomicor8_again
+ SYNC
+ ISYNC
+ RETURN
+
+// void jmpdefer(fv, sp);
+// called from deferreturn.
+// 1. grab stored LR for caller
+// 2. sub 4 bytes to get back to BL deferreturn
+// 3. BR to fn
+TEXT runtime·jmpdefer(SB), NOSPLIT, $-8-16
+ MOVD 0(R1), R31
+ SUB $4, R31
+ MOVD R31, LR
+
+ MOVD fv+0(FP), R11
+ MOVD argp+8(FP), R1
+ SUB $8, R1
+ MOVD 0(R11), R3
+ MOVD R3, CTR
+ BR (CTR)
+
+// Save state of caller into g->sched. Smashes R31.
+TEXT gosave<>(SB),NOSPLIT,$-8
+ MOVD LR, R31
+ MOVD R31, (g_sched+gobuf_pc)(g)
+ MOVD R1, (g_sched+gobuf_sp)(g)
+ MOVD R0, (g_sched+gobuf_lr)(g)
+ MOVD R0, (g_sched+gobuf_ret)(g)
+ MOVD R0, (g_sched+gobuf_ctxt)(g)
+ RETURN
+
+// asmcgocall(void(*fn)(void*), void *arg)
+// Call fn(arg) on the scheduler stack,
+// aligned appropriately for the gcc ABI.
+// See cgocall.c for more details.
+TEXT ·asmcgocall(SB),NOSPLIT,$0-16
+ MOVD R0, 21(R0)
+
+// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
+// Turn the fn into a Go func (by taking its address) and call
+// cgocallback_gofunc.
+TEXT runtime·cgocallback(SB),NOSPLIT,$24-24
+ MOVD R0, 22(R0)
+
+// cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize)
+// See cgocall.c for more details.
+TEXT ·cgocallback_gofunc(SB),NOSPLIT,$8-24
+ MOVD R0, 23(R0)
+
+// void setg(G*); set g. for use by needm.
+TEXT runtime·setg(SB), NOSPLIT, $0-8
+ MOVD R0, 24(R0)
+
+// void setg_gcc(G*); set g called from gcc.
+TEXT setg_gcc<>(SB),NOSPLIT,$0
+ MOVD R0, 25(R0)
+
+TEXT runtime·getcallerpc(SB),NOSPLIT,$-8-16
+ MOVD 0(R1), R3
+ MOVD R3, ret+8(FP)
+ RETURN
+
+TEXT runtime·gogetcallerpc(SB),NOSPLIT,$-8-16
+ MOVD 0(R1), R3
+ MOVD R3,ret+8(FP)
+ RETURN
+
+TEXT runtime·setcallerpc(SB),NOSPLIT,$-8-16
+ MOVD pc+8(FP), R3
+ MOVD R3, 0(R1) // set calling pc
+ RETURN
+
+TEXT runtime·getcallersp(SB),NOSPLIT,$0-16
+ MOVD sp+0(FP), R3
+ SUB $8, R3
+ MOVD R3, ret+8(FP)
+ RETURN
+
+// func gogetcallersp(p unsafe.Pointer) uintptr
+TEXT runtime·gogetcallersp(SB),NOSPLIT,$0-16
+ MOVD sp+0(FP), R3
+ SUB $8, R3
+ MOVD R3,ret+8(FP)
+ RETURN
+
+TEXT runtime·abort(SB),NOSPLIT,$-8-0
+ MOVW (R0), R0
+ UNDEF
+
+#define TBRL 268
+#define TBRU 269 /* Time base Upper/Lower */
+
+// int64 runtime·cputicks(void)
+TEXT runtime·cputicks(SB),NOSPLIT,$0-8
+ MOVW SPR(TBRU), R4
+ MOVW SPR(TBRL), R3
+ MOVW SPR(TBRU), R5
+ CMPW R4, R5
+ BNE -4(PC)
+ SLD $32, R5
+ OR R5, R3
+ MOVD R3, ret+0(FP)
+ RETURN
+
+// AES hashing not implemented for Power
+TEXT runtime·aeshash(SB),NOSPLIT,$-8-0
+ MOVW (R0), R1
+TEXT runtime·aeshash32(SB),NOSPLIT,$-8-0
+ MOVW (R0), R1
+TEXT runtime·aeshash64(SB),NOSPLIT,$-8-0
+ MOVW (R0), R1
+TEXT runtime·aeshashstr(SB),NOSPLIT,$-8-0
+ MOVW (R0), R1
+
+TEXT runtime·memeq(SB),NOSPLIT,$-8-25
+ MOVD a+0(FP), R3
+ MOVD b+8(FP), R4
+ MOVD count+16(FP), R5
+ SUB $1, R3
+ SUB $1, R4
+ ADD R3, R5, R8
+loop:
+ CMP R3, R8
+ BNE 4(PC)
+ MOVD $1, R3
+ MOVB R3, ret+24(FP)
+ RETURN
+ MOVBZU 1(R3), R6
+ MOVBZU 1(R4), R7
+ CMP R6, R7
+ BEQ loop
+
+ MOVB R0, ret+24(FP)
+ RETURN
+
+// eqstring tests whether two strings are equal.
+// See runtime_test.go:eqstring_generic for
+// equivalent Go code.
+TEXT runtime·eqstring(SB),NOSPLIT,$0-33
+ MOVD s1len+8(FP), R4
+ MOVD s2len+24(FP), R5
+ CMP R4, R5
+ BNE noteq
+
+ MOVD s1str+0(FP), R3
+ MOVD s2str+16(FP), R4
+ SUB $1, R3
+ SUB $1, R4
+ ADD R3, R5, R8
+loop:
+ CMP R3, R8
+ BNE 4(PC)
+ MOVD $1, R3
+ MOVB R3, ret+32(FP)
+ RETURN
+ MOVBZU 1(R3), R6
+ MOVBZU 1(R4), R7
+ CMP R6, R7
+ BEQ loop
+noteq:
+ MOVB R0, ret+32(FP)
+ RETURN
+
+// TODO: share code with memeq?
+TEXT bytes·Equal(SB),NOSPLIT,$0-49
+ MOVD a_len+8(FP), R3
+ MOVD b_len+32(FP), R4
+
+ CMP R3, R4 // unequal lengths are not equal
+ BNE noteq
+
+ MOVD a+0(FP), R5
+ MOVD b+24(FP), R6
+ SUB $1, R5
+ SUB $1, R6
+ ADD R5, R3 // end-1
+
+loop:
+ CMP R5, R3
+ BEQ equal // reached the end
+ MOVBZU 1(R5), R4
+ MOVBZU 1(R6), R7
+ CMP R4, R7
+ BEQ loop
+
+noteq:
+ MOVBZ R0, ret+48(FP)
+ RETURN
+
+equal:
+ MOVD $1, R3
+ MOVBZ R3, ret+48(FP)
+ RETURN
+
+TEXT bytes·IndexByte(SB),NOSPLIT,$0-40
+ MOVD s+0(FP), R3
+ MOVD s_len+8(FP), R4
+ MOVBZ c+24(FP), R5 // byte to find
+ MOVD R3, R6 // store base for later
+ SUB $1, R3
+ ADD R3, R4 // end-1
+
+loop:
+ CMP R3, R4
+ BEQ notfound
+ MOVBZU 1(R3), R7
+ CMP R7, R5
+ BNE loop
+
+ SUB R6, R3 // remove base
+ MOVD R3, ret+32(FP)
+ RETURN
+
+notfound:
+ MOVD $-1, R3
+ MOVD R3, ret+32(FP)
+ RETURN
+
+TEXT strings·IndexByte(SB),NOSPLIT,$0
+ MOVD p+0(FP), R3
+ MOVD b_len+8(FP), R4
+ MOVBZ c+16(FP), R5 // byte to find
+ MOVD R3, R6 // store base for later
+ SUB $1, R3
+ ADD R3, R4 // end-1
+
+loop:
+ CMP R3, R4
+ BEQ notfound
+ MOVBZU 1(R3), R7
+ CMP R7, R5
+ BNE loop
+
+ SUB R6, R3 // remove base
+ MOVD R3, ret+24(FP)
+ RETURN
+
+notfound:
+ MOVD $-1, R3
+ MOVD R3, ret+24(FP)
+ RETURN
+
+
+// A Duff's device for zeroing memory.
+// The compiler jumps to computed addresses within
+// this routine to zero chunks of memory. Do not
+// change this code without also changing the code
+// in ../../cmd/9g/ggen.c:/^clearfat.
+// R0: always zero
+// R3 (aka REGRT1): ptr to memory to be zeroed - 8
+// R3 is updated as a side effect.
+TEXT runtime·duffzero(SB), NOSPLIT, $-8-0
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ MOVDU R0, 8(R3)
+ RETURN
+
+TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+ MOVD g_m(g), R4
+ MOVWZ m_fastrand(R4), R3
+ ADD R3, R3
+ CMP R3, $0
+ BGE 2(PC)
+ XOR $0x88888eef, R3
+ MOVW R3, m_fastrand(R4)
+ MOVW R3, ret+0(FP)
+ RETURN
+
+TEXT runtime·return0(SB), NOSPLIT, $0
+ MOVW $0, R3
+ RETURN
+
+// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
+// Must obey the gcc calling convention.
+TEXT _cgo_topofstack(SB),NOSPLIT,$0
+ MOVD R0, 26(R0)
diff --git a/src/runtime/atomic_power64x.s b/src/runtime/atomic_power64x.s
new file mode 100644
index 000000000..e72871761
--- /dev/null
+++ b/src/runtime/atomic_power64x.s
@@ -0,0 +1,40 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build power64 power64le
+
+#include "textflag.h"
+
+// uint32 runtime·atomicload(uint32 volatile* addr)
+TEXT ·atomicload(SB),NOSPLIT,$-8-12
+ MOVD 0(FP), R3
+ SYNC
+ MOVWZ 0(R3), R3
+ CMPW R3, R3, CR7
+ BC 4, 30, 1(PC) // bne- cr7,0x4
+ ISYNC
+ MOVW R3, ret+8(FP)
+ RETURN
+
+// uint64 runtime·atomicload64(uint64 volatile* addr)
+TEXT ·atomicload64(SB),NOSPLIT,$-8-16
+ MOVD 0(FP), R3
+ SYNC
+ MOVD 0(R3), R3
+ CMP R3, R3, CR7
+ BC 4, 30, 1(PC) // bne- cr7,0x4
+ ISYNC
+ MOVD R3, ret+8(FP)
+ RETURN
+
+// void *runtime·atomicloadp(void *volatile *addr)
+TEXT ·atomicloadp(SB),NOSPLIT,$-8-16
+ MOVD 0(FP), R3
+ SYNC
+ MOVD 0(R3), R3
+ CMP R3, R3, CR7
+ BC 4, 30, 1(PC) // bne- cr7,0x4
+ ISYNC
+ MOVD R3, ret+8(FP)
+ RETURN
diff --git a/src/runtime/debug/stubs.s b/src/runtime/debug/stubs.s
index d56274f2d..1e883b72c 100644
--- a/src/runtime/debug/stubs.s
+++ b/src/runtime/debug/stubs.s
@@ -7,6 +7,12 @@
#ifdef GOARCH_arm
#define JMP B
#endif
+#ifdef GOARCH_power64
+#define JMP BR
+#endif
+#ifdef GOARCH_power64le
+#define JMP BR
+#endif
TEXT ·setMaxStack(SB),NOSPLIT,$0-0
JMP runtime·setMaxStack(SB)
diff --git a/src/runtime/defs1_linux.go b/src/runtime/defs1_linux.go
index 392cc4ab5..87c6e02a4 100644
--- a/src/runtime/defs1_linux.go
+++ b/src/runtime/defs1_linux.go
@@ -15,12 +15,14 @@ package runtime
/*
#include <ucontext.h>
#include <fcntl.h>
+#include <asm/signal.h>
*/
import "C"
const (
- O_RDONLY = C.O_RDONLY
- O_CLOEXEC = C.O_CLOEXEC
+ O_RDONLY = C.O_RDONLY
+ O_CLOEXEC = C.O_CLOEXEC
+ SA_RESTORER = C.SA_RESTORER
)
type Usigset C.__sigset_t
diff --git a/src/runtime/defs3_linux.go b/src/runtime/defs3_linux.go
new file mode 100644
index 000000000..3551a4fa9
--- /dev/null
+++ b/src/runtime/defs3_linux.go
@@ -0,0 +1,43 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -cdefs
+
+GOARCH=power64 cgo -cdefs defs_linux.go defs3_linux.go > defs_linux_power64.h
+*/
+
+package runtime
+
+/*
+#define size_t __kernel_size_t
+#define sigset_t __sigset_t // rename the sigset_t here otherwise cgo will complain about "inconsistent definitions for C.sigset_t"
+#define _SYS_TYPES_H // avoid inclusion of sys/types.h
+#include <asm/ucontext.h>
+#include <asm-generic/fcntl.h>
+*/
+import "C"
+
+const (
+ O_RDONLY = C.O_RDONLY
+ O_CLOEXEC = C.O_CLOEXEC
+ SA_RESTORER = 0 // unused
+)
+
+type Usigset C.__sigset_t
+
+// types used in sigcontext
+type Ptregs C.struct_pt_regs
+type Gregset C.elf_gregset_t
+type FPregset C.elf_fpregset_t
+type Vreg C.elf_vrreg_t
+
+type SigaltstackT C.struct_sigaltstack
+
+// PPC64 uses sigcontext in place of mcontext in ucontext.
+// see http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/arch/powerpc/include/uapi/asm/ucontext.h
+type Sigcontext C.struct_sigcontext
+type Ucontext C.struct_ucontext
diff --git a/src/runtime/defs_linux.go b/src/runtime/defs_linux.go
index 8657dbb0e..553366a50 100644
--- a/src/runtime/defs_linux.go
+++ b/src/runtime/defs_linux.go
@@ -20,6 +20,7 @@ package runtime
// headers for things like ucontext_t, so that happens in
// a separate file, defs1.go.
+#define _SYS_TYPES_H // avoid inclusion of sys/types.h
#include <asm/posix_types.h>
#define size_t __kernel_size_t
#include <asm/signal.h>
@@ -28,7 +29,7 @@ package runtime
#include <asm-generic/errno.h>
#include <asm-generic/poll.h>
#include <linux/eventpoll.h>
-#undef size_t
+#include <linux/time.h>
*/
import "C"
@@ -48,10 +49,9 @@ const (
MADV_DONTNEED = C.MADV_DONTNEED
- SA_RESTART = C.SA_RESTART
- SA_ONSTACK = C.SA_ONSTACK
- SA_RESTORER = C.SA_RESTORER
- SA_SIGINFO = C.SA_SIGINFO
+ SA_RESTART = C.SA_RESTART
+ SA_ONSTACK = C.SA_ONSTACK
+ SA_SIGINFO = C.SA_SIGINFO
SIGHUP = C.SIGHUP
SIGINT = C.SIGINT
@@ -116,6 +116,7 @@ const (
EPOLL_CTL_MOD = C.EPOLL_CTL_MOD
)
+type Sigset C.sigset_t
type Timespec C.struct_timespec
type Timeval C.struct_timeval
type Sigaction C.struct_sigaction
diff --git a/src/runtime/defs_linux_power64.h b/src/runtime/defs_linux_power64.h
new file mode 100644
index 000000000..93742fa34
--- /dev/null
+++ b/src/runtime/defs_linux_power64.h
@@ -0,0 +1,204 @@
+// Created by cgo -cdefs - DO NOT EDIT
+// cgo -cdefs defs_linux.go defs3_linux.go
+
+
+enum {
+ EINTR = 0x4,
+ EAGAIN = 0xb,
+ ENOMEM = 0xc,
+
+ PROT_NONE = 0x0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+
+ MAP_ANON = 0x20,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+
+ MADV_DONTNEED = 0x4,
+
+ SA_RESTART = 0x10000000,
+ SA_ONSTACK = 0x8000000,
+ SA_SIGINFO = 0x4,
+
+ SIGHUP = 0x1,
+ SIGINT = 0x2,
+ SIGQUIT = 0x3,
+ SIGILL = 0x4,
+ SIGTRAP = 0x5,
+ SIGABRT = 0x6,
+ SIGBUS = 0x7,
+ SIGFPE = 0x8,
+ SIGKILL = 0x9,
+ SIGUSR1 = 0xa,
+ SIGSEGV = 0xb,
+ SIGUSR2 = 0xc,
+ SIGPIPE = 0xd,
+ SIGALRM = 0xe,
+ SIGSTKFLT = 0x10,
+ SIGCHLD = 0x11,
+ SIGCONT = 0x12,
+ SIGSTOP = 0x13,
+ SIGTSTP = 0x14,
+ SIGTTIN = 0x15,
+ SIGTTOU = 0x16,
+ SIGURG = 0x17,
+ SIGXCPU = 0x18,
+ SIGXFSZ = 0x19,
+ SIGVTALRM = 0x1a,
+ SIGPROF = 0x1b,
+ SIGWINCH = 0x1c,
+ SIGIO = 0x1d,
+ SIGPWR = 0x1e,
+ SIGSYS = 0x1f,
+
+ FPE_INTDIV = 0x1,
+ FPE_INTOVF = 0x2,
+ FPE_FLTDIV = 0x3,
+ FPE_FLTOVF = 0x4,
+ FPE_FLTUND = 0x5,
+ FPE_FLTRES = 0x6,
+ FPE_FLTINV = 0x7,
+ FPE_FLTSUB = 0x8,
+
+ BUS_ADRALN = 0x1,
+ BUS_ADRERR = 0x2,
+ BUS_OBJERR = 0x3,
+
+ SEGV_MAPERR = 0x1,
+ SEGV_ACCERR = 0x2,
+
+ ITIMER_REAL = 0x0,
+ ITIMER_VIRTUAL = 0x1,
+ ITIMER_PROF = 0x2,
+
+ EPOLLIN = 0x1,
+ EPOLLOUT = 0x4,
+ EPOLLERR = 0x8,
+ EPOLLHUP = 0x10,
+ EPOLLRDHUP = 0x2000,
+ EPOLLET = -0x80000000,
+ EPOLL_CLOEXEC = 0x80000,
+ EPOLL_CTL_ADD = 0x1,
+ EPOLL_CTL_DEL = 0x2,
+ EPOLL_CTL_MOD = 0x3,
+};
+
+typedef struct Sigset Sigset;
+typedef struct Timespec Timespec;
+typedef struct Timeval Timeval;
+typedef struct SigactionT SigactionT;
+typedef struct Siginfo Siginfo;
+typedef struct Itimerval Itimerval;
+typedef struct EpollEvent EpollEvent;
+
+#pragma pack on
+
+//struct Sigset {
+// uint64 sig[1];
+//};
+//typedef uint64 Sigset;
+
+struct Timespec {
+ int64 tv_sec;
+ int64 tv_nsec;
+};
+struct Timeval {
+ int64 tv_sec;
+ int64 tv_usec;
+};
+struct SigactionT {
+ void *sa_handler;
+ uint64 sa_flags;
+ void *sa_restorer;
+ uint64 sa_mask;
+};
+struct Siginfo {
+ int32 si_signo;
+ int32 si_errno;
+ int32 si_code;
+ byte Pad_cgo_0[4];
+ byte _sifields[112];
+};
+struct Itimerval {
+ Timeval it_interval;
+ Timeval it_value;
+};
+struct EpollEvent {
+ uint32 events;
+ byte Pad_cgo_0[4];
+ byte data[8]; // unaligned uintptr
+};
+
+
+#pragma pack off
+// Created by cgo -cdefs - DO NOT EDIT
+// cgo -cdefs defs_linux.go defs3_linux.go
+
+
+enum {
+ O_RDONLY = 0x0,
+ O_CLOEXEC = 0x80000,
+ SA_RESTORER = 0,
+};
+
+typedef struct Ptregs Ptregs;
+typedef struct Vreg Vreg;
+typedef struct SigaltstackT SigaltstackT;
+typedef struct Sigcontext Sigcontext;
+typedef struct Ucontext Ucontext;
+
+#pragma pack on
+
+struct Ptregs {
+ uint64 gpr[32];
+ uint64 nip;
+ uint64 msr;
+ uint64 orig_gpr3;
+ uint64 ctr;
+ uint64 link;
+ uint64 xer;
+ uint64 ccr;
+ uint64 softe;
+ uint64 trap;
+ uint64 dar;
+ uint64 dsisr;
+ uint64 result;
+};
+typedef uint64 Gregset[48];
+typedef float64 FPregset[33];
+struct Vreg {
+ uint32 u[4];
+};
+
+struct SigaltstackT {
+ byte *ss_sp;
+ int32 ss_flags;
+ byte Pad_cgo_0[4];
+ uint64 ss_size;
+};
+
+struct Sigcontext {
+ uint64 _unused[4];
+ int32 signal;
+ int32 _pad0;
+ uint64 handler;
+ uint64 oldmask;
+ Ptregs *regs;
+ uint64 gp_regs[48];
+ float64 fp_regs[33];
+ Vreg *v_regs;
+ int64 vmx_reserve[101];
+};
+struct Ucontext {
+ uint64 uc_flags;
+ Ucontext *uc_link;
+ SigaltstackT uc_stack;
+ uint64 uc_sigmask;
+ uint64 __unused[15];
+ Sigcontext uc_mcontext;
+};
+
+
+#pragma pack off
diff --git a/src/runtime/defs_linux_power64le.h b/src/runtime/defs_linux_power64le.h
new file mode 100644
index 000000000..93742fa34
--- /dev/null
+++ b/src/runtime/defs_linux_power64le.h
@@ -0,0 +1,204 @@
+// Created by cgo -cdefs - DO NOT EDIT
+// cgo -cdefs defs_linux.go defs3_linux.go
+
+
+enum {
+ EINTR = 0x4,
+ EAGAIN = 0xb,
+ ENOMEM = 0xc,
+
+ PROT_NONE = 0x0,
+ PROT_READ = 0x1,
+ PROT_WRITE = 0x2,
+ PROT_EXEC = 0x4,
+
+ MAP_ANON = 0x20,
+ MAP_PRIVATE = 0x2,
+ MAP_FIXED = 0x10,
+
+ MADV_DONTNEED = 0x4,
+
+ SA_RESTART = 0x10000000,
+ SA_ONSTACK = 0x8000000,
+ SA_SIGINFO = 0x4,
+
+ SIGHUP = 0x1,
+ SIGINT = 0x2,
+ SIGQUIT = 0x3,
+ SIGILL = 0x4,
+ SIGTRAP = 0x5,
+ SIGABRT = 0x6,
+ SIGBUS = 0x7,
+ SIGFPE = 0x8,
+ SIGKILL = 0x9,
+ SIGUSR1 = 0xa,
+ SIGSEGV = 0xb,
+ SIGUSR2 = 0xc,
+ SIGPIPE = 0xd,
+ SIGALRM = 0xe,
+ SIGSTKFLT = 0x10,
+ SIGCHLD = 0x11,
+ SIGCONT = 0x12,
+ SIGSTOP = 0x13,
+ SIGTSTP = 0x14,
+ SIGTTIN = 0x15,
+ SIGTTOU = 0x16,
+ SIGURG = 0x17,
+ SIGXCPU = 0x18,
+ SIGXFSZ = 0x19,
+ SIGVTALRM = 0x1a,
+ SIGPROF = 0x1b,
+ SIGWINCH = 0x1c,
+ SIGIO = 0x1d,
+ SIGPWR = 0x1e,
+ SIGSYS = 0x1f,
+
+ FPE_INTDIV = 0x1,
+ FPE_INTOVF = 0x2,
+ FPE_FLTDIV = 0x3,
+ FPE_FLTOVF = 0x4,
+ FPE_FLTUND = 0x5,
+ FPE_FLTRES = 0x6,
+ FPE_FLTINV = 0x7,
+ FPE_FLTSUB = 0x8,
+
+ BUS_ADRALN = 0x1,
+ BUS_ADRERR = 0x2,
+ BUS_OBJERR = 0x3,
+
+ SEGV_MAPERR = 0x1,
+ SEGV_ACCERR = 0x2,
+
+ ITIMER_REAL = 0x0,
+ ITIMER_VIRTUAL = 0x1,
+ ITIMER_PROF = 0x2,
+
+ EPOLLIN = 0x1,
+ EPOLLOUT = 0x4,
+ EPOLLERR = 0x8,
+ EPOLLHUP = 0x10,
+ EPOLLRDHUP = 0x2000,
+ EPOLLET = -0x80000000,
+ EPOLL_CLOEXEC = 0x80000,
+ EPOLL_CTL_ADD = 0x1,
+ EPOLL_CTL_DEL = 0x2,
+ EPOLL_CTL_MOD = 0x3,
+};
+
+typedef struct Sigset Sigset;
+typedef struct Timespec Timespec;
+typedef struct Timeval Timeval;
+typedef struct SigactionT SigactionT;
+typedef struct Siginfo Siginfo;
+typedef struct Itimerval Itimerval;
+typedef struct EpollEvent EpollEvent;
+
+#pragma pack on
+
+//struct Sigset {
+// uint64 sig[1];
+//};
+//typedef uint64 Sigset;
+
+struct Timespec {
+ int64 tv_sec;
+ int64 tv_nsec;
+};
+struct Timeval {
+ int64 tv_sec;
+ int64 tv_usec;
+};
+struct SigactionT {
+ void *sa_handler;
+ uint64 sa_flags;
+ void *sa_restorer;
+ uint64 sa_mask;
+};
+struct Siginfo {
+ int32 si_signo;
+ int32 si_errno;
+ int32 si_code;
+ byte Pad_cgo_0[4];
+ byte _sifields[112];
+};
+struct Itimerval {
+ Timeval it_interval;
+ Timeval it_value;
+};
+struct EpollEvent {
+ uint32 events;
+ byte Pad_cgo_0[4];
+ byte data[8]; // unaligned uintptr
+};
+
+
+#pragma pack off
+// Created by cgo -cdefs - DO NOT EDIT
+// cgo -cdefs defs_linux.go defs3_linux.go
+
+
+enum {
+ O_RDONLY = 0x0,
+ O_CLOEXEC = 0x80000,
+ SA_RESTORER = 0,
+};
+
+typedef struct Ptregs Ptregs;
+typedef struct Vreg Vreg;
+typedef struct SigaltstackT SigaltstackT;
+typedef struct Sigcontext Sigcontext;
+typedef struct Ucontext Ucontext;
+
+#pragma pack on
+
+struct Ptregs {
+ uint64 gpr[32];
+ uint64 nip;
+ uint64 msr;
+ uint64 orig_gpr3;
+ uint64 ctr;
+ uint64 link;
+ uint64 xer;
+ uint64 ccr;
+ uint64 softe;
+ uint64 trap;
+ uint64 dar;
+ uint64 dsisr;
+ uint64 result;
+};
+typedef uint64 Gregset[48];
+typedef float64 FPregset[33];
+struct Vreg {
+ uint32 u[4];
+};
+
+struct SigaltstackT {
+ byte *ss_sp;
+ int32 ss_flags;
+ byte Pad_cgo_0[4];
+ uint64 ss_size;
+};
+
+struct Sigcontext {
+ uint64 _unused[4];
+ int32 signal;
+ int32 _pad0;
+ uint64 handler;
+ uint64 oldmask;
+ Ptregs *regs;
+ uint64 gp_regs[48];
+ float64 fp_regs[33];
+ Vreg *v_regs;
+ int64 vmx_reserve[101];
+};
+struct Ucontext {
+ uint64 uc_flags;
+ Ucontext *uc_link;
+ SigaltstackT uc_stack;
+ uint64 uc_sigmask;
+ uint64 __unused[15];
+ Sigcontext uc_mcontext;
+};
+
+
+#pragma pack off
diff --git a/src/runtime/gcinfo_test.go b/src/runtime/gcinfo_test.go
index e74d8c2c0..1a33f3b3b 100644
--- a/src/runtime/gcinfo_test.go
+++ b/src/runtime/gcinfo_test.go
@@ -153,6 +153,12 @@ func infoBigStruct() []byte {
BitsScalar, BitsScalar, BitsDead, BitsScalar, BitsScalar, // t int; y uint16; u uint64
BitsPointer, BitsDead, // i string
}
+ case "power64", "power64le":
+ return []byte{
+ BitsPointer, BitsScalar, BitsScalar, BitsScalar,
+ BitsMultiWord, BitsSlice, BitsScalar, BitsScalar,
+ BitsScalar, BitsScalar, BitsMultiWord, BitsString,
+ }
default:
panic("unknown arch")
}
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index c56e03886..020f87a7a 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -4,9 +4,7 @@
package runtime
-import (
- "unsafe"
-)
+import "unsafe"
const (
debugMalloc = false
@@ -261,8 +259,10 @@ func mallocgc(size uintptr, typ *_type, flags int) unsafe.Pointer {
goto marked
}
ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
- // Check whether the program is already unrolled.
- if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
+ // Check whether the program is already unrolled
+ // by checking if the unroll flag byte is set
+ maskword := uintptr(atomicloadp(unsafe.Pointer(ptrmask)))
+ if *(*uint8)(unsafe.Pointer(&maskword)) == 0 {
mp := acquirem()
mp.ptrarg[0] = unsafe.Pointer(typ)
onM(unrollgcprog_m)
diff --git a/src/runtime/mem_linux.c b/src/runtime/mem_linux.c
index bfb405607..52e02b34e 100644
--- a/src/runtime/mem_linux.c
+++ b/src/runtime/mem_linux.c
@@ -11,7 +11,7 @@
enum
{
- _PAGE_SIZE = 4096,
+ _PAGE_SIZE = PhysPageSize,
EACCES = 13,
};
@@ -36,8 +36,9 @@ addrspace_free(void *v, uintptr n)
errval = runtime·mincore((int8*)v + off, chunk, vec);
// ENOMEM means unmapped, which is what we want.
// Anything else we assume means the pages are mapped.
- if (errval != -ENOMEM)
+ if (errval != -ENOMEM && errval != ENOMEM) {
return 0;
+ }
}
return 1;
}
@@ -48,12 +49,15 @@ mmap_fixed(byte *v, uintptr n, int32 prot, int32 flags, int32 fd, uint32 offset)
void *p;
p = runtime·mmap(v, n, prot, flags, fd, offset);
- if(p != v && addrspace_free(v, n)) {
+ if(p != v) {
+ if(p > (void*)4096) {
+ runtime·munmap(p, n);
+ p = nil;
+ }
// On some systems, mmap ignores v without
// MAP_FIXED, so retry if the address space is free.
- if(p > (void*)4096)
- runtime·munmap(p, n);
- p = runtime·mmap(v, n, prot, flags|MAP_FIXED, fd, offset);
+ if(addrspace_free(v, n))
+ p = runtime·mmap(v, n, prot, flags|MAP_FIXED, fd, offset);
}
return p;
}
diff --git a/src/runtime/memclr_386.s b/src/runtime/memclr_386.s
index 1520aea2e..3f20b69c8 100644
--- a/src/runtime/memclr_386.s
+++ b/src/runtime/memclr_386.s
@@ -15,31 +15,31 @@ TEXT runtime·memclr(SB), NOSPLIT, $0-8
XORL AX, AX
// MOVOU seems always faster than REP STOSL.
-clr_tail:
+tail:
TESTL BX, BX
- JEQ clr_0
+ JEQ _0
CMPL BX, $2
- JBE clr_1or2
+ JBE _1or2
CMPL BX, $4
- JBE clr_3or4
+ JBE _3or4
CMPL BX, $8
- JBE clr_5through8
+ JBE _5through8
CMPL BX, $16
- JBE clr_9through16
+ JBE _9through16
TESTL $0x4000000, runtime·cpuid_edx(SB) // check for sse2
JEQ nosse2
PXOR X0, X0
CMPL BX, $32
- JBE clr_17through32
+ JBE _17through32
CMPL BX, $64
- JBE clr_33through64
+ JBE _33through64
CMPL BX, $128
- JBE clr_65through128
+ JBE _65through128
CMPL BX, $256
- JBE clr_129through256
+ JBE _129through256
// TODO: use branch table and BSR to make this just a single dispatch
-clr_loop:
+loop:
MOVOU X0, 0(DI)
MOVOU X0, 16(DI)
MOVOU X0, 32(DI)
@@ -59,40 +59,40 @@ clr_loop:
SUBL $256, BX
ADDL $256, DI
CMPL BX, $256
- JAE clr_loop
- JMP clr_tail
+ JAE loop
+ JMP tail
-clr_1or2:
+_1or2:
MOVB AX, (DI)
MOVB AX, -1(DI)(BX*1)
RET
-clr_0:
+_0:
RET
-clr_3or4:
+_3or4:
MOVW AX, (DI)
MOVW AX, -2(DI)(BX*1)
RET
-clr_5through8:
+_5through8:
MOVL AX, (DI)
MOVL AX, -4(DI)(BX*1)
RET
-clr_9through16:
+_9through16:
MOVL AX, (DI)
MOVL AX, 4(DI)
MOVL AX, -8(DI)(BX*1)
MOVL AX, -4(DI)(BX*1)
RET
-clr_17through32:
+_17through32:
MOVOU X0, (DI)
MOVOU X0, -16(DI)(BX*1)
RET
-clr_33through64:
+_33through64:
MOVOU X0, (DI)
MOVOU X0, 16(DI)
MOVOU X0, -32(DI)(BX*1)
MOVOU X0, -16(DI)(BX*1)
RET
-clr_65through128:
+_65through128:
MOVOU X0, (DI)
MOVOU X0, 16(DI)
MOVOU X0, 32(DI)
@@ -102,7 +102,7 @@ clr_65through128:
MOVOU X0, -32(DI)(BX*1)
MOVOU X0, -16(DI)(BX*1)
RET
-clr_129through256:
+_129through256:
MOVOU X0, (DI)
MOVOU X0, 16(DI)
MOVOU X0, 32(DI)
@@ -126,5 +126,5 @@ nosse2:
REP
STOSL
ANDL $3, BX
- JNE clr_tail
+ JNE tail
RET
diff --git a/src/runtime/memclr_amd64.s b/src/runtime/memclr_amd64.s
index 94a2c7f23..ec24f1db2 100644
--- a/src/runtime/memclr_amd64.s
+++ b/src/runtime/memclr_amd64.s
@@ -15,30 +15,30 @@ TEXT runtime·memclr(SB), NOSPLIT, $0-16
XORQ AX, AX
// MOVOU seems always faster than REP STOSQ.
-clr_tail:
+tail:
TESTQ BX, BX
- JEQ clr_0
+ JEQ _0
CMPQ BX, $2
- JBE clr_1or2
+ JBE _1or2
CMPQ BX, $4
- JBE clr_3or4
+ JBE _3or4
CMPQ BX, $8
- JBE clr_5through8
+ JBE _5through8
CMPQ BX, $16
- JBE clr_9through16
+ JBE _9through16
PXOR X0, X0
CMPQ BX, $32
- JBE clr_17through32
+ JBE _17through32
CMPQ BX, $64
- JBE clr_33through64
+ JBE _33through64
CMPQ BX, $128
- JBE clr_65through128
+ JBE _65through128
CMPQ BX, $256
- JBE clr_129through256
+ JBE _129through256
// TODO: use branch table and BSR to make this just a single dispatch
// TODO: for really big clears, use MOVNTDQ.
-clr_loop:
+loop:
MOVOU X0, 0(DI)
MOVOU X0, 16(DI)
MOVOU X0, 32(DI)
@@ -58,38 +58,38 @@ clr_loop:
SUBQ $256, BX
ADDQ $256, DI
CMPQ BX, $256
- JAE clr_loop
- JMP clr_tail
+ JAE loop
+ JMP tail
-clr_1or2:
+_1or2:
MOVB AX, (DI)
MOVB AX, -1(DI)(BX*1)
RET
-clr_0:
+_0:
RET
-clr_3or4:
+_3or4:
MOVW AX, (DI)
MOVW AX, -2(DI)(BX*1)
RET
-clr_5through8:
+_5through8:
MOVL AX, (DI)
MOVL AX, -4(DI)(BX*1)
RET
-clr_9through16:
+_9through16:
MOVQ AX, (DI)
MOVQ AX, -8(DI)(BX*1)
RET
-clr_17through32:
+_17through32:
MOVOU X0, (DI)
MOVOU X0, -16(DI)(BX*1)
RET
-clr_33through64:
+_33through64:
MOVOU X0, (DI)
MOVOU X0, 16(DI)
MOVOU X0, -32(DI)(BX*1)
MOVOU X0, -16(DI)(BX*1)
RET
-clr_65through128:
+_65through128:
MOVOU X0, (DI)
MOVOU X0, 16(DI)
MOVOU X0, 32(DI)
@@ -99,7 +99,7 @@ clr_65through128:
MOVOU X0, -32(DI)(BX*1)
MOVOU X0, -16(DI)(BX*1)
RET
-clr_129through256:
+_129through256:
MOVOU X0, (DI)
MOVOU X0, 16(DI)
MOVOU X0, 32(DI)
diff --git a/src/runtime/memclr_plan9_386.s b/src/runtime/memclr_plan9_386.s
index b4b671f77..50f327b4e 100644
--- a/src/runtime/memclr_plan9_386.s
+++ b/src/runtime/memclr_plan9_386.s
@@ -10,40 +10,40 @@ TEXT runtime·memclr(SB), NOSPLIT, $0-8
MOVL n+4(FP), BX
XORL AX, AX
-clr_tail:
+tail:
TESTL BX, BX
- JEQ clr_0
+ JEQ _0
CMPL BX, $2
- JBE clr_1or2
+ JBE _1or2
CMPL BX, $4
- JBE clr_3or4
+ JBE _3or4
CMPL BX, $8
- JBE clr_5through8
+ JBE _5through8
CMPL BX, $16
- JBE clr_9through16
+ JBE _9through16
MOVL BX, CX
SHRL $2, CX
REP
STOSL
ANDL $3, BX
- JNE clr_tail
+ JNE tail
RET
-clr_1or2:
+_1or2:
MOVB AX, (DI)
MOVB AX, -1(DI)(BX*1)
RET
-clr_0:
+_0:
RET
-clr_3or4:
+_3or4:
MOVW AX, (DI)
MOVW AX, -2(DI)(BX*1)
RET
-clr_5through8:
+_5through8:
MOVL AX, (DI)
MOVL AX, -4(DI)(BX*1)
RET
-clr_9through16:
+_9through16:
MOVL AX, (DI)
MOVL AX, 4(DI)
MOVL AX, -8(DI)(BX*1)
diff --git a/src/runtime/memclr_power64x.s b/src/runtime/memclr_power64x.s
new file mode 100644
index 000000000..dfad64b6f
--- /dev/null
+++ b/src/runtime/memclr_power64x.s
@@ -0,0 +1,20 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build power64 power64le
+
+#include "textflag.h"
+
+// void runtime·memclr(void*, uintptr)
+TEXT runtime·memclr(SB),NOSPLIT,$0-16
+ MOVD ptr+0(FP), R3
+ MOVD n+8(FP), R4
+ CMP R4, $0
+ BEQ done
+ SUB $1, R3
+ MOVD R4, CTR
+ MOVBU R0, 1(R3)
+ BC 25, 0, -1(PC) // bdnz+ $-4
+done:
+ RETURN
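
The new power64 memclr zeroes one byte per iteration: CTR counts n down while a pre-decremented pointer lets the MOVBU store at 1(R3) walk the buffer. For readers who do not follow the assembly, a minimal Go sketch of the same byte-at-a-time loop (the function name is illustrative, not part of the patch):

func clearBytes(b []byte) {
	// Equivalent of the CTR-counted loop: store a zero byte and
	// advance until every byte has been cleared.
	for i := range b {
		b[i] = 0
	}
}
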
diff --git a/src/runtime/memmove_power64x.s b/src/runtime/memmove_power64x.s
new file mode 100644
index 000000000..2b04d8319
--- /dev/null
+++ b/src/runtime/memmove_power64x.s
@@ -0,0 +1,40 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build power64 power64le
+
+#include "textflag.h"
+
+// void runtime·memmove(void*, void*, uintptr)
+TEXT runtime·memmove(SB), NOSPLIT, $-8-24
+ MOVD to+0(FP), R3
+ MOVD from+8(FP), R4
+ MOVD n+16(FP), R5
+ CMP R5, $0
+ BNE check
+ RETURN
+
+check:
+ CMP R3, R4
+ BGT backward
+
+ SUB $1, R3
+ ADD R3, R5
+ SUB $1, R4
+loop:
+ MOVBU 1(R4), R6
+ MOVBU R6, 1(R3)
+ CMP R3, R5
+ BNE loop
+ RETURN
+
+backward:
+ ADD R5, R4
+ ADD R3, R5
+loop1:
+ MOVBU -1(R4), R6
+ MOVBU R6, -1(R5)
+ CMP R3, R5
+ BNE loop1
+ RETURN
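
memmove here picks its copy direction from a single comparison: if the destination starts after the source it copies backward so overlapping regions survive, otherwise it copies forward byte by byte. A hedged Go sketch of the same decision, expressed as offsets into one buffer (the helper name and slice-based signature are illustrative only):

func moveWithin(buf []byte, dst, src, n int) {
	if dst <= src {
		// Forward copy: safe when the destination precedes the source.
		for i := 0; i < n; i++ {
			buf[dst+i] = buf[src+i]
		}
	} else {
		// Backward copy: the destination overlaps the tail of the source.
		for i := n - 1; i >= 0; i-- {
			buf[dst+i] = buf[src+i]
		}
	}
}
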
diff --git a/src/runtime/mgc0.c b/src/runtime/mgc0.c
index bcc5a2f39..f76d7c05c 100644
--- a/src/runtime/mgc0.c
+++ b/src/runtime/mgc0.c
@@ -2065,6 +2065,7 @@ runtime·unrollgcprog_m(void)
prog = (byte*)typ->gc[1];
unrollgcprog1(mask, prog, &pos, false, true);
}
+
// atomic way to say mask[0] = 1
x = *(uintptr*)mask;
((byte*)&x)[0] = 1;
diff --git a/src/runtime/noasm_arm.go b/src/runtime/noasm.go
index dd3ef8267..43c16860b 100644
--- a/src/runtime/noasm_arm.go
+++ b/src/runtime/noasm.go
@@ -5,6 +5,8 @@
// Routines that are implemented in assembly in asm_{amd64,386}.s
// but are implemented in Go for arm.
+// +build arm power64 power64le
+
package runtime
func cmpstring(s1, s2 string) int {
diff --git a/src/runtime/os_linux.c b/src/runtime/os_linux.c
index 0d8ffc995..9bd123d59 100644
--- a/src/runtime/os_linux.c
+++ b/src/runtime/os_linux.c
@@ -49,9 +49,22 @@ runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
runtime·futex(addr, FUTEX_WAIT, val, nil, nil, 0);
return;
}
- // NOTE: tv_nsec is int64 on amd64, so this assumes a little-endian system.
+
+ // It's difficult to live within the no-split stack limits here.
+ // On ARM and 386, a 64-bit divide invokes a general software routine
+ // that needs more stack than we can afford. So we use timediv instead.
+ // But on real 64-bit systems, where words are larger but the stack limit
+ // is not, even timediv is too heavy, and we really need to use just an
+ // ordinary machine instruction.
+ // Sorry for the #ifdef.
+ // For what it's worth, the #ifdef eliminated an implicit little-endian assumption.
+#ifdef _64BIT
+ ts.tv_sec = ns / 1000000000LL;
+ ts.tv_nsec = ns % 1000000000LL;
+#else
ts.tv_nsec = 0;
ts.tv_sec = runtime·timediv(ns, 1000000000LL, (int32*)&ts.tv_nsec);
+#endif
runtime·futex(addr, FUTEX_WAIT, val, &ts, nil, 0);
}
@@ -98,19 +111,22 @@ static int32
getproccount(void)
{
uintptr buf[16], t;
- int32 r, cnt, i;
+ int32 r, n, i;
- cnt = 0;
r = runtime·sched_getaffinity(0, sizeof(buf), buf);
- if(r > 0)
+ if(r <= 0)
+ return 1;
+ n = 0;
for(i = 0; i < r/sizeof(buf[0]); i++) {
t = buf[i];
- t = t - ((t >> 1) & 0x5555555555555555ULL);
- t = (t & 0x3333333333333333ULL) + ((t >> 2) & 0x3333333333333333ULL);
- cnt += (int32)((((t + (t >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
+ while(t != 0) {
+ n += t&1;
+ t >>= 1;
+ }
}
-
- return cnt ? cnt : 1;
+ if(n < 1)
+ n = 1;
+ return n;
}
// Clone, the Linux rfork.
@@ -298,7 +314,8 @@ runtime·setsig(int32 i, GoSighandler *fn, bool restart)
if(fn == runtime·sighandler)
fn = (void*)runtime·sigtramp;
sa.sa_handler = fn;
- if(runtime·rt_sigaction(i, &sa, nil, sizeof(sa.sa_mask)) != 0)
+ // Qemu rejects rt_sigaction of SIGRTMAX (64).
+ if(runtime·rt_sigaction(i, &sa, nil, sizeof(sa.sa_mask)) != 0 && i != 64)
runtime·throw("rt_sigaction failure");
}
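
The rewritten getproccount drops the SWAR popcount constants in favor of a plain shift-and-mask loop over each affinity-mask word, clamping the result to at least 1. The same counting logic in Go, as an illustrative translation (countProcs is a hypothetical name):

func countProcs(mask []uint64) int {
	n := 0
	for _, t := range mask {
		for t != 0 {
			n += int(t & 1) // count one set bit per iteration
			t >>= 1
		}
	}
	if n < 1 {
		n = 1 // never report zero processors
	}
	return n
}
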
diff --git a/src/runtime/panic.c b/src/runtime/panic.c
index 24eb6dbfe..46683b2b0 100644
--- a/src/runtime/panic.c
+++ b/src/runtime/panic.c
@@ -70,7 +70,7 @@ runtime·recovery_m(G *gp)
// (The pc we're returning to does pop pop
// before it tests the return value.)
// On the arm there are 2 saved LRs mixed in too.
- if(thechar == '5')
+ if(thechar == '5' || thechar == '9')
gp->sched.sp = (uintptr)argp - 4*sizeof(uintptr);
else
gp->sched.sp = (uintptr)argp - 2*sizeof(uintptr);
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 685ff5ca0..91b5da294 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -61,7 +61,7 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
// we can only call nosplit routines.
argp := uintptr(unsafe.Pointer(&fn))
argp += unsafe.Sizeof(fn)
- if GOARCH == "arm" {
+ if GOARCH == "arm" || GOARCH == "power64" || GOARCH == "power64le" {
argp += ptrSize // skip caller's saved link register
}
mp := acquirem()
@@ -494,12 +494,12 @@ func throw(s *byte) {
//go:nosplit
func gothrow(s string) {
+ print("fatal error: ", s, "\n")
gp := getg()
if gp.m.throwing == 0 {
gp.m.throwing = 1
}
startpanic()
- print("fatal error: ", s, "\n")
dopanic(0)
*(*int)(nil) = 0 // not reached
}
diff --git a/src/runtime/proc.c b/src/runtime/proc.c
index ab6812329..9626bd101 100644
--- a/src/runtime/proc.c
+++ b/src/runtime/proc.c
@@ -2128,7 +2128,7 @@ runtime·newproc(int32 siz, FuncVal* fn, ...)
byte *argp;
void (*mfn)(void);
- if(thechar == '5')
+ if(thechar == '5' || thechar == '9')
argp = (byte*)(&fn+2); // skip caller's saved LR
else
argp = (byte*)(&fn+1);
@@ -2188,7 +2188,7 @@ runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerp
sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
sp -= siz;
runtime·memmove(sp, argp, narg);
- if(thechar == '5') {
+ if(thechar == '5' || thechar == '9') {
// caller's LR
sp -= sizeof(void*);
*(void**)sp = nil;
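
Both deferproc and newproc now treat power64/power64le like arm: on link-register architectures the caller's saved LR sits between fn and its arguments, so the argument pointer must skip one extra word. A small Go sketch of that offset calculation, with a hypothetical helper name and the GOARCH test taken from the diff:

func argStart(fnAddr, ptrSize uintptr, goarch string) uintptr {
	argp := fnAddr + ptrSize // arguments normally follow fn directly
	if goarch == "arm" || goarch == "power64" || goarch == "power64le" {
		argp += ptrSize // skip the caller's saved link register
	}
	return argp
}
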
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
index bdea28c7c..15b18ff8f 100644
--- a/src/runtime/race_amd64.s
+++ b/src/runtime/race_amd64.s
@@ -140,20 +140,20 @@ TEXT racecalladdr<>(SB), NOSPLIT, $0-0
MOVQ g_racectx(R14), RARG0 // goroutine context
// Check that addr is within [arenastart, arenaend) or within [noptrdata, enoptrbss).
CMPQ RARG1, runtime·racearenastart(SB)
- JB racecalladdr_data
+ JB data
CMPQ RARG1, runtime·racearenaend(SB)
- JB racecalladdr_call
-racecalladdr_data:
+ JB call
+data:
MOVQ $runtime·noptrdata(SB), R13
CMPQ RARG1, R13
- JB racecalladdr_ret
+ JB ret
MOVQ $runtime·enoptrbss(SB), R13
CMPQ RARG1, R13
- JAE racecalladdr_ret
-racecalladdr_call:
+ JAE ret
+call:
MOVQ AX, AX // w/o this 6a miscompiles this function
JMP racecall<>(SB)
-racecalladdr_ret:
+ret:
RET
// func runtime·racefuncenter(pc uintptr)
@@ -335,9 +335,9 @@ TEXT racecall<>(SB), NOSPLIT, $0-0
MOVQ SP, R12 // callee-saved, preserved across the CALL
MOVQ m_g0(R13), R10
CMPQ R10, R14
- JE racecall_cont // already on g0
+ JE call // already on g0
MOVQ (g_sched+gobuf_sp)(R10), SP
-racecall_cont:
+call:
ANDQ $~15, SP // alignment for gcc ABI
CALL AX
MOVQ R12, SP
diff --git a/src/runtime/rt0_linux_power64.s b/src/runtime/rt0_linux_power64.s
new file mode 100644
index 000000000..970b6a673
--- /dev/null
+++ b/src/runtime/rt0_linux_power64.s
@@ -0,0 +1,17 @@
+#include "textflag.h"
+
+// actually a function descriptor for _main<>(SB)
+TEXT _rt0_power64_linux(SB),NOSPLIT,$0
+ DWORD $_main<>(SB)
+ DWORD $0
+ DWORD $0
+
+TEXT _main<>(SB),NOSPLIT,$-8
+ MOVD 0(R1), R3 // argc
+ ADD $8, R1, R4 // argv
+ BR main(SB)
+
+TEXT main(SB),NOSPLIT,$-8
+ MOVD $runtime·rt0_go(SB), R31
+ MOVD R31, CTR
+ BR (CTR)
diff --git a/src/runtime/rt0_linux_power64le.s b/src/runtime/rt0_linux_power64le.s
new file mode 100644
index 000000000..85ce84733
--- /dev/null
+++ b/src/runtime/rt0_linux_power64le.s
@@ -0,0 +1,14 @@
+#include "textflag.h"
+
+TEXT _rt0_power64le_linux(SB),NOSPLIT,$0
+ BR _main<>(SB)
+
+TEXT _main<>(SB),NOSPLIT,$-8
+ MOVD 0(R1), R3 // argc
+ ADD $8, R1, R4 // argv
+ BR main(SB)
+
+TEXT main(SB),NOSPLIT,$-8
+ MOVD $runtime·rt0_go(SB), R31
+ MOVD R31, CTR
+ BR (CTR)
diff --git a/src/runtime/runtime.c b/src/runtime/runtime.c
index c823691ec..f19f8e4be 100644
--- a/src/runtime/runtime.c
+++ b/src/runtime/runtime.c
@@ -185,6 +185,7 @@ runtime·check(void)
float64 j, j1;
byte *k, *k1;
uint16* l;
+ byte m[4];
struct x1 {
byte x;
};
@@ -236,6 +237,11 @@ runtime·check(void)
if(k != k1)
runtime·throw("casp3");
+ m[0] = m[1] = m[2] = m[3] = 0x1;
+ runtime·atomicor8(&m[1], 0xf0);
+ if (m[0] != 0x1 || m[1] != 0xf1 || m[2] != 0x1 || m[3] != 0x1)
+ runtime·throw("atomicor8");
+
*(uint64*)&j = ~0ULL;
if(j == j)
runtime·throw("float64nan");
diff --git a/src/runtime/signal_linux_power64.h b/src/runtime/signal_linux_power64.h
new file mode 100644
index 000000000..840648920
--- /dev/null
+++ b/src/runtime/signal_linux_power64.h
@@ -0,0 +1,49 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define SIG_REGS(ctxt) (*((Sigcontext*)&((Ucontext*)(ctxt))->uc_mcontext)->regs)
+
+#define SIG_R0(info, ctxt) (SIG_REGS(ctxt).gpr[0])
+#define SIG_R1(info, ctxt) (SIG_REGS(ctxt).gpr[1])
+#define SIG_R2(info, ctxt) (SIG_REGS(ctxt).gpr[2])
+#define SIG_R3(info, ctxt) (SIG_REGS(ctxt).gpr[3])
+#define SIG_R4(info, ctxt) (SIG_REGS(ctxt).gpr[4])
+#define SIG_R5(info, ctxt) (SIG_REGS(ctxt).gpr[5])
+#define SIG_R6(info, ctxt) (SIG_REGS(ctxt).gpr[6])
+#define SIG_R7(info, ctxt) (SIG_REGS(ctxt).gpr[7])
+#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).gpr[8])
+#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).gpr[9])
+#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).gpr[10])
+#define SIG_R11(info, ctxt) (SIG_REGS(ctxt).gpr[11])
+#define SIG_R12(info, ctxt) (SIG_REGS(ctxt).gpr[12])
+#define SIG_R13(info, ctxt) (SIG_REGS(ctxt).gpr[13])
+#define SIG_R14(info, ctxt) (SIG_REGS(ctxt).gpr[14])
+#define SIG_R15(info, ctxt) (SIG_REGS(ctxt).gpr[15])
+#define SIG_R16(info, ctxt) (SIG_REGS(ctxt).gpr[16])
+#define SIG_R17(info, ctxt) (SIG_REGS(ctxt).gpr[17])
+#define SIG_R18(info, ctxt) (SIG_REGS(ctxt).gpr[18])
+#define SIG_R19(info, ctxt) (SIG_REGS(ctxt).gpr[19])
+#define SIG_R20(info, ctxt) (SIG_REGS(ctxt).gpr[20])
+#define SIG_R21(info, ctxt) (SIG_REGS(ctxt).gpr[21])
+#define SIG_R22(info, ctxt) (SIG_REGS(ctxt).gpr[22])
+#define SIG_R23(info, ctxt) (SIG_REGS(ctxt).gpr[23])
+#define SIG_R24(info, ctxt) (SIG_REGS(ctxt).gpr[24])
+#define SIG_R25(info, ctxt) (SIG_REGS(ctxt).gpr[25])
+#define SIG_R26(info, ctxt) (SIG_REGS(ctxt).gpr[26])
+#define SIG_R27(info, ctxt) (SIG_REGS(ctxt).gpr[27])
+#define SIG_R28(info, ctxt) (SIG_REGS(ctxt).gpr[28])
+#define SIG_R29(info, ctxt) (SIG_REGS(ctxt).gpr[29])
+#define SIG_R30(info, ctxt) (SIG_REGS(ctxt).gpr[30])
+#define SIG_R31(info, ctxt) (SIG_REGS(ctxt).gpr[31])
+
+#define SIG_SP(info, ctxt) (SIG_REGS(ctxt).gpr[1])
+#define SIG_PC(info, ctxt) (SIG_REGS(ctxt).nip)
+#define SIG_TRAP(info, ctxt) (SIG_REGS(ctxt).trap)
+#define SIG_CTR(info, ctxt) (SIG_REGS(ctxt).ctr)
+#define SIG_LINK(info, ctxt) (SIG_REGS(ctxt).link)
+#define SIG_XER(info, ctxt) (SIG_REGS(ctxt).xer)
+#define SIG_CCR(info, ctxt) (SIG_REGS(ctxt).ccr)
+
+#define SIG_CODE0(info, ctxt) ((uintptr)(info)->si_code)
+#define SIG_FAULT(info, ctxt) (SIG_REGS(ctxt).dar)
diff --git a/src/runtime/signal_linux_power64le.h b/src/runtime/signal_linux_power64le.h
new file mode 100644
index 000000000..840648920
--- /dev/null
+++ b/src/runtime/signal_linux_power64le.h
@@ -0,0 +1,49 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define SIG_REGS(ctxt) (*((Sigcontext*)&((Ucontext*)(ctxt))->uc_mcontext)->regs)
+
+#define SIG_R0(info, ctxt) (SIG_REGS(ctxt).gpr[0])
+#define SIG_R1(info, ctxt) (SIG_REGS(ctxt).gpr[1])
+#define SIG_R2(info, ctxt) (SIG_REGS(ctxt).gpr[2])
+#define SIG_R3(info, ctxt) (SIG_REGS(ctxt).gpr[3])
+#define SIG_R4(info, ctxt) (SIG_REGS(ctxt).gpr[4])
+#define SIG_R5(info, ctxt) (SIG_REGS(ctxt).gpr[5])
+#define SIG_R6(info, ctxt) (SIG_REGS(ctxt).gpr[6])
+#define SIG_R7(info, ctxt) (SIG_REGS(ctxt).gpr[7])
+#define SIG_R8(info, ctxt) (SIG_REGS(ctxt).gpr[8])
+#define SIG_R9(info, ctxt) (SIG_REGS(ctxt).gpr[9])
+#define SIG_R10(info, ctxt) (SIG_REGS(ctxt).gpr[10])
+#define SIG_R11(info, ctxt) (SIG_REGS(ctxt).gpr[11])
+#define SIG_R12(info, ctxt) (SIG_REGS(ctxt).gpr[12])
+#define SIG_R13(info, ctxt) (SIG_REGS(ctxt).gpr[13])
+#define SIG_R14(info, ctxt) (SIG_REGS(ctxt).gpr[14])
+#define SIG_R15(info, ctxt) (SIG_REGS(ctxt).gpr[15])
+#define SIG_R16(info, ctxt) (SIG_REGS(ctxt).gpr[16])
+#define SIG_R17(info, ctxt) (SIG_REGS(ctxt).gpr[17])
+#define SIG_R18(info, ctxt) (SIG_REGS(ctxt).gpr[18])
+#define SIG_R19(info, ctxt) (SIG_REGS(ctxt).gpr[19])
+#define SIG_R20(info, ctxt) (SIG_REGS(ctxt).gpr[20])
+#define SIG_R21(info, ctxt) (SIG_REGS(ctxt).gpr[21])
+#define SIG_R22(info, ctxt) (SIG_REGS(ctxt).gpr[22])
+#define SIG_R23(info, ctxt) (SIG_REGS(ctxt).gpr[23])
+#define SIG_R24(info, ctxt) (SIG_REGS(ctxt).gpr[24])
+#define SIG_R25(info, ctxt) (SIG_REGS(ctxt).gpr[25])
+#define SIG_R26(info, ctxt) (SIG_REGS(ctxt).gpr[26])
+#define SIG_R27(info, ctxt) (SIG_REGS(ctxt).gpr[27])
+#define SIG_R28(info, ctxt) (SIG_REGS(ctxt).gpr[28])
+#define SIG_R29(info, ctxt) (SIG_REGS(ctxt).gpr[29])
+#define SIG_R30(info, ctxt) (SIG_REGS(ctxt).gpr[30])
+#define SIG_R31(info, ctxt) (SIG_REGS(ctxt).gpr[31])
+
+#define SIG_SP(info, ctxt) (SIG_REGS(ctxt).gpr[1])
+#define SIG_PC(info, ctxt) (SIG_REGS(ctxt).nip)
+#define SIG_TRAP(info, ctxt) (SIG_REGS(ctxt).trap)
+#define SIG_CTR(info, ctxt) (SIG_REGS(ctxt).ctr)
+#define SIG_LINK(info, ctxt) (SIG_REGS(ctxt).link)
+#define SIG_XER(info, ctxt) (SIG_REGS(ctxt).xer)
+#define SIG_CCR(info, ctxt) (SIG_REGS(ctxt).ccr)
+
+#define SIG_CODE0(info, ctxt) ((uintptr)(info)->si_code)
+#define SIG_FAULT(info, ctxt) (SIG_REGS(ctxt).dar)
diff --git a/src/runtime/signal_power64x.c b/src/runtime/signal_power64x.c
new file mode 100644
index 000000000..89c5c7848
--- /dev/null
+++ b/src/runtime/signal_power64x.c
@@ -0,0 +1,137 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build power64 power64le
+
+#include "runtime.h"
+#include "defs_GOOS_GOARCH.h"
+#include "os_GOOS.h"
+#include "signal_GOOS_GOARCH.h"
+#include "signals_GOOS.h"
+
+void
+runtime·dumpregs(Siginfo *info, void *ctxt)
+{
+ USED(info); USED(ctxt);
+ runtime·printf("r0 %X\t", SIG_R0(info, ctxt));
+ runtime·printf("r1 %X\n", SIG_R1(info, ctxt));
+ runtime·printf("r2 %X\t", SIG_R2(info, ctxt));
+ runtime·printf("r3 %X\n", SIG_R3(info, ctxt));
+ runtime·printf("r4 %X\t", SIG_R4(info, ctxt));
+ runtime·printf("r5 %X\n", SIG_R5(info, ctxt));
+ runtime·printf("r6 %X\t", SIG_R6(info, ctxt));
+ runtime·printf("r7 %X\n", SIG_R7(info, ctxt));
+ runtime·printf("r8 %X\t", SIG_R8(info, ctxt));
+ runtime·printf("r9 %X\n", SIG_R9(info, ctxt));
+ runtime·printf("r10 %X\t", SIG_R10(info, ctxt));
+ runtime·printf("r11 %X\n", SIG_R11(info, ctxt));
+ runtime·printf("r12 %X\t", SIG_R12(info, ctxt));
+ runtime·printf("r13 %X\n", SIG_R13(info, ctxt));
+ runtime·printf("r14 %X\t", SIG_R14(info, ctxt));
+ runtime·printf("r15 %X\n", SIG_R15(info, ctxt));
+ runtime·printf("r16 %X\t", SIG_R16(info, ctxt));
+ runtime·printf("r17 %X\n", SIG_R17(info, ctxt));
+ runtime·printf("r18 %X\t", SIG_R18(info, ctxt));
+ runtime·printf("r19 %X\n", SIG_R19(info, ctxt));
+ runtime·printf("r20 %X\t", SIG_R20(info, ctxt));
+ runtime·printf("r21 %X\n", SIG_R21(info, ctxt));
+ runtime·printf("r22 %X\t", SIG_R22(info, ctxt));
+ runtime·printf("r23 %X\n", SIG_R23(info, ctxt));
+ runtime·printf("r24 %X\t", SIG_R24(info, ctxt));
+ runtime·printf("r25 %X\n", SIG_R25(info, ctxt));
+ runtime·printf("r26 %X\t", SIG_R26(info, ctxt));
+ runtime·printf("r27 %X\n", SIG_R27(info, ctxt));
+ runtime·printf("r28 %X\t", SIG_R28(info, ctxt));
+ runtime·printf("r29 %X\n", SIG_R29(info, ctxt));
+ runtime·printf("r30 %X\t", SIG_R30(info, ctxt));
+ runtime·printf("r31 %X\n", SIG_R31(info, ctxt));
+ runtime·printf("pc %X\t", SIG_PC(info, ctxt));
+ runtime·printf("ctr %X\n", SIG_CTR(info, ctxt));
+ runtime·printf("link %X\t", SIG_LINK(info, ctxt));
+ runtime·printf("xer %X\n", SIG_XER(info, ctxt));
+ runtime·printf("ccr %X\t", SIG_CCR(info, ctxt));
+ runtime·printf("trap %X\n", SIG_TRAP(info, ctxt));
+}
+
+void
+runtime·sighandler(int32 sig, Siginfo *info, void *ctxt, G *gp)
+{
+ SigTab *t;
+ bool crash;
+
+ if(sig == SIGPROF) {
+ runtime·sigprof((uint8*)SIG_PC(info, ctxt), (uint8*)SIG_SP(info, ctxt), (uint8*)SIG_LINK(info, ctxt), gp, g->m);
+ return;
+ }
+ t = &runtime·sigtab[sig];
+ if(SIG_CODE0(info, ctxt) != SI_USER && (t->flags & SigPanic)) {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp->sig = sig;
+ gp->sigcode0 = SIG_CODE0(info, ctxt);
+ gp->sigcode1 = SIG_FAULT(info, ctxt);
+ gp->sigpc = SIG_PC(info, ctxt);
+
+ // We arrange link and pc to pretend the panicking
+ // function calls sigpanic directly.
+ // Always save LINK to stack so that panics in leaf
+ // functions are correctly handled. This smashes
+ // the stack frame but we're not going back there
+ // anyway.
+ SIG_SP(info, ctxt) -= sizeof(uintptr);
+ *(uintptr*)SIG_SP(info, ctxt) = SIG_LINK(info, ctxt);
+ // Don't bother saving PC if it's zero, which is
+ // probably a call to a nil func: the old link register
+ // is more useful in the stack trace.
+ if(gp->sigpc != 0)
+ SIG_LINK(info, ctxt) = gp->sigpc;
+ // In case we are panicking from external C code
+ SIG_R0(info, ctxt) = 0;
+ SIG_R30(info, ctxt) = (uintptr)gp;
+ SIG_PC(info, ctxt) = (uintptr)runtime·sigpanic;
+ return;
+ }
+
+ if(SIG_CODE0(info, ctxt) == SI_USER || (t->flags & SigNotify))
+ if(runtime·sigsend(sig))
+ return;
+ if(t->flags & SigKill)
+ runtime·exit(2);
+ if(!(t->flags & SigThrow))
+ return;
+
+ g->m->throwing = 1;
+ g->m->caughtsig = gp;
+ if(runtime·panicking) // traceback already printed
+ runtime·exit(2);
+ runtime·panicking = 1;
+
+ if(sig < 0 || sig >= NSIG)
+ runtime·printf("Signal %d\n", sig);
+ else
+ runtime·printf("%s\n", runtime·sigtab[sig].name);
+
+ runtime·printf("PC=%x\n", SIG_PC(info, ctxt));
+ if(g->m->lockedg != nil && g->m->ncgo > 0 && gp == g->m->g0) {
+ runtime·printf("signal arrived during cgo execution\n");
+ gp = g->m->lockedg;
+ }
+ runtime·printf("\n");
+
+ if(runtime·gotraceback(&crash)){
+ runtime·goroutineheader(gp);
+ runtime·traceback(SIG_PC(info, ctxt), SIG_SP(info, ctxt), SIG_LINK(info, ctxt), gp);
+ runtime·tracebackothers(gp);
+ runtime·printf("\n");
+ runtime·dumpregs(info, ctxt);
+ }
+
+ if(crash)
+ runtime·crash();
+
+ runtime·exit(2);
+}
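
The SigPanic path above fakes a call frame on a link-register machine: push the old LR onto the interrupted stack, point LR at the faulting pc, and redirect pc to sigpanic. A hedged Go sketch of that rearrangement, with a hypothetical context struct standing in for the real signal context and the stack modeled as a word-indexed slice:

type sigCtx struct {
	sp, pc, link uintptr
	stack        []uintptr // word-indexed stand-in for the signal stack
}

func fakeSigpanicCall(c *sigCtx, sigpanicPC uintptr) {
	c.sp--                 // make room for one word
	c.stack[c.sp] = c.link // always save LR so panics in leaf functions unwind
	if c.pc != 0 {
		c.link = c.pc // pretend the faulting pc called sigpanic
	}
	c.pc = sigpanicPC
}
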
diff --git a/src/runtime/string.go b/src/runtime/string.go
index 0809f89bc..882281605 100644
--- a/src/runtime/string.go
+++ b/src/runtime/string.go
@@ -39,22 +39,18 @@ func concatstrings(a []string) string {
return s
}
-//go:nosplit
func concatstring2(a [2]string) string {
return concatstrings(a[:])
}
-//go:nosplit
func concatstring3(a [3]string) string {
return concatstrings(a[:])
}
-//go:nosplit
func concatstring4(a [4]string) string {
return concatstrings(a[:])
}
-//go:nosplit
func concatstring5(a [5]string) string {
return concatstrings(a[:])
}
diff --git a/src/runtime/sys_darwin_386.s b/src/runtime/sys_darwin_386.s
index a961c71a8..3bf8b1d41 100644
--- a/src/runtime/sys_darwin_386.s
+++ b/src/runtime/sys_darwin_386.s
@@ -248,7 +248,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$40
MOVL BX, 0(SP)
MOVL $runtime·badsignal(SB), AX
CALL AX
- JMP sigtramp_ret
+ JMP ret
// save g
MOVL DI, 20(SP)
@@ -275,7 +275,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$40
MOVL 20(SP), DI
MOVL DI, g(CX)
-sigtramp_ret:
+ret:
// call sigreturn
MOVL context+16(FP), CX
MOVL style+4(FP), BX
diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s
index bd397d72a..8a8928e06 100644
--- a/src/runtime/sys_darwin_amd64.s
+++ b/src/runtime/sys_darwin_amd64.s
@@ -211,7 +211,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$64
MOVL DX, 0(SP)
MOVQ $runtime·badsignal(SB), AX
CALL AX
- JMP sigtramp_ret
+ JMP ret
// save g
MOVQ R10, 48(SP)
@@ -233,7 +233,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$64
MOVQ 48(SP), R10
MOVQ R10, g(BX)
-sigtramp_ret:
+ret:
// call sigreturn
MOVL $(0x2000000+184), AX // sigreturn(ucontext, infostyle)
MOVQ 32(SP), DI // saved ucontext
diff --git a/src/runtime/sys_dragonfly_386.s b/src/runtime/sys_dragonfly_386.s
index 161eaec19..71ece9ecb 100644
--- a/src/runtime/sys_dragonfly_386.s
+++ b/src/runtime/sys_dragonfly_386.s
@@ -217,7 +217,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$44
MOVL BX, 0(SP)
MOVL $runtime·badsignal(SB), AX
CALL AX
- JMP sigtramp_ret
+ JMP ret
// save g
MOVL DI, 20(SP)
@@ -243,7 +243,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$44
MOVL 20(SP), BX
MOVL BX, g(CX)
-sigtramp_ret:
+ret:
// call sigreturn
MOVL context+8(FP), AX
MOVL $0, 0(SP) // syscall gap
diff --git a/src/runtime/sys_freebsd_386.s b/src/runtime/sys_freebsd_386.s
index 2c40fc433..66d03c27d 100644
--- a/src/runtime/sys_freebsd_386.s
+++ b/src/runtime/sys_freebsd_386.s
@@ -197,7 +197,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$44
MOVL BX, 0(SP)
MOVL $runtime·badsignal(SB), AX
CALL AX
- JMP sigtramp_ret
+ JMP ret
// save g
MOVL DI, 20(SP)
@@ -223,7 +223,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$44
MOVL 20(SP), BX
MOVL BX, g(CX)
-sigtramp_ret:
+ret:
// call sigreturn
MOVL context+8(FP), AX
MOVL $0, 0(SP) // syscall gap
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index 33b91e872..d8d86ffad 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -115,7 +115,7 @@ TEXT time·now(SB),NOSPLIT,$16
// That leaves 104 for the gettime code to use. Hope that's enough!
MOVQ runtime·__vdso_clock_gettime_sym(SB), AX
CMPQ AX, $0
- JEQ fallback_gtod
+ JEQ fallback
MOVL $0, DI // CLOCK_REALTIME
LEAQ 0(SP), SI
CALL AX
@@ -124,7 +124,7 @@ TEXT time·now(SB),NOSPLIT,$16
MOVQ AX, sec+0(FP)
MOVL DX, nsec+8(FP)
RET
-fallback_gtod:
+fallback:
LEAQ 0(SP), DI
MOVQ $0, SI
MOVQ runtime·__vdso_gettimeofday_sym(SB), AX
@@ -141,7 +141,7 @@ TEXT runtime·nanotime(SB),NOSPLIT,$16
// See comment above in time.now.
MOVQ runtime·__vdso_clock_gettime_sym(SB), AX
CMPQ AX, $0
- JEQ fallback_gtod_nt
+ JEQ fallback
MOVL $1, DI // CLOCK_MONOTONIC
LEAQ 0(SP), SI
CALL AX
@@ -153,7 +153,7 @@ TEXT runtime·nanotime(SB),NOSPLIT,$16
ADDQ DX, AX
MOVQ AX, ret+0(FP)
RET
-fallback_gtod_nt:
+fallback:
LEAQ 0(SP), DI
MOVQ $0, SI
MOVQ runtime·__vdso_gettimeofday_sym(SB), AX
diff --git a/src/runtime/sys_linux_arm.s b/src/runtime/sys_linux_arm.s
index bd285f399..033a03642 100644
--- a/src/runtime/sys_linux_arm.s
+++ b/src/runtime/sys_linux_arm.s
@@ -373,20 +373,20 @@ TEXT cas<>(SB),NOSPLIT,$0
TEXT runtime·cas(SB),NOSPLIT,$0
MOVW ptr+0(FP), R2
MOVW old+4(FP), R0
-casagain:
+loop:
MOVW new+8(FP), R1
BL cas<>(SB)
- BCC cascheck
+ BCC check
MOVW $1, R0
MOVB R0, ret+12(FP)
RET
-cascheck:
+check:
// Kernel lies; double-check.
MOVW ptr+0(FP), R2
MOVW old+4(FP), R0
MOVW 0(R2), R3
CMP R0, R3
- BEQ casagain
+ BEQ loop
MOVW $0, R0
MOVB R0, ret+12(FP)
RET
diff --git a/src/runtime/sys_linux_power64x.s b/src/runtime/sys_linux_power64x.s
new file mode 100644
index 000000000..fb24d3e79
--- /dev/null
+++ b/src/runtime/sys_linux_power64x.s
@@ -0,0 +1,383 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build power64 power64le
+
+//
+// System calls and other sys.stuff for Power64, Linux
+//
+
+#include "zasm_GOOS_GOARCH.h"
+#include "textflag.h"
+
+#define SYS_exit 1
+#define SYS_read 3
+#define SYS_write 4
+#define SYS_open 5
+#define SYS_close 6
+#define SYS_fcntl 55
+#define SYS_gettimeofday 78
+#define SYS_select 82 // always returns -ENOSYS
+#define SYS_mmap 90
+#define SYS_munmap 91
+#define SYS_setitimer 104
+#define SYS_clone 120
+#define SYS_newselect 142
+#define SYS_sched_yield 158
+#define SYS_rt_sigreturn 172
+#define SYS_rt_sigaction 173
+#define SYS_rt_sigprocmask 174
+#define SYS_sigaltstack 185
+#define SYS_ugetrlimit 190
+#define SYS_madvise 205
+#define SYS_mincore 206
+#define SYS_gettid 207
+#define SYS_tkill 208
+#define SYS_futex 221
+#define SYS_sched_getaffinity 223
+#define SYS_exit_group 234
+#define SYS_epoll_create 236
+#define SYS_epoll_ctl 237
+#define SYS_epoll_wait 238
+#define SYS_clock_gettime 246
+#define SYS_epoll_create1 315
+
+TEXT runtime·exit(SB),NOSPLIT,$-8-4
+ MOVW code+0(FP), R3
+ SYSCALL $SYS_exit_group
+ RETURN
+
+TEXT runtime·exit1(SB),NOSPLIT,$-8-4
+ MOVW code+0(FP), R3
+ SYSCALL $SYS_exit
+ RETURN
+
+TEXT runtime·open(SB),NOSPLIT,$-8-20
+ MOVD name+0(FP), R3
+ MOVW mode+8(FP), R4
+ MOVW perm+12(FP), R5
+ SYSCALL $SYS_open
+ MOVW R3, ret+16(FP)
+ RETURN
+
+TEXT runtime·close(SB),NOSPLIT,$-8-12
+ MOVW fd+0(FP), R3
+ SYSCALL $SYS_close
+ MOVW R3, ret+8(FP)
+ RETURN
+
+TEXT runtime·write(SB),NOSPLIT,$-8-28
+ MOVD fd+0(FP), R3
+ MOVD p+8(FP), R4
+ MOVW n+16(FP), R5
+ SYSCALL $SYS_write
+ MOVW R3, ret+24(FP)
+ RETURN
+
+TEXT runtime·read(SB),NOSPLIT,$-8-28
+ MOVW fd+0(FP), R3
+ MOVD p+8(FP), R4
+ MOVW n+16(FP), R5
+ SYSCALL $SYS_read
+ MOVW R3, ret+24(FP)
+ RETURN
+
+TEXT runtime·getrlimit(SB),NOSPLIT,$-8-20
+ MOVW kind+0(FP), R3
+ MOVD limit+8(FP), R4
+ SYSCALL $SYS_ugetrlimit
+ MOVW R3, ret+16(FP)
+ RETURN
+
+TEXT runtime·usleep(SB),NOSPLIT,$16-4
+ MOVW usec+0(FP), R3
+ MOVD R3, R5
+ MOVW $1000000, R4
+ DIVD R4, R3
+ MOVD R3, 8(R1)
+ MULLD R3, R4
+ SUB R4, R5
+ MOVD R5, 16(R1)
+
+ // select(0, 0, 0, 0, &tv)
+ MOVW $0, R3
+ MOVW $0, R4
+ MOVW $0, R5
+ MOVW $0, R6
+ ADD $8, R1, R7
+ SYSCALL $SYS_newselect
+ RETURN
+
+TEXT runtime·raise(SB),NOSPLIT,$-8
+ SYSCALL $SYS_gettid
+ MOVW R3, R3 // arg 1 tid
+ MOVW sig+0(FP), R4 // arg 2
+ SYSCALL $SYS_tkill
+ RETURN
+
+TEXT runtime·setitimer(SB),NOSPLIT,$-8-24
+ MOVW mode+0(FP), R3
+ MOVD new+8(FP), R4
+ MOVD old+16(FP), R5
+ SYSCALL $SYS_setitimer
+ RETURN
+
+TEXT runtime·mincore(SB),NOSPLIT,$-8-28
+ MOVD addr+0(FP), R3
+ MOVD n+8(FP), R4
+ MOVD dst+16(FP), R5
+ SYSCALL $SYS_mincore
+ MOVW R3, ret+24(FP)
+ RETURN
+
+// func now() (sec int64, nsec int32)
+TEXT time·now(SB),NOSPLIT,$16
+ MOVD $0(R1), R3
+ MOVD $0, R4
+ SYSCALL $SYS_gettimeofday
+ MOVD 0(R1), R3 // sec
+ MOVD 8(R1), R5 // usec
+ MOVD $1000, R4
+ MULLD R4, R5
+ MOVD R3, sec+0(FP)
+ MOVW R5, nsec+8(FP)
+ RETURN
+
+TEXT runtime·nanotime(SB),NOSPLIT,$16
+ MOVW $1, R3 // CLOCK_MONOTONIC
+ MOVD $0(R1), R4
+ SYSCALL $SYS_clock_gettime
+ MOVD 0(R1), R3 // sec
+ MOVD 8(R1), R5 // nsec
+ // sec is in R3, nsec in R5
+ // return nsec in R3
+ MOVD $1000000000, R4
+ MULLD R4, R3
+ ADD R5, R3
+ MOVD R3, ret+0(FP)
+ RETURN
+
+TEXT runtime·rtsigprocmask(SB),NOSPLIT,$-8-28
+ MOVW sig+0(FP), R3
+ MOVD new+8(FP), R4
+ MOVD old+16(FP), R5
+ MOVW size+24(FP), R6
+ SYSCALL $SYS_rt_sigprocmask
+ BVC 2(PC)
+ MOVD R0, 0xf1(R0) // crash
+ RETURN
+
+TEXT runtime·rt_sigaction(SB),NOSPLIT,$-8-36
+ MOVD sig+0(FP), R3
+ MOVD new+8(FP), R4
+ MOVD old+16(FP), R5
+ MOVD size+24(FP), R6
+ SYSCALL $SYS_rt_sigaction
+ MOVW R3, ret+32(FP)
+ RETURN
+
+#ifdef GOARCH_power64le
+// power64le doesn't need function descriptors
+TEXT runtime·sigtramp(SB),NOSPLIT,$64
+#else
+// function descriptor for the real sigtramp
+TEXT runtime·sigtramp(SB),NOSPLIT,$-8
+ DWORD $runtime·_sigtramp(SB)
+ DWORD $0
+ DWORD $0
+TEXT runtime·_sigtramp(SB),NOSPLIT,$64
+#endif
+ // initialize essential registers (just in case)
+ BL runtime·reginit(SB)
+
+ // check that g exists
+ CMP g, $0
+ BNE 6(PC)
+ MOVD R3, 8(R1)
+ MOVD $runtime·badsignal(SB), R31
+ MOVD R31, CTR
+ BL (CTR)
+ RETURN
+
+ // save g
+ MOVD g, 40(R1)
+ MOVD g, R6
+
+ // g = m->gsignal
+ MOVD g_m(g), R7
+ MOVD m_gsignal(R7), g
+
+ MOVW R3, 8(R1)
+ MOVD R4, 16(R1)
+ MOVD R5, 24(R1)
+ MOVD R6, 32(R1)
+
+ BL runtime·sighandler(SB)
+
+ // restore g
+ MOVD 40(R1), g
+
+ RETURN
+
+TEXT runtime·mmap(SB),NOSPLIT,$-8
+ MOVD addr+0(FP), R3
+ MOVD n+8(FP), R4
+ MOVW prot+16(FP), R5
+ MOVW flags+20(FP), R6
+ MOVW fd+24(FP), R7
+ MOVW off+28(FP), R8
+
+ SYSCALL $SYS_mmap
+ MOVD R3, ret+32(FP)
+ RETURN
+
+TEXT runtime·munmap(SB),NOSPLIT,$-8
+ MOVD addr+0(FP), R3
+ MOVD n+8(FP), R4
+ SYSCALL $SYS_munmap
+ BVC 2(PC)
+ MOVD R0, 0xf3(R0)
+ RETURN
+
+TEXT runtime·madvise(SB),NOSPLIT,$-8
+ MOVD addr+0(FP), R3
+ MOVD n+8(FP), R4
+ MOVW flags+16(FP), R5
+ SYSCALL $SYS_madvise
+ // ignore failure - maybe pages are locked
+ RETURN
+
+// int64 futex(int32 *uaddr, int32 op, int32 val,
+// struct timespec *timeout, int32 *uaddr2, int32 val2);
+TEXT runtime·futex(SB),NOSPLIT,$-8
+ MOVD addr+0(FP), R3
+ MOVW op+8(FP), R4
+ MOVW val+12(FP), R5
+ MOVD ts+16(FP), R6
+ MOVD addr2+24(FP), R7
+ MOVW val3+32(FP), R8
+ SYSCALL $SYS_futex
+ MOVW R3, ret+40(FP)
+ RETURN
+
+// int64 clone(int32 flags, void *stk, M *mp, G *gp, void (*fn)(void));
+TEXT runtime·clone(SB),NOSPLIT,$-8
+ MOVW flags+0(FP), R3
+ MOVD stk+8(FP), R4
+
+ // Copy mp, gp, fn off parent stack for use by child.
+ // Careful: Linux system call clobbers ???.
+ MOVD mm+16(FP), R7
+ MOVD gg+24(FP), R8
+ MOVD fn+32(FP), R12
+
+ MOVD R7, -8(R4)
+ MOVD R8, -16(R4)
+ MOVD R12, -24(R4)
+ MOVD $1234, R7
+ MOVD R7, -32(R4)
+
+ SYSCALL $SYS_clone
+
+ // In parent, return.
+ CMP R3, $0
+ BEQ 3(PC)
+ MOVW R3, ret+40(FP)
+ RETURN
+
+ // In child, on new stack.
+ // initialize essential registers
+ BL runtime·reginit(SB)
+ MOVD -32(R1), R7
+ CMP R7, $1234
+ BEQ 2(PC)
+ MOVD R0, 0(R0)
+
+ // Initialize m->procid to Linux tid
+ SYSCALL $SYS_gettid
+
+ MOVD -24(R1), R12
+ MOVD -16(R1), R8
+ MOVD -8(R1), R7
+
+ MOVD R3, m_procid(R7)
+
+ // TODO: setup TLS.
+
+ // In child, set up new stack
+ MOVD R7, g_m(R8)
+ MOVD R8, g
+ //CALL runtime·stackcheck(SB)
+
+ // Call fn
+ MOVD R12, CTR
+ BL (CTR)
+
+ // It shouldn't return. If it does, exit
+ MOVW $111, R3
+ SYSCALL $SYS_exit_group
+ BR -2(PC) // keep exiting
+
+TEXT runtime·sigaltstack(SB),NOSPLIT,$-8
+ MOVD new+0(FP), R3
+ MOVD old+8(FP), R4
+ SYSCALL $SYS_sigaltstack
+ BVC 2(PC)
+ MOVD R0, 0xf1(R0) // crash
+ RETURN
+
+TEXT runtime·osyield(SB),NOSPLIT,$-8
+ SYSCALL $SYS_sched_yield
+ RETURN
+
+TEXT runtime·sched_getaffinity(SB),NOSPLIT,$-8
+ MOVD pid+0(FP), R3
+ MOVD len+8(FP), R4
+ MOVD buf+16(FP), R5
+ SYSCALL $SYS_sched_getaffinity
+ MOVW R3, ret+24(FP)
+ RETURN
+
+// int32 runtime·epollcreate(int32 size);
+TEXT runtime·epollcreate(SB),NOSPLIT,$-8
+ MOVW size+0(FP), R3
+ SYSCALL $SYS_epoll_create
+ MOVW R3, ret+8(FP)
+ RETURN
+
+// int32 runtime·epollcreate1(int32 flags);
+TEXT runtime·epollcreate1(SB),NOSPLIT,$-8
+ MOVW flags+0(FP), R3
+ SYSCALL $SYS_epoll_create1
+ MOVW R3, ret+8(FP)
+ RETURN
+
+// func epollctl(epfd, op, fd int32, ev *epollEvent) int
+TEXT runtime·epollctl(SB),NOSPLIT,$-8
+ MOVW epfd+0(FP), R3
+ MOVW op+4(FP), R4
+ MOVW fd+8(FP), R5
+ MOVD ev+16(FP), R6
+ SYSCALL $SYS_epoll_ctl
+ MOVW R3, ret+24(FP)
+ RETURN
+
+// int32 runtime·epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout);
+TEXT runtime·epollwait(SB),NOSPLIT,$-8
+ MOVW epfd+0(FP), R3
+ MOVD ev+8(FP), R4
+ MOVW nev+16(FP), R5
+ MOVW timeout+20(FP), R6
+ SYSCALL $SYS_epoll_wait
+ MOVW R3, ret+24(FP)
+ RETURN
+
+// void runtime·closeonexec(int32 fd);
+TEXT runtime·closeonexec(SB),NOSPLIT,$-8
+ MOVW fd+0(FP), R3 // fd
+ MOVD $2, R4 // F_SETFD
+ MOVD $1, R5 // FD_CLOEXEC
+ SYSCALL $SYS_fcntl
+ RETURN
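
runtime·usleep above has no nanosleep wrapper yet; it splits the microsecond count into whole seconds and a remainder with a divide and a multiply-subtract, stores the pair on the stack, and hands it to newselect as the timeout. The arithmetic in Go, as an illustrative helper (splitUsec is not part of the patch):

func splitUsec(usec uint32) (sec, rem uint32) {
	sec = usec / 1000000     // DIVD: whole seconds
	rem = usec - sec*1000000 // MULLD + SUB: leftover microseconds
	return sec, rem
}
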
diff --git a/src/runtime/sys_nacl_386.s b/src/runtime/sys_nacl_386.s
index 47985f31f..16cd721d9 100644
--- a/src/runtime/sys_nacl_386.s
+++ b/src/runtime/sys_nacl_386.s
@@ -293,7 +293,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$0
MOVL $0, 0(SP)
MOVL $runtime·badsignal(SB), AX
CALL AX
- JMP sigtramp_ret
+ JMP ret
// save g
MOVL DI, 20(SP)
@@ -317,7 +317,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$0
MOVL 20(SP), BX
MOVL BX, g(CX)
-sigtramp_ret:
+ret:
// Enable exceptions again.
NACL_SYSCALL(SYS_exception_clear_flag)
diff --git a/src/runtime/sys_nacl_amd64p32.s b/src/runtime/sys_nacl_amd64p32.s
index 4eb4aacdd..9cfbef6ef 100644
--- a/src/runtime/sys_nacl_amd64p32.s
+++ b/src/runtime/sys_nacl_amd64p32.s
@@ -338,7 +338,6 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$80
MOVL 20(SP), BX
MOVL BX, g(CX)
-sigtramp_ret:
// Enable exceptions again.
NACL_SYSCALL(SYS_exception_clear_flag)
diff --git a/src/runtime/sys_nacl_arm.s b/src/runtime/sys_nacl_arm.s
index d354ab483..432deadf4 100644
--- a/src/runtime/sys_nacl_arm.s
+++ b/src/runtime/sys_nacl_arm.s
@@ -269,7 +269,6 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$80
// restore g
MOVW 20(R13), g
-sigtramp_ret:
// Enable exceptions again.
NACL_SYSCALL(SYS_exception_clear_flag)
diff --git a/src/runtime/sys_openbsd_386.s b/src/runtime/sys_openbsd_386.s
index 5cda7768a..b1ae5ecee 100644
--- a/src/runtime/sys_openbsd_386.s
+++ b/src/runtime/sys_openbsd_386.s
@@ -186,7 +186,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$44
MOVL BX, 0(SP)
MOVL $runtime·badsignal(SB), AX
CALL AX
- JMP sigtramp_ret
+ JMP ret
// save g
MOVL DI, 20(SP)
@@ -212,7 +212,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$44
MOVL 20(SP), BX
MOVL BX, g(CX)
-sigtramp_ret:
+ret:
// call sigreturn
MOVL context+8(FP), AX
MOVL $0, 0(SP) // syscall gap
diff --git a/src/runtime/sys_power64x.c b/src/runtime/sys_power64x.c
new file mode 100644
index 000000000..79d976255
--- /dev/null
+++ b/src/runtime/sys_power64x.c
@@ -0,0 +1,38 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build power64 power64le
+
+#include "runtime.h"
+
+// adjust Gobuf as if it executed a call to fn with context ctxt
+// and then did an immediate Gosave.
+void
+runtime·gostartcall(Gobuf *gobuf, void (*fn)(void), void *ctxt)
+{
+ if(gobuf->lr != 0)
+ runtime·throw("invalid use of gostartcall");
+ gobuf->lr = gobuf->pc;
+ gobuf->pc = (uintptr)fn;
+ gobuf->ctxt = ctxt;
+}
+
+// Called to rewind context saved during morestack back to beginning of function.
+// To help us, the linker emits a jmp back to the beginning right after the
+// call to morestack. We just have to decode and apply that jump.
+void
+runtime·rewindmorestack(Gobuf *gobuf)
+{
+ uint32 inst;
+
+ inst = *(uint32*)gobuf->pc;
+ if((gobuf->pc&3) == 0 && (inst>>24) == 0x4b && (inst&3) == 0) {
+ //runtime·printf("runtime: rewind pc=%p to pc=%p\n", gobuf->pc, gobuf->pc + ((int32)(inst<<8)>>8));
+ gobuf->pc += (int32)(inst<<8)>>8;
+ return;
+ }
+ runtime·printf("runtime: pc=%p %x\n", gobuf->pc, inst);
+ runtime·throw("runtime: misuse of rewindmorestack");
+}
+
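
rewindmorestack relies on the linker emitting an unconditional backward branch right after the morestack call (top instruction byte 0x4b with AA and LK clear); it recovers the target by sign-extending the 24-bit displacement and adding it to pc. A hedged Go sketch of that decode (branchTarget is a hypothetical name):

func branchTarget(pc uint64, inst uint32) uint64 {
	// Shift left then arithmetic-shift right to sign-extend the low
	// 24 bits, which hold the byte displacement (AA and LK are zero).
	disp := int32(inst<<8) >> 8
	return pc + uint64(int64(disp))
}
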
diff --git a/src/runtime/sys_solaris_amd64.s b/src/runtime/sys_solaris_amd64.s
index 0ebdab6ee..3981893b0 100644
--- a/src/runtime/sys_solaris_amd64.s
+++ b/src/runtime/sys_solaris_amd64.s
@@ -287,24 +287,24 @@ TEXT runtime·usleep1(SB),NOSPLIT,$0
// Execute call on m->g0.
get_tls(R15)
CMPQ R15, $0
- JE usleep1_noswitch
+ JE noswitch
MOVQ g(R15), R13
CMPQ R13, $0
- JE usleep1_noswitch
+ JE noswitch
MOVQ g_m(R13), R13
CMPQ R13, $0
- JE usleep1_noswitch
+ JE noswitch
// TODO(aram): do something about the cpu profiler here.
MOVQ m_g0(R13), R14
CMPQ g(R15), R14
- JNE usleep1_switch
+ JNE switch
// executing on m->g0 already
CALL AX
RET
-usleep1_switch:
+switch:
// Switch to m->g0 stack and back.
MOVQ (g_sched+gobuf_sp)(R14), R14
MOVQ SP, -8(R14)
@@ -313,7 +313,7 @@ usleep1_switch:
MOVQ 0(SP), SP
RET
-usleep1_noswitch:
+noswitch:
// Not a Go-managed thread. Do not switch stack.
CALL AX
RET
diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s
index 932fe9dd2..13fb5bdc9 100644
--- a/src/runtime/sys_windows_386.s
+++ b/src/runtime/sys_windows_386.s
@@ -106,7 +106,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$0-0
MOVL g_m(DX), BX
MOVL m_g0(BX), BX
CMPL DX, BX
- JEQ sigtramp_g0
+ JEQ g0
// switch to the g0 stack
get_tls(BP)
@@ -123,7 +123,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$0-0
MOVL SP, 36(DI)
MOVL DI, SP
-sigtramp_g0:
+g0:
MOVL 0(CX), BX // ExceptionRecord*
MOVL 4(CX), CX // Context*
MOVL BX, 0(SP)
@@ -383,12 +383,12 @@ TEXT runtime·usleep1(SB),NOSPLIT,$0
MOVL m_g0(BP), SI
CMPL g(CX), SI
- JNE usleep1_switch
+ JNE switch
// executing on m->g0 already
CALL AX
- JMP usleep1_ret
+ JMP ret
-usleep1_switch:
+switch:
// Switch to m->g0 stack and back.
MOVL (g_sched+gobuf_sp)(SI), SI
MOVL SP, -4(SI)
@@ -396,7 +396,7 @@ usleep1_switch:
CALL AX
MOVL 0(SP), SP
-usleep1_ret:
+ret:
get_tls(CX)
MOVL g(CX), BP
MOVL g_m(BP), BP
diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s
index e6190ce68..8b95f6d6c 100644
--- a/src/runtime/sys_windows_amd64.s
+++ b/src/runtime/sys_windows_amd64.s
@@ -138,7 +138,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$0-0
MOVQ g_m(DX), BX
MOVQ m_g0(BX), BX
CMPQ DX, BX
- JEQ sigtramp_g0
+ JEQ g0
// switch to g0 stack
get_tls(BP)
@@ -157,7 +157,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$0-0
MOVQ SP, 104(DI)
MOVQ DI, SP
-sigtramp_g0:
+g0:
MOVQ 0(CX), BX // ExceptionRecord*
MOVQ 8(CX), CX // Context*
MOVQ BX, 0(SP)
@@ -407,12 +407,12 @@ TEXT runtime·usleep1(SB),NOSPLIT,$0
MOVQ m_g0(R13), R14
CMPQ g(R15), R14
- JNE usleep1_switch
+ JNE switch
// executing on m->g0 already
CALL AX
- JMP usleep1_ret
+ JMP ret
-usleep1_switch:
+switch:
// Switch to m->g0 stack and back.
MOVQ (g_sched+gobuf_sp)(R14), R14
MOVQ SP, -8(R14)
@@ -420,7 +420,7 @@ usleep1_switch:
CALL AX
MOVQ 0(SP), SP
-usleep1_ret:
+ret:
MOVQ $0, m_libcallsp(R13)
RET
diff --git a/src/runtime/thunk.s b/src/runtime/thunk.s
index 0a0f147c4..1a5b65502 100644
--- a/src/runtime/thunk.s
+++ b/src/runtime/thunk.s
@@ -10,6 +10,12 @@
#ifdef GOARCH_arm
#define JMP B
#endif
+#ifdef GOARCH_power64
+#define JMP BR
+#endif
+#ifdef GOARCH_power64le
+#define JMP BR
+#endif
TEXT net·runtimeNano(SB),NOSPLIT,$0-0
JMP runtime·nanotime(SB)