path: root/src/runtime/asm_amd64.s
author	Russ Cox <rsc@golang.org>	2014-10-28 21:50:16 -0400
committer	Russ Cox <rsc@golang.org>	2014-10-28 21:50:16 -0400
commit	05d42f8a61328aa7ea55887f601286b597caf0da (patch)
tree	79010d336cdcf748fe4c23cbe7d8487478072c50 /src/runtime/asm_amd64.s
parent	c9851bece1a56434cf60ad6b98694a027599d1f6 (diff)
download	go-05d42f8a61328aa7ea55887f601286b597caf0da.tar.gz
[dev.power64] cmd/5a, cmd/6a, cmd/8a, cmd/9a: make labels function-scoped
I removed support for jumping between functions years ago, as part of doing the instruction layout for each function separately. Given that, it makes sense to treat labels as function-scoped. This lets each function have its own 'loop' label, for example. Makes the assembly much cleaner and removes the last reason anyone would reach for the 123(PC) form instead.

Note that this is on the dev.power64 branch, but it changes all the assemblers. The change will ship in Go 1.5 (perhaps after being ported into the new assembler).

Came up as part of CL 167730043.

LGTM=r
R=r
CC=austin, dave, golang-codereviews, minux
https://codereview.appspot.com/159670043
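To illustrate what function-scoped labels allow, here is a minimal sketch (not part of this CL; the functions sumToA and sumToB are hypothetical, assumed to be declared in an accompanying Go file): two TEXT blocks in the same file can each declare their own short 'loop' and 'done' labels, where previously every label name had to be unique across the file, forcing prefixes such as aesloop or cmp_loop.

	#include "textflag.h"

	// Hypothetical example, not from the Go runtime:
	// both functions below reuse the labels 'loop' and 'done'.

	// func sumToA(n int64) int64
	TEXT ·sumToA(SB), NOSPLIT, $0-16
		MOVQ	n+0(FP), CX
		XORQ	AX, AX
	loop:	// function-scoped: does not clash with 'loop' in sumToB
		TESTQ	CX, CX
		JZ	done
		ADDQ	CX, AX
		DECQ	CX
		JMP	loop
	done:
		MOVQ	AX, ret+8(FP)
		RET

	// func sumToB(n int64) int64
	TEXT ·sumToB(SB), NOSPLIT, $0-16
		MOVQ	n+0(FP), CX
		XORQ	AX, AX
	loop:	// a second, independent 'loop' label in the same file
		TESTQ	CX, CX
		JZ	done
		ADDQ	CX, AX
		DECQ	CX
		JMP	loop
	done:
		MOVQ	AX, ret+8(FP)
		RET

This is exactly why the diff below can strip the aes, cmp_, and indexbyte_ prefixes from the runtime's labels: each label now resolves only within its enclosing TEXT block.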
Diffstat (limited to 'src/runtime/asm_amd64.s')
-rw-r--r--	src/runtime/asm_amd64.s	78
1 file changed, 39 insertions, 39 deletions
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 709834180..7a0fdfa73 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -461,11 +461,11 @@ TEXT runtime·cas64(SB), NOSPLIT, $0-25
MOVQ new+16(FP), CX
LOCK
CMPXCHGQ CX, 0(BX)
- JNZ cas64_fail
+ JNZ fail
MOVL $1, AX
MOVB AX, ret+24(FP)
RET
-cas64_fail:
+fail:
MOVL $0, AX
MOVB AX, ret+24(FP)
RET
@@ -876,24 +876,24 @@ TEXT runtime·aeshashbody(SB),NOSPLIT,$0-32
MOVO runtime·aeskeysched+0(SB), X2
MOVO runtime·aeskeysched+16(SB), X3
CMPQ CX, $16
- JB aessmall
-aesloop:
+ JB small
+loop:
CMPQ CX, $16
- JBE aesloopend
+ JBE loopend
MOVOU (AX), X1
AESENC X2, X0
AESENC X1, X0
SUBQ $16, CX
ADDQ $16, AX
- JMP aesloop
+ JMP loop
// 1-16 bytes remaining
-aesloopend:
+loopend:
// This load may overlap with the previous load above.
// We'll hash some bytes twice, but that's ok.
MOVOU -16(AX)(CX*1), X1
JMP partial
// 0-15 bytes
-aessmall:
+small:
TESTQ CX, CX
JE finalize // 0 bytes
@@ -1036,18 +1036,18 @@ TEXT runtime·eqstring(SB),NOSPLIT,$0-33
MOVQ s1len+8(FP), AX
MOVQ s2len+24(FP), BX
CMPQ AX, BX
- JNE different
+ JNE noteq
MOVQ s1str+0(FP), SI
MOVQ s2str+16(FP), DI
CMPQ SI, DI
- JEQ same
+ JEQ eq
CALL runtime·memeqbody(SB)
MOVB AX, v+32(FP)
RET
-same:
+eq:
MOVB $1, v+32(FP)
RET
-different:
+noteq:
MOVB $0, v+32(FP)
RET
@@ -1170,29 +1170,29 @@ TEXT runtime·cmpbytes(SB),NOSPLIT,$0-56
// AX = 1/0/-1
TEXT runtime·cmpbody(SB),NOSPLIT,$0-0
CMPQ SI, DI
- JEQ cmp_allsame
+ JEQ allsame
CMPQ BX, DX
MOVQ DX, BP
CMOVQLT BX, BP // BP = min(alen, blen) = # of bytes to compare
CMPQ BP, $8
- JB cmp_small
+ JB small
-cmp_loop:
+loop:
CMPQ BP, $16
- JBE cmp_0through16
+ JBE _0through16
MOVOU (SI), X0
MOVOU (DI), X1
PCMPEQB X0, X1
PMOVMSKB X1, AX
XORQ $0xffff, AX // convert EQ to NE
- JNE cmp_diff16 // branch if at least one byte is not equal
+ JNE diff16 // branch if at least one byte is not equal
ADDQ $16, SI
ADDQ $16, DI
SUBQ $16, BP
- JMP cmp_loop
+ JMP loop
// AX = bit mask of differences
-cmp_diff16:
+diff16:
BSFQ AX, BX // index of first byte that differs
XORQ AX, AX
MOVB (SI)(BX*1), CX
@@ -1202,21 +1202,21 @@ cmp_diff16:
RET
// 0 through 16 bytes left, alen>=8, blen>=8
-cmp_0through16:
+_0through16:
CMPQ BP, $8
- JBE cmp_0through8
+ JBE _0through8
MOVQ (SI), AX
MOVQ (DI), CX
CMPQ AX, CX
- JNE cmp_diff8
-cmp_0through8:
+ JNE diff8
+_0through8:
MOVQ -8(SI)(BP*1), AX
MOVQ -8(DI)(BP*1), CX
CMPQ AX, CX
- JEQ cmp_allsame
+ JEQ allsame
// AX and CX contain parts of a and b that differ.
-cmp_diff8:
+diff8:
BSWAPQ AX // reverse order of bytes
BSWAPQ CX
XORQ AX, CX
@@ -1227,44 +1227,44 @@ cmp_diff8:
RET
// 0-7 bytes in common
-cmp_small:
+small:
LEAQ (BP*8), CX // bytes left -> bits left
NEGQ CX // - bits left (== 64 - bits left mod 64)
- JEQ cmp_allsame
+ JEQ allsame
// load bytes of a into high bytes of SI
CMPB SI, $0xf8
- JA cmp_si_high
+ JA si_high
MOVQ (SI), SI
- JMP cmp_si_finish
-cmp_si_high:
+ JMP si_finish
+si_high:
MOVQ -8(SI)(BP*1), SI
SHRQ CX, SI
-cmp_si_finish:
+si_finish:
SHLQ CX, SI
// load bytes of b into high bytes of DI
CMPB DI, $0xf8
- JA cmp_di_high
+ JA di_high
MOVQ (DI), DI
- JMP cmp_di_finish
-cmp_di_high:
+ JMP di_finish
+di_high:
MOVQ -8(DI)(BP*1), DI
SHRQ CX, DI
-cmp_di_finish:
+di_finish:
SHLQ CX, DI
BSWAPQ SI // reverse order of bytes
BSWAPQ DI
XORQ SI, DI // find bit differences
- JEQ cmp_allsame
+ JEQ allsame
BSRQ DI, CX // index of highest bit difference
SHRQ CX, SI // move a's bit to bottom
ANDQ $1, SI // mask bit
LEAQ -1(SI*2), AX // 1/0 => +1/-1
RET
-cmp_allsame:
+allsame:
XORQ AX, AX
XORQ CX, CX
CMPQ BX, DX
@@ -1299,7 +1299,7 @@ TEXT runtime·indexbytebody(SB),NOSPLIT,$0
MOVQ SI, DI
CMPQ BX, $16
- JLT indexbyte_small
+ JLT small
// round up to first 16-byte boundary
TESTQ $15, SI
@@ -1357,7 +1357,7 @@ failure:
RET
// handle for lengths < 16
-indexbyte_small:
+small:
MOVQ BX, CX
REPN; SCASB
JZ success