author     Denys Vlasenko <vda.linux@googlemail.com>   2022-01-23 09:27:30 +0100
committer  Denys Vlasenko <vda.linux@googlemail.com>   2022-01-23 12:57:27 +0100
commit     39369ff460f3e2dbfec7f6be181b2fb98f3c1867 (patch)
tree       1a67a6376490c729fb58944002cdcabb262b2f50 /libbb
parent     1e825acf8d715fe49af040cb02f9e96c26955832 (diff)
download   busybox-39369ff460f3e2dbfec7f6be181b2fb98f3c1867.tar.gz
libbb/sha1: use SSE2 in unrolled x86-64 code. ~10% faster
function                                             old     new   delta
.rodata                                           108241  108305     +64
sha1_process_block64                                3502    3495      -7
------------------------------------------------------------------------------
(add/remove: 5/0 grow/shrink: 1/1 up/down: 64/-7)             Total: 57 bytes

Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Diffstat (limited to 'libbb')
-rw-r--r--   libbb/hash_md5_sha_x86-64.S      992
-rwxr-xr-x   libbb/hash_md5_sha_x86-64.S.sh   440
2 files changed, 854 insertions, 578 deletions
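
Note on the vectorized message schedule: each "# PREP" block added in the diff below computes four new schedule words W[t..t+3] = rotl32(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1) in one step, pre-adds the current round constant, and spills the result to the stack for the scalar rounds. Lane 3 depends on W[t], which is only produced in lane 0 of the same step, so the block first computes it with that term zeroed and patches it afterwards by xoring in rotl32(W[t],1), expressed as a rotate by 2 of the unrotated lane-0 value. SSE2 has no per-lane rotate, hence the pcmpgtd/paddd/psubd sequence for the rotate by 1. The C intrinsics sketch below only illustrates that dataflow; the function name sha1_prep_sse2 and the spill parameter are illustrative and not part of the patch.

    #include <emmintrin.h>  /* SSE2 intrinsics */
    #include <stdint.h>

    /* Given the 16 most recent schedule words packed as
     *   w0 = (W[t-16]..W[t-13]), w1 = (W[t-12]..W[t-9]),
     *   w2 = (W[t-8] ..W[t-5]),  w3 = (W[t-4] ..W[t-1]),
     * return (W[t]..W[t+3]) and store W+K for the scalar rounds.
     * Mirrors one PREP block from the patch; names are illustrative only. */
    static __m128i sha1_prep_sse2(__m128i w0, __m128i w1, __m128i w2, __m128i w3,
                                  __m128i k, uint32_t *spill)
    {
        __m128i t1, t2, msb, rotated;

        t1 = _mm_srli_si128(w3, 4);        /* (W[t-3],W[t-2],W[t-1],0)           */
        t2 = _mm_shuffle_epi32(w0, 0x4e);  /* (W[t-14],W[t-13],x,x)              */
        t2 = _mm_unpacklo_epi64(t2, w1);   /* (W[t-14]..W[t-11])                 */
        w0 = _mm_xor_si128(w0, w2);        /* W[t-16] ^ W[t-8]                   */
        t2 = _mm_xor_si128(t2, t1);        /* W[t-14] ^ W[t-3] (lane 3: term 0)  */
        w0 = _mm_xor_si128(w0, t2);        /* full xor; lane 3 still misses W[t] */

        /* rotl32 by 1 per lane: shift left by one, then add 1 back into the
         * lanes whose sign bit was set (SSE2 has no vector rotate). */
        msb     = _mm_cmpgt_epi32(_mm_setzero_si128(), w0); /* -1 where msb was 1 */
        rotated = _mm_sub_epi32(_mm_add_epi32(w0, w0), msb);

        /* Fix up lane 3: xor in rotl32(W[t],1) = rotl32(unrotated lane 0, 2). */
        t2 = _mm_slli_si128(w0, 12);       /* (0,0,0, unrotated lane 0)          */
        t2 = _mm_xor_si128(_mm_slli_epi32(t2, 2), _mm_srli_epi32(t2, 30));
        w0 = _mm_xor_si128(rotated, t2);   /* = (W[t]..W[t+3])                   */

        /* Pre-add the round constant and park it for the integer rounds. */
        _mm_storeu_si128((__m128i *)spill, _mm_add_epi32(w0, k));
        return w0;
    }
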
diff --git a/libbb/hash_md5_sha_x86-64.S b/libbb/hash_md5_sha_x86-64.S
index 87fb616a1..069a18719 100644
--- a/libbb/hash_md5_sha_x86-64.S
+++ b/libbb/hash_md5_sha_x86-64.S
@@ -20,16 +20,10 @@ sha1_process_block64:
# eax..edx: a..d
# ebp: e
# esi,edi: temps
-# -32+4*n(%rsp),r8...r15: W[0..7,8..15]
-# (TODO: actually W[0..7] are used a bit more often, put _them_ into r8..r15?)
- movl $3, %eax
-1:
- movq (%rdi,%rax,8), %rsi
- bswapq %rsi
- rolq $32, %rsi
- movq %rsi, -32(%rsp,%rax,8)
- decl %eax
- jns 1b
+# xmm0..xmm3: W[]
+# xmm4,xmm5: temps
+# xmm6: current round constant
+# -64(%rsp): area for passing RCONST + W[] from vector to integer units
movl 80(%rdi), %eax # a = ctx->hash[0]
movl 84(%rdi), %ebx # b = ctx->hash[1]
@@ -37,587 +31,709 @@ sha1_process_block64:
movl 92(%rdi), %edx # d = ctx->hash[3]
movl 96(%rdi), %ebp # e = ctx->hash[4]
+ movaps rconst0x5A827999(%rip), %xmm6
+
+ # For round 1, steps 0 and 8..15, we pass W[0,8..15] in esi,r8..r15
+ # instead of spilling them to stack.
+ # (We lose parallelized addition of RCONST, but LEA
+ # can do two additions at once, so...)
+ movq 4*0(%rdi), %rsi
+ movq 4*2(%rdi), %r10
+ bswapq %rsi
+ bswapq %r10
+ rolq $32, %rsi # rsi = W[1]:W[0]
+ rolq $32, %r10
+ movq %rsi, %xmm0
+ movq %r10, %xmm4
+ punpcklqdq %xmm4, %xmm0 # xmm0 = r10:rsi = (W[0],W[1],W[2],W[3])
+ movaps %xmm0, %xmm4
+ paddd %xmm6, %xmm4
+ movups %xmm4, -64+4*0(%rsp)
+
+ movq 4*4(%rdi), %r8
+ movq 4*6(%rdi), %r10
+ bswapq %r8
+ bswapq %r10
+ rolq $32, %r8
+ rolq $32, %r10
+ movq %r8, %xmm1
+ movq %r10, %xmm4
+ punpcklqdq %xmm4, %xmm1 # xmm1 = r10:r8 = (W[4],W[5],W[6],W[7])
+ movaps %xmm1, %xmm4
+ paddd %xmm6, %xmm4
+ movups %xmm4, -64+4*4(%rsp)
+
movq 4*8(%rdi), %r8
movq 4*10(%rdi), %r10
bswapq %r8
bswapq %r10
+ movl %r8d, %r9d # r9d = W[9]
+ rolq $32, %r8 # r8 = W[9]:W[8]
+ movl %r10d, %r11d # r11d = W[11]
+ rolq $32, %r10 # r10 = W[11]:W[10]
+ movq %r8, %xmm2
+ movq %r10, %xmm4
+ punpcklqdq %xmm4, %xmm2 # xmm2 = r10:r8 = (W[8],W[9],W[10],W[11])
+
movq 4*12(%rdi), %r12
movq 4*14(%rdi), %r14
bswapq %r12
bswapq %r14
- movl %r8d, %r9d
- shrq $32, %r8
- movl %r10d, %r11d
- shrq $32, %r10
- movl %r12d, %r13d
- shrq $32, %r12
- movl %r14d, %r15d
- shrq $32, %r14
+ movl %r12d, %r13d # r13d = W[13]
+ rolq $32, %r12 # r12 = W[13]:W[12]
+ movl %r14d, %r15d # r15d = W[15]
+ rolq $32, %r14 # r14 = W[15]:W[14]
+ movq %r12, %xmm3
+ movq %r14, %xmm4
+ punpcklqdq %xmm4, %xmm3 # xmm3 = r14:r12 = (W[12],W[13],W[14],W[15])
# 0
- # W[0], already in %esi
+ leal 0x5A827999(%rbp,%rsi), %ebp # e += RCONST + W[n]
movl %ecx, %edi # c
xorl %edx, %edi # ^d
andl %ebx, %edi # &b
xorl %edx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rbp,%rsi), %ebp # e += RCONST + W[n]
addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
# 1
- movl -32+4*1(%rsp), %esi # W[n]
+ addl -64+4*1(%rsp), %edx # e += RCONST + W[n]
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
andl %eax, %edi # &b
xorl %ecx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rdx,%rsi), %edx # e += RCONST + W[n]
addl %edi, %edx # e += (((c ^ d) & b) ^ d)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
# 2
- movl -32+4*2(%rsp), %esi # W[n]
+ addl -64+4*2(%rsp), %ecx # e += RCONST + W[n]
movl %eax, %edi # c
xorl %ebx, %edi # ^d
andl %ebp, %edi # &b
xorl %ebx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rcx,%rsi), %ecx # e += RCONST + W[n]
addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
# 3
- movl -32+4*3(%rsp), %esi # W[n]
+ addl -64+4*3(%rsp), %ebx # e += RCONST + W[n]
movl %ebp, %edi # c
xorl %eax, %edi # ^d
andl %edx, %edi # &b
xorl %eax, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rbx,%rsi), %ebx # e += RCONST + W[n]
addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
# 4
- movl -32+4*4(%rsp), %esi # W[n]
+ addl -64+4*4(%rsp), %eax # e += RCONST + W[n]
movl %edx, %edi # c
xorl %ebp, %edi # ^d
andl %ecx, %edi # &b
xorl %ebp, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rax,%rsi), %eax # e += RCONST + W[n]
addl %edi, %eax # e += (((c ^ d) & b) ^ d)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
# 5
- movl -32+4*5(%rsp), %esi # W[n]
+ addl -64+4*5(%rsp), %ebp # e += RCONST + W[n]
movl %ecx, %edi # c
xorl %edx, %edi # ^d
andl %ebx, %edi # &b
xorl %edx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rbp,%rsi), %ebp # e += RCONST + W[n]
addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
# 6
- movl -32+4*6(%rsp), %esi # W[n]
+ addl -64+4*6(%rsp), %edx # e += RCONST + W[n]
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
andl %eax, %edi # &b
xorl %ecx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rdx,%rsi), %edx # e += RCONST + W[n]
addl %edi, %edx # e += (((c ^ d) & b) ^ d)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
# 7
- movl -32+4*7(%rsp), %esi # W[n]
+ addl -64+4*7(%rsp), %ecx # e += RCONST + W[n]
movl %eax, %edi # c
xorl %ebx, %edi # ^d
andl %ebp, %edi # &b
xorl %ebx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rcx,%rsi), %ecx # e += RCONST + W[n]
addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
+# PREP %xmm0 %xmm1 %xmm2 %xmm3 -64+16*0(%rsp)
+ movaps %xmm3, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm0, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm1, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm2, %xmm0 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm0 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm0, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm0, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm0, %xmm0 # shift left by 1
+ psubd %xmm4, %xmm0 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm0 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm0 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm0, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*0(%rsp)
# 8
- # W[n], in %r8
+ leal 0x5A827999(%rbx,%r8), %ebx # e += RCONST + W[n]
movl %ebp, %edi # c
xorl %eax, %edi # ^d
andl %edx, %edi # &b
xorl %eax, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rbx,%r8), %ebx # e += RCONST + W[n]
addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
# 9
- # W[n], in %r9
+ leal 0x5A827999(%rax,%r9), %eax # e += RCONST + W[n]
movl %edx, %edi # c
xorl %ebp, %edi # ^d
andl %ecx, %edi # &b
xorl %ebp, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rax,%r9), %eax # e += RCONST + W[n]
addl %edi, %eax # e += (((c ^ d) & b) ^ d)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
# 10
- # W[n], in %r10
+ leal 0x5A827999(%rbp,%r10), %ebp # e += RCONST + W[n]
movl %ecx, %edi # c
xorl %edx, %edi # ^d
andl %ebx, %edi # &b
xorl %edx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rbp,%r10), %ebp # e += RCONST + W[n]
addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
# 11
- # W[n], in %r11
+ leal 0x5A827999(%rdx,%r11), %edx # e += RCONST + W[n]
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
andl %eax, %edi # &b
xorl %ecx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rdx,%r11), %edx # e += RCONST + W[n]
addl %edi, %edx # e += (((c ^ d) & b) ^ d)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
+ movaps rconst0x6ED9EBA1(%rip), %xmm6
+# PREP %xmm1 %xmm2 %xmm3 %xmm0 -64+16*1(%rsp)
+ movaps %xmm0, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm1, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm2, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm3, %xmm1 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm1 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm1, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm1, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm1, %xmm1 # shift left by 1
+ psubd %xmm4, %xmm1 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm1 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm1 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm1, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*1(%rsp)
# 12
- # W[n], in %r12
+ leal 0x5A827999(%rcx,%r12), %ecx # e += RCONST + W[n]
movl %eax, %edi # c
xorl %ebx, %edi # ^d
andl %ebp, %edi # &b
xorl %ebx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rcx,%r12), %ecx # e += RCONST + W[n]
addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
# 13
- # W[n], in %r13
+ leal 0x5A827999(%rbx,%r13), %ebx # e += RCONST + W[n]
movl %ebp, %edi # c
xorl %eax, %edi # ^d
andl %edx, %edi # &b
xorl %eax, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rbx,%r13), %ebx # e += RCONST + W[n]
addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
# 14
- # W[n], in %r14
+ leal 0x5A827999(%rax,%r14), %eax # e += RCONST + W[n]
movl %edx, %edi # c
xorl %ebp, %edi # ^d
andl %ecx, %edi # &b
xorl %ebp, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rax,%r14), %eax # e += RCONST + W[n]
addl %edi, %eax # e += (((c ^ d) & b) ^ d)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
# 15
- # W[n], in %r15
+ leal 0x5A827999(%rbp,%r15), %ebp # e += RCONST + W[n]
movl %ecx, %edi # c
xorl %edx, %edi # ^d
andl %ebx, %edi # &b
xorl %edx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rbp,%r15), %ebp # e += RCONST + W[n]
addl %edi, %ebp # e += (((c ^ d) & b) ^ d)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
+# PREP %xmm2 %xmm3 %xmm0 %xmm1 -64+16*2(%rsp)
+ movaps %xmm1, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm2, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm3, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm0, %xmm2 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm2 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm2, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm2, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm2, %xmm2 # shift left by 1
+ psubd %xmm4, %xmm2 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm2 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm2 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm2, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*2(%rsp)
# 16
- movl %r13d, %esi # W[(n+13) & 15]
- xorl %r8d, %esi # ^W[(n+8) & 15]
- xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*0(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*0(%rsp) # store to W[n & 15]
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
andl %eax, %edi # &b
xorl %ecx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rdx,%rsi), %edx # e += RCONST + W[n & 15]
+ addl -64+4*0(%rsp), %edx # e += RCONST + W[n & 15]
addl %edi, %edx # e += (((c ^ d) & b) ^ d)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
# 17
- movl %r14d, %esi # W[(n+13) & 15]
- xorl %r9d, %esi # ^W[(n+8) & 15]
- xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*1(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*1(%rsp) # store to W[n & 15]
movl %eax, %edi # c
xorl %ebx, %edi # ^d
andl %ebp, %edi # &b
xorl %ebx, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rcx,%rsi), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*1(%rsp), %ecx # e += RCONST + W[n & 15]
addl %edi, %ecx # e += (((c ^ d) & b) ^ d)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
# 18
- movl %r15d, %esi # W[(n+13) & 15]
- xorl %r10d, %esi # ^W[(n+8) & 15]
- xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*2(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*2(%rsp) # store to W[n & 15]
movl %ebp, %edi # c
xorl %eax, %edi # ^d
andl %edx, %edi # &b
xorl %eax, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rbx,%rsi), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*2(%rsp), %ebx # e += RCONST + W[n & 15]
addl %edi, %ebx # e += (((c ^ d) & b) ^ d)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
# 19
- movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
- xorl %r11d, %esi # ^W[(n+8) & 15]
- xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*3(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*3(%rsp) # store to W[n & 15]
movl %edx, %edi # c
xorl %ebp, %edi # ^d
andl %ecx, %edi # &b
xorl %ebp, %edi # (((c ^ d) & b) ^ d)
- leal 0x5A827999(%rax,%rsi), %eax # e += RCONST + W[n & 15]
+ addl -64+4*3(%rsp), %eax # e += RCONST + W[n & 15]
addl %edi, %eax # e += (((c ^ d) & b) ^ d)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
+# PREP %xmm3 %xmm0 %xmm1 %xmm2 -64+16*3(%rsp)
+ movaps %xmm2, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm3, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm0, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm1, %xmm3 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm3 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm3, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm3, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm3, %xmm3 # shift left by 1
+ psubd %xmm4, %xmm3 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm3 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm3 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm3, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*3(%rsp)
# 20
- movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
- xorl %r12d, %esi # ^W[(n+8) & 15]
- xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*4(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*4(%rsp) # store to W[n & 15]
movl %ecx, %edi # c
xorl %edx, %edi # ^d
xorl %ebx, %edi # ^b
- leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*4(%rsp), %ebp # e += RCONST + W[n & 15]
addl %edi, %ebp # e += (c ^ d ^ b)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
# 21
- movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
- xorl %r13d, %esi # ^W[(n+8) & 15]
- xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*5(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*5(%rsp) # store to W[n & 15]
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
xorl %eax, %edi # ^b
- leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + W[n & 15]
+ addl -64+4*5(%rsp), %edx # e += RCONST + W[n & 15]
addl %edi, %edx # e += (c ^ d ^ b)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
# 22
- movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
- xorl %r14d, %esi # ^W[(n+8) & 15]
- xorl %r8d, %esi # ^W[(n+2) & 15]
- xorl -32+4*6(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*6(%rsp) # store to W[n & 15]
movl %eax, %edi # c
xorl %ebx, %edi # ^d
xorl %ebp, %edi # ^b
- leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*6(%rsp), %ecx # e += RCONST + W[n & 15]
addl %edi, %ecx # e += (c ^ d ^ b)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
# 23
- movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
- xorl %r15d, %esi # ^W[(n+8) & 15]
- xorl %r9d, %esi # ^W[(n+2) & 15]
- xorl -32+4*7(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*7(%rsp) # store to W[n & 15]
movl %ebp, %edi # c
xorl %eax, %edi # ^d
xorl %edx, %edi # ^b
- leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*7(%rsp), %ebx # e += RCONST + W[n & 15]
addl %edi, %ebx # e += (c ^ d ^ b)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
+# PREP %xmm0 %xmm1 %xmm2 %xmm3 -64+16*0(%rsp)
+ movaps %xmm3, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm0, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm1, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm2, %xmm0 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm0 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm0, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm0, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm0, %xmm0 # shift left by 1
+ psubd %xmm4, %xmm0 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm0 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm0 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm0, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*0(%rsp)
# 24
- xorl -32+4*5(%rsp), %r8d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*0(%rsp), %r8d # ^W[(n+8) & 15]
- xorl %r10d, %r8d # ^W[(n+2) & 15]
- roll %r8d #
movl %edx, %edi # c
xorl %ebp, %edi # ^d
xorl %ecx, %edi # ^b
- leal 0x6ED9EBA1(%rax,%r8), %eax # e += RCONST + W[n & 15]
+ addl -64+4*8(%rsp), %eax # e += RCONST + W[n & 15]
addl %edi, %eax # e += (c ^ d ^ b)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
# 25
- xorl -32+4*6(%rsp), %r9d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*1(%rsp), %r9d # ^W[(n+8) & 15]
- xorl %r11d, %r9d # ^W[(n+2) & 15]
- roll %r9d #
movl %ecx, %edi # c
xorl %edx, %edi # ^d
xorl %ebx, %edi # ^b
- leal 0x6ED9EBA1(%rbp,%r9), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*9(%rsp), %ebp # e += RCONST + W[n & 15]
addl %edi, %ebp # e += (c ^ d ^ b)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
# 26
- xorl -32+4*7(%rsp), %r10d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*2(%rsp), %r10d # ^W[(n+8) & 15]
- xorl %r12d, %r10d # ^W[(n+2) & 15]
- roll %r10d #
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
xorl %eax, %edi # ^b
- leal 0x6ED9EBA1(%rdx,%r10), %edx # e += RCONST + W[n & 15]
+ addl -64+4*10(%rsp), %edx # e += RCONST + W[n & 15]
addl %edi, %edx # e += (c ^ d ^ b)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
# 27
- xorl %r8d, %r11d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*3(%rsp), %r11d # ^W[(n+8) & 15]
- xorl %r13d, %r11d # ^W[(n+2) & 15]
- roll %r11d #
movl %eax, %edi # c
xorl %ebx, %edi # ^d
xorl %ebp, %edi # ^b
- leal 0x6ED9EBA1(%rcx,%r11), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*11(%rsp), %ecx # e += RCONST + W[n & 15]
addl %edi, %ecx # e += (c ^ d ^ b)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
+# PREP %xmm1 %xmm2 %xmm3 %xmm0 -64+16*1(%rsp)
+ movaps %xmm0, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm1, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm2, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm3, %xmm1 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm1 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm1, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm1, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm1, %xmm1 # shift left by 1
+ psubd %xmm4, %xmm1 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm1 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm1 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm1, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*1(%rsp)
# 28
- xorl %r9d, %r12d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*4(%rsp), %r12d # ^W[(n+8) & 15]
- xorl %r14d, %r12d # ^W[(n+2) & 15]
- roll %r12d #
movl %ebp, %edi # c
xorl %eax, %edi # ^d
xorl %edx, %edi # ^b
- leal 0x6ED9EBA1(%rbx,%r12), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*12(%rsp), %ebx # e += RCONST + W[n & 15]
addl %edi, %ebx # e += (c ^ d ^ b)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
# 29
- xorl %r10d, %r13d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*5(%rsp), %r13d # ^W[(n+8) & 15]
- xorl %r15d, %r13d # ^W[(n+2) & 15]
- roll %r13d #
movl %edx, %edi # c
xorl %ebp, %edi # ^d
xorl %ecx, %edi # ^b
- leal 0x6ED9EBA1(%rax,%r13), %eax # e += RCONST + W[n & 15]
+ addl -64+4*13(%rsp), %eax # e += RCONST + W[n & 15]
addl %edi, %eax # e += (c ^ d ^ b)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
# 30
- xorl %r11d, %r14d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*6(%rsp), %r14d # ^W[(n+8) & 15]
- xorl -32+4*0(%rsp), %r14d # ^W[(n+2) & 15]
- roll %r14d #
movl %ecx, %edi # c
xorl %edx, %edi # ^d
xorl %ebx, %edi # ^b
- leal 0x6ED9EBA1(%rbp,%r14), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*14(%rsp), %ebp # e += RCONST + W[n & 15]
addl %edi, %ebp # e += (c ^ d ^ b)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
# 31
- xorl %r12d, %r15d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*7(%rsp), %r15d # ^W[(n+8) & 15]
- xorl -32+4*1(%rsp), %r15d # ^W[(n+2) & 15]
- roll %r15d #
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
xorl %eax, %edi # ^b
- leal 0x6ED9EBA1(%rdx,%r15), %edx # e += RCONST + W[n & 15]
+ addl -64+4*15(%rsp), %edx # e += RCONST + W[n & 15]
addl %edi, %edx # e += (c ^ d ^ b)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
+ movaps rconst0x8F1BBCDC(%rip), %xmm6
+# PREP %xmm2 %xmm3 %xmm0 %xmm1 -64+16*2(%rsp)
+ movaps %xmm1, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm2, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm3, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm0, %xmm2 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm2 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm2, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm2, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm2, %xmm2 # shift left by 1
+ psubd %xmm4, %xmm2 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm2 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm2 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm2, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*2(%rsp)
# 32
- movl %r13d, %esi # W[(n+13) & 15]
- xorl %r8d, %esi # ^W[(n+8) & 15]
- xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*0(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*0(%rsp) # store to W[n & 15]
movl %eax, %edi # c
xorl %ebx, %edi # ^d
xorl %ebp, %edi # ^b
- leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*0(%rsp), %ecx # e += RCONST + W[n & 15]
addl %edi, %ecx # e += (c ^ d ^ b)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
# 33
- movl %r14d, %esi # W[(n+13) & 15]
- xorl %r9d, %esi # ^W[(n+8) & 15]
- xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*1(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*1(%rsp) # store to W[n & 15]
movl %ebp, %edi # c
xorl %eax, %edi # ^d
xorl %edx, %edi # ^b
- leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*1(%rsp), %ebx # e += RCONST + W[n & 15]
addl %edi, %ebx # e += (c ^ d ^ b)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
# 34
- movl %r15d, %esi # W[(n+13) & 15]
- xorl %r10d, %esi # ^W[(n+8) & 15]
- xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*2(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*2(%rsp) # store to W[n & 15]
movl %edx, %edi # c
xorl %ebp, %edi # ^d
xorl %ecx, %edi # ^b
- leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + W[n & 15]
+ addl -64+4*2(%rsp), %eax # e += RCONST + W[n & 15]
addl %edi, %eax # e += (c ^ d ^ b)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
# 35
- movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
- xorl %r11d, %esi # ^W[(n+8) & 15]
- xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*3(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*3(%rsp) # store to W[n & 15]
movl %ecx, %edi # c
xorl %edx, %edi # ^d
xorl %ebx, %edi # ^b
- leal 0x6ED9EBA1(%rbp,%rsi), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*3(%rsp), %ebp # e += RCONST + W[n & 15]
addl %edi, %ebp # e += (c ^ d ^ b)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
+# PREP %xmm3 %xmm0 %xmm1 %xmm2 -64+16*3(%rsp)
+ movaps %xmm2, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm3, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm0, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm1, %xmm3 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm3 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm3, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm3, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm3, %xmm3 # shift left by 1
+ psubd %xmm4, %xmm3 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm3 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm3 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm3, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*3(%rsp)
# 36
- movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
- xorl %r12d, %esi # ^W[(n+8) & 15]
- xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*4(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*4(%rsp) # store to W[n & 15]
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
xorl %eax, %edi # ^b
- leal 0x6ED9EBA1(%rdx,%rsi), %edx # e += RCONST + W[n & 15]
+ addl -64+4*4(%rsp), %edx # e += RCONST + W[n & 15]
addl %edi, %edx # e += (c ^ d ^ b)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
# 37
- movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
- xorl %r13d, %esi # ^W[(n+8) & 15]
- xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*5(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*5(%rsp) # store to W[n & 15]
movl %eax, %edi # c
xorl %ebx, %edi # ^d
xorl %ebp, %edi # ^b
- leal 0x6ED9EBA1(%rcx,%rsi), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*5(%rsp), %ecx # e += RCONST + W[n & 15]
addl %edi, %ecx # e += (c ^ d ^ b)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
# 38
- movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
- xorl %r14d, %esi # ^W[(n+8) & 15]
- xorl %r8d, %esi # ^W[(n+2) & 15]
- xorl -32+4*6(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*6(%rsp) # store to W[n & 15]
movl %ebp, %edi # c
xorl %eax, %edi # ^d
xorl %edx, %edi # ^b
- leal 0x6ED9EBA1(%rbx,%rsi), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*6(%rsp), %ebx # e += RCONST + W[n & 15]
addl %edi, %ebx # e += (c ^ d ^ b)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
# 39
- movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
- xorl %r15d, %esi # ^W[(n+8) & 15]
- xorl %r9d, %esi # ^W[(n+2) & 15]
- xorl -32+4*7(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*7(%rsp) # store to W[n & 15]
movl %edx, %edi # c
xorl %ebp, %edi # ^d
xorl %ecx, %edi # ^b
- leal 0x6ED9EBA1(%rax,%rsi), %eax # e += RCONST + W[n & 15]
+ addl -64+4*7(%rsp), %eax # e += RCONST + W[n & 15]
addl %edi, %eax # e += (c ^ d ^ b)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
+# PREP %xmm0 %xmm1 %xmm2 %xmm3 -64+16*0(%rsp)
+ movaps %xmm3, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm0, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm1, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm2, %xmm0 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm0 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm0, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm0, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm0, %xmm0 # shift left by 1
+ psubd %xmm4, %xmm0 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm0 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm0 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm0, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*0(%rsp)
# 40
movl %ebx, %edi # di: b
movl %ebx, %esi # si: b
@@ -625,12 +741,8 @@ sha1_process_block64:
andl %ecx, %esi # si: b & c
andl %edx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl -32+4*5(%rsp), %r8d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*0(%rsp), %r8d # ^W[(n+8) & 15]
- xorl %r10d, %r8d # ^W[(n+2) & 15]
- roll %r8d #
addl %edi, %ebp # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rbp,%r8), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*8(%rsp), %ebp # e += RCONST + W[n & 15]
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
@@ -642,12 +754,8 @@ sha1_process_block64:
andl %ebx, %esi # si: b & c
andl %ecx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl -32+4*6(%rsp), %r9d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*1(%rsp), %r9d # ^W[(n+8) & 15]
- xorl %r11d, %r9d # ^W[(n+2) & 15]
- roll %r9d #
addl %edi, %edx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rdx,%r9), %edx # e += RCONST + W[n & 15]
+ addl -64+4*9(%rsp), %edx # e += RCONST + W[n & 15]
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
@@ -659,12 +767,8 @@ sha1_process_block64:
andl %eax, %esi # si: b & c
andl %ebx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl -32+4*7(%rsp), %r10d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*2(%rsp), %r10d # ^W[(n+8) & 15]
- xorl %r12d, %r10d # ^W[(n+2) & 15]
- roll %r10d #
addl %edi, %ecx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rcx,%r10), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*10(%rsp), %ecx # e += RCONST + W[n & 15]
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
@@ -676,16 +780,37 @@ sha1_process_block64:
andl %ebp, %esi # si: b & c
andl %eax, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl %r8d, %r11d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*3(%rsp), %r11d # ^W[(n+8) & 15]
- xorl %r13d, %r11d # ^W[(n+2) & 15]
- roll %r11d #
addl %edi, %ebx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rbx,%r11), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*11(%rsp), %ebx # e += RCONST + W[n & 15]
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
+# PREP %xmm1 %xmm2 %xmm3 %xmm0 -64+16*1(%rsp)
+ movaps %xmm0, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm1, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm2, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm3, %xmm1 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm1 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm1, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm1, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm1, %xmm1 # shift left by 1
+ psubd %xmm4, %xmm1 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm1 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm1 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm1, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*1(%rsp)
# 44
movl %ecx, %edi # di: b
movl %ecx, %esi # si: b
@@ -693,12 +818,8 @@ sha1_process_block64:
andl %edx, %esi # si: b & c
andl %ebp, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl %r9d, %r12d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*4(%rsp), %r12d # ^W[(n+8) & 15]
- xorl %r14d, %r12d # ^W[(n+2) & 15]
- roll %r12d #
addl %edi, %eax # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rax,%r12), %eax # e += RCONST + W[n & 15]
+ addl -64+4*12(%rsp), %eax # e += RCONST + W[n & 15]
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
@@ -710,12 +831,8 @@ sha1_process_block64:
andl %ecx, %esi # si: b & c
andl %edx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl %r10d, %r13d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*5(%rsp), %r13d # ^W[(n+8) & 15]
- xorl %r15d, %r13d # ^W[(n+2) & 15]
- roll %r13d #
addl %edi, %ebp # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rbp,%r13), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*13(%rsp), %ebp # e += RCONST + W[n & 15]
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
@@ -727,12 +844,8 @@ sha1_process_block64:
andl %ebx, %esi # si: b & c
andl %ecx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl %r11d, %r14d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*6(%rsp), %r14d # ^W[(n+8) & 15]
- xorl -32+4*0(%rsp), %r14d # ^W[(n+2) & 15]
- roll %r14d #
addl %edi, %edx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rdx,%r14), %edx # e += RCONST + W[n & 15]
+ addl -64+4*14(%rsp), %edx # e += RCONST + W[n & 15]
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
@@ -744,16 +857,37 @@ sha1_process_block64:
andl %eax, %esi # si: b & c
andl %ebx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl %r12d, %r15d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*7(%rsp), %r15d # ^W[(n+8) & 15]
- xorl -32+4*1(%rsp), %r15d # ^W[(n+2) & 15]
- roll %r15d #
addl %edi, %ecx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rcx,%r15), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*15(%rsp), %ecx # e += RCONST + W[n & 15]
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
+# PREP %xmm2 %xmm3 %xmm0 %xmm1 -64+16*2(%rsp)
+ movaps %xmm1, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm2, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm3, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm0, %xmm2 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm2 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm2, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm2, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm2, %xmm2 # shift left by 1
+ psubd %xmm4, %xmm2 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm2 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm2 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm2, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*2(%rsp)
# 48
movl %edx, %edi # di: b
movl %edx, %esi # si: b
@@ -761,14 +895,8 @@ sha1_process_block64:
andl %ebp, %esi # si: b & c
andl %eax, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- movl %r13d, %esi # W[(n+13) & 15]
- xorl %r8d, %esi # ^W[(n+8) & 15]
- xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*0(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*0(%rsp) # store to W[n & 15]
addl %edi, %ebx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rbx,%rsi), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*0(%rsp), %ebx # e += RCONST + W[n & 15]
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
@@ -780,14 +908,8 @@ sha1_process_block64:
andl %edx, %esi # si: b & c
andl %ebp, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- movl %r14d, %esi # W[(n+13) & 15]
- xorl %r9d, %esi # ^W[(n+8) & 15]
- xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*1(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*1(%rsp) # store to W[n & 15]
addl %edi, %eax # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rax,%rsi), %eax # e += RCONST + W[n & 15]
+ addl -64+4*1(%rsp), %eax # e += RCONST + W[n & 15]
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
@@ -799,14 +921,8 @@ sha1_process_block64:
andl %ecx, %esi # si: b & c
andl %edx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- movl %r15d, %esi # W[(n+13) & 15]
- xorl %r10d, %esi # ^W[(n+8) & 15]
- xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*2(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*2(%rsp) # store to W[n & 15]
addl %edi, %ebp # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rbp,%rsi), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*2(%rsp), %ebp # e += RCONST + W[n & 15]
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
@@ -818,18 +934,38 @@ sha1_process_block64:
andl %ebx, %esi # si: b & c
andl %ecx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
- xorl %r11d, %esi # ^W[(n+8) & 15]
- xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*3(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*3(%rsp) # store to W[n & 15]
addl %edi, %edx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rdx,%rsi), %edx # e += RCONST + W[n & 15]
+ addl -64+4*3(%rsp), %edx # e += RCONST + W[n & 15]
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
+ movaps rconst0xCA62C1D6(%rip), %xmm6
+# PREP %xmm3 %xmm0 %xmm1 %xmm2 -64+16*3(%rsp)
+ movaps %xmm2, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm3, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm0, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm1, %xmm3 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm3 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm3, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm3, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm3, %xmm3 # shift left by 1
+ psubd %xmm4, %xmm3 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm3 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm3 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm3, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*3(%rsp)
# 52
movl %ebp, %edi # di: b
movl %ebp, %esi # si: b
@@ -837,14 +973,8 @@ sha1_process_block64:
andl %eax, %esi # si: b & c
andl %ebx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
- xorl %r12d, %esi # ^W[(n+8) & 15]
- xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*4(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*4(%rsp) # store to W[n & 15]
addl %edi, %ecx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rcx,%rsi), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*4(%rsp), %ecx # e += RCONST + W[n & 15]
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
@@ -856,14 +986,8 @@ sha1_process_block64:
andl %ebp, %esi # si: b & c
andl %eax, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
- xorl %r13d, %esi # ^W[(n+8) & 15]
- xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*5(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*5(%rsp) # store to W[n & 15]
addl %edi, %ebx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rbx,%rsi), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*5(%rsp), %ebx # e += RCONST + W[n & 15]
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
@@ -875,14 +999,8 @@ sha1_process_block64:
andl %edx, %esi # si: b & c
andl %ebp, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
- xorl %r14d, %esi # ^W[(n+8) & 15]
- xorl %r8d, %esi # ^W[(n+2) & 15]
- xorl -32+4*6(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*6(%rsp) # store to W[n & 15]
addl %edi, %eax # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rax,%rsi), %eax # e += RCONST + W[n & 15]
+ addl -64+4*6(%rsp), %eax # e += RCONST + W[n & 15]
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
@@ -894,18 +1012,37 @@ sha1_process_block64:
andl %ecx, %esi # si: b & c
andl %edx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
- xorl %r15d, %esi # ^W[(n+8) & 15]
- xorl %r9d, %esi # ^W[(n+2) & 15]
- xorl -32+4*7(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*7(%rsp) # store to W[n & 15]
addl %edi, %ebp # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rbp,%rsi), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*7(%rsp), %ebp # e += RCONST + W[n & 15]
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
+# PREP %xmm0 %xmm1 %xmm2 %xmm3 -64+16*0(%rsp)
+ movaps %xmm3, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm0, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm1, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm2, %xmm0 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm0 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm0, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm0, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm0, %xmm0 # shift left by 1
+ psubd %xmm4, %xmm0 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm0 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm0 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm0, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*0(%rsp)
# 56
movl %eax, %edi # di: b
movl %eax, %esi # si: b
@@ -913,12 +1050,8 @@ sha1_process_block64:
andl %ebx, %esi # si: b & c
andl %ecx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl -32+4*5(%rsp), %r8d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*0(%rsp), %r8d # ^W[(n+8) & 15]
- xorl %r10d, %r8d # ^W[(n+2) & 15]
- roll %r8d #
addl %edi, %edx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rdx,%r8), %edx # e += RCONST + W[n & 15]
+ addl -64+4*8(%rsp), %edx # e += RCONST + W[n & 15]
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
@@ -930,12 +1063,8 @@ sha1_process_block64:
andl %eax, %esi # si: b & c
andl %ebx, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl -32+4*6(%rsp), %r9d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*1(%rsp), %r9d # ^W[(n+8) & 15]
- xorl %r11d, %r9d # ^W[(n+2) & 15]
- roll %r9d #
addl %edi, %ecx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rcx,%r9), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*9(%rsp), %ecx # e += RCONST + W[n & 15]
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
@@ -947,12 +1076,8 @@ sha1_process_block64:
andl %ebp, %esi # si: b & c
andl %eax, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl -32+4*7(%rsp), %r10d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*2(%rsp), %r10d # ^W[(n+8) & 15]
- xorl %r12d, %r10d # ^W[(n+2) & 15]
- roll %r10d #
addl %edi, %ebx # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rbx,%r10), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*10(%rsp), %ebx # e += RCONST + W[n & 15]
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
@@ -964,307 +1089,282 @@ sha1_process_block64:
andl %edx, %esi # si: b & c
andl %ebp, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
- xorl %r8d, %r11d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*3(%rsp), %r11d # ^W[(n+8) & 15]
- xorl %r13d, %r11d # ^W[(n+2) & 15]
- roll %r11d #
addl %edi, %eax # += ((b | c) & d) | (b & c)
- leal -0x70E44324(%rax,%r11), %eax # e += RCONST + W[n & 15]
+ addl -64+4*11(%rsp), %eax # e += RCONST + W[n & 15]
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
+# PREP %xmm1 %xmm2 %xmm3 %xmm0 -64+16*1(%rsp)
+ movaps %xmm0, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm1, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm2, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm3, %xmm1 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm1 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm1, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm1, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm1, %xmm1 # shift left by 1
+ psubd %xmm4, %xmm1 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm1 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm1 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm1, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*1(%rsp)
# 60
- xorl %r9d, %r12d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*4(%rsp), %r12d # ^W[(n+8) & 15]
- xorl %r14d, %r12d # ^W[(n+2) & 15]
- roll %r12d #
movl %ecx, %edi # c
xorl %edx, %edi # ^d
xorl %ebx, %edi # ^b
- leal -0x359D3E2A(%rbp,%r12), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*12(%rsp), %ebp # e += RCONST + W[n & 15]
addl %edi, %ebp # e += (c ^ d ^ b)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
# 61
- xorl %r10d, %r13d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*5(%rsp), %r13d # ^W[(n+8) & 15]
- xorl %r15d, %r13d # ^W[(n+2) & 15]
- roll %r13d #
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
xorl %eax, %edi # ^b
- leal -0x359D3E2A(%rdx,%r13), %edx # e += RCONST + W[n & 15]
+ addl -64+4*13(%rsp), %edx # e += RCONST + W[n & 15]
addl %edi, %edx # e += (c ^ d ^ b)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
# 62
- xorl %r11d, %r14d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*6(%rsp), %r14d # ^W[(n+8) & 15]
- xorl -32+4*0(%rsp), %r14d # ^W[(n+2) & 15]
- roll %r14d #
movl %eax, %edi # c
xorl %ebx, %edi # ^d
xorl %ebp, %edi # ^b
- leal -0x359D3E2A(%rcx,%r14), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*14(%rsp), %ecx # e += RCONST + W[n & 15]
addl %edi, %ecx # e += (c ^ d ^ b)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
# 63
- xorl %r12d, %r15d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*7(%rsp), %r15d # ^W[(n+8) & 15]
- xorl -32+4*1(%rsp), %r15d # ^W[(n+2) & 15]
- roll %r15d #
movl %ebp, %edi # c
xorl %eax, %edi # ^d
xorl %edx, %edi # ^b
- leal -0x359D3E2A(%rbx,%r15), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*15(%rsp), %ebx # e += RCONST + W[n & 15]
addl %edi, %ebx # e += (c ^ d ^ b)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
+# PREP %xmm2 %xmm3 %xmm0 %xmm1 -64+16*2(%rsp)
+ movaps %xmm1, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm2, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm3, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm0, %xmm2 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm2 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm2, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm2, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm2, %xmm2 # shift left by 1
+ psubd %xmm4, %xmm2 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm2 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm2 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm2, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*2(%rsp)
# 64
- movl %r13d, %esi # W[(n+13) & 15]
- xorl %r8d, %esi # ^W[(n+8) & 15]
- xorl -32+4*2(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*0(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*0(%rsp) # store to W[n & 15]
movl %edx, %edi # c
xorl %ebp, %edi # ^d
xorl %ecx, %edi # ^b
- leal -0x359D3E2A(%rax,%rsi), %eax # e += RCONST + W[n & 15]
+ addl -64+4*0(%rsp), %eax # e += RCONST + W[n & 15]
addl %edi, %eax # e += (c ^ d ^ b)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
# 65
- movl %r14d, %esi # W[(n+13) & 15]
- xorl %r9d, %esi # ^W[(n+8) & 15]
- xorl -32+4*3(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*1(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*1(%rsp) # store to W[n & 15]
movl %ecx, %edi # c
xorl %edx, %edi # ^d
xorl %ebx, %edi # ^b
- leal -0x359D3E2A(%rbp,%rsi), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*1(%rsp), %ebp # e += RCONST + W[n & 15]
addl %edi, %ebp # e += (c ^ d ^ b)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
# 66
- movl %r15d, %esi # W[(n+13) & 15]
- xorl %r10d, %esi # ^W[(n+8) & 15]
- xorl -32+4*4(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*2(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*2(%rsp) # store to W[n & 15]
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
xorl %eax, %edi # ^b
- leal -0x359D3E2A(%rdx,%rsi), %edx # e += RCONST + W[n & 15]
+ addl -64+4*2(%rsp), %edx # e += RCONST + W[n & 15]
addl %edi, %edx # e += (c ^ d ^ b)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
# 67
- movl -32+4*0(%rsp), %esi # W[(n+13) & 15]
- xorl %r11d, %esi # ^W[(n+8) & 15]
- xorl -32+4*5(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*3(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*3(%rsp) # store to W[n & 15]
movl %eax, %edi # c
xorl %ebx, %edi # ^d
xorl %ebp, %edi # ^b
- leal -0x359D3E2A(%rcx,%rsi), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*3(%rsp), %ecx # e += RCONST + W[n & 15]
addl %edi, %ecx # e += (c ^ d ^ b)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
+# PREP %xmm3 %xmm0 %xmm1 %xmm2 -64+16*3(%rsp)
+ movaps %xmm2, %xmm4
+ psrldq $4, %xmm4 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+ pshufd $0x4e, %xmm3, %xmm5 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq %xmm0, %xmm5 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+ xorps %xmm1, %xmm3 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps %xmm4, %xmm5 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps %xmm5, %xmm3 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps %xmm3, %xmm5
+ xorps %xmm4, %xmm4 # rol(W0,1):
+ pcmpgtd %xmm3, %xmm4 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd %xmm3, %xmm3 # shift left by 1
+ psubd %xmm4, %xmm3 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+ pslldq $12, %xmm5 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps %xmm5, %xmm4
+ pslld $2, %xmm5
+ psrld $30, %xmm4
+# xorps %xmm4, %xmm5 # rol((0,0,0,unrotW[0]),2)
+ xorps %xmm4, %xmm3 # same result, but does not depend on/does not modify T2
+ xorps %xmm5, %xmm3 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+ movaps %xmm3, %xmm5
+ paddd %xmm6, %xmm5
+ movups %xmm5, -64+16*3(%rsp)
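The pcmpgtd/paddd/psubd triple above is how SSE2 fakes a per-lane rotate-left-by-1, since there is no per-lane rotate insn: the lane is doubled, and the bit that carried out of the top is added back in at the bottom. A scalar C rendition of the same trick, purely illustrative (function names are not part of the source):

	/* Illustrative scalar equivalent of the SSE2 rol-by-1 sequence above. */
	static inline unsigned rol1_ref(unsigned x)
	{
		return (x << 1) | (x >> 31);
	}
	static inline unsigned rol1_sse2_style(unsigned x)
	{
		unsigned msb = ((int)x < 0) ? 0xffffffffu : 0u; /* pcmpgtd against zero */
		return (x + x) - msb;                           /* paddd, then psubd    */
	}
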
# 68
- movl -32+4*1(%rsp), %esi # W[(n+13) & 15]
- xorl %r12d, %esi # ^W[(n+8) & 15]
- xorl -32+4*6(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*4(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*4(%rsp) # store to W[n & 15]
movl %ebp, %edi # c
xorl %eax, %edi # ^d
xorl %edx, %edi # ^b
- leal -0x359D3E2A(%rbx,%rsi), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*4(%rsp), %ebx # e += RCONST + W[n & 15]
addl %edi, %ebx # e += (c ^ d ^ b)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
# 69
- movl -32+4*2(%rsp), %esi # W[(n+13) & 15]
- xorl %r13d, %esi # ^W[(n+8) & 15]
- xorl -32+4*7(%rsp), %esi # ^W[(n+2) & 15]
- xorl -32+4*5(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*5(%rsp) # store to W[n & 15]
movl %edx, %edi # c
xorl %ebp, %edi # ^d
xorl %ecx, %edi # ^b
- leal -0x359D3E2A(%rax,%rsi), %eax # e += RCONST + W[n & 15]
+ addl -64+4*5(%rsp), %eax # e += RCONST + W[n & 15]
addl %edi, %eax # e += (c ^ d ^ b)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
# 70
- movl -32+4*3(%rsp), %esi # W[(n+13) & 15]
- xorl %r14d, %esi # ^W[(n+8) & 15]
- xorl %r8d, %esi # ^W[(n+2) & 15]
- xorl -32+4*6(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*6(%rsp) # store to W[n & 15]
movl %ecx, %edi # c
xorl %edx, %edi # ^d
xorl %ebx, %edi # ^b
- leal -0x359D3E2A(%rbp,%rsi), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*6(%rsp), %ebp # e += RCONST + W[n & 15]
addl %edi, %ebp # e += (c ^ d ^ b)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
# 71
- movl -32+4*4(%rsp), %esi # W[(n+13) & 15]
- xorl %r15d, %esi # ^W[(n+8) & 15]
- xorl %r9d, %esi # ^W[(n+2) & 15]
- xorl -32+4*7(%rsp), %esi # ^W[n & 15]
- roll %esi #
- movl %esi, -32+4*7(%rsp) # store to W[n & 15]
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
xorl %eax, %edi # ^b
- leal -0x359D3E2A(%rdx,%rsi), %edx # e += RCONST + W[n & 15]
+ addl -64+4*7(%rsp), %edx # e += RCONST + W[n & 15]
addl %edi, %edx # e += (c ^ d ^ b)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
# 72
- xorl -32+4*5(%rsp), %r8d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*0(%rsp), %r8d # ^W[(n+8) & 15]
- xorl %r10d, %r8d # ^W[(n+2) & 15]
- roll %r8d #
movl %eax, %edi # c
xorl %ebx, %edi # ^d
xorl %ebp, %edi # ^b
- leal -0x359D3E2A(%rcx,%r8), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*8(%rsp), %ecx # e += RCONST + W[n & 15]
addl %edi, %ecx # e += (c ^ d ^ b)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
# 73
- xorl -32+4*6(%rsp), %r9d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*1(%rsp), %r9d # ^W[(n+8) & 15]
- xorl %r11d, %r9d # ^W[(n+2) & 15]
- roll %r9d #
movl %ebp, %edi # c
xorl %eax, %edi # ^d
xorl %edx, %edi # ^b
- leal -0x359D3E2A(%rbx,%r9), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*9(%rsp), %ebx # e += RCONST + W[n & 15]
addl %edi, %ebx # e += (c ^ d ^ b)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
# 74
- xorl -32+4*7(%rsp), %r10d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*2(%rsp), %r10d # ^W[(n+8) & 15]
- xorl %r12d, %r10d # ^W[(n+2) & 15]
- roll %r10d #
movl %edx, %edi # c
xorl %ebp, %edi # ^d
xorl %ecx, %edi # ^b
- leal -0x359D3E2A(%rax,%r10), %eax # e += RCONST + W[n & 15]
+ addl -64+4*10(%rsp), %eax # e += RCONST + W[n & 15]
addl %edi, %eax # e += (c ^ d ^ b)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %eax # e += rotl32(a,5)
rorl $2, %ecx # b = rotl32(b,30)
# 75
- xorl %r8d, %r11d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*3(%rsp), %r11d # ^W[(n+8) & 15]
- xorl %r13d, %r11d # ^W[(n+2) & 15]
- roll %r11d #
movl %ecx, %edi # c
xorl %edx, %edi # ^d
xorl %ebx, %edi # ^b
- leal -0x359D3E2A(%rbp,%r11), %ebp # e += RCONST + W[n & 15]
+ addl -64+4*11(%rsp), %ebp # e += RCONST + W[n & 15]
addl %edi, %ebp # e += (c ^ d ^ b)
movl %eax, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebp # e += rotl32(a,5)
rorl $2, %ebx # b = rotl32(b,30)
# 76
- xorl %r9d, %r12d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*4(%rsp), %r12d # ^W[(n+8) & 15]
- xorl %r14d, %r12d # ^W[(n+2) & 15]
- roll %r12d #
movl %ebx, %edi # c
xorl %ecx, %edi # ^d
xorl %eax, %edi # ^b
- leal -0x359D3E2A(%rdx,%r12), %edx # e += RCONST + W[n & 15]
+ addl -64+4*12(%rsp), %edx # e += RCONST + W[n & 15]
addl %edi, %edx # e += (c ^ d ^ b)
movl %ebp, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %edx # e += rotl32(a,5)
rorl $2, %eax # b = rotl32(b,30)
# 77
- xorl %r10d, %r13d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*5(%rsp), %r13d # ^W[(n+8) & 15]
- xorl %r15d, %r13d # ^W[(n+2) & 15]
- roll %r13d #
movl %eax, %edi # c
xorl %ebx, %edi # ^d
xorl %ebp, %edi # ^b
- leal -0x359D3E2A(%rcx,%r13), %ecx # e += RCONST + W[n & 15]
+ addl -64+4*13(%rsp), %ecx # e += RCONST + W[n & 15]
addl %edi, %ecx # e += (c ^ d ^ b)
movl %edx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ecx # e += rotl32(a,5)
rorl $2, %ebp # b = rotl32(b,30)
# 78
- xorl %r11d, %r14d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*6(%rsp), %r14d # ^W[(n+8) & 15]
- xorl -32+4*0(%rsp), %r14d # ^W[(n+2) & 15]
- roll %r14d #
movl %ebp, %edi # c
xorl %eax, %edi # ^d
xorl %edx, %edi # ^b
- leal -0x359D3E2A(%rbx,%r14), %ebx # e += RCONST + W[n & 15]
+ addl -64+4*14(%rsp), %ebx # e += RCONST + W[n & 15]
addl %edi, %ebx # e += (c ^ d ^ b)
movl %ecx, %esi #
roll $5, %esi # rotl32(a,5)
addl %esi, %ebx # e += rotl32(a,5)
rorl $2, %edx # b = rotl32(b,30)
# 79
- xorl %r12d, %r15d # W[n & 15] ^= W[(n+13) & 15]
- xorl -32+4*7(%rsp), %r15d # ^W[(n+8) & 15]
- xorl -32+4*1(%rsp), %r15d # ^W[(n+2) & 15]
- roll %r15d #
movl %edx, %edi # c
xorl %ebp, %edi # ^d
xorl %ecx, %edi # ^b
- leal -0x359D3E2A(%rax,%r15), %eax # e += RCONST + W[n & 15]
+ addl -64+4*15(%rsp), %eax # e += RCONST + W[n & 15]
addl %edi, %eax # e += (c ^ d ^ b)
movl %ebx, %esi #
roll $5, %esi # rotl32(a,5)
@@ -1286,4 +1386,28 @@ sha1_process_block64:
ret
.size sha1_process_block64, .-sha1_process_block64
+
+ .section .rodata.cst16.sha1const, "aM", @progbits, 16
+ .align 16
+rconst0x5A827999:
+ .long 0x5A827999
+ .long 0x5A827999
+ .long 0x5A827999
+ .long 0x5A827999
+rconst0x6ED9EBA1:
+ .long 0x6ED9EBA1
+ .long 0x6ED9EBA1
+ .long 0x6ED9EBA1
+ .long 0x6ED9EBA1
+rconst0x8F1BBCDC:
+ .long 0x8F1BBCDC
+ .long 0x8F1BBCDC
+ .long 0x8F1BBCDC
+ .long 0x8F1BBCDC
+rconst0xCA62C1D6:
+ .long 0xCA62C1D6
+ .long 0xCA62C1D6
+ .long 0xCA62C1D6
+ .long 0xCA62C1D6
+
#endif
diff --git a/libbb/hash_md5_sha_x86-64.S.sh b/libbb/hash_md5_sha_x86-64.S.sh
index 901896e6e..87c2d0800 100755
--- a/libbb/hash_md5_sha_x86-64.S.sh
+++ b/libbb/hash_md5_sha_x86-64.S.sh
@@ -6,33 +6,103 @@
# also contains the diff of the generated file.
exec >hash_md5_sha_x86-64.S
-# There is a way to use XMM registers (which always exist for x86-64!) for W[]
-# For example, if we load W as follows:
-# %xmm0: w[0x0] w[0x1] w[0x2] w[0x3]
-# %xmm4: w[0x4] w[0x5] w[0x6] w[0x7]
-# %xmm8: w[0x8] w[0x9] w[0xa] w[0xb]
-# %xmm12: w[0xc] w[0xd] w[0xe] w[0xf]
-# then the xor'ing operation to generate next W[0..3] is:
-# movaps %xmm0, %xmmT2
-# palignr $0x8, %xmm4, %xmmT2 # form (w[0x2],w[0x3],w[0x4],w[0x5])
-# # Right-shifts xmm4:xmmT2 by 8 bytes. Writes shifted result to xmmT2. SSSE3 insn.
-# movaps %xmm0, %xmmT13
-# palignr $0x4,%xmm0,%xmmT13 # form (w[0xd],w[0xe],w[0xf],w[0x0])
-# xmm0 = xmm0 ^ t2 ^ xmm8 ^ t13
-# xmm0 = rol32(xmm0,1) # no such insn, have to use pslld+psrld+or
-# and then results can be extracted for use:
-# movd %xmm0, %esi # new W[0]
-# pextrd $1, %xmm0, %esi # new W[1]
-# # SSE4.1 insn. Can use EXTRACTPS (also SSE4.1)
-# pextrd $2, %xmm0, %esi # new W[2]
-# pextrd $3, %xmm0, %esi # new W[3]
-# ... but this requires SSE4.1 and SSSE3, which are not universally available on x86-64.
+# Based on http://arctic.org/~dean/crypto/sha1.html.
+# ("This SHA1 implementation is public domain.")
+#
+# x86-64 always has at least SSE2 vector insns available.
+# We can use them without any CPUID checks (and without needing
+# fallback code in case the required insns are unavailable).
+# This code uses them to calculate W[] ahead of time.
+#
+# Unfortunately, results have to be passed from the vector unit
+# to the integer ALUs via the stack. MOVD/Q insns which move them
+# directly from vector to integer registers are slower than
+# store-to-load forwarding in the LSU (on Skylake at least).
+#
+# The win over purely integer code is small on Skylake,
+# only about 7-8%. We offload about 1/3 of our operations to the vector unit.
+# It can do 4 ops at once in one 128-bit register,
+# but we have to use twice as many of them because of the W[0] complication;
+# SSE2 has no "rotate each word by N bits" insns;
+# moving data to/from the vector unit is clunky; and Skylake
+# has four integer ALUs unified with three vector ALUs,
+# which makes pure integer code rather fast and makes
+# vector ops compete with integer ones.
+#
+# Zen3, with its separate vector ALUs, wins more, about 12%.
+
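In C terms, what the integer unit keeps executing while the vector unit runs ahead is the ordinary SHA-1 round. A minimal scalar sketch of one round-1 step (names are illustrative, not from the source); 'wk' is RCONST + W[n], which for most steps the generated code reads as a precomputed sum from -64+4*n(%rsp), and for a few round-1 steps folds into an LEA instead:

	#include <stdint.h>

	/* Minimal sketch of one round-1 step as the generated integer code does it. */
	static void sha1_round1(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
	                        uint32_t *e, uint32_t wk)
	{
		*e += wk;                          /* e += RCONST + W[n]     */
		*e += ((*c ^ *d) & *b) ^ *d;       /* (((c ^ d) & b) ^ d)    */
		*e += (*a << 5) | (*a >> 27);      /* e += rotl32(a, 5)      */
		*b  = (*b << 30) | (*b >> 2);      /* b  = rotl32(b, 30)     */
	}
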
+xmmT1="%xmm4"
+xmmT2="%xmm5"
+xmmRCONST="%xmm6"
+T=`printf '\t'`
+
+# SSE instructions are longer than 4 bytes on average.
+# Intel CPUs (up to Tiger Lake at least) can't decode
+# more than 16 bytes of code in one cycle.
+# By interleaving SSE code and integer code
+# we mostly achieve a situation where a 16-byte decode fetch window
+# contains 4 (or more) insns.
+#
+# However, on Skylake there was no observed difference,
+# but on Zen3, non-interleaved code is ~3% faster
+# (822 Mb/s versus 795 Mb/s hashing speed).
+# Off for now:
+interleave=false
+
+INTERLEAVE() {
+ $interleave || \
+ {
+ # Generate non-interleaved code
+ # (it should work correctly too)
+ echo "$1"
+ echo "$2"
+ return
+ }
+ (
+ echo "$1" | grep -v '^$' >"$0.temp1"
+ echo "$2" | grep -v '^$' >"$0.temp2"
+ exec 3<"$0.temp1"
+ exec 4<"$0.temp2"
+ IFS=''
+ while :; do
+ line1=''
+ line2=''
+ while :; do
+ read -r line1 <&3
+ if test "${line1:0:1}" != "#" && test "${line1:0:2}" != "$T#"; then
+ break
+ fi
+ echo "$line1"
+ done
+ while :; do
+ read -r line2 <&4
+ if test "${line2:0:4}" = "${T}lea"; then
+ # We use 7-8 byte long forms of LEA.
+ # Do not interleave them with SSE insns
+ # which are also long.
+ echo "$line2"
+ read -r line2 <&4
+ echo "$line2"
+ continue
+ fi
+ if test "${line2:0:1}" != "#" && test "${line2:0:2}" != "$T#"; then
+ break
+ fi
+ echo "$line2"
+ done
+ test "$line1$line2" || break
+ echo "$line1"
+ echo "$line2"
+ done
+ rm "$0.temp1" "$0.temp2"
+ )
+}
echo \
-'### Generated by hash_md5_sha_x86-64.S.sh ###
+"### Generated by hash_md5_sha_x86-64.S.sh ###
#if CONFIG_SHA1_SMALL == 0 && defined(__GNUC__) && defined(__x86_64__)
- .section .text.sha1_process_block64,"ax",@progbits
+ .section .text.sha1_process_block64,\"ax\",@progbits
.globl sha1_process_block64
.hidden sha1_process_block64
.type sha1_process_block64, @function
@@ -51,16 +121,10 @@ sha1_process_block64:
# eax..edx: a..d
# ebp: e
# esi,edi: temps
-# -32+4*n(%rsp),r8...r15: W[0..7,8..15]
-# (TODO: actually W[0..7] are used a bit more often, put _them_ into r8..r15?)
- movl $3, %eax
-1:
- movq (%rdi,%rax,8), %rsi
- bswapq %rsi
- rolq $32, %rsi
- movq %rsi, -32(%rsp,%rax,8)
- decl %eax
- jns 1b
+# xmm0..xmm3: W[]
+# xmm4,xmm5: temps
+# xmm6: current round constant
+# -64(%rsp): area for passing RCONST + W[] from vector to integer units
movl 80(%rdi), %eax # a = ctx->hash[0]
movl 84(%rdi), %ebx # b = ctx->hash[1]
@@ -68,32 +132,120 @@ sha1_process_block64:
movl 92(%rdi), %edx # d = ctx->hash[3]
movl 96(%rdi), %ebp # e = ctx->hash[4]
+ movaps rconst0x5A827999(%rip), $xmmRCONST
+
+ # For round 1, steps 0 and 8..15, we pass W[0,8..15] in esi,r8..r15
+ # instead of spilling them to stack.
+ # (We lose parallelized addition of RCONST, but LEA
+ # can do two additions at once, so...)
+ movq 4*0(%rdi), %rsi
+ movq 4*2(%rdi), %r10
+ bswapq %rsi
+ bswapq %r10
+ rolq \$32, %rsi # rsi = W[1]:W[0]
+ rolq \$32, %r10
+ movq %rsi, %xmm0
+ movq %r10, $xmmT1
+ punpcklqdq $xmmT1, %xmm0 # xmm0 = r10:rsi = (W[0],W[1],W[2],W[3])
+ movaps %xmm0, $xmmT1
+ paddd $xmmRCONST, $xmmT1
+ movups $xmmT1, -64+4*0(%rsp)
+
+ movq 4*4(%rdi), %r8
+ movq 4*6(%rdi), %r10
+ bswapq %r8
+ bswapq %r10
+ rolq \$32, %r8
+ rolq \$32, %r10
+ movq %r8, %xmm1
+ movq %r10, $xmmT1
+ punpcklqdq $xmmT1, %xmm1 # xmm1 = r10:r8 = (W[4],W[5],W[6],W[7])
+ movaps %xmm1, $xmmT1
+ paddd $xmmRCONST, $xmmT1
+ movups $xmmT1, -64+4*4(%rsp)
+
movq 4*8(%rdi), %r8
movq 4*10(%rdi), %r10
bswapq %r8
bswapq %r10
+ movl %r8d, %r9d # r9d = W[9]
+ rolq \$32, %r8 # r8 = W[9]:W[8]
+ movl %r10d, %r11d # r11d = W[11]
+ rolq \$32, %r10 # r10 = W[11]:W[10]
+ movq %r8, %xmm2
+ movq %r10, $xmmT1
+ punpcklqdq $xmmT1, %xmm2 # xmm2 = r10:r8 = (W[8],W[9],W[10],W[11])
+
movq 4*12(%rdi), %r12
movq 4*14(%rdi), %r14
bswapq %r12
bswapq %r14
- movl %r8d, %r9d
- shrq $32, %r8
- movl %r10d, %r11d
- shrq $32, %r10
- movl %r12d, %r13d
- shrq $32, %r12
- movl %r14d, %r15d
- shrq $32, %r14
-'
-W32() {
-test "$1" || exit 1
-test "$1" -lt 0 && exit 1
-test "$1" -gt 15 && exit 1
-test "$1" -lt 8 && echo "-32+4*$1(%rsp)"
-test "$1" -ge 8 && echo "%r${1}d"
+ movl %r12d, %r13d # r13d = W[13]
+ rolq \$32, %r12 # r12 = W[13]:W[12]
+ movl %r14d, %r15d # r15d = W[15]
+ rolq \$32, %r14 # r14 = W[15]:W[14]
+ movq %r12, %xmm3
+ movq %r14, $xmmT1
+ punpcklqdq $xmmT1, %xmm3 # xmm3 = r14:r12 = (W[12],W[13],W[14],W[15])
+"
+
+PREP() {
+local xmmW0=$1
+local xmmW4=$2
+local xmmW8=$3
+local xmmW12=$4
+# the above must be %xmm0..3 in some permutation
+local dstmem=$5
+#W[0] = rol(W[13] ^ W[8] ^ W[2] ^ W[0], 1);
+#W[1] = rol(W[14] ^ W[9] ^ W[3] ^ W[1], 1);
+#W[2] = rol(W[15] ^ W[10] ^ W[4] ^ W[2], 1);
+#W[3] = rol( 0 ^ W[11] ^ W[5] ^ W[3], 1);
+#W[3] ^= rol(W[0], 1);
+echo "# PREP $@
+ movaps $xmmW12, $xmmT1
+ psrldq \$4, $xmmT1 # rshift by 4 bytes: T1 = ([13],[14],[15],0)
+
+ pshufd \$0x4e, $xmmW0, $xmmT2 # 01001110=2,3,0,1 shuffle, ([2],[3],x,x)
+ punpcklqdq $xmmW4, $xmmT2 # T2 = W4[0..63]:T2[0..63] = ([2],[3],[4],[5])
+
+ xorps $xmmW8, $xmmW0 # ([8],[9],[10],[11]) ^ ([0],[1],[2],[3])
+ xorps $xmmT1, $xmmT2 # ([13],[14],[15],0) ^ ([2],[3],[4],[5])
+ xorps $xmmT2, $xmmW0 # ^
+ # W0 = unrotated (W[0]..W[3]), still needs W[3] fixup
+ movaps $xmmW0, $xmmT2
+
+ xorps $xmmT1, $xmmT1 # rol(W0,1):
+ pcmpgtd $xmmW0, $xmmT1 # ffffffff for elements <0 (ones with msb bit 1)
+ paddd $xmmW0, $xmmW0 # shift left by 1
+ psubd $xmmT1, $xmmW0 # add 1 to those who had msb bit 1
+ # W0 = rotated (W[0]..W[3]), still needs W[3] fixup
+
+ pslldq \$12, $xmmT2 # lshift by 12 bytes: T2 = (0,0,0,unrotW[0])
+ movaps $xmmT2, $xmmT1
+ pslld \$2, $xmmT2
+ psrld \$30, $xmmT1
+# xorps $xmmT1, $xmmT2 # rol((0,0,0,unrotW[0]),2)
+ xorps $xmmT1, $xmmW0 # same result, but does not depend on/does not modify T2
+
+ xorps $xmmT2, $xmmW0 # W0 = rol(W[0]..W[3],1) ^ (0,0,0,rol(unrotW[0],2))
+"
+# movq $xmmW0, %r8 # high latency (~6 cycles)
+# movaps $xmmW0, $xmmT1
+# psrldq \$8, $xmmT1 # rshift by 8 bytes: move upper 64 bits to lower
+# movq $xmmT1, %r10 # high latency
+# movq %r8, %r9
+# movq %r10, %r11
+# shrq \$32, %r9
+# shrq \$32, %r11
+# ^^^ slower than passing the results on stack (!!!)
+echo "
+ movaps $xmmW0, $xmmT2
+ paddd $xmmRCONST, $xmmT2
+ movups $xmmT2, $dstmem
+"
}
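For readers more comfortable with intrinsics, here is a hedged C sketch of what one invocation of the PREP() helper above computes: the next four schedule words, including the W[3] fixup, plus the RCONST-added copy that the asm stores to the stack. Function and parameter names are illustrative; the real code stays in hand-written SSE2 for the scheduling reasons given earlier.

	#include <emmintrin.h>   /* SSE2 */

	/* w0..w12 hold the last 16 schedule words, four per register, lane 0 first. */
	static __m128i prep_sketch(__m128i *w0, __m128i w4, __m128i w8, __m128i w12,
	                           __m128i rconst, __m128i *out)
	{
		__m128i t1, t2, x, unrot, sign, fix;

		t1 = _mm_srli_si128(w12, 4);            /* ([13],[14],[15],0)             */
		t2 = _mm_shuffle_epi32(*w0, 0x4e);      /* ([2],[3],[0],[1])              */
		t2 = _mm_unpacklo_epi64(t2, w4);        /* ([2],[3],[4],[5])              */

		x = _mm_xor_si128(*w0, w8);
		x = _mm_xor_si128(x, _mm_xor_si128(t1, t2));
		unrot = x;                              /* W[3] lane still lacks new W[0] */

		sign = _mm_cmpgt_epi32(_mm_setzero_si128(), x); /* -1 where msb is set    */
		x = _mm_sub_epi32(_mm_add_epi32(x, x), sign);   /* per-lane rol by 1      */

		fix = _mm_slli_si128(unrot, 12);        /* (0,0,0,unrot W[0])             */
		fix = _mm_or_si128(_mm_slli_epi32(fix, 2), _mm_srli_epi32(fix, 30));
		x = _mm_xor_si128(x, fix);              /* W[3] ^= rol(new W[0], 1)       */

		*w0 = x;
		*out = _mm_add_epi32(x, rconst);        /* the asm stores this to -64(%rsp) area */
		return x;
	}
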
-# It's possible to interleave insns in rounds to mostly eliminate
+# It's possible to interleave integer insns in rounds to mostly eliminate
 # dependency chains, but this is likely to only help old Pentium-based
# CPUs (ones without OOO, which can only simultaneously execute a pair
# of _adjacent_ insns).
@@ -107,21 +259,16 @@ local n0=$(((n+0) & 15))
echo "
# $n
";test $n0 = 0 && echo "
- # W[0], already in %esi
+ leal $RCONST(%r$e,%rsi), %e$e # e += RCONST + W[n]
";test $n0 != 0 && test $n0 -lt 8 && echo "
- movl `W32 $n0`, %esi # W[n]
+ addl -64+4*$n0(%rsp), %e$e # e += RCONST + W[n]
";test $n0 -ge 8 && echo "
- # W[n], in %r$n0
+ leal $RCONST(%r$e,%r$n0), %e$e # e += RCONST + W[n]
";echo "
movl %e$c, %edi # c
xorl %e$d, %edi # ^d
andl %e$b, %edi # &b
xorl %e$d, %edi # (((c ^ d) & b) ^ d)
-";test $n0 -lt 8 && echo "
- leal $RCONST(%r$e,%rsi), %e$e # e += RCONST + W[n]
-";test $n0 -ge 8 && echo "
- leal $RCONST(%r$e,%r$n0), %e$e # e += RCONST + W[n]
-";echo "
addl %edi, %e$e # e += (((c ^ d) & b) ^ d)
movl %e$a, %esi #
roll \$5, %esi # rotl32(a,5)
@@ -138,28 +285,11 @@ local n2=$(((n+2) & 15))
local n0=$(((n+0) & 15))
echo "
# $n
-";test $n0 -lt 8 && echo "
- movl `W32 $n13`, %esi # W[(n+13) & 15]
- xorl `W32 $n8`, %esi # ^W[(n+8) & 15]
- xorl `W32 $n2`, %esi # ^W[(n+2) & 15]
- xorl `W32 $n0`, %esi # ^W[n & 15]
- roll %esi #
- movl %esi, `W32 $n0` # store to W[n & 15]
-";test $n0 -ge 8 && echo "
- xorl `W32 $n13`, `W32 $n0` # W[n & 15] ^= W[(n+13) & 15]
- xorl `W32 $n8`, `W32 $n0` # ^W[(n+8) & 15]
- xorl `W32 $n2`, `W32 $n0` # ^W[(n+2) & 15]
- roll `W32 $n0` #
-";echo "
movl %e$c, %edi # c
xorl %e$d, %edi # ^d
andl %e$b, %edi # &b
xorl %e$d, %edi # (((c ^ d) & b) ^ d)
-";test $n0 -lt 8 && echo "
- leal $RCONST(%r$e,%rsi), %e$e # e += RCONST + W[n & 15]
-";test $n0 -ge 8 && echo "
- leal $RCONST(%r$e,%r$n0), %e$e # e += RCONST + W[n & 15]
-";echo "
+ addl -64+4*$n0(%rsp), %e$e # e += RCONST + W[n & 15]
addl %edi, %e$e # e += (((c ^ d) & b) ^ d)
movl %e$a, %esi #
roll \$5, %esi # rotl32(a,5)
@@ -167,13 +297,6 @@ echo "
rorl \$2, %e$b # b = rotl32(b,30)
"
}
-{
-RCONST=0x5A827999
-RD1A ax bx cx dx bp 0; RD1A bp ax bx cx dx 1; RD1A dx bp ax bx cx 2; RD1A cx dx bp ax bx 3; RD1A bx cx dx bp ax 4
-RD1A ax bx cx dx bp 5; RD1A bp ax bx cx dx 6; RD1A dx bp ax bx cx 7; RD1A cx dx bp ax bx 8; RD1A bx cx dx bp ax 9
-RD1A ax bx cx dx bp 10; RD1A bp ax bx cx dx 11; RD1A dx bp ax bx cx 12; RD1A cx dx bp ax bx 13; RD1A bx cx dx bp ax 14
-RD1A ax bx cx dx bp 15; RD1B bp ax bx cx dx 16; RD1B dx bp ax bx cx 17; RD1B cx dx bp ax bx 18; RD1B bx cx dx bp ax 19
-} | grep -v '^$'
RD2() {
local a=$1;local b=$2;local c=$3;local d=$4;local e=$5
@@ -184,27 +307,10 @@ local n2=$(((n+2) & 15))
local n0=$(((n+0) & 15))
echo "
# $n
-";test $n0 -lt 8 && echo "
- movl `W32 $n13`, %esi # W[(n+13) & 15]
- xorl `W32 $n8`, %esi # ^W[(n+8) & 15]
- xorl `W32 $n2`, %esi # ^W[(n+2) & 15]
- xorl `W32 $n0`, %esi # ^W[n & 15]
- roll %esi #
- movl %esi, `W32 $n0` # store to W[n & 15]
-";test $n0 -ge 8 && echo "
- xorl `W32 $n13`, `W32 $n0` # W[n & 15] ^= W[(n+13) & 15]
- xorl `W32 $n8`, `W32 $n0` # ^W[(n+8) & 15]
- xorl `W32 $n2`, `W32 $n0` # ^W[(n+2) & 15]
- roll `W32 $n0` #
-";echo "
movl %e$c, %edi # c
xorl %e$d, %edi # ^d
xorl %e$b, %edi # ^b
-";test $n0 -lt 8 && echo "
- leal $RCONST(%r$e,%rsi), %e$e # e += RCONST + W[n & 15]
-";test $n0 -ge 8 && echo "
- leal $RCONST(%r$e,%r$n0), %e$e # e += RCONST + W[n & 15]
-";echo "
+ addl -64+4*$n0(%rsp), %e$e # e += RCONST + W[n & 15]
addl %edi, %e$e # e += (c ^ d ^ b)
movl %e$a, %esi #
roll \$5, %esi # rotl32(a,5)
@@ -212,13 +318,6 @@ echo "
rorl \$2, %e$b # b = rotl32(b,30)
"
}
-{
-RCONST=0x6ED9EBA1
-RD2 ax bx cx dx bp 20; RD2 bp ax bx cx dx 21; RD2 dx bp ax bx cx 22; RD2 cx dx bp ax bx 23; RD2 bx cx dx bp ax 24
-RD2 ax bx cx dx bp 25; RD2 bp ax bx cx dx 26; RD2 dx bp ax bx cx 27; RD2 cx dx bp ax bx 28; RD2 bx cx dx bp ax 29
-RD2 ax bx cx dx bp 30; RD2 bp ax bx cx dx 31; RD2 dx bp ax bx cx 32; RD2 cx dx bp ax bx 33; RD2 bx cx dx bp ax 34
-RD2 ax bx cx dx bp 35; RD2 bp ax bx cx dx 36; RD2 dx bp ax bx cx 37; RD2 cx dx bp ax bx 38; RD2 bx cx dx bp ax 39
-} | grep -v '^$'
RD3() {
local a=$1;local b=$2;local c=$3;local d=$4;local e=$5
@@ -235,53 +334,82 @@ echo "
andl %e$c, %esi # si: b & c
andl %e$d, %edi # di: (b | c) & d
orl %esi, %edi # ((b | c) & d) | (b & c)
-";test $n0 -lt 8 && echo "
- movl `W32 $n13`, %esi # W[(n+13) & 15]
- xorl `W32 $n8`, %esi # ^W[(n+8) & 15]
- xorl `W32 $n2`, %esi # ^W[(n+2) & 15]
- xorl `W32 $n0`, %esi # ^W[n & 15]
- roll %esi #
- movl %esi, `W32 $n0` # store to W[n & 15]
-";test $n0 -ge 8 && echo "
- xorl `W32 $n13`, `W32 $n0` # W[n & 15] ^= W[(n+13) & 15]
- xorl `W32 $n8`, `W32 $n0` # ^W[(n+8) & 15]
- xorl `W32 $n2`, `W32 $n0` # ^W[(n+2) & 15]
- roll `W32 $n0` #
-";echo "
addl %edi, %e$e # += ((b | c) & d) | (b & c)
-";test $n0 -lt 8 && echo "
- leal $RCONST(%r$e,%rsi), %e$e # e += RCONST + W[n & 15]
-";test $n0 -ge 8 && echo "
- leal $RCONST(%r$e,%r$n0), %e$e # e += RCONST + W[n & 15]
-";echo "
+ addl -64+4*$n0(%rsp), %e$e # e += RCONST + W[n & 15]
movl %e$a, %esi #
roll \$5, %esi # rotl32(a,5)
addl %esi, %e$e # e += rotl32(a,5)
rorl \$2, %e$b # b = rotl32(b,30)
"
}
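For reference, the three per-round boolean selectors emitted by RD1A/RD1B, RD2 and RD3, written in plain C exactly as the helpers compute them (a sketch; the Maj form used here matches the asm and needs one fewer operation than the textbook (b&c)|(b&d)|(c&d)):

	#include <stdint.h>

	static uint32_t f_ch (uint32_t b, uint32_t c, uint32_t d) { return ((c ^ d) & b) ^ d; }       /* rounds  0..19          */
	static uint32_t f_par(uint32_t b, uint32_t c, uint32_t d) { return b ^ c ^ d; }               /* rounds 20..39, 60..79  */
	static uint32_t f_maj(uint32_t b, uint32_t c, uint32_t d) { return ((b | c) & d) | (b & c); } /* rounds 40..59          */
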
+
{
-#RCONST=0x8F1BBCDC "out of range for signed 32bit displacement"
-RCONST=-0x70E44324
-RD3 ax bx cx dx bp 40; RD3 bp ax bx cx dx 41; RD3 dx bp ax bx cx 42; RD3 cx dx bp ax bx 43; RD3 bx cx dx bp ax 44
-RD3 ax bx cx dx bp 45; RD3 bp ax bx cx dx 46; RD3 dx bp ax bx cx 47; RD3 cx dx bp ax bx 48; RD3 bx cx dx bp ax 49
-RD3 ax bx cx dx bp 50; RD3 bp ax bx cx dx 51; RD3 dx bp ax bx cx 52; RD3 cx dx bp ax bx 53; RD3 bx cx dx bp ax 54
-RD3 ax bx cx dx bp 55; RD3 bp ax bx cx dx 56; RD3 dx bp ax bx cx 57; RD3 cx dx bp ax bx 58; RD3 bx cx dx bp ax 59
-} | grep -v '^$'
+# Round 1
+RCONST=0x5A827999
+RD1A ax bx cx dx bp 0; RD1A bp ax bx cx dx 1; RD1A dx bp ax bx cx 2; RD1A cx dx bp ax bx 3;
+RD1A bx cx dx bp ax 4; RD1A ax bx cx dx bp 5; RD1A bp ax bx cx dx 6; RD1A dx bp ax bx cx 7;
+a=`PREP %xmm0 %xmm1 %xmm2 %xmm3 "-64+16*0(%rsp)"`
+b=`RD1A cx dx bp ax bx 8; RD1A bx cx dx bp ax 9; RD1A ax bx cx dx bp 10; RD1A bp ax bx cx dx 11;`
+INTERLEAVE "$a" "$b"
+a=`echo " movaps rconst0x6ED9EBA1(%rip), $xmmRCONST"
+ PREP %xmm1 %xmm2 %xmm3 %xmm0 "-64+16*1(%rsp)"`
+b=`RD1A dx bp ax bx cx 12; RD1A cx dx bp ax bx 13; RD1A bx cx dx bp ax 14; RD1A ax bx cx dx bp 15;`
+INTERLEAVE "$a" "$b"
+a=`PREP %xmm2 %xmm3 %xmm0 %xmm1 "-64+16*2(%rsp)"`
+b=`RD1B bp ax bx cx dx 16; RD1B dx bp ax bx cx 17; RD1B cx dx bp ax bx 18; RD1B bx cx dx bp ax 19;`
+INTERLEAVE "$a" "$b"
+
+# Round 2
+RCONST=0x6ED9EBA1
+a=`PREP %xmm3 %xmm0 %xmm1 %xmm2 "-64+16*3(%rsp)"`
+b=`RD2 ax bx cx dx bp 20; RD2 bp ax bx cx dx 21; RD2 dx bp ax bx cx 22; RD2 cx dx bp ax bx 23;`
+INTERLEAVE "$a" "$b"
+a=`PREP %xmm0 %xmm1 %xmm2 %xmm3 "-64+16*0(%rsp)"`
+b=`RD2 bx cx dx bp ax 24; RD2 ax bx cx dx bp 25; RD2 bp ax bx cx dx 26; RD2 dx bp ax bx cx 27;`
+INTERLEAVE "$a" "$b"
+a=`PREP %xmm1 %xmm2 %xmm3 %xmm0 "-64+16*1(%rsp)"`
+b=`RD2 cx dx bp ax bx 28; RD2 bx cx dx bp ax 29; RD2 ax bx cx dx bp 30; RD2 bp ax bx cx dx 31;`
+INTERLEAVE "$a" "$b"
+a=`echo " movaps rconst0x8F1BBCDC(%rip), $xmmRCONST"
+ PREP %xmm2 %xmm3 %xmm0 %xmm1 "-64+16*2(%rsp)"`
+b=`RD2 dx bp ax bx cx 32; RD2 cx dx bp ax bx 33; RD2 bx cx dx bp ax 34; RD2 ax bx cx dx bp 35;`
+INTERLEAVE "$a" "$b"
+a=`PREP %xmm3 %xmm0 %xmm1 %xmm2 "-64+16*3(%rsp)"`
+b=`RD2 bp ax bx cx dx 36; RD2 dx bp ax bx cx 37; RD2 cx dx bp ax bx 38; RD2 bx cx dx bp ax 39;`
+INTERLEAVE "$a" "$b"
+
+# Round 3
+RCONST=0x8F1BBCDC
+a=`PREP %xmm0 %xmm1 %xmm2 %xmm3 "-64+16*0(%rsp)"`
+b=`RD3 ax bx cx dx bp 40; RD3 bp ax bx cx dx 41; RD3 dx bp ax bx cx 42; RD3 cx dx bp ax bx 43;`
+INTERLEAVE "$a" "$b"
+a=`PREP %xmm1 %xmm2 %xmm3 %xmm0 "-64+16*1(%rsp)"`
+b=`RD3 bx cx dx bp ax 44; RD3 ax bx cx dx bp 45; RD3 bp ax bx cx dx 46; RD3 dx bp ax bx cx 47;`
+INTERLEAVE "$a" "$b"
+a=`PREP %xmm2 %xmm3 %xmm0 %xmm1 "-64+16*2(%rsp)"`
+b=`RD3 cx dx bp ax bx 48; RD3 bx cx dx bp ax 49; RD3 ax bx cx dx bp 50; RD3 bp ax bx cx dx 51;`
+INTERLEAVE "$a" "$b"
+a=`echo " movaps rconst0xCA62C1D6(%rip), $xmmRCONST"
+ PREP %xmm3 %xmm0 %xmm1 %xmm2 "-64+16*3(%rsp)"`
+b=`RD3 dx bp ax bx cx 52; RD3 cx dx bp ax bx 53; RD3 bx cx dx bp ax 54; RD3 ax bx cx dx bp 55;`
+INTERLEAVE "$a" "$b"
+a=`PREP %xmm0 %xmm1 %xmm2 %xmm3 "-64+16*0(%rsp)"`
+b=`RD3 bp ax bx cx dx 56; RD3 dx bp ax bx cx 57; RD3 cx dx bp ax bx 58; RD3 bx cx dx bp ax 59;`
+INTERLEAVE "$a" "$b"
# Round 4 has the same logic as round 2, only n and RCONST are different
-{
-#RCONST=0xCA62C1D6 "out of range for signed 32bit displacement"
-RCONST=-0x359D3E2A
-RD2 ax bx cx dx bp 60; RD2 bp ax bx cx dx 61; RD2 dx bp ax bx cx 62; RD2 cx dx bp ax bx 63; RD2 bx cx dx bp ax 64
-RD2 ax bx cx dx bp 65; RD2 bp ax bx cx dx 66; RD2 dx bp ax bx cx 67; RD2 cx dx bp ax bx 68; RD2 bx cx dx bp ax 69
-RD2 ax bx cx dx bp 70; RD2 bp ax bx cx dx 71; RD2 dx bp ax bx cx 72; RD2 cx dx bp ax bx 73; RD2 bx cx dx bp ax 74
-RD2 ax bx cx dx bp 75; RD2 bp ax bx cx dx 76; RD2 dx bp ax bx cx 77; RD2 cx dx bp ax bx 78; RD2 bx cx dx bp ax 79
-# Note: new W[n&15] values generated in last 3 iterations
-# (W[13,14,15]) are unused after each of these iterations.
-# Since we use r8..r15 for W[8..15], this does not matter.
-# If we switch to e.g. using r8..r15 for W[0..7], then saving of W[13,14,15]
-# (the "movl %esi, `W32 $n0`" insn) is a dead store and can be removed.
+RCONST=0xCA62C1D6
+a=`PREP %xmm1 %xmm2 %xmm3 %xmm0 "-64+16*1(%rsp)"`
+b=`RD2 ax bx cx dx bp 60; RD2 bp ax bx cx dx 61; RD2 dx bp ax bx cx 62; RD2 cx dx bp ax bx 63;`
+INTERLEAVE "$a" "$b"
+a=`PREP %xmm2 %xmm3 %xmm0 %xmm1 "-64+16*2(%rsp)"`
+b=`RD2 bx cx dx bp ax 64; RD2 ax bx cx dx bp 65; RD2 bp ax bx cx dx 66; RD2 dx bp ax bx cx 67;`
+INTERLEAVE "$a" "$b"
+a=`PREP %xmm3 %xmm0 %xmm1 %xmm2 "-64+16*3(%rsp)"`
+b=`RD2 cx dx bp ax bx 68; RD2 bx cx dx bp ax 69; RD2 ax bx cx dx bp 70; RD2 bp ax bx cx dx 71;`
+INTERLEAVE "$a" "$b"
+RD2 dx bp ax bx cx 72; RD2 cx dx bp ax bx 73; RD2 bx cx dx bp ax 74; RD2 ax bx cx dx bp 75;
+RD2 bp ax bx cx dx 76; RD2 dx bp ax bx cx 77; RD2 cx dx bp ax bx 78; RD2 bx cx dx bp ax 79;
} | grep -v '^$'
echo "
@@ -300,4 +428,28 @@ echo "
ret
.size sha1_process_block64, .-sha1_process_block64
+
+ .section .rodata.cst16.sha1const, \"aM\", @progbits, 16
+ .align 16
+rconst0x5A827999:
+ .long 0x5A827999
+ .long 0x5A827999
+ .long 0x5A827999
+ .long 0x5A827999
+rconst0x6ED9EBA1:
+ .long 0x6ED9EBA1
+ .long 0x6ED9EBA1
+ .long 0x6ED9EBA1
+ .long 0x6ED9EBA1
+rconst0x8F1BBCDC:
+ .long 0x8F1BBCDC
+ .long 0x8F1BBCDC
+ .long 0x8F1BBCDC
+ .long 0x8F1BBCDC
+rconst0xCA62C1D6:
+ .long 0xCA62C1D6
+ .long 0xCA62C1D6
+ .long 0xCA62C1D6
+ .long 0xCA62C1D6
+
#endif"