path: root/lib/accelerated/x86/macosx/aesni-x86_64.s
Diffstat (limited to 'lib/accelerated/x86/macosx/aesni-x86_64.s')
-rw-r--r--  lib/accelerated/x86/macosx/aesni-x86_64.s  1793
1 file changed, 1240 insertions(+), 553 deletions(-)
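This file is GnuTLS's pre-generated macOS assembly for AES-NI and appears to track upstream OpenSSL's aesni-x86_64.pl. The changes below share three themes: the round-key loops switch from a shrl $1 half-round counter to a shll $4 end-anchored pointer walked with a negative index that counts up to zero; dedicated two-block helpers (_aesni_encrypt2/_aesni_decrypt2) replace calls to the three-block versions with a zeroed dummy block; and every routine now wipes XMM registers (and stack scratch) with pxor/xorps before returning, so key material and plaintext do not linger in SIMD state. A minimal C sketch of that wipe pattern, with a hypothetical helper name (compile with -maes):

#include <emmintrin.h>
#include <wmmintrin.h>   /* AES-NI intrinsics */

/* Hypothetical one-block helper illustrating the hardening theme of
 * this diff: after the last round, zero every SIMD value that held
 * key material.  In C a compiler may elide such wipes, which is
 * exactly why the generated assembly does it with explicit pxor
 * instructions that cannot be optimized away. */
static __m128i encrypt_block(__m128i block, const __m128i *rk, int rounds)
{
    __m128i k = _mm_loadu_si128(&rk[0]);
    block = _mm_xor_si128(block, k);                 /* whitening round */
    for (int i = 1; i < rounds; i++)
        block = _mm_aesenc_si128(block, _mm_loadu_si128(&rk[i]));
    block = _mm_aesenclast_si128(block, _mm_loadu_si128(&rk[rounds]));
    k = _mm_setzero_si128();                         /* wipe, cf. pxor %xmm0,%xmm0 */
    (void)k;
    return block;
}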
diff --git a/lib/accelerated/x86/macosx/aesni-x86_64.s b/lib/accelerated/x86/macosx/aesni-x86_64.s
index 420fd5a7b6..f0a5606348 100644
--- a/lib/accelerated/x86/macosx/aesni-x86_64.s
+++ b/lib/accelerated/x86/macosx/aesni-x86_64.s
@@ -38,6 +38,7 @@
# *** This file is auto-generated ***
#
.text
+
.globl _aesni_encrypt
.p2align 4
@@ -53,9 +54,12 @@ L$oop_enc1_1:
decl %eax
movups (%rdx),%xmm1
leaq 16(%rdx),%rdx
- jnz L$oop_enc1_1
+ jnz L$oop_enc1_1
.byte 102,15,56,221,209
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
.byte 0xf3,0xc3
@@ -74,34 +78,96 @@ L$oop_dec1_2:
decl %eax
movups (%rdx),%xmm1
leaq 16(%rdx),%rdx
- jnz L$oop_dec1_2
+ jnz L$oop_dec1_2
.byte 102,15,56,223,209
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
+ .byte 0xf3,0xc3
+
+
+.p2align 4
+_aesni_encrypt2:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+ xorps %xmm0,%xmm2
+ xorps %xmm0,%xmm3
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+ addq $16,%rax
+
+L$enc_loop2:
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
+.byte 102,15,56,220,208
+.byte 102,15,56,220,216
+ movups -16(%rcx,%rax,1),%xmm0
+ jnz L$enc_loop2
+
+.byte 102,15,56,220,209
+.byte 102,15,56,220,217
+.byte 102,15,56,221,208
+.byte 102,15,56,221,216
+ .byte 0xf3,0xc3
+
+
+.p2align 4
+_aesni_decrypt2:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+ xorps %xmm0,%xmm2
+ xorps %xmm0,%xmm3
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+ addq $16,%rax
+
+L$dec_loop2:
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
+.byte 102,15,56,222,208
+.byte 102,15,56,222,216
+ movups -16(%rcx,%rax,1),%xmm0
+ jnz L$dec_loop2
+
+.byte 102,15,56,222,209
+.byte 102,15,56,222,217
+.byte 102,15,56,223,208
+.byte 102,15,56,223,216
.byte 0xf3,0xc3
.p2align 4
_aesni_encrypt3:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
- movups (%rcx),%xmm0
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+ addq $16,%rax
L$enc_loop3:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %eax
.byte 102,15,56,220,225
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,224
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop3
.byte 102,15,56,220,209
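The new _aesni_encrypt2/_aesni_decrypt2 above keep two independent blocks in flight so the second AESENC can issue while the first is still in the pipeline, and they walk the key schedule with an end-anchored pointer plus a negative index (shll $4 / negq / addq $32) so the jnz can reuse the addq's flags instead of a separate round counter. A hedged C intrinsics sketch of the same interleaving (helper name hypothetical):

#include <emmintrin.h>
#include <wmmintrin.h>

/* Two streams share each round key load; the hardware pipelines the
 * back-to-back AESENC instructions. */
static void encrypt2(__m128i *b0, __m128i *b1, const __m128i *rk, int rounds)
{
    __m128i x0 = _mm_xor_si128(*b0, rk[0]);
    __m128i x1 = _mm_xor_si128(*b1, rk[0]);
    for (int i = 1; i < rounds; i++) {
        x0 = _mm_aesenc_si128(x0, rk[i]);   /* same round key feeds ... */
        x1 = _mm_aesenc_si128(x1, rk[i]);   /* ... both blocks back to back */
    }
    *b0 = _mm_aesenclast_si128(x0, rk[rounds]);
    *b1 = _mm_aesenclast_si128(x1, rk[rounds]);
}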
@@ -116,25 +182,26 @@ L$enc_loop3:
.p2align 4
_aesni_decrypt3:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
- movups (%rcx),%xmm0
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+ addq $16,%rax
L$dec_loop3:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %eax
.byte 102,15,56,222,225
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,222,224
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz L$dec_loop3
.byte 102,15,56,222,209
@@ -149,28 +216,30 @@ L$dec_loop3:
.p2align 4
_aesni_encrypt4:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
xorps %xmm0,%xmm5
- movups (%rcx),%xmm0
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+.byte 0x0f,0x1f,0x00
+ addq $16,%rax
L$enc_loop4:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %eax
.byte 102,15,56,220,225
.byte 102,15,56,220,233
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,224
.byte 102,15,56,220,232
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop4
.byte 102,15,56,220,209
@@ -187,28 +256,30 @@ L$enc_loop4:
.p2align 4
_aesni_decrypt4:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
xorps %xmm0,%xmm4
xorps %xmm0,%xmm5
- movups (%rcx),%xmm0
+ movups 32(%rcx),%xmm0
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+.byte 0x0f,0x1f,0x00
+ addq $16,%rax
L$dec_loop4:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %eax
.byte 102,15,56,222,225
.byte 102,15,56,222,233
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,222,224
.byte 102,15,56,222,232
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz L$dec_loop4
.byte 102,15,56,222,209
@@ -225,43 +296,40 @@ L$dec_loop4:
.p2align 4
_aesni_encrypt6:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
-.byte 102,15,56,220,209
pxor %xmm0,%xmm4
+.byte 102,15,56,220,209
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
.byte 102,15,56,220,217
pxor %xmm0,%xmm5
-.byte 102,15,56,220,225
pxor %xmm0,%xmm6
-.byte 102,15,56,220,233
+.byte 102,15,56,220,225
pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,220,241
- movups (%rcx),%xmm0
-.byte 102,15,56,220,249
+ movups (%rcx,%rax,1),%xmm0
+ addq $16,%rax
jmp L$enc_loop6_enter
.p2align 4
L$enc_loop6:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %eax
.byte 102,15,56,220,225
+L$enc_loop6_enter:
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
-L$enc_loop6_enter:
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop6
.byte 102,15,56,220,209
@@ -282,43 +350,40 @@ L$enc_loop6_enter:
.p2align 4
_aesni_decrypt6:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
pxor %xmm0,%xmm3
-.byte 102,15,56,222,209
pxor %xmm0,%xmm4
+.byte 102,15,56,222,209
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
.byte 102,15,56,222,217
pxor %xmm0,%xmm5
-.byte 102,15,56,222,225
pxor %xmm0,%xmm6
-.byte 102,15,56,222,233
+.byte 102,15,56,222,225
pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,222,241
- movups (%rcx),%xmm0
-.byte 102,15,56,222,249
+ movups (%rcx,%rax,1),%xmm0
+ addq $16,%rax
jmp L$dec_loop6_enter
.p2align 4
L$dec_loop6:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %eax
.byte 102,15,56,222,225
+L$dec_loop6_enter:
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
-L$dec_loop6_enter:
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,222,224
.byte 102,15,56,222,232
.byte 102,15,56,222,240
.byte 102,15,56,222,248
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz L$dec_loop6
.byte 102,15,56,222,209
@@ -339,52 +404,46 @@ L$dec_loop6_enter:
.p2align 4
_aesni_encrypt8:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
-.byte 102,15,56,220,209
pxor %xmm0,%xmm4
-.byte 102,15,56,220,217
pxor %xmm0,%xmm5
-.byte 102,15,56,220,225
pxor %xmm0,%xmm6
-.byte 102,15,56,220,233
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+.byte 102,15,56,220,209
pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,220,241
pxor %xmm0,%xmm8
-.byte 102,15,56,220,249
+.byte 102,15,56,220,217
pxor %xmm0,%xmm9
- movups (%rcx),%xmm0
-.byte 102,68,15,56,220,193
-.byte 102,68,15,56,220,201
- movups 16(%rcx),%xmm1
- jmp L$enc_loop8_enter
+ movups (%rcx,%rax,1),%xmm0
+ addq $16,%rax
+ jmp L$enc_loop8_inner
.p2align 4
L$enc_loop8:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %eax
+L$enc_loop8_inner:
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
- movups 16(%rcx),%xmm1
L$enc_loop8_enter:
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz L$enc_loop8
.byte 102,15,56,220,209
@@ -409,52 +468,46 @@ L$enc_loop8_enter:
.p2align 4
_aesni_decrypt8:
movups (%rcx),%xmm0
- shrl $1,%eax
+ shll $4,%eax
movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
xorps %xmm0,%xmm3
-.byte 102,15,56,222,209
pxor %xmm0,%xmm4
-.byte 102,15,56,222,217
pxor %xmm0,%xmm5
-.byte 102,15,56,222,225
pxor %xmm0,%xmm6
-.byte 102,15,56,222,233
+ leaq 32(%rcx,%rax,1),%rcx
+ negq %rax
+.byte 102,15,56,222,209
pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,222,241
pxor %xmm0,%xmm8
-.byte 102,15,56,222,249
+.byte 102,15,56,222,217
pxor %xmm0,%xmm9
- movups (%rcx),%xmm0
-.byte 102,68,15,56,222,193
-.byte 102,68,15,56,222,201
- movups 16(%rcx),%xmm1
- jmp L$dec_loop8_enter
+ movups (%rcx,%rax,1),%xmm0
+ addq $16,%rax
+ jmp L$dec_loop8_inner
.p2align 4
L$dec_loop8:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %eax
+L$dec_loop8_inner:
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
.byte 102,68,15,56,222,193
.byte 102,68,15,56,222,201
- movups 16(%rcx),%xmm1
L$dec_loop8_enter:
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,222,224
.byte 102,15,56,222,232
.byte 102,15,56,222,240
.byte 102,15,56,222,248
.byte 102,68,15,56,222,192
.byte 102,68,15,56,222,200
- movups (%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz L$dec_loop8
.byte 102,15,56,222,209
@@ -489,7 +542,7 @@ _aesni_ecb_encrypt:
testl %r8d,%r8d
jz L$ecb_decrypt
- cmpq $128,%rdx
+ cmpq $0x80,%rdx
jb L$ecb_enc_tail
movdqu (%rdi),%xmm2
@@ -501,7 +554,7 @@ _aesni_ecb_encrypt:
movdqu 96(%rdi),%xmm8
movdqu 112(%rdi),%xmm9
leaq 128(%rdi),%rdi
- subq $128,%rdx
+ subq $0x80,%rdx
jmp L$ecb_enc_loop8_enter
.p2align 4
L$ecb_enc_loop8:
@@ -529,7 +582,7 @@ L$ecb_enc_loop8_enter:
call _aesni_encrypt8
- subq $128,%rdx
+ subq $0x80,%rdx
jnc L$ecb_enc_loop8
movups %xmm2,(%rsi)
@@ -543,26 +596,27 @@ L$ecb_enc_loop8_enter:
movups %xmm8,96(%rsi)
movups %xmm9,112(%rsi)
leaq 128(%rsi),%rsi
- addq $128,%rdx
+ addq $0x80,%rdx
jz L$ecb_ret
L$ecb_enc_tail:
movups (%rdi),%xmm2
- cmpq $32,%rdx
+ cmpq $0x20,%rdx
jb L$ecb_enc_one
movups 16(%rdi),%xmm3
je L$ecb_enc_two
movups 32(%rdi),%xmm4
- cmpq $64,%rdx
+ cmpq $0x40,%rdx
jb L$ecb_enc_three
movups 48(%rdi),%xmm5
je L$ecb_enc_four
movups 64(%rdi),%xmm6
- cmpq $96,%rdx
+ cmpq $0x60,%rdx
jb L$ecb_enc_five
movups 80(%rdi),%xmm7
je L$ecb_enc_six
movdqu 96(%rdi),%xmm8
+ xorps %xmm9,%xmm9
call _aesni_encrypt8
movups %xmm2,(%rsi)
movups %xmm3,16(%rsi)
@@ -583,14 +637,13 @@ L$oop_enc1_3:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz L$oop_enc1_3
+ jnz L$oop_enc1_3
.byte 102,15,56,221,209
movups %xmm2,(%rsi)
jmp L$ecb_ret
.p2align 4
L$ecb_enc_two:
- xorps %xmm4,%xmm4
- call _aesni_encrypt3
+ call _aesni_encrypt2
movups %xmm2,(%rsi)
movups %xmm3,16(%rsi)
jmp L$ecb_ret
@@ -632,7 +685,7 @@ L$ecb_enc_six:
.p2align 4
L$ecb_decrypt:
- cmpq $128,%rdx
+ cmpq $0x80,%rdx
jb L$ecb_dec_tail
movdqu (%rdi),%xmm2
@@ -644,7 +697,7 @@ L$ecb_decrypt:
movdqu 96(%rdi),%xmm8
movdqu 112(%rdi),%xmm9
leaq 128(%rdi),%rdi
- subq $128,%rdx
+ subq $0x80,%rdx
jmp L$ecb_dec_loop8_enter
.p2align 4
L$ecb_dec_loop8:
@@ -673,49 +726,66 @@ L$ecb_dec_loop8_enter:
call _aesni_decrypt8
movups (%r11),%xmm0
- subq $128,%rdx
+ subq $0x80,%rdx
jnc L$ecb_dec_loop8
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movq %r11,%rcx
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movl %r10d,%eax
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
movups %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
movups %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
movups %xmm7,80(%rsi)
+ pxor %xmm7,%xmm7
movups %xmm8,96(%rsi)
+ pxor %xmm8,%xmm8
movups %xmm9,112(%rsi)
+ pxor %xmm9,%xmm9
leaq 128(%rsi),%rsi
- addq $128,%rdx
+ addq $0x80,%rdx
jz L$ecb_ret
L$ecb_dec_tail:
movups (%rdi),%xmm2
- cmpq $32,%rdx
+ cmpq $0x20,%rdx
jb L$ecb_dec_one
movups 16(%rdi),%xmm3
je L$ecb_dec_two
movups 32(%rdi),%xmm4
- cmpq $64,%rdx
+ cmpq $0x40,%rdx
jb L$ecb_dec_three
movups 48(%rdi),%xmm5
je L$ecb_dec_four
movups 64(%rdi),%xmm6
- cmpq $96,%rdx
+ cmpq $0x60,%rdx
jb L$ecb_dec_five
movups 80(%rdi),%xmm7
je L$ecb_dec_six
movups 96(%rdi),%xmm8
movups (%rcx),%xmm0
+ xorps %xmm9,%xmm9
call _aesni_decrypt8
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
movups %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
movups %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
movups %xmm7,80(%rsi)
+ pxor %xmm7,%xmm7
movups %xmm8,96(%rsi)
+ pxor %xmm8,%xmm8
+ pxor %xmm9,%xmm9
jmp L$ecb_ret
.p2align 4
L$ecb_dec_one:
@@ -728,53 +798,76 @@ L$oop_dec1_4:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz L$oop_dec1_4
+ jnz L$oop_dec1_4
.byte 102,15,56,223,209
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
jmp L$ecb_ret
.p2align 4
L$ecb_dec_two:
- xorps %xmm4,%xmm4
- call _aesni_decrypt3
+ call _aesni_decrypt2
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
jmp L$ecb_ret
.p2align 4
L$ecb_dec_three:
call _aesni_decrypt3
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
jmp L$ecb_ret
.p2align 4
L$ecb_dec_four:
call _aesni_decrypt4
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
movups %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
jmp L$ecb_ret
.p2align 4
L$ecb_dec_five:
xorps %xmm7,%xmm7
call _aesni_decrypt6
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
movups %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
movups %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
jmp L$ecb_ret
.p2align 4
L$ecb_dec_six:
call _aesni_decrypt6
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
movups %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movups %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
movups %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
movups %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
movups %xmm7,80(%rsi)
+ pxor %xmm7,%xmm7
L$ecb_ret:
+ xorps %xmm0,%xmm0
+ pxor %xmm1,%xmm1
.byte 0xf3,0xc3
.globl _aesni_ccm64_encrypt_blocks
@@ -782,56 +875,62 @@ L$ecb_ret:
.p2align 4
_aesni_ccm64_encrypt_blocks:
movl 240(%rcx),%eax
- movdqu (%r8),%xmm9
- movdqa L$increment64(%rip),%xmm6
+ movdqu (%r8),%xmm6
+ movdqa L$increment64(%rip),%xmm9
movdqa L$bswap_mask(%rip),%xmm7
- shrl $1,%eax
+ shll $4,%eax
+ movl $16,%r10d
leaq 0(%rcx),%r11
movdqu (%r9),%xmm3
- movdqa %xmm9,%xmm2
- movl %eax,%r10d
-.byte 102,68,15,56,0,207
+ movdqa %xmm6,%xmm2
+ leaq 32(%rcx,%rax,1),%rcx
+.byte 102,15,56,0,247
+ subq %rax,%r10
jmp L$ccm64_enc_outer
.p2align 4
L$ccm64_enc_outer:
movups (%r11),%xmm0
- movl %r10d,%eax
+ movq %r10,%rax
movups (%rdi),%xmm8
xorps %xmm0,%xmm2
movups 16(%r11),%xmm1
xorps %xmm8,%xmm0
- leaq 32(%r11),%rcx
xorps %xmm0,%xmm3
- movups (%rcx),%xmm0
+ movups 32(%r11),%xmm0
L$ccm64_enc2_loop:
.byte 102,15,56,220,209
- decl %eax
.byte 102,15,56,220,217
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,216
- movups 0(%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz L$ccm64_enc2_loop
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- paddq %xmm6,%xmm9
+ paddq %xmm9,%xmm6
+ decq %rdx
.byte 102,15,56,221,208
.byte 102,15,56,221,216
- decq %rdx
leaq 16(%rdi),%rdi
xorps %xmm2,%xmm8
- movdqa %xmm9,%xmm2
+ movdqa %xmm6,%xmm2
movups %xmm8,(%rsi)
- leaq 16(%rsi),%rsi
.byte 102,15,56,0,215
+ leaq 16(%rsi),%rsi
jnz L$ccm64_enc_outer
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
movups %xmm3,(%r9)
+ pxor %xmm3,%xmm3
+ pxor %xmm8,%xmm8
+ pxor %xmm6,%xmm6
.byte 0xf3,0xc3
.globl _aesni_ccm64_decrypt_blocks
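Per block, L$ccm64_enc_outer runs two cipher invocations through the rounds together: a CTR encryption that produces the payload keystream and a CBC-MAC absorption of the plaintext (the counter is bumped with a 64-bit paddq against L$increment64 and byte-swapped via pshufb against L$bswap_mask; this diff also swaps the xmm6/xmm9 roles). A hedged C restatement of that per-block work, reusing the hypothetical encrypt_block from the first sketch:

#include <emmintrin.h>
#include <wmmintrin.h>

static __m128i encrypt_block(__m128i, const __m128i *, int); /* sketch above */

/* CCM's per-block step: keystream and CBC-MAC computed side by side,
 * as the assembly interleaves them with shared round-key loads. */
static __m128i ccm64_step(__m128i *ctr_blk, __m128i *cmac, __m128i pt,
                          const __m128i *rk, int rounds)
{
    __m128i ks = encrypt_block(*ctr_blk, rk, rounds);            /* keystream */
    *cmac = encrypt_block(_mm_xor_si128(*cmac, pt), rk, rounds); /* CBC-MAC */
    return _mm_xor_si128(pt, ks);                                /* ciphertext */
}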
@@ -839,15 +938,15 @@ L$ccm64_enc2_loop:
.p2align 4
_aesni_ccm64_decrypt_blocks:
movl 240(%rcx),%eax
- movups (%r8),%xmm9
+ movups (%r8),%xmm6
movdqu (%r9),%xmm3
- movdqa L$increment64(%rip),%xmm6
+ movdqa L$increment64(%rip),%xmm9
movdqa L$bswap_mask(%rip),%xmm7
- movaps %xmm9,%xmm2
+ movaps %xmm6,%xmm2
movl %eax,%r10d
movq %rcx,%r11
-.byte 102,68,15,56,0,207
+.byte 102,15,56,0,247
movups (%rcx),%xmm0
movups 16(%rcx),%xmm1
leaq 32(%rcx),%rcx
@@ -857,17 +956,21 @@ L$oop_enc1_5:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz L$oop_enc1_5
+ jnz L$oop_enc1_5
.byte 102,15,56,221,209
+ shll $4,%r10d
+ movl $16,%eax
movups (%rdi),%xmm8
- paddq %xmm6,%xmm9
+ paddq %xmm9,%xmm6
leaq 16(%rdi),%rdi
+ subq %r10,%rax
+ leaq 32(%r11,%r10,1),%rcx
+ movq %rax,%r10
jmp L$ccm64_dec_outer
.p2align 4
L$ccm64_dec_outer:
xorps %xmm2,%xmm8
- movdqa %xmm9,%xmm2
- movl %r10d,%eax
+ movdqa %xmm6,%xmm2
movups %xmm8,(%rsi)
leaq 16(%rsi),%rsi
.byte 102,15,56,0,215
@@ -876,36 +979,36 @@ L$ccm64_dec_outer:
jz L$ccm64_dec_break
movups (%r11),%xmm0
- shrl $1,%eax
+ movq %r10,%rax
movups 16(%r11),%xmm1
xorps %xmm0,%xmm8
- leaq 32(%r11),%rcx
xorps %xmm0,%xmm2
xorps %xmm8,%xmm3
- movups (%rcx),%xmm0
-
+ movups 32(%r11),%xmm0
+ jmp L$ccm64_dec2_loop
+.p2align 4
L$ccm64_dec2_loop:
.byte 102,15,56,220,209
- decl %eax
.byte 102,15,56,220,217
- movups 16(%rcx),%xmm1
+ movups (%rcx,%rax,1),%xmm1
+ addq $32,%rax
.byte 102,15,56,220,208
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,216
- movups 0(%rcx),%xmm0
+ movups -16(%rcx,%rax,1),%xmm0
jnz L$ccm64_dec2_loop
movups (%rdi),%xmm8
- paddq %xmm6,%xmm9
+ paddq %xmm9,%xmm6
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- leaq 16(%rdi),%rdi
.byte 102,15,56,221,208
.byte 102,15,56,221,216
+ leaq 16(%rdi),%rdi
jmp L$ccm64_dec_outer
.p2align 4
L$ccm64_dec_break:
+ movl 240(%r11),%eax
movups (%r11),%xmm0
movups 16(%r11),%xmm1
xorps %xmm0,%xmm8
@@ -916,23 +1019,58 @@ L$oop_enc1_6:
decl %eax
movups (%r11),%xmm1
leaq 16(%r11),%r11
- jnz L$oop_enc1_6
+ jnz L$oop_enc1_6
.byte 102,15,56,221,217
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
movups %xmm3,(%r9)
+ pxor %xmm3,%xmm3
+ pxor %xmm8,%xmm8
+ pxor %xmm6,%xmm6
.byte 0xf3,0xc3
.globl _aesni_ctr32_encrypt_blocks
.p2align 4
_aesni_ctr32_encrypt_blocks:
+ cmpq $1,%rdx
+ jne L$ctr32_bulk
+
+
+
+ movups (%r8),%xmm2
+ movups (%rdi),%xmm3
+ movl 240(%rcx),%edx
+ movups (%rcx),%xmm0
+ movups 16(%rcx),%xmm1
+ leaq 32(%rcx),%rcx
+ xorps %xmm0,%xmm2
+L$oop_enc1_7:
+.byte 102,15,56,220,209
+ decl %edx
+ movups (%rcx),%xmm1
+ leaq 16(%rcx),%rcx
+ jnz L$oop_enc1_7
+.byte 102,15,56,221,209
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ xorps %xmm3,%xmm2
+ pxor %xmm3,%xmm3
+ movups %xmm2,(%rsi)
+ xorps %xmm2,%xmm2
+ jmp L$ctr32_epilogue
+
+.p2align 4
+L$ctr32_bulk:
leaq (%rsp),%rax
pushq %rbp
subq $128,%rsp
andq $-16,%rsp
leaq -8(%rax),%rbp
- cmpq $1,%rdx
- je L$ctr32_one_shortcut
+
+
movdqu (%r8),%xmm2
movdqu (%rcx),%xmm0
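The cmpq $1 shortcut added above lets a single-block CTR call skip the bulk machinery entirely: one block of CTR mode is just the encryption of the counter XORed with the plaintext, with no stack frame or counter expansion, followed by the usual register wipe. As a sketch, reusing the hypothetical encrypt_block from the first example:

#include <emmintrin.h>
#include <wmmintrin.h>

static __m128i encrypt_block(__m128i, const __m128i *, int); /* sketch above */

/* One-block CTR step mirroring the new short-circuit path. */
static __m128i ctr32_one(__m128i counter, __m128i plaintext,
                         const __m128i *rk, int rounds)
{
    return _mm_xor_si128(encrypt_block(counter, rk, rounds), plaintext);
}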
@@ -947,32 +1085,33 @@ _aesni_ctr32_encrypt_blocks:
movdqa %xmm2,64(%rsp)
movdqa %xmm2,80(%rsp)
movdqa %xmm2,96(%rsp)
+ movq %rdx,%r10
movdqa %xmm2,112(%rsp)
- movl 240(%rcx),%eax
-
- leaq 1(%r8),%r9
- leaq 2(%r8),%r10
- bswapl %r9d
- bswapl %r10d
- xorl %r11d,%r9d
- xorl %r11d,%r10d
-.byte 102,65,15,58,34,217,3
- leaq 3(%r8),%r9
+ leaq 1(%r8),%rax
+ leaq 2(%r8),%rdx
+ bswapl %eax
+ bswapl %edx
+ xorl %r11d,%eax
+ xorl %r11d,%edx
+.byte 102,15,58,34,216,3
+ leaq 3(%r8),%rax
movdqa %xmm3,16(%rsp)
-.byte 102,65,15,58,34,226,3
- bswapl %r9d
+.byte 102,15,58,34,226,3
+ bswapl %eax
+ movq %r10,%rdx
leaq 4(%r8),%r10
movdqa %xmm4,32(%rsp)
- xorl %r11d,%r9d
+ xorl %r11d,%eax
bswapl %r10d
-.byte 102,65,15,58,34,233,3
+.byte 102,15,58,34,232,3
xorl %r11d,%r10d
movdqa %xmm5,48(%rsp)
leaq 5(%r8),%r9
movl %r10d,64+12(%rsp)
bswapl %r9d
leaq 6(%r8),%r10
+ movl 240(%rcx),%eax
xorl %r11d,%r9d
bswapl %r10d
movl %r9d,80+12(%rsp)
@@ -980,7 +1119,9 @@ _aesni_ctr32_encrypt_blocks:
leaq 7(%r8),%r9
movl %r10d,96+12(%rsp)
bswapl %r9d
+ movl __gnutls_x86_cpuid_s+4(%rip),%r10d
xorl %r11d,%r9d
+ andl $71303168,%r10d
movl %r9d,112+12(%rsp)
movups 16(%rcx),%xmm1
@@ -991,10 +1132,104 @@ _aesni_ctr32_encrypt_blocks:
cmpq $8,%rdx
jb L$ctr32_tail
+ subq $6,%rdx
+ cmpl $4194304,%r10d
+ je L$ctr32_6x
+
leaq 128(%rcx),%rcx
- subq $8,%rdx
+ subq $2,%rdx
jmp L$ctr32_loop8
+.p2align 4
+L$ctr32_6x:
+ shll $4,%eax
+ movl $48,%r10d
+ bswapl %r11d
+ leaq 32(%rcx,%rax,1),%rcx
+ subq %rax,%r10
+ jmp L$ctr32_loop6
+
+.p2align 4
+L$ctr32_loop6:
+ addl $6,%r8d
+ movups -48(%rcx,%r10,1),%xmm0
+.byte 102,15,56,220,209
+ movl %r8d,%eax
+ xorl %r11d,%eax
+.byte 102,15,56,220,217
+.byte 0x0f,0x38,0xf1,0x44,0x24,12
+ leal 1(%r8),%eax
+.byte 102,15,56,220,225
+ xorl %r11d,%eax
+.byte 0x0f,0x38,0xf1,0x44,0x24,28
+.byte 102,15,56,220,233
+ leal 2(%r8),%eax
+ xorl %r11d,%eax
+.byte 102,15,56,220,241
+.byte 0x0f,0x38,0xf1,0x44,0x24,44
+ leal 3(%r8),%eax
+.byte 102,15,56,220,249
+ movups -32(%rcx,%r10,1),%xmm1
+ xorl %r11d,%eax
+
+.byte 102,15,56,220,208
+.byte 0x0f,0x38,0xf1,0x44,0x24,60
+ leal 4(%r8),%eax
+.byte 102,15,56,220,216
+ xorl %r11d,%eax
+.byte 0x0f,0x38,0xf1,0x44,0x24,76
+.byte 102,15,56,220,224
+ leal 5(%r8),%eax
+ xorl %r11d,%eax
+.byte 102,15,56,220,232
+.byte 0x0f,0x38,0xf1,0x44,0x24,92
+ movq %r10,%rax
+.byte 102,15,56,220,240
+.byte 102,15,56,220,248
+ movups -16(%rcx,%r10,1),%xmm0
+
+ call L$enc_loop6
+
+ movdqu (%rdi),%xmm8
+ movdqu 16(%rdi),%xmm9
+ movdqu 32(%rdi),%xmm10
+ movdqu 48(%rdi),%xmm11
+ movdqu 64(%rdi),%xmm12
+ movdqu 80(%rdi),%xmm13
+ leaq 96(%rdi),%rdi
+ movups -64(%rcx,%r10,1),%xmm1
+ pxor %xmm2,%xmm8
+ movaps 0(%rsp),%xmm2
+ pxor %xmm3,%xmm9
+ movaps 16(%rsp),%xmm3
+ pxor %xmm4,%xmm10
+ movaps 32(%rsp),%xmm4
+ pxor %xmm5,%xmm11
+ movaps 48(%rsp),%xmm5
+ pxor %xmm6,%xmm12
+ movaps 64(%rsp),%xmm6
+ pxor %xmm7,%xmm13
+ movaps 80(%rsp),%xmm7
+ movdqu %xmm8,(%rsi)
+ movdqu %xmm9,16(%rsi)
+ movdqu %xmm10,32(%rsi)
+ movdqu %xmm11,48(%rsi)
+ movdqu %xmm12,64(%rsi)
+ movdqu %xmm13,80(%rsi)
+ leaq 96(%rsi),%rsi
+
+ subq $6,%rdx
+ jnc L$ctr32_loop6
+
+ addq $6,%rdx
+ jz L$ctr32_done
+
+ leal -48(%r10),%eax
+ leaq -80(%rcx,%r10,1),%rcx
+ negl %eax
+ shrl $4,%eax
+ jmp L$ctr32_tail
+
.p2align 5
L$ctr32_loop8:
addl $8,%r8d
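The new L$ctr32_6x path is selected when __gnutls_x86_cpuid_s reports MOVBE without XSAVE (andl $71303168 / cmpl $4194304 isolate ECX bits 22 and 26), which matches the upstream OpenSSL heuristic for Silvermont-class Atom cores; the 0x0f,0x38,0xf1 byte sequences are MOVBE stores that write the incremented counter back in big-endian order without a separate bswap. The counter arithmetic itself reduces to the following (a sketch using a GCC/Clang builtin):

#include <stdint.h>

/* The last dword of an AES-CTR block is big-endian; to add n, swap to
 * native order, increment, and swap back (BSWAP, or a single MOVBE
 * store where the CPU supports it). */
static uint32_t ctr_be_add(uint32_t be_ctr, uint32_t n)
{
    return __builtin_bswap32(__builtin_bswap32(be_ctr) + n);
}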
@@ -1007,6 +1242,7 @@ L$ctr32_loop8:
movups 32-128(%rcx),%xmm0
.byte 102,15,56,220,225
xorl %r11d,%r9d
+ nop
.byte 102,15,56,220,233
movl %r9d,0+12(%rsp)
leaq 1(%r8),%r9
@@ -1015,11 +1251,12 @@ L$ctr32_loop8:
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 48-128(%rcx),%xmm1
+ bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- bswapl %r9d
-.byte 102,15,56,220,224
xorl %r11d,%r9d
+.byte 0x66,0x90
+.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,16+12(%rsp)
leaq 2(%r8),%r9
@@ -1028,11 +1265,12 @@ L$ctr32_loop8:
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 64-128(%rcx),%xmm0
+ bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- bswapl %r9d
-.byte 102,15,56,220,225
xorl %r11d,%r9d
+.byte 0x66,0x90
+.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,32+12(%rsp)
leaq 3(%r8),%r9
@@ -1041,11 +1279,12 @@ L$ctr32_loop8:
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 80-128(%rcx),%xmm1
+ bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- bswapl %r9d
-.byte 102,15,56,220,224
xorl %r11d,%r9d
+.byte 0x66,0x90
+.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,48+12(%rsp)
leaq 4(%r8),%r9
@@ -1054,11 +1293,12 @@ L$ctr32_loop8:
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 96-128(%rcx),%xmm0
+ bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- bswapl %r9d
-.byte 102,15,56,220,225
xorl %r11d,%r9d
+.byte 0x66,0x90
+.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,64+12(%rsp)
leaq 5(%r8),%r9
@@ -1067,11 +1307,12 @@ L$ctr32_loop8:
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 112-128(%rcx),%xmm1
+ bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- bswapl %r9d
-.byte 102,15,56,220,224
xorl %r11d,%r9d
+.byte 0x66,0x90
+.byte 102,15,56,220,224
.byte 102,15,56,220,232
movl %r9d,80+12(%rsp)
leaq 6(%r8),%r9
@@ -1080,11 +1321,12 @@ L$ctr32_loop8:
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 128-128(%rcx),%xmm0
+ bswapl %r9d
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- bswapl %r9d
-.byte 102,15,56,220,225
xorl %r11d,%r9d
+.byte 0x66,0x90
+.byte 102,15,56,220,225
.byte 102,15,56,220,233
movl %r9d,96+12(%rsp)
leaq 7(%r8),%r9
@@ -1093,21 +1335,21 @@ L$ctr32_loop8:
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movups 144-128(%rcx),%xmm1
+ bswapl %r9d
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- bswapl %r9d
.byte 102,15,56,220,224
xorl %r11d,%r9d
+ movdqu 0(%rdi),%xmm10
.byte 102,15,56,220,232
movl %r9d,112+12(%rsp)
+ cmpl $11,%eax
.byte 102,15,56,220,240
.byte 102,15,56,220,248
.byte 102,68,15,56,220,192
- movdqu 0(%rdi),%xmm10
.byte 102,68,15,56,220,200
movups 160-128(%rcx),%xmm0
- cmpl $11,%eax
jb L$ctr32_enc_done
.byte 102,15,56,220,209
@@ -1150,7 +1392,9 @@ L$ctr32_loop8:
.byte 102,68,15,56,220,192
.byte 102,68,15,56,220,200
movups 224-128(%rcx),%xmm0
+ jmp L$ctr32_enc_done
+.p2align 4
L$ctr32_enc_done:
movdqu 16(%rdi),%xmm11
pxor %xmm0,%xmm10
@@ -1162,8 +1406,8 @@ L$ctr32_enc_done:
pxor %xmm0,%xmm13
movdqu 80(%rdi),%xmm15
pxor %xmm0,%xmm14
-.byte 102,15,56,220,209
pxor %xmm0,%xmm15
+.byte 102,15,56,220,209
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
@@ -1172,26 +1416,26 @@ L$ctr32_enc_done:
.byte 102,68,15,56,220,193
.byte 102,68,15,56,220,201
movdqu 96(%rdi),%xmm1
+ leaq 128(%rdi),%rdi
.byte 102,65,15,56,221,210
pxor %xmm0,%xmm1
- movdqu 112(%rdi),%xmm10
- leaq 128(%rdi),%rdi
+ movdqu 112-128(%rdi),%xmm10
.byte 102,65,15,56,221,219
pxor %xmm0,%xmm10
movdqa 0(%rsp),%xmm11
.byte 102,65,15,56,221,228
- movdqa 16(%rsp),%xmm12
.byte 102,65,15,56,221,237
+ movdqa 16(%rsp),%xmm12
movdqa 32(%rsp),%xmm13
.byte 102,65,15,56,221,246
- movdqa 48(%rsp),%xmm14
.byte 102,65,15,56,221,255
+ movdqa 48(%rsp),%xmm14
movdqa 64(%rsp),%xmm15
.byte 102,68,15,56,221,193
movdqa 80(%rsp),%xmm0
-.byte 102,69,15,56,221,202
movups 16-128(%rcx),%xmm1
+.byte 102,69,15,56,221,202
movups %xmm2,(%rsi)
movdqa %xmm11,%xmm2
@@ -1217,29 +1461,32 @@ L$ctr32_enc_done:
leaq -128(%rcx),%rcx
L$ctr32_tail:
+
+
leaq 16(%rcx),%rcx
cmpq $4,%rdx
jb L$ctr32_loop3
je L$ctr32_loop4
+
+ shll $4,%eax
movdqa 96(%rsp),%xmm8
pxor %xmm9,%xmm9
movups 16(%rcx),%xmm0
.byte 102,15,56,220,209
- leaq 16(%rcx),%rcx
.byte 102,15,56,220,217
- shrl $1,%eax
+ leaq 32-16(%rcx,%rax,1),%rcx
+ negq %rax
.byte 102,15,56,220,225
- decl %eax
-.byte 102,15,56,220,233
+ addq $16,%rax
movups (%rdi),%xmm10
+.byte 102,15,56,220,233
.byte 102,15,56,220,241
movups 16(%rdi),%xmm11
-.byte 102,15,56,220,249
movups 32(%rdi),%xmm12
+.byte 102,15,56,220,249
.byte 102,68,15,56,220,193
- movups 16(%rcx),%xmm1
call L$enc_loop8_enter
@@ -1272,19 +1519,19 @@ L$ctr32_tail:
L$ctr32_loop4:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
+ decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
.byte 102,15,56,220,233
movups (%rcx),%xmm1
- decl %eax
jnz L$ctr32_loop4
.byte 102,15,56,221,209
- movups (%rdi),%xmm10
.byte 102,15,56,221,217
+ movups (%rdi),%xmm10
movups 16(%rdi),%xmm11
.byte 102,15,56,221,225
- movups 32(%rdi),%xmm12
.byte 102,15,56,221,233
+ movups 32(%rdi),%xmm12
movups 48(%rdi),%xmm13
xorps %xmm10,%xmm2
@@ -1301,10 +1548,10 @@ L$ctr32_loop4:
L$ctr32_loop3:
.byte 102,15,56,220,209
leaq 16(%rcx),%rcx
+ decl %eax
.byte 102,15,56,220,217
.byte 102,15,56,220,225
movups (%rcx),%xmm1
- decl %eax
jnz L$ctr32_loop3
.byte 102,15,56,221,209
.byte 102,15,56,221,217
@@ -1324,30 +1571,33 @@ L$ctr32_loop3:
movups 32(%rdi),%xmm12
xorps %xmm12,%xmm4
movups %xmm4,32(%rsi)
- jmp L$ctr32_done
-.p2align 4
-L$ctr32_one_shortcut:
- movups (%r8),%xmm2
- movups (%rdi),%xmm10
- movl 240(%rcx),%eax
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-L$oop_enc1_7:
-.byte 102,15,56,220,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz L$oop_enc1_7
-.byte 102,15,56,221,209
- xorps %xmm10,%xmm2
- movups %xmm2,(%rsi)
- jmp L$ctr32_done
-
-.p2align 4
L$ctr32_done:
+ xorps %xmm0,%xmm0
+ xorl %r11d,%r11d
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
+ movaps %xmm0,0(%rsp)
+ pxor %xmm8,%xmm8
+ movaps %xmm0,16(%rsp)
+ pxor %xmm9,%xmm9
+ movaps %xmm0,32(%rsp)
+ pxor %xmm10,%xmm10
+ movaps %xmm0,48(%rsp)
+ pxor %xmm11,%xmm11
+ movaps %xmm0,64(%rsp)
+ pxor %xmm12,%xmm12
+ movaps %xmm0,80(%rsp)
+ pxor %xmm13,%xmm13
+ movaps %xmm0,96(%rsp)
+ pxor %xmm14,%xmm14
+ movaps %xmm0,112(%rsp)
+ pxor %xmm15,%xmm15
leaq (%rbp),%rsp
popq %rbp
L$ctr32_epilogue:
@@ -1359,245 +1609,285 @@ L$ctr32_epilogue:
_aesni_xts_encrypt:
leaq (%rsp),%rax
pushq %rbp
- subq $96,%rsp
+ subq $112,%rsp
andq $-16,%rsp
leaq -8(%rax),%rbp
- movups (%r9),%xmm15
+ movups (%r9),%xmm2
movl 240(%r8),%eax
movl 240(%rcx),%r10d
movups (%r8),%xmm0
movups 16(%r8),%xmm1
leaq 32(%r8),%r8
- xorps %xmm0,%xmm15
+ xorps %xmm0,%xmm2
L$oop_enc1_8:
-.byte 102,68,15,56,220,249
+.byte 102,15,56,220,209
decl %eax
movups (%r8),%xmm1
leaq 16(%r8),%r8
- jnz L$oop_enc1_8
-.byte 102,68,15,56,221,249
+ jnz L$oop_enc1_8
+.byte 102,15,56,221,209
+ movups (%rcx),%xmm0
movq %rcx,%r11
movl %r10d,%eax
+ shll $4,%r10d
movq %rdx,%r9
andq $-16,%rdx
+ movups 16(%rcx,%r10,1),%xmm1
+
movdqa L$xts_magic(%rip),%xmm8
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ movdqa %xmm2,%xmm15
+ pshufd $0x5f,%xmm2,%xmm9
+ pxor %xmm0,%xmm1
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm10
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm10
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm11
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm11
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm12
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm12
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm13
+ psrad $31,%xmm14
+ paddq %xmm15,%xmm15
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm13
+ pxor %xmm14,%xmm15
+ movdqa %xmm15,%xmm14
+ psrad $31,%xmm9
paddq %xmm15,%xmm15
pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
+ pxor %xmm0,%xmm14
pxor %xmm9,%xmm15
+ movaps %xmm1,96(%rsp)
+
subq $96,%rdx
jc L$xts_enc_short
- shrl $1,%eax
- subl $1,%eax
- movl %eax,%r10d
+ movl $16+96,%eax
+ leaq 32(%r11,%r10,1),%rcx
+ subq %r10,%rax
+ movups 16(%r11),%xmm1
+ movq %rax,%r10
+ leaq L$xts_magic(%rip),%r8
jmp L$xts_enc_grandloop
-.p2align 4
+.p2align 5
L$xts_enc_grandloop:
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
movdqu 0(%rdi),%xmm2
- pand %xmm8,%xmm9
+ movdqa %xmm0,%xmm8
movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
- movdqu 32(%rdi),%xmm4
pxor %xmm10,%xmm2
- movdqu 48(%rdi),%xmm5
+ movdqu 32(%rdi),%xmm4
pxor %xmm11,%xmm3
- movdqu 64(%rdi),%xmm6
+.byte 102,15,56,220,209
+ movdqu 48(%rdi),%xmm5
pxor %xmm12,%xmm4
- movdqu 80(%rdi),%xmm7
- leaq 96(%rdi),%rdi
+.byte 102,15,56,220,217
+ movdqu 64(%rdi),%xmm6
pxor %xmm13,%xmm5
- movups (%r11),%xmm0
+.byte 102,15,56,220,225
+ movdqu 80(%rdi),%xmm7
+ pxor %xmm15,%xmm8
+ movdqa 96(%rsp),%xmm9
pxor %xmm14,%xmm6
- pxor %xmm15,%xmm7
-
-
+.byte 102,15,56,220,233
+ movups 32(%r11),%xmm0
+ leaq 96(%rdi),%rdi
+ pxor %xmm8,%xmm7
- movups 16(%r11),%xmm1
- pxor %xmm0,%xmm2
- pxor %xmm0,%xmm3
+ pxor %xmm9,%xmm10
+.byte 102,15,56,220,241
+ pxor %xmm9,%xmm11
movdqa %xmm10,0(%rsp)
-.byte 102,15,56,220,209
- leaq 32(%r11),%rcx
- pxor %xmm0,%xmm4
+.byte 102,15,56,220,249
+ movups 48(%r11),%xmm1
+ pxor %xmm9,%xmm12
+
+.byte 102,15,56,220,208
+ pxor %xmm9,%xmm13
movdqa %xmm11,16(%rsp)
-.byte 102,15,56,220,217
- pxor %xmm0,%xmm5
+.byte 102,15,56,220,216
+ pxor %xmm9,%xmm14
movdqa %xmm12,32(%rsp)
-.byte 102,15,56,220,225
- pxor %xmm0,%xmm6
- movdqa %xmm13,48(%rsp)
-.byte 102,15,56,220,233
- pxor %xmm0,%xmm7
- movups (%rcx),%xmm0
- decl %eax
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+ pxor %xmm9,%xmm8
movdqa %xmm14,64(%rsp)
-.byte 102,15,56,220,241
- movdqa %xmm15,80(%rsp)
-.byte 102,15,56,220,249
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- jmp L$xts_enc_loop6_enter
-
-.p2align 4
+.byte 102,15,56,220,240
+.byte 102,15,56,220,248
+ movups 64(%r11),%xmm0
+ movdqa %xmm8,80(%rsp)
+ pshufd $0x5f,%xmm15,%xmm9
+ jmp L$xts_enc_loop6
+.p2align 5
L$xts_enc_loop6:
.byte 102,15,56,220,209
.byte 102,15,56,220,217
- decl %eax
.byte 102,15,56,220,225
.byte 102,15,56,220,233
.byte 102,15,56,220,241
.byte 102,15,56,220,249
-L$xts_enc_loop6_enter:
- movups 16(%rcx),%xmm1
+ movups -64(%rcx,%rax,1),%xmm1
+ addq $32,%rax
+
.byte 102,15,56,220,208
.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,220,224
.byte 102,15,56,220,232
.byte 102,15,56,220,240
.byte 102,15,56,220,248
- movups (%rcx),%xmm0
+ movups -80(%rcx,%rax,1),%xmm0
jnz L$xts_enc_loop6
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- paddq %xmm15,%xmm15
+ movdqa (%r8),%xmm8
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
.byte 102,15,56,220,209
- pand %xmm8,%xmm9
+ paddq %xmm15,%xmm15
+ psrad $31,%xmm14
.byte 102,15,56,220,217
- pcmpgtd %xmm15,%xmm14
+ pand %xmm8,%xmm14
+ movups (%r11),%xmm10
.byte 102,15,56,220,225
- pxor %xmm9,%xmm15
.byte 102,15,56,220,233
.byte 102,15,56,220,241
+ pxor %xmm14,%xmm15
+ movaps %xmm10,%xmm11
.byte 102,15,56,220,249
- movups 16(%rcx),%xmm1
+ movups -64(%rcx),%xmm1
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm10
- paddq %xmm15,%xmm15
+ movdqa %xmm9,%xmm14
.byte 102,15,56,220,208
- pand %xmm8,%xmm9
+ paddd %xmm9,%xmm9
+ pxor %xmm15,%xmm10
.byte 102,15,56,220,216
- pcmpgtd %xmm15,%xmm14
+ psrad $31,%xmm14
+ paddq %xmm15,%xmm15
.byte 102,15,56,220,224
- pxor %xmm9,%xmm15
.byte 102,15,56,220,232
+ pand %xmm8,%xmm14
+ movaps %xmm11,%xmm12
.byte 102,15,56,220,240
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
.byte 102,15,56,220,248
- movups 32(%rcx),%xmm0
+ movups -48(%rcx),%xmm0
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm11
- paddq %xmm15,%xmm15
+ paddd %xmm9,%xmm9
.byte 102,15,56,220,209
- pand %xmm8,%xmm9
+ pxor %xmm15,%xmm11
+ psrad $31,%xmm14
.byte 102,15,56,220,217
- pcmpgtd %xmm15,%xmm14
+ paddq %xmm15,%xmm15
+ pand %xmm8,%xmm14
.byte 102,15,56,220,225
- pxor %xmm9,%xmm15
.byte 102,15,56,220,233
+ movdqa %xmm13,48(%rsp)
+ pxor %xmm14,%xmm15
.byte 102,15,56,220,241
+ movaps %xmm12,%xmm13
+ movdqa %xmm9,%xmm14
.byte 102,15,56,220,249
+ movups -32(%rcx),%xmm1
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm12
+ paddd %xmm9,%xmm9
+.byte 102,15,56,220,208
+ pxor %xmm15,%xmm12
+ psrad $31,%xmm14
+.byte 102,15,56,220,216
paddq %xmm15,%xmm15
-.byte 102,15,56,221,208
- pand %xmm8,%xmm9
-.byte 102,15,56,221,216
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,221,224
- pxor %xmm9,%xmm15
-.byte 102,15,56,221,232
-.byte 102,15,56,221,240
-.byte 102,15,56,221,248
+ pand %xmm8,%xmm14
+.byte 102,15,56,220,224
+.byte 102,15,56,220,232
+.byte 102,15,56,220,240
+ pxor %xmm14,%xmm15
+ movaps %xmm13,%xmm14
+.byte 102,15,56,220,248
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm13
+ movdqa %xmm9,%xmm0
+ paddd %xmm9,%xmm9
+.byte 102,15,56,220,209
+ pxor %xmm15,%xmm13
+ psrad $31,%xmm0
+.byte 102,15,56,220,217
+ paddq %xmm15,%xmm15
+ pand %xmm8,%xmm0
+.byte 102,15,56,220,225
+.byte 102,15,56,220,233
+ pxor %xmm0,%xmm15
+ movups (%r11),%xmm0
+.byte 102,15,56,220,241
+.byte 102,15,56,220,249
+ movups 16(%r11),%xmm1
+
+ pxor %xmm15,%xmm14
+.byte 102,15,56,221,84,36,0
+ psrad $31,%xmm9
paddq %xmm15,%xmm15
- xorps 0(%rsp),%xmm2
+.byte 102,15,56,221,92,36,16
+.byte 102,15,56,221,100,36,32
pand %xmm8,%xmm9
- xorps 16(%rsp),%xmm3
- pcmpgtd %xmm15,%xmm14
+ movq %r10,%rax
+.byte 102,15,56,221,108,36,48
+.byte 102,15,56,221,116,36,64
+.byte 102,15,56,221,124,36,80
pxor %xmm9,%xmm15
- xorps 32(%rsp),%xmm4
- movups %xmm2,0(%rsi)
- xorps 48(%rsp),%xmm5
- movups %xmm3,16(%rsi)
- xorps 64(%rsp),%xmm6
- movups %xmm4,32(%rsi)
- xorps 80(%rsp),%xmm7
- movups %xmm5,48(%rsi)
- movl %r10d,%eax
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
leaq 96(%rsi),%rsi
+ movups %xmm2,-96(%rsi)
+ movups %xmm3,-80(%rsi)
+ movups %xmm4,-64(%rsi)
+ movups %xmm5,-48(%rsi)
+ movups %xmm6,-32(%rsi)
+ movups %xmm7,-16(%rsi)
subq $96,%rdx
jnc L$xts_enc_grandloop
- leal 3(%rax,%rax,1),%eax
+ movl $16+96,%eax
+ subl %r10d,%eax
movq %r11,%rcx
- movl %eax,%r10d
+ shrl $4,%eax
L$xts_enc_short:
+
+ movl %eax,%r10d
+ pxor %xmm0,%xmm10
addq $96,%rdx
jz L$xts_enc_done
- cmpq $32,%rdx
+ pxor %xmm0,%xmm11
+ cmpq $0x20,%rdx
jb L$xts_enc_one
+ pxor %xmm0,%xmm12
je L$xts_enc_two
- cmpq $64,%rdx
+ pxor %xmm0,%xmm13
+ cmpq $0x40,%rdx
jb L$xts_enc_three
+ pxor %xmm0,%xmm14
je L$xts_enc_four
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
movdqu (%rdi),%xmm2
- pand %xmm8,%xmm9
movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
movdqu 32(%rdi),%xmm4
pxor %xmm10,%xmm2
movdqu 48(%rdi),%xmm5
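The largest change in the XTS path is the tweak schedule: the old pcmpgtd/pshufd $19 carry computation is replaced by pshufd $0x5f + psrad $31, which derives both carry masks, the GF(2^128) reduction constant 0x87 for the low half and the bit-63-to-bit-64 carry for the high half, from one broadcast of the sign dwords, and the six tweaks are precomputed and pre-folded with the first round key. One tweak-doubling step, sketched with SSE2 intrinsics (function name hypothetical):

#include <emmintrin.h>

/* Multiply the 128-bit XTS tweak by x in GF(2^128), mirroring the new
 * pshufd $0x5f / psrad $31 / pand / paddq / pxor sequence. */
static __m128i xts_mul_x(__m128i tweak)
{
    /* memory layout matches L$xts_magic: .long 0x87,0,1,0 */
    const __m128i magic = _mm_set_epi32(0, 1, 0, 0x87);
    __m128i hi    = _mm_shuffle_epi32(tweak, 0x5f);  /* broadcast sign dwords */
    __m128i carry = _mm_and_si128(_mm_srai_epi32(hi, 31), magic);
    tweak = _mm_add_epi64(tweak, tweak);             /* shift each half left by 1 */
    return _mm_xor_si128(tweak, carry);              /* fold both carries back in */
}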
@@ -1607,6 +1897,7 @@ L$xts_enc_short:
pxor %xmm12,%xmm4
pxor %xmm13,%xmm5
pxor %xmm14,%xmm6
+ pxor %xmm7,%xmm7
call _aesni_encrypt6
@@ -1638,7 +1929,7 @@ L$oop_enc1_9:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz L$oop_enc1_9
+ jnz L$oop_enc1_9
.byte 102,15,56,221,209
xorps %xmm10,%xmm2
movdqa %xmm11,%xmm10
@@ -1654,7 +1945,7 @@ L$xts_enc_two:
xorps %xmm10,%xmm2
xorps %xmm11,%xmm3
- call _aesni_encrypt3
+ call _aesni_encrypt2
xorps %xmm10,%xmm2
movdqa %xmm12,%xmm10
@@ -1700,15 +1991,15 @@ L$xts_enc_four:
call _aesni_encrypt4
- xorps %xmm10,%xmm2
- movdqa %xmm15,%xmm10
- xorps %xmm11,%xmm3
- xorps %xmm12,%xmm4
- movups %xmm2,(%rsi)
- xorps %xmm13,%xmm5
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
+ pxor %xmm10,%xmm2
+ movdqa %xmm14,%xmm10
+ pxor %xmm11,%xmm3
+ pxor %xmm12,%xmm4
+ movdqu %xmm2,(%rsi)
+ pxor %xmm13,%xmm5
+ movdqu %xmm3,16(%rsi)
+ movdqu %xmm4,32(%rsi)
+ movdqu %xmm5,48(%rsi)
leaq 64(%rsi),%rsi
jmp L$xts_enc_done
@@ -1743,12 +2034,35 @@ L$oop_enc1_10:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz L$oop_enc1_10
+ jnz L$oop_enc1_10
.byte 102,15,56,221,209
xorps %xmm10,%xmm2
movups %xmm2,-16(%rsi)
L$xts_enc_ret:
+ xorps %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
+ movaps %xmm0,0(%rsp)
+ pxor %xmm8,%xmm8
+ movaps %xmm0,16(%rsp)
+ pxor %xmm9,%xmm9
+ movaps %xmm0,32(%rsp)
+ pxor %xmm10,%xmm10
+ movaps %xmm0,48(%rsp)
+ pxor %xmm11,%xmm11
+ movaps %xmm0,64(%rsp)
+ pxor %xmm12,%xmm12
+ movaps %xmm0,80(%rsp)
+ pxor %xmm13,%xmm13
+ movaps %xmm0,96(%rsp)
+ pxor %xmm14,%xmm14
+ pxor %xmm15,%xmm15
leaq (%rbp),%rsp
popq %rbp
L$xts_enc_epilogue:
@@ -1760,251 +2074,291 @@ L$xts_enc_epilogue:
_aesni_xts_decrypt:
leaq (%rsp),%rax
pushq %rbp
- subq $96,%rsp
+ subq $112,%rsp
andq $-16,%rsp
leaq -8(%rax),%rbp
- movups (%r9),%xmm15
+ movups (%r9),%xmm2
movl 240(%r8),%eax
movl 240(%rcx),%r10d
movups (%r8),%xmm0
movups 16(%r8),%xmm1
leaq 32(%r8),%r8
- xorps %xmm0,%xmm15
+ xorps %xmm0,%xmm2
L$oop_enc1_11:
-.byte 102,68,15,56,220,249
+.byte 102,15,56,220,209
decl %eax
movups (%r8),%xmm1
leaq 16(%r8),%r8
- jnz L$oop_enc1_11
-.byte 102,68,15,56,221,249
+ jnz L$oop_enc1_11
+.byte 102,15,56,221,209
xorl %eax,%eax
testq $15,%rdx
setnz %al
shlq $4,%rax
subq %rax,%rdx
+ movups (%rcx),%xmm0
movq %rcx,%r11
movl %r10d,%eax
+ shll $4,%r10d
movq %rdx,%r9
andq $-16,%rdx
+ movups 16(%rcx,%r10,1),%xmm1
+
movdqa L$xts_magic(%rip),%xmm8
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ movdqa %xmm2,%xmm15
+ pshufd $0x5f,%xmm2,%xmm9
+ pxor %xmm0,%xmm1
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm10
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm10
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm11
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm11
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm12
+ psrad $31,%xmm14
paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm12
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
movdqa %xmm15,%xmm13
+ psrad $31,%xmm14
+ paddq %xmm15,%xmm15
+ pand %xmm8,%xmm14
+ pxor %xmm0,%xmm13
+ pxor %xmm14,%xmm15
+ movdqa %xmm15,%xmm14
+ psrad $31,%xmm9
paddq %xmm15,%xmm15
pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
+ pxor %xmm0,%xmm14
pxor %xmm9,%xmm15
+ movaps %xmm1,96(%rsp)
+
subq $96,%rdx
jc L$xts_dec_short
- shrl $1,%eax
- subl $1,%eax
- movl %eax,%r10d
+ movl $16+96,%eax
+ leaq 32(%r11,%r10,1),%rcx
+ subq %r10,%rax
+ movups 16(%r11),%xmm1
+ movq %rax,%r10
+ leaq L$xts_magic(%rip),%r8
jmp L$xts_dec_grandloop
-.p2align 4
+.p2align 5
L$xts_dec_grandloop:
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
movdqu 0(%rdi),%xmm2
- pand %xmm8,%xmm9
+ movdqa %xmm0,%xmm8
movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
- movdqu 32(%rdi),%xmm4
pxor %xmm10,%xmm2
- movdqu 48(%rdi),%xmm5
+ movdqu 32(%rdi),%xmm4
pxor %xmm11,%xmm3
- movdqu 64(%rdi),%xmm6
+.byte 102,15,56,222,209
+ movdqu 48(%rdi),%xmm5
pxor %xmm12,%xmm4
- movdqu 80(%rdi),%xmm7
- leaq 96(%rdi),%rdi
+.byte 102,15,56,222,217
+ movdqu 64(%rdi),%xmm6
pxor %xmm13,%xmm5
- movups (%r11),%xmm0
+.byte 102,15,56,222,225
+ movdqu 80(%rdi),%xmm7
+ pxor %xmm15,%xmm8
+ movdqa 96(%rsp),%xmm9
pxor %xmm14,%xmm6
- pxor %xmm15,%xmm7
-
-
+.byte 102,15,56,222,233
+ movups 32(%r11),%xmm0
+ leaq 96(%rdi),%rdi
+ pxor %xmm8,%xmm7
- movups 16(%r11),%xmm1
- pxor %xmm0,%xmm2
- pxor %xmm0,%xmm3
+ pxor %xmm9,%xmm10
+.byte 102,15,56,222,241
+ pxor %xmm9,%xmm11
movdqa %xmm10,0(%rsp)
-.byte 102,15,56,222,209
- leaq 32(%r11),%rcx
- pxor %xmm0,%xmm4
+.byte 102,15,56,222,249
+ movups 48(%r11),%xmm1
+ pxor %xmm9,%xmm12
+
+.byte 102,15,56,222,208
+ pxor %xmm9,%xmm13
movdqa %xmm11,16(%rsp)
-.byte 102,15,56,222,217
- pxor %xmm0,%xmm5
+.byte 102,15,56,222,216
+ pxor %xmm9,%xmm14
movdqa %xmm12,32(%rsp)
-.byte 102,15,56,222,225
- pxor %xmm0,%xmm6
- movdqa %xmm13,48(%rsp)
-.byte 102,15,56,222,233
- pxor %xmm0,%xmm7
- movups (%rcx),%xmm0
- decl %eax
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+ pxor %xmm9,%xmm8
movdqa %xmm14,64(%rsp)
-.byte 102,15,56,222,241
- movdqa %xmm15,80(%rsp)
-.byte 102,15,56,222,249
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- jmp L$xts_dec_loop6_enter
-
-.p2align 4
+.byte 102,15,56,222,240
+.byte 102,15,56,222,248
+ movups 64(%r11),%xmm0
+ movdqa %xmm8,80(%rsp)
+ pshufd $0x5f,%xmm15,%xmm9
+ jmp L$xts_dec_loop6
+.p2align 5
L$xts_dec_loop6:
.byte 102,15,56,222,209
.byte 102,15,56,222,217
- decl %eax
.byte 102,15,56,222,225
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
-L$xts_dec_loop6_enter:
- movups 16(%rcx),%xmm1
+ movups -64(%rcx,%rax,1),%xmm1
+ addq $32,%rax
+
.byte 102,15,56,222,208
.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
.byte 102,15,56,222,224
.byte 102,15,56,222,232
.byte 102,15,56,222,240
.byte 102,15,56,222,248
- movups (%rcx),%xmm0
+ movups -80(%rcx,%rax,1),%xmm0
jnz L$xts_dec_loop6
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- paddq %xmm15,%xmm15
+ movdqa (%r8),%xmm8
+ movdqa %xmm9,%xmm14
+ paddd %xmm9,%xmm9
.byte 102,15,56,222,209
- pand %xmm8,%xmm9
+ paddq %xmm15,%xmm15
+ psrad $31,%xmm14
.byte 102,15,56,222,217
- pcmpgtd %xmm15,%xmm14
+ pand %xmm8,%xmm14
+ movups (%r11),%xmm10
.byte 102,15,56,222,225
- pxor %xmm9,%xmm15
.byte 102,15,56,222,233
.byte 102,15,56,222,241
+ pxor %xmm14,%xmm15
+ movaps %xmm10,%xmm11
.byte 102,15,56,222,249
- movups 16(%rcx),%xmm1
+ movups -64(%rcx),%xmm1
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm10
- paddq %xmm15,%xmm15
+ movdqa %xmm9,%xmm14
.byte 102,15,56,222,208
- pand %xmm8,%xmm9
+ paddd %xmm9,%xmm9
+ pxor %xmm15,%xmm10
.byte 102,15,56,222,216
- pcmpgtd %xmm15,%xmm14
+ psrad $31,%xmm14
+ paddq %xmm15,%xmm15
.byte 102,15,56,222,224
- pxor %xmm9,%xmm15
.byte 102,15,56,222,232
+ pand %xmm8,%xmm14
+ movaps %xmm11,%xmm12
.byte 102,15,56,222,240
+ pxor %xmm14,%xmm15
+ movdqa %xmm9,%xmm14
.byte 102,15,56,222,248
- movups 32(%rcx),%xmm0
+ movups -48(%rcx),%xmm0
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm11
- paddq %xmm15,%xmm15
+ paddd %xmm9,%xmm9
.byte 102,15,56,222,209
- pand %xmm8,%xmm9
+ pxor %xmm15,%xmm11
+ psrad $31,%xmm14
.byte 102,15,56,222,217
- pcmpgtd %xmm15,%xmm14
+ paddq %xmm15,%xmm15
+ pand %xmm8,%xmm14
.byte 102,15,56,222,225
- pxor %xmm9,%xmm15
.byte 102,15,56,222,233
+ movdqa %xmm13,48(%rsp)
+ pxor %xmm14,%xmm15
.byte 102,15,56,222,241
+ movaps %xmm12,%xmm13
+ movdqa %xmm9,%xmm14
.byte 102,15,56,222,249
+ movups -32(%rcx),%xmm1
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm12
+ paddd %xmm9,%xmm9
+.byte 102,15,56,222,208
+ pxor %xmm15,%xmm12
+ psrad $31,%xmm14
+.byte 102,15,56,222,216
paddq %xmm15,%xmm15
-.byte 102,15,56,223,208
- pand %xmm8,%xmm9
-.byte 102,15,56,223,216
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,223,224
- pxor %xmm9,%xmm15
-.byte 102,15,56,223,232
-.byte 102,15,56,223,240
-.byte 102,15,56,223,248
+ pand %xmm8,%xmm14
+.byte 102,15,56,222,224
+.byte 102,15,56,222,232
+.byte 102,15,56,222,240
+ pxor %xmm14,%xmm15
+ movaps %xmm13,%xmm14
+.byte 102,15,56,222,248
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm13
+ movdqa %xmm9,%xmm0
+ paddd %xmm9,%xmm9
+.byte 102,15,56,222,209
+ pxor %xmm15,%xmm13
+ psrad $31,%xmm0
+.byte 102,15,56,222,217
paddq %xmm15,%xmm15
- xorps 0(%rsp),%xmm2
+ pand %xmm8,%xmm0
+.byte 102,15,56,222,225
+.byte 102,15,56,222,233
+ pxor %xmm0,%xmm15
+ movups (%r11),%xmm0
+.byte 102,15,56,222,241
+.byte 102,15,56,222,249
+ movups 16(%r11),%xmm1
+
+ pxor %xmm15,%xmm14
+.byte 102,15,56,223,84,36,0
+ psrad $31,%xmm9
+ paddq %xmm15,%xmm15
+.byte 102,15,56,223,92,36,16
+.byte 102,15,56,223,100,36,32
pand %xmm8,%xmm9
- xorps 16(%rsp),%xmm3
- pcmpgtd %xmm15,%xmm14
+ movq %r10,%rax
+.byte 102,15,56,223,108,36,48
+.byte 102,15,56,223,116,36,64
+.byte 102,15,56,223,124,36,80
pxor %xmm9,%xmm15
- xorps 32(%rsp),%xmm4
- movups %xmm2,0(%rsi)
- xorps 48(%rsp),%xmm5
- movups %xmm3,16(%rsi)
- xorps 64(%rsp),%xmm6
- movups %xmm4,32(%rsi)
- xorps 80(%rsp),%xmm7
- movups %xmm5,48(%rsi)
- movl %r10d,%eax
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
leaq 96(%rsi),%rsi
+ movups %xmm2,-96(%rsi)
+ movups %xmm3,-80(%rsi)
+ movups %xmm4,-64(%rsi)
+ movups %xmm5,-48(%rsi)
+ movups %xmm6,-32(%rsi)
+ movups %xmm7,-16(%rsi)
subq $96,%rdx
jnc L$xts_dec_grandloop
- leal 3(%rax,%rax,1),%eax
+ movl $16+96,%eax
+ subl %r10d,%eax
movq %r11,%rcx
- movl %eax,%r10d
+ shrl $4,%eax
L$xts_dec_short:
+
+ movl %eax,%r10d
+ pxor %xmm0,%xmm10
+ pxor %xmm0,%xmm11
addq $96,%rdx
jz L$xts_dec_done
- cmpq $32,%rdx
+ pxor %xmm0,%xmm12
+ cmpq $0x20,%rdx
jb L$xts_dec_one
+ pxor %xmm0,%xmm13
je L$xts_dec_two
- cmpq $64,%rdx
+ pxor %xmm0,%xmm14
+ cmpq $0x40,%rdx
jb L$xts_dec_three
je L$xts_dec_four
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
movdqu (%rdi),%xmm2
- pand %xmm8,%xmm9
movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
movdqu 32(%rdi),%xmm4
pxor %xmm10,%xmm2
movdqu 48(%rdi),%xmm5
@@ -2030,7 +2384,7 @@ L$xts_dec_short:
pcmpgtd %xmm15,%xmm14
movdqu %xmm6,64(%rsi)
leaq 80(%rsi),%rsi
- pshufd $19,%xmm14,%xmm11
+ pshufd $0x13,%xmm14,%xmm11
andq $15,%r9
jz L$xts_dec_ret
@@ -2054,7 +2408,7 @@ L$oop_dec1_12:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz L$oop_dec1_12
+ jnz L$oop_dec1_12
.byte 102,15,56,223,209
xorps %xmm10,%xmm2
movdqa %xmm11,%xmm10
@@ -2071,7 +2425,7 @@ L$xts_dec_two:
xorps %xmm10,%xmm2
xorps %xmm11,%xmm3
- call _aesni_decrypt3
+ call _aesni_decrypt2
xorps %xmm10,%xmm2
movdqa %xmm12,%xmm10
@@ -2097,7 +2451,7 @@ L$xts_dec_three:
xorps %xmm10,%xmm2
movdqa %xmm13,%xmm10
xorps %xmm11,%xmm3
- movdqa %xmm15,%xmm11
+ movdqa %xmm14,%xmm11
xorps %xmm12,%xmm4
movups %xmm2,(%rsi)
movups %xmm3,16(%rsi)
@@ -2107,14 +2461,8 @@ L$xts_dec_three:
.p2align 4
L$xts_dec_four:
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
movups (%rdi),%xmm2
- pand %xmm8,%xmm9
movups 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
movups 32(%rdi),%xmm4
xorps %xmm10,%xmm2
movups 48(%rdi),%xmm5
@@ -2125,16 +2473,16 @@ L$xts_dec_four:
call _aesni_decrypt4
- xorps %xmm10,%xmm2
+ pxor %xmm10,%xmm2
movdqa %xmm14,%xmm10
- xorps %xmm11,%xmm3
+ pxor %xmm11,%xmm3
movdqa %xmm15,%xmm11
- xorps %xmm12,%xmm4
- movups %xmm2,(%rsi)
- xorps %xmm13,%xmm5
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
+ pxor %xmm12,%xmm4
+ movdqu %xmm2,(%rsi)
+ pxor %xmm13,%xmm5
+ movdqu %xmm3,16(%rsi)
+ movdqu %xmm4,32(%rsi)
+ movdqu %xmm5,48(%rsi)
leaq 64(%rsi),%rsi
jmp L$xts_dec_done
@@ -2158,7 +2506,7 @@ L$oop_dec1_13:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz L$oop_dec1_13
+ jnz L$oop_dec1_13
.byte 102,15,56,223,209
xorps %xmm11,%xmm2
movups %xmm2,(%rsi)
@@ -2188,12 +2536,35 @@ L$oop_dec1_14:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz L$oop_dec1_14
+ jnz L$oop_dec1_14
.byte 102,15,56,223,209
xorps %xmm10,%xmm2
movups %xmm2,(%rsi)
L$xts_dec_ret:
+ xorps %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
+ movaps %xmm0,0(%rsp)
+ pxor %xmm8,%xmm8
+ movaps %xmm0,16(%rsp)
+ pxor %xmm9,%xmm9
+ movaps %xmm0,32(%rsp)
+ pxor %xmm10,%xmm10
+ movaps %xmm0,48(%rsp)
+ pxor %xmm11,%xmm11
+ movaps %xmm0,64(%rsp)
+ pxor %xmm12,%xmm12
+ movaps %xmm0,80(%rsp)
+ pxor %xmm13,%xmm13
+ movaps %xmm0,96(%rsp)
+ pxor %xmm14,%xmm14
+ pxor %xmm15,%xmm15
leaq (%rbp),%rsp
popq %rbp
L$xts_dec_epilogue:
@@ -2232,7 +2603,7 @@ L$oop_enc1_15:
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz L$oop_enc1_15
+ jnz L$oop_enc1_15
.byte 102,15,56,221,209
movl %r10d,%eax
movq %r11,%rcx
@@ -2242,26 +2613,59 @@ L$oop_enc1_15:
jnc L$cbc_enc_loop
addq $16,%rdx
jnz L$cbc_enc_tail
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
movups %xmm2,(%r8)
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
jmp L$cbc_ret
L$cbc_enc_tail:
movq %rdx,%rcx
xchgq %rdi,%rsi
-.long 0x9066A4F3
+.long 0x9066A4F3
movl $16,%ecx
subq %rdx,%rcx
xorl %eax,%eax
-.long 0x9066AAF3
+.long 0x9066AAF3
leaq -16(%rdi),%rdi
movl %r10d,%eax
movq %rdi,%rsi
movq %r11,%rcx
xorq %rdx,%rdx
- jmp L$cbc_enc_loop
+ jmp L$cbc_enc_loop
.p2align 4
L$cbc_decrypt:
+ cmpq $16,%rdx
+ jne L$cbc_decrypt_bulk
+
+
+
+ movdqu (%rdi),%xmm2
+ movdqu (%r8),%xmm3
+ movdqa %xmm2,%xmm4
+ movups (%rcx),%xmm0
+ movups 16(%rcx),%xmm1
+ leaq 32(%rcx),%rcx
+ xorps %xmm0,%xmm2
+L$oop_dec1_16:
+.byte 102,15,56,222,209
+ decl %r10d
+ movups (%rcx),%xmm1
+ leaq 16(%rcx),%rcx
+ jnz L$oop_dec1_16
+.byte 102,15,56,223,209
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ movdqu %xmm4,(%r8)
+ xorps %xmm3,%xmm2
+ pxor %xmm3,%xmm3
+ movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
+ jmp L$cbc_ret
+.p2align 4
+L$cbc_decrypt_bulk:
leaq (%rsp),%rax
pushq %rbp
subq $16,%rsp
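CBC decryption gains the same one-block shortcut as CTR above (and, further down, a six-wide decrypt loop gated on the same MOVBE-without-XSAVE check). The single-block path copies the ciphertext before decrypting, because that copy becomes the next IV, then chains with the old IV. A hedged C sketch, with a hypothetical inverse-cipher helper:

#include <emmintrin.h>

static __m128i decrypt_block(__m128i, const __m128i *, int); /* hypothetical AES^-1 helper */

/* One-block CBC decrypt as in the new shortcut:
 * save C (movdqa %xmm2,%xmm4), P = D(C) XOR IV, then IV' = C. */
static __m128i cbc_dec_one(__m128i ct, __m128i *iv, const __m128i *dk, int rounds)
{
    __m128i next_iv = ct;                          /* saved before decryption */
    __m128i pt = decrypt_block(ct, dk, rounds);
    pt = _mm_xor_si128(pt, *iv);                   /* xorps %xmm3,%xmm2 */
    *iv = next_iv;                                 /* movdqu %xmm4,(%r8) */
    return pt;
}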
@@ -2269,7 +2673,7 @@ L$cbc_decrypt:
leaq -8(%rax),%rbp
movups (%r8),%xmm10
movl %r10d,%eax
- cmpq $80,%rdx
+ cmpq $0x50,%rdx
jbe L$cbc_dec_tail
movups (%rcx),%xmm0
@@ -2284,10 +2688,15 @@ L$cbc_decrypt:
movdqa %xmm5,%xmm14
movdqu 80(%rdi),%xmm7
movdqa %xmm6,%xmm15
- cmpq $112,%rdx
+ movl __gnutls_x86_cpuid_s+4(%rip),%r9d
+ cmpq $0x70,%rdx
jbe L$cbc_dec_six_or_seven
- subq $112,%rdx
+ andl $71303168,%r9d
+ subq $0x50,%rdx
+ cmpl $4194304,%r9d
+ je L$cbc_dec_loop6_enter
+ subq $0x20,%rdx
leaq 112(%rcx),%rcx
jmp L$cbc_dec_loop8_enter
.p2align 4
@@ -2302,7 +2711,7 @@ L$cbc_dec_loop8_enter:
movups 16-112(%rcx),%xmm1
pxor %xmm0,%xmm4
xorq %r11,%r11
- cmpq $112,%rdx
+ cmpq $0x70,%rdx
pxor %xmm0,%xmm5
pxor %xmm0,%xmm6
pxor %xmm0,%xmm7
@@ -2316,8 +2725,8 @@ L$cbc_dec_loop8_enter:
.byte 102,15,56,222,233
.byte 102,15,56,222,241
.byte 102,15,56,222,249
- setnc %r11b
.byte 102,68,15,56,222,193
+ setnc %r11b
shlq $7,%r11
.byte 102,68,15,56,222,201
addq %rdi,%r11
@@ -2331,6 +2740,7 @@ L$cbc_dec_loop8_enter:
.byte 102,68,15,56,222,192
.byte 102,68,15,56,222,200
movups 64-112(%rcx),%xmm0
+ nop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
@@ -2340,6 +2750,7 @@ L$cbc_dec_loop8_enter:
.byte 102,68,15,56,222,193
.byte 102,68,15,56,222,201
movups 80-112(%rcx),%xmm1
+ nop
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
@@ -2349,6 +2760,7 @@ L$cbc_dec_loop8_enter:
.byte 102,68,15,56,222,192
.byte 102,68,15,56,222,200
movups 96-112(%rcx),%xmm0
+ nop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
@@ -2358,6 +2770,7 @@ L$cbc_dec_loop8_enter:
.byte 102,68,15,56,222,193
.byte 102,68,15,56,222,201
movups 112-112(%rcx),%xmm1
+ nop
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
@@ -2367,6 +2780,7 @@ L$cbc_dec_loop8_enter:
.byte 102,68,15,56,222,192
.byte 102,68,15,56,222,200
movups 128-112(%rcx),%xmm0
+ nop
.byte 102,15,56,222,209
.byte 102,15,56,222,217
.byte 102,15,56,222,225
@@ -2376,6 +2790,7 @@ L$cbc_dec_loop8_enter:
.byte 102,68,15,56,222,193
.byte 102,68,15,56,222,201
movups 144-112(%rcx),%xmm1
+ cmpl $11,%eax
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
@@ -2385,7 +2800,6 @@ L$cbc_dec_loop8_enter:
.byte 102,68,15,56,222,192
.byte 102,68,15,56,222,200
movups 160-112(%rcx),%xmm0
- cmpl $11,%eax
jb L$cbc_dec_done
.byte 102,15,56,222,209
.byte 102,15,56,222,217
@@ -2396,6 +2810,7 @@ L$cbc_dec_loop8_enter:
.byte 102,68,15,56,222,193
.byte 102,68,15,56,222,201
movups 176-112(%rcx),%xmm1
+ nop
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
@@ -2415,6 +2830,7 @@ L$cbc_dec_loop8_enter:
.byte 102,68,15,56,222,193
.byte 102,68,15,56,222,201
movups 208-112(%rcx),%xmm1
+ nop
.byte 102,15,56,222,208
.byte 102,15,56,222,216
.byte 102,15,56,222,224
@@ -2424,18 +2840,20 @@ L$cbc_dec_loop8_enter:
.byte 102,68,15,56,222,192
.byte 102,68,15,56,222,200
movups 224-112(%rcx),%xmm0
+ jmp L$cbc_dec_done
+.p2align 4
L$cbc_dec_done:
.byte 102,15,56,222,209
- pxor %xmm0,%xmm10
.byte 102,15,56,222,217
+ pxor %xmm0,%xmm10
pxor %xmm0,%xmm11
.byte 102,15,56,222,225
- pxor %xmm0,%xmm12
.byte 102,15,56,222,233
+ pxor %xmm0,%xmm12
pxor %xmm0,%xmm13
.byte 102,15,56,222,241
- pxor %xmm0,%xmm14
.byte 102,15,56,222,249
+ pxor %xmm0,%xmm14
pxor %xmm0,%xmm15
.byte 102,68,15,56,222,193
.byte 102,68,15,56,222,201
@@ -2447,16 +2865,16 @@ L$cbc_dec_done:
.byte 102,65,15,56,223,219
pxor %xmm0,%xmm10
movdqu 112(%rdi),%xmm0
- leaq 128(%rdi),%rdi
.byte 102,65,15,56,223,228
+ leaq 128(%rdi),%rdi
movdqu 0(%r11),%xmm11
.byte 102,65,15,56,223,237
- movdqu 16(%r11),%xmm12
.byte 102,65,15,56,223,246
+ movdqu 16(%r11),%xmm12
movdqu 32(%r11),%xmm13
.byte 102,65,15,56,223,255
- movdqu 48(%r11),%xmm14
.byte 102,68,15,56,223,193
+ movdqu 48(%r11),%xmm14
movdqu 64(%r11),%xmm15
.byte 102,69,15,56,223,202
movdqa %xmm0,%xmm10
@@ -2478,21 +2896,21 @@ L$cbc_dec_done:
movups %xmm8,96(%rsi)
leaq 112(%rsi),%rsi
- subq $128,%rdx
+ subq $0x80,%rdx
ja L$cbc_dec_loop8
movaps %xmm9,%xmm2
leaq -112(%rcx),%rcx
- addq $112,%rdx
- jle L$cbc_dec_tail_collected
+ addq $0x70,%rdx
+ jle L$cbc_dec_clear_tail_collected
movups %xmm9,(%rsi)
leaq 16(%rsi),%rsi
- cmpq $80,%rdx
+ cmpq $0x50,%rdx
jbe L$cbc_dec_tail
movaps %xmm11,%xmm2
L$cbc_dec_six_or_seven:
- cmpq $96,%rdx
+ cmpq $0x60,%rdx
ja L$cbc_dec_seven
movaps %xmm7,%xmm8
@@ -2503,14 +2921,19 @@ L$cbc_dec_six_or_seven:
movdqu %xmm2,(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
pxor %xmm13,%xmm5
movdqu %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
pxor %xmm14,%xmm6
movdqu %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
pxor %xmm15,%xmm7
movdqu %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
leaq 80(%rsi),%rsi
movdqa %xmm7,%xmm2
+ pxor %xmm7,%xmm7
jmp L$cbc_dec_tail_collected
.p2align 4
@@ -2525,36 +2948,88 @@ L$cbc_dec_seven:
movdqu %xmm2,(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
pxor %xmm13,%xmm5
movdqu %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
pxor %xmm14,%xmm6
movdqu %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
pxor %xmm15,%xmm7
movdqu %xmm6,64(%rsi)
+ pxor %xmm6,%xmm6
pxor %xmm9,%xmm8
movdqu %xmm7,80(%rsi)
+ pxor %xmm7,%xmm7
leaq 96(%rsi),%rsi
movdqa %xmm8,%xmm2
+ pxor %xmm8,%xmm8
+ pxor %xmm9,%xmm9
jmp L$cbc_dec_tail_collected
+.p2align 4
+L$cbc_dec_loop6:
+ movups %xmm7,(%rsi)
+ leaq 16(%rsi),%rsi
+ movdqu 0(%rdi),%xmm2
+ movdqu 16(%rdi),%xmm3
+ movdqa %xmm2,%xmm11
+ movdqu 32(%rdi),%xmm4
+ movdqa %xmm3,%xmm12
+ movdqu 48(%rdi),%xmm5
+ movdqa %xmm4,%xmm13
+ movdqu 64(%rdi),%xmm6
+ movdqa %xmm5,%xmm14
+ movdqu 80(%rdi),%xmm7
+ movdqa %xmm6,%xmm15
+L$cbc_dec_loop6_enter:
+ leaq 96(%rdi),%rdi
+ movdqa %xmm7,%xmm8
+
+ call _aesni_decrypt6
+
+ pxor %xmm10,%xmm2
+ movdqa %xmm8,%xmm10
+ pxor %xmm11,%xmm3
+ movdqu %xmm2,(%rsi)
+ pxor %xmm12,%xmm4
+ movdqu %xmm3,16(%rsi)
+ pxor %xmm13,%xmm5
+ movdqu %xmm4,32(%rsi)
+ pxor %xmm14,%xmm6
+ movq %r11,%rcx
+ movdqu %xmm5,48(%rsi)
+ pxor %xmm15,%xmm7
+ movl %r10d,%eax
+ movdqu %xmm6,64(%rsi)
+ leaq 80(%rsi),%rsi
+ subq $0x60,%rdx
+ ja L$cbc_dec_loop6
+
+ movdqa %xmm7,%xmm2
+ addq $0x50,%rdx
+ jle L$cbc_dec_clear_tail_collected
+ movups %xmm7,(%rsi)
+ leaq 16(%rsi),%rsi
+
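The new L$cbc_dec_loop6 block is a six-wide variant of the eight-block loop: each iteration loads six ciphertext blocks, keeps copies (xmm11-xmm15, then xmm8 via xmm7) to serve as chaining values, and calls _aesni_decrypt6 so six AESDEC chains are in flight at once. A hedged C sketch of what a single lane computes, using documented AES-NI intrinsics; cbc_decrypt, aes_decrypt_block, rk and nr are illustrative names for the expanded key schedule and round count the asm carries in %rcx/%eax:

#include <emmintrin.h>
#include <wmmintrin.h>   /* compile with -maes */

/* One block: initial whitening, nr-1 AESDEC rounds, one AESDECLAST. */
static __m128i aes_decrypt_block(__m128i c, const __m128i *rk, int nr)
{
    __m128i s = _mm_xor_si128(c, rk[0]);
    for (int r = 1; r < nr; r++)
        s = _mm_aesdec_si128(s, rk[r]);
    return _mm_aesdeclast_si128(s, rk[nr]);
}

/* CBC: P[i] = AES_decrypt(C[i]) ^ C[i-1], with the IV standing in for
   C[-1].  The asm simply runs six such lanes at once to hide latency. */
static void cbc_decrypt(unsigned char *out, const unsigned char *in,
                        unsigned long nblocks, const __m128i *rk, int nr,
                        __m128i iv)
{
    for (unsigned long i = 0; i < nblocks; i++) {
        __m128i c = _mm_loadu_si128((const __m128i *)(in + 16 * i));
        __m128i p = _mm_xor_si128(aes_decrypt_block(c, rk, nr), iv);
        _mm_storeu_si128((__m128i *)(out + 16 * i), p);
        iv = c;   /* this ciphertext chains into the next block */
    }
}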
L$cbc_dec_tail:
movups (%rdi),%xmm2
- subq $16,%rdx
+ subq $0x10,%rdx
jbe L$cbc_dec_one
movups 16(%rdi),%xmm3
movaps %xmm2,%xmm11
- subq $16,%rdx
+ subq $0x10,%rdx
jbe L$cbc_dec_two
movups 32(%rdi),%xmm4
movaps %xmm3,%xmm12
- subq $16,%rdx
+ subq $0x10,%rdx
jbe L$cbc_dec_three
movups 48(%rdi),%xmm5
movaps %xmm4,%xmm13
- subq $16,%rdx
+ subq $0x10,%rdx
jbe L$cbc_dec_four
movups 64(%rdi),%xmm6
@@ -2568,13 +3043,18 @@ L$cbc_dec_tail:
movdqu %xmm2,(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
pxor %xmm13,%xmm5
movdqu %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
pxor %xmm14,%xmm6
movdqu %xmm5,48(%rsi)
+ pxor %xmm5,%xmm5
leaq 64(%rsi),%rsi
movdqa %xmm6,%xmm2
- subq $16,%rdx
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
+ subq $0x10,%rdx
jmp L$cbc_dec_tail_collected
.p2align 4
@@ -2584,12 +3064,12 @@ L$cbc_dec_one:
movups 16(%rcx),%xmm1
leaq 32(%rcx),%rcx
xorps %xmm0,%xmm2
-L$oop_dec1_16:
+L$oop_dec1_17:
.byte 102,15,56,222,209
decl %eax
movups (%rcx),%xmm1
leaq 16(%rcx),%rcx
- jnz L$oop_dec1_16
+ jnz L$oop_dec1_17
.byte 102,15,56,223,209
xorps %xmm10,%xmm2
movaps %xmm11,%xmm10
@@ -2597,13 +3077,13 @@ L$oop_dec1_16:
.p2align 4
L$cbc_dec_two:
movaps %xmm3,%xmm12
- xorps %xmm4,%xmm4
- call _aesni_decrypt3
+ call _aesni_decrypt2
pxor %xmm10,%xmm2
movaps %xmm12,%xmm10
pxor %xmm11,%xmm3
movdqu %xmm2,(%rsi)
movdqa %xmm3,%xmm2
+ pxor %xmm3,%xmm3
leaq 16(%rsi),%rsi
jmp L$cbc_dec_tail_collected
.p2align 4
@@ -2616,7 +3096,9 @@ L$cbc_dec_three:
movdqu %xmm2,(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
movdqa %xmm4,%xmm2
+ pxor %xmm4,%xmm4
leaq 32(%rsi),%rsi
jmp L$cbc_dec_tail_collected
.p2align 4
@@ -2629,29 +3111,45 @@ L$cbc_dec_four:
movdqu %xmm2,(%rsi)
pxor %xmm12,%xmm4
movdqu %xmm3,16(%rsi)
+ pxor %xmm3,%xmm3
pxor %xmm13,%xmm5
movdqu %xmm4,32(%rsi)
+ pxor %xmm4,%xmm4
movdqa %xmm5,%xmm2
+ pxor %xmm5,%xmm5
leaq 48(%rsi),%rsi
jmp L$cbc_dec_tail_collected
.p2align 4
+L$cbc_dec_clear_tail_collected:
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+ pxor %xmm6,%xmm6
+ pxor %xmm7,%xmm7
+ pxor %xmm8,%xmm8
+ pxor %xmm9,%xmm9
L$cbc_dec_tail_collected:
movups %xmm10,(%r8)
andq $15,%rdx
jnz L$cbc_dec_tail_partial
movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
jmp L$cbc_dec_ret
.p2align 4
L$cbc_dec_tail_partial:
movaps %xmm2,(%rsp)
+ pxor %xmm2,%xmm2
movq $16,%rcx
movq %rsi,%rdi
subq %rdx,%rcx
leaq (%rsp),%rsi
-.long 0x9066A4F3
+.long 0x9066A4F3
+ movdqa %xmm2,(%rsp)
L$cbc_dec_ret:
+ xorps %xmm0,%xmm0
+ pxor %xmm1,%xmm1
leaq (%rbp),%rsp
popq %rbp
L$cbc_ret:
@@ -2661,7 +3159,7 @@ L$cbc_ret:
.p2align 4
_aesni_set_decrypt_key:
-.byte 0x48,0x83,0xEC,0x08
+.byte 0x48,0x83,0xEC,0x08
call __aesni_set_encrypt_key
shll $4,%esi
testl %eax,%eax
@@ -2689,7 +3187,9 @@ L$dec_key_inverse:
movups (%rdx),%xmm0
.byte 102,15,56,219,192
+ pxor %xmm1,%xmm1
movups %xmm0,(%rdi)
+ pxor %xmm0,%xmm0
L$dec_key_ret:
addq $8,%rsp
.byte 0xf3,0xc3
@@ -2700,15 +3200,17 @@ L$SEH_end_set_decrypt_key:
.p2align 4
_aesni_set_encrypt_key:
__aesni_set_encrypt_key:
-.byte 0x48,0x83,0xEC,0x08
+.byte 0x48,0x83,0xEC,0x08
movq $-1,%rax
testq %rdi,%rdi
jz L$enc_key_ret
testq %rdx,%rdx
jz L$enc_key_ret
+ movl $268437504,%r10d
movups (%rdi),%xmm0
xorps %xmm4,%xmm4
+ andl __gnutls_x86_cpuid_s+4(%rip),%r10d
leaq 16(%rdx),%rax
cmpl $256,%esi
je L$14rounds
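The new r10d test gates the "_alt" key-schedule paths added below on the cached CPUID word (__gnutls_x86_cpuid_s word 1). 268437504 is 0x10000800 (bits 28 and 11) and 268435456 is 0x10000000 (bit 28 alone, the AVX flag), so the alternative expansion is taken exactly when bit 28 is set and bit 11 is clear; the meaning of bit 11 in this shuffled capability word is assumed to follow the OpenSSL convention and is not confirmed by this file. A scalar sketch of the test; use_alt_schedule and the macro names are illustrative:

#include <stdint.h>

#define CAP_BIT11  (1u << 11)   /* 0x00000800 */
#define CAP_AVX    (1u << 28)   /* 0x10000000 */

/* Mirrors: r10d = cap & (CAP_AVX|CAP_BIT11); take alt path iff r10d == CAP_AVX. */
static int use_alt_schedule(uint32_t cap_word1)
{
    return (cap_word1 & (CAP_AVX | CAP_BIT11)) == CAP_AVX;
}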
@@ -2719,6 +3221,9 @@ __aesni_set_encrypt_key:
L$10rounds:
movl $9,%esi
+ cmpl $268435456,%r10d
+ je L$10rounds_alt
+
movups %xmm0,(%rdx)
.byte 102,15,58,223,200,1
call L$key_expansion_128_cold
@@ -2746,9 +3251,79 @@ L$10rounds:
jmp L$enc_key_ret
.p2align 4
+L$10rounds_alt:
+ movdqa L$key_rotate(%rip),%xmm5
+ movl $8,%r10d
+ movdqa L$key_rcon1(%rip),%xmm4
+ movdqa %xmm0,%xmm2
+ movdqu %xmm0,(%rdx)
+ jmp L$oop_key128
+
+.p2align 4
+L$oop_key128:
+.byte 102,15,56,0,197
+.byte 102,15,56,221,196
+ pslld $1,%xmm4
+ leaq 16(%rax),%rax
+
+ movdqa %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm3,%xmm2
+
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,-16(%rax)
+ movdqa %xmm0,%xmm2
+
+ decl %r10d
+ jnz L$oop_key128
+
+ movdqa L$key_rcon1b(%rip),%xmm4
+
+.byte 102,15,56,0,197
+.byte 102,15,56,221,196
+ pslld $1,%xmm4
+
+ movdqa %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm3,%xmm2
+
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,(%rax)
+
+ movdqa %xmm0,%xmm2
+.byte 102,15,56,0,197
+.byte 102,15,56,221,196
+
+ movdqa %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm2,%xmm3
+ pslldq $4,%xmm2
+ pxor %xmm3,%xmm2
+
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,16(%rax)
+
+ movl %esi,96(%rax)
+ xorl %eax,%eax
+ jmp L$enc_key_ret
+
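L$10rounds_alt computes the AES-128 schedule without AESKEYGENASSIST. The pshufb with L$key_rotate broadcasts the previous round key's rotated top word into all four lanes; since every column is then identical, the ShiftRows step inside AESENCLAST is a no-op, so aesenclast with xmm4 as its key operand yields SubWord(RotWord(w3)) ^ rcon in each lane. The pslld $1 doubles rcon each pass (1, 2, 4, ..., 0x80), and L$key_rcon1b supplies 0x1b for the last two rounds, where doubling would wrap through the AES reduction polynomial. The three pslldq/pxor pairs implement the running XOR of the key words; a small C model of that step, with prefix_xor_words as an illustrative name:

#include <stdint.h>

/* Each pslldq-$4/pxor triple computes the prefix XOR of the previous round
   key's four words; XORing in the SubWord(RotWord()) value then completes
   the standard AES-128 expansion step. */
static void prefix_xor_words(uint32_t w[4])
{
    w[1] ^= w[0];           /* w1 ^ w0           */
    w[2] ^= w[1];           /* w2 ^ w1 ^ w0      */
    w[3] ^= w[2];           /* w3 ^ w2 ^ w1 ^ w0 */
}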
+.p2align 4
L$12rounds:
movq 16(%rdi),%xmm2
movl $11,%esi
+ cmpl $268435456,%r10d
+ je L$12rounds_alt
+
movups %xmm0,(%rdx)
.byte 102,15,58,223,202,1
call L$key_expansion_192a_cold
@@ -2772,10 +3347,54 @@ L$12rounds:
jmp L$enc_key_ret
.p2align 4
+L$12rounds_alt:
+ movdqa L$key_rotate192(%rip),%xmm5
+ movdqa L$key_rcon1(%rip),%xmm4
+ movl $8,%r10d
+ movdqu %xmm0,(%rdx)
+ jmp L$oop_key192
+
+.p2align 4
+L$oop_key192:
+ movq %xmm2,0(%rax)
+ movdqa %xmm2,%xmm1
+.byte 102,15,56,0,213
+.byte 102,15,56,221,212
+ pslld $1,%xmm4
+ leaq 24(%rax),%rax
+
+ movdqa %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm3,%xmm0
+
+ pshufd $0xff,%xmm0,%xmm3
+ pxor %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm1,%xmm3
+
+ pxor %xmm2,%xmm0
+ pxor %xmm3,%xmm2
+ movdqu %xmm0,-16(%rax)
+
+ decl %r10d
+ jnz L$oop_key192
+
+ movl %esi,32(%rax)
+ xorl %eax,%eax
+ jmp L$enc_key_ret
+
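For reference, the schedule sizes these expansion loops produce follow the textbook 4*(rounds+1) words: 176 bytes for AES-128 (10 rounds), 208 for AES-192 (12 rounds), 240 for AES-256 (14 rounds), which is why the 192-bit loop above runs eight 24-byte iterations after the initial 16-byte store before writing the round-count word. A one-line helper as a worked check; key_schedule_bytes is an illustrative name:

/* 16 * (rounds + 1) bytes of round keys: 176 / 208 / 240. */
static unsigned key_schedule_bytes(unsigned rounds)
{
    return 16u * (rounds + 1);
}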
+.p2align 4
L$14rounds:
movups 16(%rdi),%xmm2
movl $13,%esi
leaq 16(%rax),%rax
+ cmpl $268435456,%r10d
+ je L$14rounds_alt
+
movups %xmm0,(%rdx)
movups %xmm2,16(%rdx)
.byte 102,15,58,223,202,1
@@ -2810,9 +3429,69 @@ L$14rounds:
jmp L$enc_key_ret
.p2align 4
+L$14rounds_alt:
+ movdqa L$key_rotate(%rip),%xmm5
+ movdqa L$key_rcon1(%rip),%xmm4
+ movl $7,%r10d
+ movdqu %xmm0,0(%rdx)
+ movdqa %xmm2,%xmm1
+ movdqu %xmm2,16(%rdx)
+ jmp L$oop_key256
+
+.p2align 4
+L$oop_key256:
+.byte 102,15,56,0,213
+.byte 102,15,56,221,212
+
+ movdqa %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm0,%xmm3
+ pslldq $4,%xmm0
+ pxor %xmm3,%xmm0
+ pslld $1,%xmm4
+
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,(%rax)
+
+ decl %r10d
+ jz L$done_key256
+
+ pshufd $0xff,%xmm0,%xmm2
+ pxor %xmm3,%xmm3
+.byte 102,15,56,221,211
+
+ movdqa %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm1,%xmm3
+ pslldq $4,%xmm1
+ pxor %xmm3,%xmm1
+
+ pxor %xmm1,%xmm2
+ movdqu %xmm2,16(%rax)
+ leaq 32(%rax),%rax
+ movdqa %xmm2,%xmm1
+
+ jmp L$oop_key256
+
+L$done_key256:
+ movl %esi,16(%rax)
+ xorl %eax,%eax
+ jmp L$enc_key_ret
+
+.p2align 4
L$bad_keybits:
movq $-2,%rax
L$enc_key_ret:
+ pxor %xmm0,%xmm0
+ pxor %xmm1,%xmm1
+ pxor %xmm2,%xmm2
+ pxor %xmm3,%xmm3
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
addq $8,%rsp
.byte 0xf3,0xc3
L$SEH_end_set_encrypt_key:
@@ -2898,6 +3577,14 @@ L$xts_magic:
.long 0x87,0,1,0
L$increment1:
.byte 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
+L$key_rotate:
+.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
+L$key_rotate192:
+.long 0x04070605,0x04070605,0x04070605,0x04070605
+L$key_rcon1:
+.long 1,1,1,1
+L$key_rcon1b:
+.long 0x1b,0x1b,0x1b,0x1b
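The four new constants drive the _alt key schedules: L$key_rotate and L$key_rotate192 are pshufb masks that broadcast RotWord() of the relevant key word into every lane (selecting bytes 13,14,15,12 and 5,6,7,4 respectively), L$key_rcon1 seeds the round constant at 1 in each lane for the pslld doubling, and L$key_rcon1b restarts it at 0x1b once doubling would overflow the byte. A scalar model of the first mask; rotword_bcast is an illustrative name:

#include <stdint.h>

/* pshufb with 0x0c0f0e0d in each lane: every result dword is the source
   register's top word rotated by one byte (RotWord). */
static uint32_t rotword_bcast(const uint8_t xmm[16])
{
    return (uint32_t)xmm[13]
         | (uint32_t)xmm[14] << 8
         | (uint32_t)xmm[15] << 16
         | (uint32_t)xmm[12] << 24;
}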
.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 6