author     Michael Weiser <michael.weiser@gmx.de>   2020-03-05 20:13:07 +0100
committer  Niels Möller <nisse@lysator.liu.se>      2020-03-12 21:55:25 +0100
commit     46fcc7f0b6abffd2632d8809ef8dc8f70e4a1ff9 (patch)
tree       f3ae2c9bdc1075c8ae9485cd0e6afe2aef0e84f9 /arm
parent     dedba6ff09f78b96dbc5a2b3a13fb8825f438d3c (diff)
download   nettle-46fcc7f0b6abffd2632d8809ef8dc8f70e4a1ff9.tar.gz
arm: Fix memxor for non-armv6+ big-endian systems
ARM assembly adjustments for big-endian systems contained armv6+-only instructions (rev) in generic arm memxor code. Replace those with an actual conversion of the leftover byte store routines for big-endian systems. This also provides a slight optimisation by removing the additional instruction as well as increased symmetry between little- and big-endian implementations.

Signed-off-by: Michael Weiser <michael.weiser@gmx.de>
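For readers who do not want to trace the m4 macros, here is a minimal, hypothetical C sketch of the leftover-byte store the message describes (not nettle code; the function name store_leftover and the compiler-macro endianness test are assumptions for illustration). It mirrors the patched IF_LE/IF_BE paths in memxor.asm: little-endian stores the low byte and shifts right, big-endian rotates by 24 to bring the uppermost byte down before each store.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model (not nettle code) of the leftover-byte store:
 * 'word' holds 'n' XORed leftover bytes (1..3), packed at the low end
 * on little-endian and at the high end on big-endian, as after the
 * S0ADJ alignment shift in memxor.asm. */
static void store_leftover(uint8_t *dst, uint32_t word, unsigned n)
{
#if defined(__ARMEB__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    while (n-- > 0) {
        /* IF_BE: ror #24 brings the uppermost byte down for the store
         * while keeping the remaining bytes above it intact. */
        word = (word << 8) | (word >> 24);
        *dst++ = (uint8_t) word;
    }
#else
    while (n-- > 0) {
        /* IF_LE: store the low byte, then lsr #8 brings down the next one. */
        *dst++ = (uint8_t) word;
        word >>= 8;
    }
#endif
}

int main(void)
{
    uint8_t out[3] = { 0, 0, 0 };
#if defined(__ARMEB__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    uint32_t word = 0xAABBCC00u;  /* leftovers at the high end, first byte uppermost */
#else
    uint32_t word = 0x00CCBBAAu;  /* leftovers at the low end, first byte lowest */
#endif
    store_leftover(out, word, 3);
    printf("%02x %02x %02x\n", out[0], out[1], out[2]);  /* aa bb cc either way */
    return 0;
}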
Diffstat (limited to 'arm')
-rw-r--r--   arm/memxor.asm    13
-rw-r--r--   arm/memxor3.asm   31
2 files changed, 25 insertions, 19 deletions
diff --git a/arm/memxor.asm b/arm/memxor.asm
index 239a4034..e4619629 100644
--- a/arm/memxor.asm
+++ b/arm/memxor.asm
@@ -138,24 +138,25 @@ PROLOGUE(nettle_memxor)
adds N, #8
beq .Lmemxor_odd_done
- C We have TNC/8 left-over bytes in r4, high end
+ C We have TNC/8 left-over bytes in r4, high end on LE and low end on
+ C BE, excess bits to be discarded by alignment adjustment at the other end
S0ADJ r4, CNT
+ C now byte-aligned at low end on LE and high end on BE
ldr r3, [DST]
eor r3, r4
- C memxor_leftover does an LSB store
- C so we need to reverse if actually BE
-IF_BE(< rev r3, r3>)
-
pop {r4,r5,r6}
C Store bytes, one by one.
.Lmemxor_leftover:
+ C bring uppermost byte down for saving while preserving lower ones
+IF_BE(< ror r3, #24>)
strb r3, [DST], #+1
subs N, #1
beq .Lmemxor_done
subs TNC, #8
- lsr r3, #8
+ C bring down next byte, no need to preserve
+IF_LE(< lsr r3, #8>)
bne .Lmemxor_leftover
b .Lmemxor_bytes
.Lmemxor_odd_done:
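To sanity-check why the armv6+-only rev can be dropped from the hunk above, the following hypothetical C comparison (not nettle code; old_rev_then_lsb and new_ror_per_byte are made-up names, and __builtin_bswap32 stands in for rev) runs the old byte-reverse-then-LSB-store sequence and the new per-byte ror #24 sequence on a word whose leftover bytes sit at the high end, as on big-endian after the alignment shift, and checks that they emit the same bytes. The rotate and shift forms used by the new code are available on pre-armv6 cores, which is the point of the fix.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Old big-endian path: rev (byte reverse, armv6+ only), then the
 * little-endian LSB-store loop unchanged. */
static void old_rev_then_lsb(uint8_t *dst, uint32_t w, unsigned n)
{
    w = __builtin_bswap32(w);          /* rev r3, r3 */
    while (n-- > 0) {
        *dst++ = (uint8_t) w;          /* strb of the low byte */
        w >>= 8;                       /* lsr r3, #8 */
    }
}

/* New big-endian path: rotate the uppermost byte down before each store. */
static void new_ror_per_byte(uint8_t *dst, uint32_t w, unsigned n)
{
    while (n-- > 0) {
        w = (w << 8) | (w >> 24);      /* ror r3, #24 */
        *dst++ = (uint8_t) w;          /* strb, low byte now holds the uppermost one */
    }
}

int main(void)
{
    uint8_t a[3], b[3];
    uint32_t w = 0xAABBCC00u;          /* 3 leftover bytes at the high end */
    old_rev_then_lsb(a, w, 3);
    new_ror_per_byte(b, w, 3);
    for (unsigned i = 0; i < 3; i++)
        assert(a[i] == b[i]);          /* both emit aa bb cc */
    printf("%02x %02x %02x\n", b[0], b[1], b[2]);
    return 0;
}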
diff --git a/arm/memxor3.asm b/arm/memxor3.asm
index 69598e1c..b6c6da49 100644
--- a/arm/memxor3.asm
+++ b/arm/memxor3.asm
@@ -159,21 +159,23 @@ PROLOGUE(nettle_memxor3)
adds N, #8
beq .Lmemxor3_done
- C Leftover bytes in r4, low end
+ C Leftover bytes in r4, low end on LE and high end on BE before
+ C preparatory alignment correction
ldr r5, [AP, #-4]
eor r4, r5, r4, S1ADJ ATNC
-
- C leftover does an LSB store
- C so we need to reverse if actually BE
-IF_BE(< rev r4, r4>)
+ C now byte-aligned in high end on LE and low end on BE because we're
+ C working downwards in saving the very first bytes of the buffer
.Lmemxor3_au_leftover:
C Store a byte at a time
- ror r4, #24
+ C bring uppermost byte down for saving while preserving lower ones
+IF_LE(< ror r4, #24>)
strb r4, [DST, #-1]!
subs N, #1
beq .Lmemxor3_done
subs ACNT, #8
+ C bring down next byte, no need to preserve
+IF_BE(< lsr r4, #8>)
sub AP, #1
bne .Lmemxor3_au_leftover
b .Lmemxor3_bytes
@@ -273,18 +275,21 @@ IF_BE(< rev r4, r4>)
adds N, #8
beq .Lmemxor3_done
- C leftover does an LSB store
- C so we need to reverse if actually BE
-IF_BE(< rev r4, r4>)
-
- C Leftover bytes in a4, low end
- ror r4, ACNT
+ C Leftover bytes in r4, low end on LE and high end on BE before
+ C preparatory alignment correction
+IF_LE(< ror r4, ACNT>)
+IF_BE(< ror r4, ATNC>)
+ C now byte-aligned in high end on LE and low end on BE because we're
+ C working downwards in saving the very first bytes of the buffer
.Lmemxor3_uu_leftover:
- ror r4, #24
+ C bring uppermost byte down for saving while preserving lower ones
+IF_LE(< ror r4, #24>)
strb r4, [DST, #-1]!
subs N, #1
beq .Lmemxor3_done
subs ACNT, #8
+ C bring down next byte, no need to preserve
+IF_BE(< lsr r4, #8>)
bne .Lmemxor3_uu_leftover
b .Lmemxor3_bytes
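The memxor3 leftover loops mirror the memxor one but write the destination downwards with a pre-decrementing strb, so the IF_LE and IF_BE roles swap: little-endian now rotates the uppermost byte down, big-endian stores the low byte and shifts. A rough, hypothetical C model of that downward store follows (not nettle code; the name store_leftover_down and the compiler-macro endianness dispatch are assumptions for illustration).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model (not nettle code) of the memxor3 leftover loops,
 * which write the destination downwards (highest address first).
 * 'word' holds 'n' leftover bytes, aligned to the high end on
 * little-endian and to the low end on big-endian, as the comments in
 * memxor3.asm describe; 'dst' points just past the bytes to write. */
static void store_leftover_down(uint8_t *dst, uint32_t word, unsigned n)
{
#if defined(__ARMEB__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    while (n-- > 0) {
        *--dst = (uint8_t) word;            /* strb r4, [DST, #-1]! of the low byte */
        word >>= 8;                         /* IF_BE: lsr r4, #8, next byte down */
    }
#else
    while (n-- > 0) {
        word = (word << 8) | (word >> 24);  /* IF_LE: ror r4, #24, uppermost byte down */
        *--dst = (uint8_t) word;            /* store it at the next lower address */
    }
#endif
}

int main(void)
{
    uint8_t buf[3];
#if defined(__ARMEB__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    uint32_t word = 0x00AABBCCu;  /* leftovers at the low end, last buffer byte lowest */
#else
    uint32_t word = 0xCCBBAA00u;  /* leftovers at the high end, last buffer byte uppermost */
#endif
    store_leftover_down(buf + 3, word, 3);
    printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);  /* aa bb cc either way */
    return 0;
}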