# Pyramid __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
# sum in a third limb vector.
# Copyright 1995, 2000 Free Software Foundation, Inc.
# This file is part of the GNU MP Library.
# The GNU MP Library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
# The GNU MP Library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with the GNU MP Library. If not, see https://www.gnu.org/licenses/.
# mp_limb_t __gmpn_add_n (mp_ptr res_ptr, mp_srcptr s1_ptr,
#                         mp_srcptr s2_ptr, mp_size_t size)
#
# Add the two SIZE-limb (4-byte-limb) vectors at s1_ptr and s2_ptr,
# store the sum at res_ptr, and return the final carry.
#
# Pyramid assembler: '#' comments, operand order src,dst
# (e.g. "addw $4,pr0" bumps the pointer in pr0 by 4).
#
# Register use:
#   pr0  res_ptr; reused at the end for the return value
#   pr1  s1_ptr
#   pr2  s2_ptr
#   pr3  size in limbs (must be > 0)
#   tr0  inter-iteration carry in a carry-save encoding
#        ($-1 = carry clear; rsubw/subwb shuttle it to/from the C flag)
#   tr1  current limb sum
#   tr2  size % 4 = limbs handled by the single-limb pre-loop
.text
.align 2
.globl ___gmpn_add_n
___gmpn_add_n:
movw $-1,tr0 # representation for carry clear
movw pr3,tr2 # tr2 = size
andw $3,tr2 # tr2 = size % 4 (limbs not covered by 4-way unrolling)
beq Lend0 # size is a multiple of 4: go straight to the unrolled loop
subw tr2,pr3 # pr3 = size rounded down to a multiple of 4
# Pre-loop: process the size%4 leftover limbs one at a time.
Loop0: rsubw $0,tr0 # restore carry bit from carry-save register
movw (pr1),tr1 # tr1 = *s1_ptr
addwc (pr2),tr1 # tr1 += *s2_ptr + carry
movw tr1,(pr0) # *res_ptr = tr1
subwb tr0,tr0 # save the resulting carry back into tr0 (carry-save)
addw $4,pr0 # advance all three pointers by one 4-byte limb
addw $4,pr1
addw $4,pr2
addw $-1,tr2 # one leftover limb done
bne Loop0
mtstw pr3,pr3 # any whole groups of 4 limbs remaining?
beq Lend # no: finished
Lend0:
# Main loop: 4-way unrolled, four limbs per iteration, so the
# carry save/restore overhead is paid once per 4 limbs.
Loop: rsubw $0,tr0 # restore carry bit from carry-save register
movw (pr1),tr1 # limb 0: res[0] = s1[0] + s2[0] + carry
addwc (pr2),tr1
movw tr1,(pr0)
movw 4(pr1),tr1 # limb 1
addwc 4(pr2),tr1
movw tr1,4(pr0)
movw 8(pr1),tr1 # limb 2
addwc 8(pr2),tr1
movw tr1,8(pr0)
movw 12(pr1),tr1 # limb 3
addwc 12(pr2),tr1
movw tr1,12(pr0)
subwb tr0,tr0 # save carry into tr0 across the pointer bookkeeping
addw $16,pr0 # advance pointers by 4 limbs = 16 bytes
addw $16,pr1
addw $16,pr2
addw $-4,pr3 # four more limbs done
bne Loop
Lend:
mnegw tr0,pr0 # pr0 = -tr0: map the carry-save encoding to the 0/1
              # carry-out return value (NOTE(review): exact mapping
              # depends on Pyramid subwb borrow convention — confirm)
ret
|