summaryrefslogtreecommitdiff
path: root/mpn/sparc64/addmul_1.asm
blob: 48d040e9a5e050079c82c9eb04688272d54ac715 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
dnl  SPARC 64-bit mpn_addmul_1 -- Multiply a limb vector with a limb and
dnl  add the result to a second limb vector.

dnl  Copyright (C) 1998, 2000 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.

dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of the GNU Library General Public License as published
dnl  by the Free Software Foundation; either version 2 of the License, or (at
dnl  your option) any later version.

dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
dnl  License for more details.

dnl  You should have received a copy of the GNU Library General Public License
dnl  along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
dnl  the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
dnl  MA 02111-1307, USA.

include(`../config.m4')

C INPUT PARAMETERS
C res_ptr	i0
C s1_ptr	i1
C size		i2
C s2_limb	i3

ASM_START()
C Declare %g2/%g3 as scratch so the SPARC V9 assembler accepts their
C use below (they are application-reserved unless declared).
	.register	%g2,#scratch
	.register	%g3,#scratch

PROLOGUE(mpn_addmul_1)
C mpn_addmul_1(res_ptr, s1_ptr, size, s2_limb):
C   multiply the limb vector at s1_ptr by s2_limb, add the product into
C   the limb vector at res_ptr, and return the carry-out limb in %i0.
C The 64-bit s2_limb is processed as two 32-bit halves by two calls to
C the local helpers addmull (low half) and addmulu (high half), which
C are generated from addmul_1h.asm at the bottom of this file.
	save	%sp,-256,%sp

C We store 0.0 in f10 and keep it invariant across the two
C function calls below.  Note that this is not ABI conformant,
C but since the functions are local, that's acceptable.
ifdef(`PIC',
`L(pc):	rd	%pc,%o7
	ld	[%o7+L(noll)-L(pc)],%f10',
`	sethi	%hh(L(noll)),%g2
	sethi	%lm(L(noll)),%g1
	or	%g2,%hm(L(noll)),%g2
	or	%g1,%lo(L(noll)),%g1
	sllx	%g2,32,%g2
	ld	[%g1+%g2],%f10')

C First multiply-add with low 32 bits of s2_limb
	mov	%i0,%o0			C res_ptr
	mov	%i1,%o1			C s1_ptr
	add	%i2,%i2,%o2		C 2*size -- helpers count 32-bit halves
	call	addmull
	srl	%i3,0,%o3		C (delay slot) low 32 bits, zero-extended

	mov	%o0,%l0			C keep carry-out from addmull

C Now multiply-add with high 32 bits of s2_limb, unless it is zero.
	srlx	%i3,32,%o3
	brz,a,pn	%o3,L(small)
	 mov	%o0,%i0			C (annulled slot: runs only if taken) return low-pass carry
	mov	%i1,%o1
	add	%i2,%i2,%o2
	call	addmulu
	add	%i0,4,%o0		C (delay slot) res_ptr+4: other 32-bit half of each limb

	add	%l0,%o0,%i0		C combine carries from both passes
L(small):
	ret
	restore	%g0,%g0,%g0
EPILOGUE(mpn_addmul_1)

	TEXT
	ALIGN(4)
C A 32-bit zero word; its address is taken in the prologue, where it
C is loaded into %f10 as the invariant 0.0.
L(noll):
	.word	0

dnl  Byte offsets used by addmul_1h.asm to address the 32-bit halves
dnl  of 64-bit limbs (big-endian: low half at +4, high half at -4,
dnl  relative to the midpoint of the limb).
define(`LO',`(+4)')
define(`HI',`(-4)')

dnl  First inclusion generates addmull, the pass over the low 32 bits
dnl  of s2_limb.  LOWPART is defined and its labels are L(l.*).
define(`DLO',`(+4)')
define(`DHI',`(-4)')
define(`LOWPART')
define(`E',`L(l.$1)')
include(`addmul_1h.asm')

dnl  Second inclusion generates addmulu, the pass over the high 32 bits.
dnl  Destination half offsets are swapped (the product is shifted up by
dnl  32 bits), LOWPART is undefined, and labels are L(u.*) to avoid
dnl  clashing with the first copy.
define(`DLO',`(-4)')
define(`DHI',`(+4)')
undefine(`LOWPART')
define(`E',`L(u.$1)')
include(`addmul_1h.asm')