path: root/mpn/x86_64/fastsse/copyi.asm
dnl  AMD64 mpn_copyi optimised for CPUs with fast SSE.

dnl  Copyright 2003, 2005, 2007, 2011, 2012 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.

dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of the GNU Lesser General Public License as published
dnl  by the Free Software Foundation; either version 3 of the License, or (at
dnl  your option) any later version.

dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
dnl  License for more details.

dnl  You should have received a copy of the GNU Lesser General Public License
dnl  along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.

include(`../config.m4')


C	    cycles/limb		  good for cpu?
C	  aligned  unaligned
C AMD K8,K9
C AMD K10	 0.85	 1.64		Y/N
C AMD bd1	 1.4	 1.4		Y
C AMD bobcat
C Intel P4	 2.3	 2.3		Y
C Intel core2	 1.0	 1.0
C Intel NHM	 0.5	 0.67		Y
C Intel SBR	 0.5	 0.75		Y
C Intel atom
C VIA nano	 1.16	 5.16		Y/N

C We try to do as many 16-byte operations as possible.  The top-most and
C bottom-most writes might need 8-byte operations.  We can always write using
C aligned 16-byte operations, but we read with both aligned and unaligned
C 16-byte operations.

C Instead of having separate loops for aligned and unaligned reads, we always
C read using MOVDQU.  This works well except on core2, where performance
C doubles when reading with MOVDQA (for an aligned source).  It is unclear how
C best to handle the unaligned case there.
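
C In outline (a rough C-level sketch for orientation only; the labels in
C parentheses refer to the code below):
C
C   if (n < 3) goto basecase;                    /* L(bc): 64-bit moves */
C   if ((size_t)rp & 8) { *rp++ = *up++; n--; }  /* 16-byte align rp    */
C   while (n >= 16) copy 16 limbs;               /* L(top): SSE loop    */
C   copy 8, 4, 2 limbs per bits 3..1 of n;       /* L(sma)              */
C   if (n & 1) copy one final limb;              /* L(end)              */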

C INPUT PARAMETERS
define(`rp', `%rdi')
define(`up', `%rsi')
define(`n',  `%rdx')

ABI_SUPPORT(DOS64)
ABI_SUPPORT(STD64)

dnl define(`movdqu', lddqu)
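dnl  The define above, if uncommented, maps MOVDQU onto SSE3's LDDQU, an
dnl  alternative unaligned-load instruction that is faster on some older
dnl  CPUs (notably Pentium 4); presumably kept here for experimentation.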

ASM_START()
	TEXT
	ALIGN(64)
PROLOGUE(mpn_copyi)
	DOS64_ENTRY(3)

	cmp	$3, n			C take the basecase path for n < 3
	jc	L(bc)

	test	$8, R8(rp)		C is rp 16-byte aligned?
	jz	L(ali)			C jump if rp aligned
	movsq				C copy single limb
	dec	n

	sub	$16, n			C fewer than 16 limbs left?
	jc	L(sma)			C then skip the main loop
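
C Main loop: copy 16 limbs (128 bytes) per iteration.  Reads use movdqu
C since up may be misaligned; writes use movdqa, which is safe now that
C rp is 16-byte aligned.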

	ALIGN(16)
L(top):	movdqu	(up), %xmm0
	movdqu	16(up), %xmm1
	movdqu	32(up), %xmm2
	movdqu	48(up), %xmm3
	movdqu	64(up), %xmm4
	movdqu	80(up), %xmm5
	movdqu	96(up), %xmm6
	movdqu	112(up), %xmm7
	lea	128(up), up
	movdqa	%xmm0, (rp)
	movdqa	%xmm1, 16(rp)
	movdqa	%xmm2, 32(rp)
	movdqa	%xmm3, 48(rp)
	movdqa	%xmm4, 64(rp)
	movdqa	%xmm5, 80(rp)
	movdqa	%xmm6, 96(rp)
	movdqa	%xmm7, 112(rp)
	lea	128(rp), rp
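C The loop condition is tested at the bottom so that the rp-alignment
C code above can enter the loop here.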
L(ali):	sub	$16, n
	jnc	L(top)
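
C Copy the remaining 0..15 limbs.  The sub $16 updates above leave the
C low four bits of n intact, so they still encode the leftover count;
C handle 8, 4, 2, and 1 limbs by testing bits 3, 2, 1, 0 in turn.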

L(sma):	test	$8, R8(n)
	jz	1f
	movdqu	(up), %xmm0
	movdqu	16(up), %xmm1
	movdqu	32(up), %xmm2
	movdqu	48(up), %xmm3
	lea	64(up), up
	movdqa	%xmm0, (rp)
	movdqa	%xmm1, 16(rp)
	movdqa	%xmm2, 32(rp)
	movdqa	%xmm3, 48(rp)
	lea	64(rp), rp
1:
	test	$4, R8(n)
	jz	1f
	movdqu	(up), %xmm0
	movdqu	16(up), %xmm1
	lea	32(up), up
	movdqa	%xmm0, (rp)
	movdqa	%xmm1, 16(rp)
	lea	32(rp), rp
1:
	test	$2, R8(n)
	jz	1f
	movdqu	(up), %xmm0
	lea	16(up), up
	movdqa	%xmm0, (rp)
	lea	16(rp), rp
	ALIGN(16)
1:
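C Copy the final limb if n is odd; bit 0 of n survives the sub $16
C updates above.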
L(end):	bt	$0, n
	jnc	1f
	mov	(up), %r8
	mov	%r8, (rp)
1:
	DOS64_EXIT()
	ret

C Basecase code.  Needed for good speed on small operands, not for
C correctness, as the above code is currently written.

L(bc):	sub	$2, n
	jc	L(end)
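
C Copy two limbs per round; a leftover odd limb is handled below.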
	ALIGN(16)
1:	mov	(up), %rax
	mov	8(up), %rcx
	lea	16(up), up
	mov	%rax, (rp)
	mov	%rcx, 8(rp)
	lea	16(rp), rp
	sub	$2, n
	jnc	1b

	bt	$0, n
	jnc	L(ret)
	mov	(up), %rax
	mov	%rax, (rp)
L(ret):	DOS64_EXIT()
	ret
EPILOGUE()