author		David S. Miller <davem@davemloft.net>	2012-05-31 14:19:30 -0700
committer	David S. Miller <davem@davemloft.net>	2012-05-31 14:19:30 -0700
commit		f230c29b40cc36ce62387664be92c3cf94119efe
tree		26028666b279654830d54846e9194429accfd9f5	/sysdeps/sparc/sparc64/memcpy.S
parent		fed806c3af7b6421e17ac50f8129dbe4fd693e4a
Avoid performance penalty in sparc optimized memcpy/memset.

fmovd clears the current exception field in the %fsr; fsrc2 does not,
and therefore runs more efficiently on some cpus.

	* sysdeps/sparc/sparc64/memcpy.S: Use fsrc2 to move 64-bit values
	  between float registers.
	* sysdeps/sparc/sparc64/memset.S: Likewise.
	* sysdeps/sparc/sparc64/multiarch/memcpy-niagara2.S: Likewise.
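For context (not part of this commit), a minimal sketch of the two instruction forms: both copy the 64-bit value in %f0 to %f2, and the comments restate the rationale above, assuming a VIS-capable cpu since fsrc2 is a VIS instruction (these routines already rely on VIS).

	fmovd	%f0, %f2	/* FPop move: also writes the current-exception (cexc) field of %fsr */
	fsrc2	%f0, %f2	/* VIS move: same 64-bit copy, leaves %fsr untouched */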
Diffstat (limited to 'sysdeps/sparc/sparc64/memcpy.S')
-rw-r--r--	sysdeps/sparc/sparc64/memcpy.S	| 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/sysdeps/sparc/sparc64/memcpy.S b/sysdeps/sparc/sparc64/memcpy.S
index 668ebecef1..8371088572 100644
--- a/sysdeps/sparc/sparc64/memcpy.S
+++ b/sysdeps/sparc/sparc64/memcpy.S
@@ -79,7 +79,7 @@
#define UNEVEN_VISCHUNK(dest, f0, f1, left) \
subcc %left, 8, %left; \
bl,pn %xcc, 205f; \
- fsrc1 %f0, %f1; \
+ fsrc2 %f0, %f1; \
ba,a,pt %xcc, 204f;

/* Macros for non-VIS memcpy code. */
@@ -162,7 +162,7 @@ ENTRY(__memcpy_large)
3: andcc %o0, 0x38, %g5 /* IEU1 Group */
201: be,pt %icc, 202f /* CTI */
mov 64, %g1 /* IEU0 */
- fmovd %f0, %f2 /* FPU */
+ fsrc2 %f0, %f2 /* FPU */
sub %g1, %g5, %g5 /* IEU0 Group */
alignaddr %o1, %g0, %g1 /* GRU Group */
ldd [%g1], %f4 /* Load Group */
@@ -193,7 +193,7 @@ ENTRY(__memcpy_large)
andn %o1, (0x40 - 1), %o1 /* IEU1 */
and %g2, 7, %g2 /* IEU0 Group */
andncc %g3, 0x7, %g3 /* IEU1 */
- fmovd %f0, %f2 /* FPU */
+ fsrc2 %f0, %f2 /* FPU */
sub %g3, 0x10, %g3 /* IEU0 Group */
sub %o2, %g6, %o2 /* IEU1 */
alignaddr %g1, %g0, %g0 /* GRU Group */
@@ -541,7 +541,7 @@ ENTRY(memcpy)
stb %g5, [%o0 - 1] /* Store */
2: andn %o2, 7, %g5 /* IEU0 Group */
and %o2, 7, %o2 /* IEU1 */
- fmovd %f0, %f2 /* FPU */
+ fsrc2 %f0, %f2 /* FPU */
alignaddr %o1, %g0, %g1 /* GRU Group */
ldd [%g1], %f4 /* Load Group */
1: ldd [%g1 + 0x8], %f6 /* Load Group */