From 727b38df054b26d7410c275930f72135061ab9e1 Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Wed, 23 May 2018 11:25:20 -0700
Subject: x86-64: Skip zero length in __mem[pcpy|move|set]_erms

This patch skips zero length in __mempcpy_erms, __memmove_erms and
__memset_erms.

Tested on x86-64.

	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
	(__mempcpy_erms): Skip zero length.
	(__memmove_erms): Likewise.
	* sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
	(__memset_erms): Likewise.
---
 sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S | 6 ++++++
 sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S  | 5 +++++
 2 files changed, 11 insertions(+)

(limited to 'sysdeps/x86_64/multiarch')

diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index 1404131bb7..e2ede45e9f 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -156,6 +156,9 @@ END (__mempcpy_chk_erms)
 /* Only used to measure performance of REP MOVSB.  */
 ENTRY (__mempcpy_erms)
 	movq	%rdi, %rax
+	/* Skip zero length.  */
+	testq	%rdx, %rdx
+	jz	2f
 	addq	%rdx, %rax
 	jmp	L(start_movsb)
 END (__mempcpy_erms)
@@ -167,6 +170,9 @@ END (__memmove_chk_erms)
 
 ENTRY (__memmove_erms)
 	movq	%rdi, %rax
+	/* Skip zero length.  */
+	testq	%rdx, %rdx
+	jz	2f
 L(start_movsb):
 	movq	%rdx, %rcx
 	cmpq	%rsi, %rdi
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index 0a827f3e47..dc9cb88b37 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -128,6 +128,11 @@ END (__memset_chk_erms)
 
 /* Only used to measure performance of REP STOSB.  */
 ENTRY (__memset_erms)
+	/* Skip zero length.  */
+	testq	%rdx, %rdx
+	jnz	L(stosb)
+	movq	%rdi, %rax
+	ret
 # else
 /* Provide a hidden symbol to debugger.  */
 	.hidden	MEMSET_SYMBOL (__memset, erms)
-- 
cgit v1.2.1
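
For context, the zero-length guard added by the patch can be restated as a
standalone routine.  This is a minimal sketch, not glibc code: the symbol name
copy_erms_sketch is hypothetical, and the body simply combines the added
testq/jz check with the REP MOVSB sequence that __memmove_erms already uses.
Under the SysV AMD64 calling convention, %rdi holds the destination, %rsi the
source and %rdx the length, and the destination pointer is returned in %rax
whether or not anything was copied.

	/* Hypothetical standalone sketch of the zero-length guard;
	   not part of the patch above.  */
	.text
	.globl	copy_erms_sketch
	.type	copy_erms_sketch, @function
copy_erms_sketch:
	movq	%rdi, %rax	/* Return the destination, as memcpy does.  */
	testq	%rdx, %rdx	/* Skip zero length.  */
	jz	1f
	movq	%rdx, %rcx	/* REP MOVSB takes its count in %rcx.  */
	rep movsb		/* Copy %rcx bytes from (%rsi) to (%rdi).  */
1:
	ret
	.size	copy_erms_sketch, .-copy_erms_sketch

Assembled with the GNU assembler, this could be declared from C as
void *copy_erms_sketch (void *dest, const void *src, size_t n); with n == 0 it
returns dest immediately instead of executing REP MOVSB with a zero count.
Note that the patched __mem*_erms functions are only used to measure REP
MOVSB/STOSB performance, per the comments visible in the diff.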