author    Lynne <dev@lynne.ee>    2023-02-01 02:26:20 +0100
committer Lynne <dev@lynne.ee>    2023-02-01 04:23:55 +0100
commit    bbe95f7353a972f28a48be8da883549f02c59e4b (patch)
tree      08841c9da55e7f076f6046d1dbd70f49d74c0ec0 /libavutil
parent    fc9a3b584da3cf3fc1f00036be2eaf5dff903ccf (diff)
x86: replace explicit REP_RETs with RETs
From x86inc:
> On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
> a branch or a branch target. So switch to a 2-byte form of ret in that case.
> We can automatically detect "follows a branch", but not a branch target.
> (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
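For reference, the 2-byte form is simply an ordinary ret behind a rep
prefix; the prefix is architecturally ignored on a return, so the two are
functionally identical (encodings below are from the x86 ISA, not from
this patch):

    ret        ; c3     -- ordinary 1-byte near return
    rep ret    ; f3 c3  -- 2-byte form; the rep prefix does nothing on a
               ;          ret, but avoids the slow path on AMD CPUs <= K10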
x86inc can automatically determine whether to use a REP_RET rather than
a RET in most of these cases, so the impact is minimal. Additionally, a
few REP_RETs were used unnecessarily, despite the return being nowhere
near a branch.
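As an illustration, here is a hypothetical x86inc-style function (the
name zero_floats and its arguments are made up, not part of FFmpeg): with
a bare RET placed directly after a branch, x86inc expands it to "rep ret"
by itself whenever the build targets a pre-SSSE3 instruction set:

    %include "x86util.asm" ; x86inc/x86util macros, as in FFmpeg's x86 sources

    INIT_XMM sse
    ; Assumes len is a nonzero byte count, a multiple of mmsize, and that
    ; dst is mmsize-aligned.
    cglobal zero_floats, 2,2,1, dst, len
        xorps  m0, m0
        sub    lenq, mmsize
    .loop:
        mova   [dstq + lenq], m0
        sub    lenq, mmsize
        jge    .loop
        RET    ; directly follows jge, so x86inc's RET macro emits
               ; "rep ret" here on its own; an explicit REP_RET is redundant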
The only CPUs affected were AMD K10s, made between 2007 and 2011 (16 and
12 years ago, respectively).
In the future, everyone involved with x86inc should consider dropping
REP_RETs altogether.
Diffstat (limited to 'libavutil')
 libavutil/x86/float_dsp.asm | 18 +++++++++---------
 libavutil/x86/lls.asm       |  4 ++--
 2 files changed, 11 insertions, 11 deletions
diff --git a/libavutil/x86/float_dsp.asm b/libavutil/x86/float_dsp.asm
index ff608f5f5a..e84ba52566 100644
--- a/libavutil/x86/float_dsp.asm
+++ b/libavutil/x86/float_dsp.asm
@@ -48,7 +48,7 @@ ALIGN 16
     sub    lenq, 64
     jge    .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse
@@ -141,7 +141,7 @@ cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
 %endif ; mmsize
     sub    lenq, 64
     jge    .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse
@@ -178,7 +178,7 @@ cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
     mova   [dstq+lenq], m1
     sub    lenq, mmsize
     jge    .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse
@@ -233,7 +233,7 @@ cglobal vector_dmac_scalar, 4,4,5, dst, src, mul, len
     movaps [dstq+lenq+3*mmsize], m4
     sub    lenq, mmsize*4
     jge    .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse2
@@ -280,7 +280,7 @@ cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
     movaps [dstq+lenq+mmsize], m2
     sub    lenq, 2*mmsize
     jge    .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse2
@@ -323,7 +323,7 @@ cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
     sub    len1q, mmsize
     add    lenq,  mmsize
     jl     .loop
-    REP_RET
+    RET

 ;-----------------------------------------------------------------------------
 ; vector_fmul_add(float *dst, const float *src0, const float *src1,
@@ -352,7 +352,7 @@ ALIGN 16
     sub    lenq, 2*mmsize
     jge    .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse
@@ -401,7 +401,7 @@ ALIGN 16
     add    src1q, 2*mmsize
     sub    lenq,  2*mmsize
     jge    .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse
@@ -585,4 +585,4 @@ cglobal butterflies_float, 3,3,3, src0, src1, len
     mova   [src0q + lenq], m0
     add    lenq, mmsize
     jl     .loop
-    REP_RET
+    RET
diff --git a/libavutil/x86/lls.asm b/libavutil/x86/lls.asm
index d2526d1ff4..e8141e6c4f 100644
--- a/libavutil/x86/lls.asm
+++ b/libavutil/x86/lls.asm
@@ -123,7 +123,7 @@ cglobal update_lls, 2,5,8, ctx, var, i, j, covar2
     test   id, id
     jle    .loop2x1
 .ret:
-    REP_RET
+    RET

 %macro UPDATE_LLS 0
 cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
@@ -240,7 +240,7 @@ cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
     cmp    id, countd
     jle    .loop2x1
 .ret:
-    REP_RET
+    RET
 %endmacro ; UPDATE_LLS

 %if HAVE_AVX_EXTERNAL