Diffstat (limited to 'libswresample/x86')
-rw-r--r--   libswresample/x86/Makefile          |   3
-rw-r--r--   libswresample/x86/audio_convert.asm | 465
-rw-r--r--   libswresample/x86/rematrix.asm      | 250
-rw-r--r--   libswresample/x86/resample_mmx.h    |  70
-rw-r--r--   libswresample/x86/swresample_x86.c  | 200
5 files changed, 988 insertions(+), 0 deletions(-)
diff --git a/libswresample/x86/Makefile b/libswresample/x86/Makefile
new file mode 100644
index 0000000000..e8feede6b6
--- /dev/null
+++ b/libswresample/x86/Makefile
@@ -0,0 +1,3 @@
+YASM-OBJS += x86/swresample_x86.o\
+             x86/audio_convert.o\
+             x86/rematrix.o\
diff --git a/libswresample/x86/audio_convert.asm b/libswresample/x86/audio_convert.asm
new file mode 100644
index 0000000000..b6e9e5d79d
--- /dev/null
+++ b/libswresample/x86/audio_convert.asm
@@ -0,0 +1,465 @@
+;******************************************************************************
+;* Copyright (c) 2012 Michael Niedermayer
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA 32
+flt2pm31: times 8 dd 4.6566129e-10
+flt2p31 : times 8 dd 2147483648.0
+flt2p15 : times 8 dd 32768.0
+
+word_unpack_shuf : db  0, 1, 4, 5, 8, 9,12,13, 2, 3, 6, 7,10,11,14,15
+
+SECTION .text
+
+
+;to, from, a/u, log2_outsize, log_intsize, const
+%macro PACK_2CH 5-7
+cglobal pack_2ch_%2_to_%1_%3, 3, 4, 6, dst, src, len, src2
+    mov src2q, [srcq+gprsize]
+    mov srcq , [srcq]
+    mov dstq , [dstq]
+%ifidn %3, a
+    test dstq, mmsize-1
+        jne pack_2ch_%2_to_%1_u_int %+ SUFFIX
+    test srcq, mmsize-1
+        jne pack_2ch_%2_to_%1_u_int %+ SUFFIX
+    test src2q, mmsize-1
+        jne pack_2ch_%2_to_%1_u_int %+ SUFFIX
+%else
+pack_2ch_%2_to_%1_u_int %+ SUFFIX
+%endif
+    lea srcq , [srcq  + (1<<%5)*lenq]
+    lea src2q, [src2q + (1<<%5)*lenq]
+    lea dstq , [dstq  + (2<<%4)*lenq]
+    neg lenq
+    %7 m0,m1,m2,m3,m4,m5
+.next:
+%if %4 >= %5
+    mov%3     m0, [         srcq +(1<<%5)*lenq]
+    mova      m1, m0
+    mov%3     m2, [         src2q+(1<<%5)*lenq]
+%if %5 == 1
+    punpcklwd m0, m2
+    punpckhwd m1, m2
+%else
+    punpckldq m0, m2
+    punpckhdq m1, m2
+%endif
+    %6 m0,m1,m2,m3,m4,m5
+%else
+    mov%3     m0, [         srcq +(1<<%5)*lenq]
+    mov%3     m1, [mmsize + srcq +(1<<%5)*lenq]
+    mov%3     m2, [         src2q+(1<<%5)*lenq]
+    mov%3     m3, [mmsize + src2q+(1<<%5)*lenq]
+    %6 m0,m1,m2,m3,m4,m5
+    mova      m2, m0
+    punpcklwd m0, m1
+    punpckhwd m2, m1
+    SWAP 1,2
+%endif
+    mov%3 [           dstq+(2<<%4)*lenq], m0
+    mov%3 [  mmsize + dstq+(2<<%4)*lenq], m1
+%if %4 > %5
+    mov%3 [2*mmsize + dstq+(2<<%4)*lenq], m2
+    mov%3 [3*mmsize + dstq+(2<<%4)*lenq], m3
+    add lenq, 4*mmsize/(2<<%4)
+%else
+    add lenq, 2*mmsize/(2<<%4)
+%endif
+        jl .next
+    REP_RET
+%endmacro
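The "a" (aligned) variants above use a run-time dispatch trick: they test each pointer against mmsize-1 and jump into the unaligned body when any pointer is misaligned, while the "u" variants simply fall through into that shared label. A minimal C sketch of the same idea (function and parameter names hypothetical, not part of the patch):

    /* Sketch of the aligned/unaligned dispatch done by the "a" entry points:
     * check every pointer against the vector size and fall back to the
     * unaligned loop when any of them is misaligned. */
    #include <stddef.h>
    #include <stdint.h>

    typedef void (*conv_fn)(uint8_t *dst, const uint8_t *src, int len);

    static conv_fn select_variant(const uint8_t *dst, const uint8_t *src,
                                  size_t mmsize, conv_fn aligned, conv_fn unaligned)
    {
        if (((uintptr_t)dst | (uintptr_t)src) & (mmsize - 1))
            return unaligned;   /* same body the "jne ..._u_int" branches reach */
        return aligned;
    }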
+
+%macro UNPACK_2CH 5-7
+cglobal unpack_2ch_%2_to_%1_%3, 3, 4, 7, dst, src, len, dst2
+    mov dst2q, [dstq+gprsize]
+    mov srcq , [srcq]
+    mov dstq , [dstq]
+%ifidn %3, a
+    test dstq, mmsize-1
+        jne unpack_2ch_%2_to_%1_u_int %+ SUFFIX
+    test srcq, mmsize-1
+        jne unpack_2ch_%2_to_%1_u_int %+ SUFFIX
+    test dst2q, mmsize-1
+        jne unpack_2ch_%2_to_%1_u_int %+ SUFFIX
+%else
+unpack_2ch_%2_to_%1_u_int %+ SUFFIX
+%endif
+    lea srcq , [srcq  + (2<<%5)*lenq]
+    lea dstq , [dstq  + (1<<%4)*lenq]
+    lea dst2q, [dst2q + (1<<%4)*lenq]
+    neg lenq
+    %7 m0,m1,m2,m3,m4,m5
+    mova      m6, [word_unpack_shuf]
+.next:
+    mov%3     m0, [         srcq +(2<<%5)*lenq]
+    mov%3     m2, [mmsize + srcq +(2<<%5)*lenq]
+%if %5 == 1
+%ifidn SUFFIX, _ssse3
+    pshufb    m0, m6
+    mova      m1, m0
+    pshufb    m2, m6
+    punpcklqdq m0,m2
+    punpckhqdq m1,m2
+%else
+    mova      m1, m0
+    punpcklwd m0,m2
+    punpckhwd m1,m2
+
+    mova      m2, m0
+    punpcklwd m0,m1
+    punpckhwd m2,m1
+
+    mova      m1, m0
+    punpcklwd m0,m2
+    punpckhwd m1,m2
+%endif
+%else
+    mova      m1, m0
+    shufps    m0, m2, 10001000b
+    shufps    m1, m2, 11011101b
+%endif
+%if %4 < %5
+    mov%3     m2, [2*mmsize + srcq +(2<<%5)*lenq]
+    mova      m3, m2
+    mov%3     m4, [3*mmsize + srcq +(2<<%5)*lenq]
+    shufps    m2, m4, 10001000b
+    shufps    m3, m4, 11011101b
+    SWAP 1,2
+%endif
+    %6 m0,m1,m2,m3,m4,m5
+    mov%3 [         dstq +(1<<%4)*lenq], m0
+%if %4 > %5
+    mov%3 [         dst2q+(1<<%4)*lenq], m2
+    mov%3 [mmsize + dstq +(1<<%4)*lenq], m1
+    mov%3 [mmsize + dst2q+(1<<%4)*lenq], m3
+    add lenq, 2*mmsize/(1<<%4)
+%else
+    mov%3 [         dst2q+(1<<%4)*lenq], m1
+    add lenq, mmsize/(1<<%4)
+%endif
+        jl .next
+    REP_RET
+%endmacro
+
+%macro CONV 5-7
+cglobal %2_to_%1_%3, 3, 3, 6, dst, src, len
+    mov srcq , [srcq]
+    mov dstq , [dstq]
+%ifidn %3, a
+    test dstq, mmsize-1
+        jne %2_to_%1_u_int %+ SUFFIX
+    test srcq, mmsize-1
+        jne %2_to_%1_u_int %+ SUFFIX
+%else
+%2_to_%1_u_int %+ SUFFIX
+%endif
+    lea srcq , [srcq  + (1<<%5)*lenq]
+    lea dstq , [dstq  + (1<<%4)*lenq]
+    neg lenq
+    %7 m0,m1,m2,m3,m4,m5
+.next:
+    mov%3     m0, [         srcq +(1<<%5)*lenq]
+    mov%3     m1, [mmsize + srcq +(1<<%5)*lenq]
+%if %4 < %5
+    mov%3     m2, [2*mmsize + srcq +(1<<%5)*lenq]
+    mov%3     m3, [3*mmsize + srcq +(1<<%5)*lenq]
+%endif
+    %6 m0,m1,m2,m3,m4,m5
+    mov%3 [         dstq+(1<<%4)*lenq], m0
+    mov%3 [mmsize + dstq+(1<<%4)*lenq], m1
+%if %4 > %5
+    mov%3 [2*mmsize + dstq+(1<<%4)*lenq], m2
+    mov%3 [3*mmsize + dstq+(1<<%4)*lenq], m3
+    add lenq, 4*mmsize/(1<<%4)
+%else
+    add lenq, 2*mmsize/(1<<%4)
+%endif
+        jl .next
+%if mmsize == 8
+    emms
+    RET
+%else
+    REP_RET
+%endif
+%endmacro
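CONV above is a plain per-sample conversion loop; PACK_2CH additionally interleaves two planar channels with punpcklwd/punpckhwd (punpckldq/punpckhdq for 32-bit samples), and UNPACK_2CH reverses that. A scalar reference for what the s16 pack case computes (a sketch, not FFmpeg's API):

    /* Scalar reference for pack_2ch_int16_to_int16: interleave two planar
     * int16 channels into one packed stream. */
    #include <stdint.h>

    static void pack_2ch_s16_ref(int16_t *dst, const int16_t *left,
                                 const int16_t *right, int len)
    {
        for (int i = 0; i < len; i++) {
            dst[2 * i]     = left[i];   /* punpcklwd/punpckhwd perform eight */
            dst[2 * i + 1] = right[i];  /* of these per 128-bit register pair */
        }
    }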
+
+%macro PACK_6CH 5-7
+cglobal pack_6ch_%2_to_%1_%3, 2,8,7, dst, src, src1, src2, src3, src4, src5, len
+%if ARCH_X86_64
+    mov lend, r2d
+%else
+    %define lend dword r2m
+%endif
+    mov src1q, [srcq+1*gprsize]
+    mov src2q, [srcq+2*gprsize]
+    mov src3q, [srcq+3*gprsize]
+    mov src4q, [srcq+4*gprsize]
+    mov src5q, [srcq+5*gprsize]
+    mov srcq,  [srcq]
+    mov dstq,  [dstq]
+%ifidn %3, a
+    test dstq, mmsize-1
+        jne pack_6ch_%2_to_%1_u_int %+ SUFFIX
+    test srcq, mmsize-1
+        jne pack_6ch_%2_to_%1_u_int %+ SUFFIX
+    test src2q, mmsize-1
+        jne pack_6ch_%2_to_%1_u_int %+ SUFFIX
+    test src3q, mmsize-1
+        jne pack_6ch_%2_to_%1_u_int %+ SUFFIX
+    test src4q, mmsize-1
+        jne pack_6ch_%2_to_%1_u_int %+ SUFFIX
+    test src5q, mmsize-1
+        jne pack_6ch_%2_to_%1_u_int %+ SUFFIX
+%else
+pack_6ch_%2_to_%1_u_int %+ SUFFIX
+%endif
+    sub src1q, srcq
+    sub src2q, srcq
+    sub src3q, srcq
+    sub src4q, srcq
+    sub src5q, srcq
+.loop:
+    mov%3     m0, [srcq      ]
+    mov%3     m1, [srcq+src1q]
+    mov%3     m2, [srcq+src2q]
+    mov%3     m3, [srcq+src3q]
+    mov%3     m4, [srcq+src4q]
+    mov%3     m5, [srcq+src5q]
+    %7 x,x,x,x,m7,x
+%if cpuflag(sse4)
+    SBUTTERFLYPS 0, 1, 6
+    SBUTTERFLYPS 2, 3, 6
+    SBUTTERFLYPS 4, 5, 6
+
+    blendps   m6, m4, m0, 1100b
+    movlhps   m0, m2
+    movhlps   m4, m2
+    blendps   m2, m5, m1, 1100b
+    movlhps   m1, m3
+    movhlps   m5, m3
+
+    %6 m0,m6,x,x,m7,m3
+    %6 m4,m1,x,x,m7,m3
+    %6 m2,m5,x,x,m7,m3
+
+    mov %+ %3 %+ ps [dstq   ], m0
+    mov %+ %3 %+ ps [dstq+16], m6
+    mov %+ %3 %+ ps [dstq+32], m4
+    mov %+ %3 %+ ps [dstq+48], m1
+    mov %+ %3 %+ ps [dstq+64], m2
+    mov %+ %3 %+ ps [dstq+80], m5
+%else ; mmx
+    SBUTTERFLY dq, 0, 1, 6
+    SBUTTERFLY dq, 2, 3, 6
+    SBUTTERFLY dq, 4, 5, 6
+
+    movq [dstq   ], m0
+    movq [dstq+ 8], m2
+    movq [dstq+16], m4
+    movq [dstq+24], m1
+    movq [dstq+32], m3
+    movq [dstq+40], m5
+%endif
+    add srcq, mmsize
+    add dstq, mmsize*6
+    sub lend, mmsize/4
+        jg .loop
+%if mmsize == 8
+    emms
+    RET
+%else
+    REP_RET
+%endif
+%endmacro
+
+%macro INT16_TO_INT32_N 6
+    pxor      m2, m2
+    pxor      m3, m3
+    punpcklwd m2, m1
+    punpckhwd m3, m1
+    SWAP 4,0
+    pxor      m0, m0
+    pxor      m1, m1
+    punpcklwd m0, m4
+    punpckhwd m1, m4
+%endmacro
+
+%macro INT32_TO_INT16_N 6
+    psrad     m0, 16
+    psrad     m1, 16
+    psrad     m2, 16
+    psrad     m3, 16
+    packssdw  m0, m1
+    packssdw  m2, m3
+    SWAP 1,2
+%endmacro
+
+%macro INT32_TO_FLOAT_INIT 6
+    mova      %5, [flt2pm31]
+%endmacro
+%macro INT32_TO_FLOAT_N 6
+    cvtdq2ps  %1, %1
+    cvtdq2ps  %2, %2
+    mulps     %1, %1, %5
+    mulps     %2, %2, %5
+%endmacro
+
+%macro FLOAT_TO_INT32_INIT 6
+    mova      %5, [flt2p31]
+%endmacro
+%macro FLOAT_TO_INT32_N 6
+    mulps     %1, %5
+    mulps     %2, %5
+    cvtps2dq  %6, %1
+    cmpnltps  %1, %5
+    paddd     %1, %6
+    cvtps2dq  %6, %2
+    cmpnltps  %2, %5
+    paddd     %2, %6
+%endmacro
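FLOAT_TO_INT32_N hides a saturation fix: cvtps2dq returns 0x80000000 (INT32_MIN) for positive out-of-range inputs, and cmpnltps against 2^31 produces an all-ones mask (numerically -1) in exactly those lanes, so the paddd turns INT32_MIN into INT32_MAX. A scalar model of one lane, under that reading of the instructions:

    /* Scalar model of FLOAT_TO_INT32_N's overflow handling. */
    #include <math.h>
    #include <stdint.h>

    static int32_t float_to_int32_ref(float x)
    {
        float scaled = x * 2147483648.0f;            /* mulps by flt2p31 */
        /* cvtps2dq: NaN and out-of-range inputs return 0x80000000 */
        int32_t cvt = (scaled >= 2147483648.0f || scaled < -2147483648.0f ||
                       scaled != scaled)
                      ? INT32_MIN : (int32_t)lrintf(scaled);
        /* cmpnltps: all-ones (-1) exactly where scaled >= 2^31 */
        uint32_t fix = scaled >= 2147483648.0f ? 0xFFFFFFFFu : 0u;
        return (int32_t)((uint32_t)cvt + fix);       /* INT32_MIN + -1 = INT32_MAX */
    }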
+
+%macro INT16_TO_FLOAT_INIT 6
+    mova      m5, [flt2pm31]
+%endmacro
+%macro INT16_TO_FLOAT_N 6
+    INT16_TO_INT32_N %1,%2,%3,%4,%5,%6
+    cvtdq2ps  m0, m0
+    cvtdq2ps  m1, m1
+    cvtdq2ps  m2, m2
+    cvtdq2ps  m3, m3
+    mulps     m0, m0, m5
+    mulps     m1, m1, m5
+    mulps     m2, m2, m5
+    mulps     m3, m3, m5
+%endmacro
+
+%macro FLOAT_TO_INT16_INIT 6
+    mova      m5, [flt2p15]
+%endmacro
+%macro FLOAT_TO_INT16_N 6
+    mulps     m0, m5
+    mulps     m1, m5
+    mulps     m2, m5
+    mulps     m3, m5
+    cvtps2dq  m0, m0
+    cvtps2dq  m1, m1
+    packssdw  m0, m1
+    cvtps2dq  m1, m2
+    cvtps2dq  m3, m3
+    packssdw  m1, m3
+%endmacro
+
+%macro NOP_N 0-6
+%endmacro
+
+INIT_MMX mmx
+CONV int32, int16, u, 2, 1, INT16_TO_INT32_N, NOP_N
+CONV int32, int16, a, 2, 1, INT16_TO_INT32_N, NOP_N
+CONV int16, int32, u, 1, 2, INT32_TO_INT16_N, NOP_N
+CONV int16, int32, a, 1, 2, INT32_TO_INT16_N, NOP_N
+
+PACK_6CH float, float, u, 2, 2, NOP_N, NOP_N
+PACK_6CH float, float, a, 2, 2, NOP_N, NOP_N
+
+INIT_XMM sse2
+CONV int32, int16, u, 2, 1, INT16_TO_INT32_N, NOP_N
+CONV int32, int16, a, 2, 1, INT16_TO_INT32_N, NOP_N
+CONV int16, int32, u, 1, 2, INT32_TO_INT16_N, NOP_N
+CONV int16, int32, a, 1, 2, INT32_TO_INT16_N, NOP_N
+
+PACK_2CH int16, int16, u, 1, 1, NOP_N, NOP_N
+PACK_2CH int16, int16, a, 1, 1, NOP_N, NOP_N
+PACK_2CH int32, int32, u, 2, 2, NOP_N, NOP_N
+PACK_2CH int32, int32, a, 2, 2, NOP_N, NOP_N
+PACK_2CH int32, int16, u, 2, 1, INT16_TO_INT32_N, NOP_N
+PACK_2CH int32, int16, a, 2, 1, INT16_TO_INT32_N, NOP_N
+PACK_2CH int16, int32, u, 1, 2, INT32_TO_INT16_N, NOP_N
+PACK_2CH int16, int32, a, 1, 2, INT32_TO_INT16_N, NOP_N
+
+UNPACK_2CH int16, int16, u, 1, 1, NOP_N, NOP_N
+UNPACK_2CH int16, int16, a, 1, 1, NOP_N, NOP_N
+UNPACK_2CH int32, int32, u, 2, 2, NOP_N, NOP_N
+UNPACK_2CH int32, int32, a, 2, 2, NOP_N, NOP_N
+UNPACK_2CH int32, int16, u, 2, 1, INT16_TO_INT32_N, NOP_N
+UNPACK_2CH int32, int16, a, 2, 1, INT16_TO_INT32_N, NOP_N
+UNPACK_2CH int16, int32, u, 1, 2, INT32_TO_INT16_N, NOP_N
+UNPACK_2CH int16, int32, a, 1, 2, INT32_TO_INT16_N, NOP_N
+
+CONV float, int32, u, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+CONV float, int32, a, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+CONV int32, float, u, 2, 2, FLOAT_TO_INT32_N, FLOAT_TO_INT32_INIT
+CONV int32, float, a, 2, 2, FLOAT_TO_INT32_N, FLOAT_TO_INT32_INIT
+CONV float, int16, u, 2, 1, INT16_TO_FLOAT_N, INT16_TO_FLOAT_INIT
+CONV float, int16, a, 2, 1, INT16_TO_FLOAT_N, INT16_TO_FLOAT_INIT
+CONV int16, float, u, 1, 2, FLOAT_TO_INT16_N, FLOAT_TO_INT16_INIT
+CONV int16, float, a, 1, 2, FLOAT_TO_INT16_N, FLOAT_TO_INT16_INIT
+
+PACK_2CH float, int32, u, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+PACK_2CH float, int32, a, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+PACK_2CH int32, float, u, 2, 2, FLOAT_TO_INT32_N, FLOAT_TO_INT32_INIT
+PACK_2CH int32, float, a, 2, 2, FLOAT_TO_INT32_N, FLOAT_TO_INT32_INIT
+PACK_2CH float, int16, u, 2, 1, INT16_TO_FLOAT_N, INT16_TO_FLOAT_INIT
+PACK_2CH float, int16, a, 2, 1, INT16_TO_FLOAT_N, INT16_TO_FLOAT_INIT
+PACK_2CH int16, float, u, 1, 2, FLOAT_TO_INT16_N, FLOAT_TO_INT16_INIT
+PACK_2CH int16, float, a, 1, 2, FLOAT_TO_INT16_N, FLOAT_TO_INT16_INIT
+
+UNPACK_2CH float, int32, u, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+UNPACK_2CH float, int32, a, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+UNPACK_2CH int32, float, u, 2, 2, FLOAT_TO_INT32_N, FLOAT_TO_INT32_INIT
+UNPACK_2CH int32, float, a, 2, 2, FLOAT_TO_INT32_N, FLOAT_TO_INT32_INIT
+UNPACK_2CH float, int16, u, 2, 1, INT16_TO_FLOAT_N, INT16_TO_FLOAT_INIT
+UNPACK_2CH float, int16, a, 2, 1, INT16_TO_FLOAT_N, INT16_TO_FLOAT_INIT
+UNPACK_2CH int16, float, u, 1, 2, FLOAT_TO_INT16_N, FLOAT_TO_INT16_INIT
+UNPACK_2CH int16, float, a, 1, 2, FLOAT_TO_INT16_N, FLOAT_TO_INT16_INIT
+
+
+INIT_XMM ssse3
+UNPACK_2CH int16, int16, u, 1, 1, NOP_N, NOP_N
+UNPACK_2CH int16, int16, a, 1, 1, NOP_N, NOP_N
+UNPACK_2CH int32, int16, u, 2, 1, INT16_TO_INT32_N, NOP_N
+UNPACK_2CH int32, int16, a, 2, 1, INT16_TO_INT32_N, NOP_N
+UNPACK_2CH float, int16, u, 2, 1, INT16_TO_FLOAT_N, INT16_TO_FLOAT_INIT
+UNPACK_2CH float, int16, a, 2, 1, INT16_TO_FLOAT_N, INT16_TO_FLOAT_INIT
+
+INIT_XMM sse4
+PACK_6CH float, float, u, 2, 2, NOP_N, NOP_N
+PACK_6CH float, float, a, 2, 2, NOP_N, NOP_N
+
+PACK_6CH float, int32, u, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+PACK_6CH float, int32, a, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+PACK_6CH int32, float, u, 2, 2, FLOAT_TO_INT32_N, FLOAT_TO_INT32_INIT
+PACK_6CH int32, float, a, 2, 2, FLOAT_TO_INT32_N, FLOAT_TO_INT32_INIT
+
+%if HAVE_AVX_EXTERNAL
+INIT_XMM avx
+PACK_6CH float, float, u, 2, 2, NOP_N, NOP_N
+PACK_6CH float, float, a, 2, 2, NOP_N, NOP_N
+
+PACK_6CH float, int32, u, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+PACK_6CH float, int32, a, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+PACK_6CH int32, float, u, 2, 2, FLOAT_TO_INT32_N, FLOAT_TO_INT32_INIT
+PACK_6CH int32, float, a, 2, 2, FLOAT_TO_INT32_N, FLOAT_TO_INT32_INIT
+
+INIT_YMM avx
+CONV float, int32, u, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+CONV float, int32, a, 2, 2, INT32_TO_FLOAT_N, INT32_TO_FLOAT_INIT
+%endif
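Per sample, the int16/int32 kernels instantiated above amount to a 16-bit shift in each direction: punpcklwd against a zeroed register places the int16 in the high half of a dword, and psrad 16 plus packssdw takes it back out. A one-line reference of each:

    /* What INT16_TO_INT32_N / INT32_TO_INT16_N compute per sample. */
    #include <stdint.h>

    static int32_t int16_to_int32_ref(int16_t x) { return (int32_t)x << 16; }
    static int16_t int32_to_int16_ref(int32_t x) { return (int16_t)(x >> 16); }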
diff --git a/libswresample/x86/rematrix.asm b/libswresample/x86/rematrix.asm
new file mode 100644
index 0000000000..f0ae9599a2
--- /dev/null
+++ b/libswresample/x86/rematrix.asm
@@ -0,0 +1,250 @@
+;******************************************************************************
+;* Copyright (c) 2012 Michael Niedermayer
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+
+SECTION_RODATA 32
+dw1: times 8  dd 1
+w1 : times 16 dw 1
+
+SECTION .text
+
+%macro MIX2_FLT 1
+cglobal mix_2_1_%1_float, 7, 7, 6, out, in1, in2, coeffp, index1, index2, len
+%ifidn %1, a
+    test in1q, mmsize-1
+        jne mix_2_1_float_u_int %+ SUFFIX
+    test in2q, mmsize-1
+        jne mix_2_1_float_u_int %+ SUFFIX
+    test outq, mmsize-1
+        jne mix_2_1_float_u_int %+ SUFFIX
+%else
+mix_2_1_float_u_int %+ SUFFIX
+%endif
+    VBROADCASTSS m4, [coeffpq + 4*index1q]
+    VBROADCASTSS m5, [coeffpq + 4*index2q]
+    shl lend, 2
+    add in1q, lenq
+    add in2q, lenq
+    add outq, lenq
+    neg lenq
+.next:
+%ifidn %1, a
+    mulps m0, m4, [in1q + lenq         ]
+    mulps m1, m5, [in2q + lenq         ]
+    mulps m2, m4, [in1q + lenq + mmsize]
+    mulps m3, m5, [in2q + lenq + mmsize]
+%else
+    movu  m0, [in1q + lenq         ]
+    movu  m1, [in2q + lenq         ]
+    movu  m2, [in1q + lenq + mmsize]
+    movu  m3, [in2q + lenq + mmsize]
+    mulps m0, m0, m4
+    mulps m1, m1, m5
+    mulps m2, m2, m4
+    mulps m3, m3, m5
+%endif
+    addps m0, m0, m1
+    addps m2, m2, m3
+    mov%1 [outq + lenq         ], m0
+    mov%1 [outq + lenq + mmsize], m2
+    add lenq, mmsize*2
+        jl .next
+    REP_RET
+%endmacro
+
+%macro MIX1_FLT 1
+cglobal mix_1_1_%1_float, 5, 5, 3, out, in, coeffp, index, len
+%ifidn %1, a
+    test inq, mmsize-1
+        jne mix_1_1_float_u_int %+ SUFFIX
+    test outq, mmsize-1
+        jne mix_1_1_float_u_int %+ SUFFIX
+%else
+mix_1_1_float_u_int %+ SUFFIX
+%endif
+    VBROADCASTSS m2, [coeffpq + 4*indexq]
+    shl lenq, 2
+    add inq , lenq
+    add outq, lenq
+    neg lenq
+.next:
+%ifidn %1, a
+    mulps m0, m2, [inq + lenq         ]
+    mulps m1, m2, [inq + lenq + mmsize]
+%else
+    movu  m0, [inq + lenq         ]
+    movu  m1, [inq + lenq + mmsize]
+    mulps m0, m0, m2
+    mulps m1, m1, m2
+%endif
+    mov%1 [outq + lenq         ], m0
+    mov%1 [outq + lenq + mmsize], m1
+    add lenq, mmsize*2
+        jl .next
+    REP_RET
+%endmacro
+
+%macro MIX1_INT16 1
+cglobal mix_1_1_%1_int16, 5, 5, 6, out, in, coeffp, index, len
+%ifidn %1, a
+    test inq, mmsize-1
+        jne mix_1_1_int16_u_int %+ SUFFIX
+    test outq, mmsize-1
+        jne mix_1_1_int16_u_int %+ SUFFIX
+%else
+mix_1_1_int16_u_int %+ SUFFIX
+%endif
+    movd   m4, [coeffpq + 4*indexq]
+    SPLATW m5, m4
+    psllq  m4, 32
+    psrlq  m4, 48
+    mova   m0, [w1]
+    psllw  m0, m4
+    psrlw  m0, 1
+    punpcklwd m5, m0
+    add lenq, lenq
+    add inq , lenq
+    add outq, lenq
+    neg lenq
+.next:
+    mov%1     m0, [inq + lenq         ]
+    mov%1     m2, [inq + lenq + mmsize]
+    mova      m1, m0
+    mova      m3, m2
+    punpcklwd m0, [w1]
+    punpckhwd m1, [w1]
+    punpcklwd m2, [w1]
+    punpckhwd m3, [w1]
+    pmaddwd   m0, m5
+    pmaddwd   m1, m5
+    pmaddwd   m2, m5
+    pmaddwd   m3, m5
+    psrad     m0, m4
+    psrad     m1, m4
+    psrad     m2, m4
+    psrad     m3, m4
+    packssdw  m0, m1
+    packssdw  m2, m3
+    mov%1 [outq + lenq         ], m0
+    mov%1 [outq + lenq + mmsize], m2
+    add lenq, mmsize*2
+        jl .next
+%if mmsize == 8
+    emms
+    RET
+%else
+    REP_RET
+%endif
+%endmacro
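MIX1_INT16 pairs each sample word with the constant 1 and each Q15 coefficient word with a rounding constant, so one pmaddwd computes sample*coeff + round before the psrad by the per-matrix shift. A scalar model of one output sample (shift is the "15 - sh" word that swri_rematrix_init_x86 stores next to each coefficient, assumed >= 1):

    /* Scalar model of MIX1_INT16: (sample*coeff + round) >> shift, saturated. */
    #include <stdint.h>

    static int16_t mix_1_1_s16_ref(int16_t sample, int16_t qcoef, int shift)
    {
        int32_t acc = sample * qcoef + (1 << (shift - 1)); /* pmaddwd on the  */
                                                           /* (sample,1) and  */
                                                           /* (coef,round) pairs */
        acc >>= shift;                                     /* psrad           */
        if (acc >  32767) acc =  32767;                    /* packssdw        */
        if (acc < -32768) acc = -32768;
        return (int16_t)acc;
    }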
+
+%macro MIX2_INT16 1
+cglobal mix_2_1_%1_int16, 7, 7, 8, out, in1, in2, coeffp, index1, index2, len
+%ifidn %1, a
+    test in1q, mmsize-1
+        jne mix_2_1_int16_u_int %+ SUFFIX
+    test in2q, mmsize-1
+        jne mix_2_1_int16_u_int %+ SUFFIX
+    test outq, mmsize-1
+        jne mix_2_1_int16_u_int %+ SUFFIX
+%else
+mix_2_1_int16_u_int %+ SUFFIX
+%endif
+    movd   m4, [coeffpq + 4*index1q]
+    movd   m6, [coeffpq + 4*index2q]
+    SPLATW m5, m4
+    SPLATW m6, m6
+    psllq  m4, 32
+    psrlq  m4, 48
+    mova   m7, [dw1]
+    pslld  m7, m4
+    psrld  m7, 1
+    punpcklwd m5, m6
+    add lend, lend
+    add in1q, lenq
+    add in2q, lenq
+    add outq, lenq
+    neg lenq
+.next:
+    mov%1     m0, [in1q + lenq         ]
+    mov%1     m2, [in2q + lenq         ]
+    mova      m1, m0
+    punpcklwd m0, m2
+    punpckhwd m1, m2
+
+    mov%1     m2, [in1q + lenq + mmsize]
+    mov%1     m6, [in2q + lenq + mmsize]
+    mova      m3, m2
+    punpcklwd m2, m6
+    punpckhwd m3, m6
+
+    pmaddwd   m0, m5
+    pmaddwd   m1, m5
+    pmaddwd   m2, m5
+    pmaddwd   m3, m5
+    paddd     m0, m7
+    paddd     m1, m7
+    paddd     m2, m7
+    paddd     m3, m7
+    psrad     m0, m4
+    psrad     m1, m4
+    psrad     m2, m4
+    psrad     m3, m4
+    packssdw  m0, m1
+    packssdw  m2, m3
+    mov%1 [outq + lenq         ], m0
+    mov%1 [outq + lenq + mmsize], m2
+    add lenq, mmsize*2
+        jl .next
+%if mmsize == 8
+    emms
+    RET
+%else
+    REP_RET
+%endif
+%endmacro
+
+
+INIT_MMX mmx
+MIX1_INT16 u
+MIX1_INT16 a
+MIX2_INT16 u
+MIX2_INT16 a
+
+INIT_XMM sse
+MIX2_FLT u
+MIX2_FLT a
+MIX1_FLT u
+MIX1_FLT a
+
+INIT_XMM sse2
+MIX1_INT16 u
+MIX1_INT16 a
+MIX2_INT16 u
+MIX2_INT16 a
+
+%if HAVE_AVX_EXTERNAL
+INIT_YMM avx
+MIX2_FLT u
+MIX2_FLT a
+MIX1_FLT u
+MIX1_FLT a
+%endif
diff --git a/libswresample/x86/resample_mmx.h b/libswresample/x86/resample_mmx.h
new file mode 100644
index 0000000000..d96fd5a9d2
--- /dev/null
+++ b/libswresample/x86/resample_mmx.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/x86/asm.h"
+#include "libavutil/cpu.h"
+#include "libswresample/swresample_internal.h"
+
+int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
+int swri_resample_int16_ssse3(struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
+
+DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2] = { 0x0000000000004000ULL, 0x0000000000000000ULL};
+
+#define COMMON_CORE_INT16_MMX2 \
+    x86_reg len= -2*c->filter_length;\
+__asm__ volatile(\
+    "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t"\
+    "1:                         \n\t"\
+    "movq    (%1, %0), %%mm1    \n\t"\
+    "pmaddwd (%2, %0), %%mm1    \n\t"\
+    "paddd  %%mm1, %%mm0        \n\t"\
+    "add $8, %0                 \n\t"\
+    " js 1b                     \n\t"\
+    "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
+    "paddd %%mm1, %%mm0         \n\t"\
+    "psrad $15, %%mm0           \n\t"\
+    "packssdw %%mm0, %%mm0      \n\t"\
+    "movd %%mm0, (%3)           \n\t"\
+    : "+r" (len)\
+    : "r" (((uint8_t*)(src+sample_index))-len),\
+      "r" (((uint8_t*)filter)-len),\
+      "r" (dst+dst_index)\
+);
+
+#define COMMON_CORE_INT16_SSSE3 \
+    x86_reg len= -2*c->filter_length;\
+__asm__ volatile(\
+    "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
+    "1:                           \n\t"\
+    "movdqu  (%1, %0), %%xmm1     \n\t"\
+    "pmaddwd (%2, %0), %%xmm1     \n\t"\
+    "paddd  %%xmm1, %%xmm0        \n\t"\
+    "add $16, %0                  \n\t"\
+    " js 1b                       \n\t"\
+    "phaddd %%xmm0, %%xmm0        \n\t"\
+    "phaddd %%xmm0, %%xmm0        \n\t"\
+    "psrad $15, %%xmm0            \n\t"\
+    "packssdw %%xmm0, %%xmm0      \n\t"\
+    "movd %%xmm0, (%3)            \n\t"\
+    : "+r" (len)\
+    : "r" (((uint8_t*)(src+sample_index))-len),\
+      "r" (((uint8_t*)filter)-len),\
+      "r" (dst+dst_index)\
+);
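Both COMMON_CORE_INT16_* loops compute the same Q15 FIR dot product, differing only in register width and horizontal-sum idiom (pshufw vs. phaddd). A scalar reference for one output sample (sample_index and dst_index are supplied by the resample loop that includes this header):

    /* Scalar reference for the inline-asm cores: Q15 dot product of the
     * filter against the input window, rounded and saturated to int16. */
    #include <stdint.h>

    static int16_t fir_dot_q15_ref(const int16_t *src, const int16_t *filter,
                                   int filter_length)
    {
        int32_t acc = 0x4000;                /* ff_resample_int16_rounder */
        for (int i = 0; i < filter_length; i++)
            acc += src[i] * filter[i];       /* pmaddwd accumulation */
        acc >>= 15;                          /* psrad $15 */
        if (acc >  32767) acc =  32767;      /* packssdw saturation */
        if (acc < -32768) acc = -32768;
        return (int16_t)acc;
    }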
diff --git a/libswresample/x86/swresample_x86.c b/libswresample/x86/swresample_x86.c
new file mode 100644
index 0000000000..7483ba0bed
--- /dev/null
+++ b/libswresample/x86/swresample_x86.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2012 Michael Niedermayer (michaelni@gmx.at)
+ *
+ * This file is part of libswresample
+ *
+ * libswresample is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * libswresample is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with libswresample; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libswresample/swresample_internal.h"
+#include "libswresample/audioconvert.h"
+
+#define PROTO(pre, in, out, cap) void ff ## pre ## in## _to_ ##out## _a_ ##cap(uint8_t **dst, const uint8_t **src, int len);
+#define PROTO2(pre, out, cap) PROTO(pre, int16, out, cap) PROTO(pre, int32, out, cap) PROTO(pre, float, out, cap)
+#define PROTO3(pre, cap) PROTO2(pre, int16, cap) PROTO2(pre, int32, cap) PROTO2(pre, float, cap)
+#define PROTO4(pre) PROTO3(pre, mmx) PROTO3(pre, sse) PROTO3(pre, sse2) PROTO3(pre, ssse3) PROTO3(pre, sse4) PROTO3(pre, avx)
+PROTO4(_)
+PROTO4(_pack_2ch_)
+PROTO4(_pack_6ch_)
+PROTO4(_unpack_2ch_)
+
+av_cold void swri_audio_convert_init_x86(struct AudioConvert *ac,
+                                 enum AVSampleFormat out_fmt,
+                                 enum AVSampleFormat in_fmt,
+                                 int channels){
+    int mm_flags = av_get_cpu_flags();
+
+    ac->simd_f= NULL;
+
+//FIXME add memcpy case
+
+#define MULTI_CAPS_FUNC(flag, cap) \
+    if (mm_flags & flag) {\
+        if( out_fmt == AV_SAMPLE_FMT_S32 && in_fmt == AV_SAMPLE_FMT_S16 || out_fmt == AV_SAMPLE_FMT_S32P && in_fmt == AV_SAMPLE_FMT_S16P)\
+            ac->simd_f = ff_int16_to_int32_a_ ## cap;\
+        if( out_fmt == AV_SAMPLE_FMT_S16 && in_fmt == AV_SAMPLE_FMT_S32 || out_fmt == AV_SAMPLE_FMT_S16P && in_fmt == AV_SAMPLE_FMT_S32P)\
+            ac->simd_f = ff_int32_to_int16_a_ ## cap;\
+    }
+
+MULTI_CAPS_FUNC(AV_CPU_FLAG_MMX, mmx)
+MULTI_CAPS_FUNC(AV_CPU_FLAG_SSE2, sse2)
+
+    if(mm_flags & AV_CPU_FLAG_MMX) {
+        if(channels == 6) {
+            if( out_fmt == AV_SAMPLE_FMT_FLT && in_fmt == AV_SAMPLE_FMT_FLTP || out_fmt == AV_SAMPLE_FMT_S32 && in_fmt == AV_SAMPLE_FMT_S32P)
+                ac->simd_f = ff_pack_6ch_float_to_float_a_mmx;
+        }
+    }
+
+    if(mm_flags & AV_CPU_FLAG_SSE2) {
+        if( out_fmt == AV_SAMPLE_FMT_FLT && in_fmt == AV_SAMPLE_FMT_S32 || out_fmt == AV_SAMPLE_FMT_FLTP && in_fmt == AV_SAMPLE_FMT_S32P)
+            ac->simd_f = ff_int32_to_float_a_sse2;
+        if( out_fmt == AV_SAMPLE_FMT_FLT && in_fmt == AV_SAMPLE_FMT_S16 || out_fmt == AV_SAMPLE_FMT_FLTP && in_fmt == AV_SAMPLE_FMT_S16P)
+            ac->simd_f = ff_int16_to_float_a_sse2;
+        if( out_fmt == AV_SAMPLE_FMT_S32 && in_fmt == AV_SAMPLE_FMT_FLT || out_fmt == AV_SAMPLE_FMT_S32P && in_fmt == AV_SAMPLE_FMT_FLTP)
+            ac->simd_f = ff_float_to_int32_a_sse2;
+        if( out_fmt == AV_SAMPLE_FMT_S16 && in_fmt == AV_SAMPLE_FMT_FLT || out_fmt == AV_SAMPLE_FMT_S16P && in_fmt == AV_SAMPLE_FMT_FLTP)
+            ac->simd_f = ff_float_to_int16_a_sse2;
+
+        if(channels == 2) {
+            if( out_fmt == AV_SAMPLE_FMT_FLT && in_fmt == AV_SAMPLE_FMT_FLTP || out_fmt == AV_SAMPLE_FMT_S32 && in_fmt == AV_SAMPLE_FMT_S32P)
+                ac->simd_f = ff_pack_2ch_int32_to_int32_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_S16 && in_fmt == AV_SAMPLE_FMT_S16P)
+                ac->simd_f = ff_pack_2ch_int16_to_int16_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_S32 && in_fmt == AV_SAMPLE_FMT_S16P)
+                ac->simd_f = ff_pack_2ch_int16_to_int32_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_S16 && in_fmt == AV_SAMPLE_FMT_S32P)
+                ac->simd_f = ff_pack_2ch_int32_to_int16_a_sse2;
+
+            if( out_fmt == AV_SAMPLE_FMT_FLTP && in_fmt == AV_SAMPLE_FMT_FLT || out_fmt == AV_SAMPLE_FMT_S32P && in_fmt == AV_SAMPLE_FMT_S32)
+                ac->simd_f = ff_unpack_2ch_int32_to_int32_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_S16P && in_fmt == AV_SAMPLE_FMT_S16)
+                ac->simd_f = ff_unpack_2ch_int16_to_int16_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_S32P && in_fmt == AV_SAMPLE_FMT_S16)
+                ac->simd_f = ff_unpack_2ch_int16_to_int32_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_S16P && in_fmt == AV_SAMPLE_FMT_S32)
+                ac->simd_f = ff_unpack_2ch_int32_to_int16_a_sse2;
+
+            if( out_fmt == AV_SAMPLE_FMT_FLT && in_fmt == AV_SAMPLE_FMT_S32P)
+                ac->simd_f = ff_pack_2ch_int32_to_float_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_S32 && in_fmt == AV_SAMPLE_FMT_FLTP)
+                ac->simd_f = ff_pack_2ch_float_to_int32_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_FLT && in_fmt == AV_SAMPLE_FMT_S16P)
+                ac->simd_f = ff_pack_2ch_int16_to_float_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_S16 && in_fmt == AV_SAMPLE_FMT_FLTP)
+                ac->simd_f = ff_pack_2ch_float_to_int16_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_FLTP && in_fmt == AV_SAMPLE_FMT_S32)
+                ac->simd_f = ff_unpack_2ch_int32_to_float_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_S32P && in_fmt == AV_SAMPLE_FMT_FLT)
+                ac->simd_f = ff_unpack_2ch_float_to_int32_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_FLTP && in_fmt == AV_SAMPLE_FMT_S16)
+                ac->simd_f = ff_unpack_2ch_int16_to_float_a_sse2;
+            if( out_fmt == AV_SAMPLE_FMT_S16P && in_fmt == AV_SAMPLE_FMT_FLT)
+                ac->simd_f = ff_unpack_2ch_float_to_int16_a_sse2;
+        }
+    }
+    if(mm_flags & AV_CPU_FLAG_SSSE3) {
+        if(channels == 2) {
+            if( out_fmt == AV_SAMPLE_FMT_S16P && in_fmt == AV_SAMPLE_FMT_S16)
+                ac->simd_f = ff_unpack_2ch_int16_to_int16_a_ssse3;
+            if( out_fmt == AV_SAMPLE_FMT_S32P && in_fmt == AV_SAMPLE_FMT_S16)
+                ac->simd_f = ff_unpack_2ch_int16_to_int32_a_ssse3;
+            if( out_fmt == AV_SAMPLE_FMT_FLTP && in_fmt == AV_SAMPLE_FMT_S16)
+                ac->simd_f = ff_unpack_2ch_int16_to_float_a_ssse3;
+        }
+    }
+    if(mm_flags & AV_CPU_FLAG_SSE4) {
+        if(channels == 6) {
+            if( out_fmt == AV_SAMPLE_FMT_FLT && in_fmt == AV_SAMPLE_FMT_FLTP || out_fmt == AV_SAMPLE_FMT_S32 && in_fmt == AV_SAMPLE_FMT_S32P)
+                ac->simd_f = ff_pack_6ch_float_to_float_a_sse4;
+            if( out_fmt == AV_SAMPLE_FMT_FLT && in_fmt == AV_SAMPLE_FMT_S32P)
+                ac->simd_f = ff_pack_6ch_int32_to_float_a_sse4;
+            if( out_fmt == AV_SAMPLE_FMT_S32 && in_fmt == AV_SAMPLE_FMT_FLTP)
+                ac->simd_f = ff_pack_6ch_float_to_int32_a_sse4;
+        }
+    }
+    if(HAVE_AVX_EXTERNAL && mm_flags & AV_CPU_FLAG_AVX) {
+        if( out_fmt == AV_SAMPLE_FMT_FLT && in_fmt == AV_SAMPLE_FMT_S32 || out_fmt == AV_SAMPLE_FMT_FLTP && in_fmt == AV_SAMPLE_FMT_S32P)
+            ac->simd_f = ff_int32_to_float_a_avx;
+        if(channels == 6) {
+            if( out_fmt == AV_SAMPLE_FMT_FLT && in_fmt == AV_SAMPLE_FMT_FLTP || out_fmt == AV_SAMPLE_FMT_S32 && in_fmt == AV_SAMPLE_FMT_S32P)
+                ac->simd_f = ff_pack_6ch_float_to_float_a_avx;
+            if( out_fmt == AV_SAMPLE_FMT_FLT && in_fmt == AV_SAMPLE_FMT_S32P)
+                ac->simd_f = ff_pack_6ch_int32_to_float_a_avx;
+            if( out_fmt == AV_SAMPLE_FMT_S32 && in_fmt == AV_SAMPLE_FMT_FLTP)
+                ac->simd_f = ff_pack_6ch_float_to_int32_a_avx;
+        }
+    }
+}
+
+#define D(type, simd) \
+mix_1_1_func_type ff_mix_1_1_a_## type ## _ ## simd;\
+mix_2_1_func_type ff_mix_2_1_a_## type ## _ ## simd;
+
+D(float, sse)
+D(float, avx)
+D(int16, mmx)
+D(int16, sse2)
+
+
+av_cold void swri_rematrix_init_x86(struct SwrContext *s){
+    int mm_flags = av_get_cpu_flags();
+    int nb_in  = av_get_channel_layout_nb_channels(s->in_ch_layout);
+    int nb_out = av_get_channel_layout_nb_channels(s->out_ch_layout);
+    int num    = nb_in * nb_out;
+    int i,j;
+
+    s->mix_1_1_simd = NULL;
+    s->mix_2_1_simd = NULL;
+
+    if (s->midbuf.fmt == AV_SAMPLE_FMT_S16P){
+        if(mm_flags & AV_CPU_FLAG_MMX) {
+            s->mix_1_1_simd = ff_mix_1_1_a_int16_mmx;
+            s->mix_2_1_simd = ff_mix_2_1_a_int16_mmx;
+        }
+        if(mm_flags & AV_CPU_FLAG_SSE2) {
+            s->mix_1_1_simd = ff_mix_1_1_a_int16_sse2;
+            s->mix_2_1_simd = ff_mix_2_1_a_int16_sse2;
+        }
+        s->native_simd_matrix = av_mallocz(2 * num * sizeof(int16_t));
+        s->native_simd_one    = av_mallocz(2 * sizeof(int16_t));
+        for(i=0; i<nb_out; i++){
+            int sh = 0;
+            for(j=0; j<nb_in; j++)
+                sh = FFMAX(sh, FFABS(((int*)s->native_matrix)[i * nb_in + j]));
+            sh = FFMAX(av_log2(sh) - 14, 0);
+            for(j=0; j<nb_in; j++) {
+                ((int16_t*)s->native_simd_matrix)[2*(i * nb_in + j)+1] = 15 - sh;
+                ((int16_t*)s->native_simd_matrix)[2*(i * nb_in + j)] =
+                    ((((int*)s->native_matrix)[i * nb_in + j]) + (1<<sh>>1)) >> sh;
+            }
+        }
+        ((int16_t*)s->native_simd_one)[1] = 14;
+        ((int16_t*)s->native_simd_one)[0] = 16384;
+    } else if(s->midbuf.fmt == AV_SAMPLE_FMT_FLTP){
+        if(mm_flags & AV_CPU_FLAG_SSE) {
+            s->mix_1_1_simd = ff_mix_1_1_a_float_sse;
+            s->mix_2_1_simd = ff_mix_2_1_a_float_sse;
+        }
+        if(HAVE_AVX_EXTERNAL && mm_flags & AV_CPU_FLAG_AVX) {
+            s->mix_1_1_simd = ff_mix_1_1_a_float_avx;
+            s->mix_2_1_simd = ff_mix_2_1_a_float_avx;
+        }
+        s->native_simd_matrix = av_mallocz(num * sizeof(float));
+        memcpy(s->native_simd_matrix, s->native_matrix, num * sizeof(float));
+        s->native_simd_one = av_mallocz(sizeof(float));
+        memcpy(s->native_simd_one, s->native_one, sizeof(float));
+    }
+}
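The S16P branch above quantizes the native 32-bit Q15 matrix into (coefficient, 15-sh) int16 pairs, choosing just enough headroom shift sh that every coefficient in a row fits into 16 bits. A sketch of that per-row quantization, assuming native_matrix holds Q15 values in ints as the casts imply:

    /* Sketch of the per-row coefficient quantization: find the largest
     * magnitude, derive a headroom shift, then store (rounded coefficient,
     * 15 - sh) pairs for the pmaddwd/psrad mixing kernels. */
    #include <stdint.h>
    #include <stdlib.h>

    static void quantize_row_ref(const int *row, int16_t *out, int nb_in)
    {
        int sh = 0;
        for (int j = 0; j < nb_in; j++)
            sh = abs(row[j]) > sh ? abs(row[j]) : sh;    /* FFMAX(sh, FFABS(...)) */
        int bits = 0;
        while ((1 << (bits + 1)) <= sh)                  /* av_log2(sh) */
            bits++;
        sh = bits - 14 > 0 ? bits - 14 : 0;              /* FFMAX(av_log2(sh)-14, 0) */
        for (int j = 0; j < nb_in; j++) {
            out[2 * j]     = (int16_t)((row[j] + (1 << sh >> 1)) >> sh); /* rounded */
            out[2 * j + 1] = 15 - sh;                    /* shift count for psrad */
        }
    }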