From 32291ba6eaacc9d4230322361b7bfee052e72040 Mon Sep 17 00:00:00 2001
From: James Almer <jamrial@gmail.com>
Date: Thu, 20 Mar 2014 01:40:56 -0300
Subject: swresample: add swri_resample_float_sse

At least two times faster than the C version.

Signed-off-by: James Almer <jamrial@gmail.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
---
 libswresample/x86/resample_mmx.h | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

(limited to 'libswresample/x86')

diff --git a/libswresample/x86/resample_mmx.h b/libswresample/x86/resample_mmx.h
index f366cc7f59..ba36de9e5c 100644
--- a/libswresample/x86/resample_mmx.h
+++ b/libswresample/x86/resample_mmx.h
@@ -24,6 +24,7 @@
 
 int swri_resample_int16_mmx2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
 int swri_resample_int16_sse2 (struct ResampleContext *c, int16_t *dst, const int16_t *src, int *consumed, int src_size, int dst_size, int update_ctx);
+int swri_resample_float_sse  (struct ResampleContext *c, float *dst, const float *src, int *consumed, int src_size, int dst_size, int update_ctx);
 
 DECLARE_ALIGNED(16, const uint64_t, ff_resample_int16_rounder)[2]    = { 0x0000000000004000ULL, 0x0000000000000000ULL};
 
@@ -72,3 +73,25 @@ __asm__ volatile(\
       "r" (dst+dst_index)\
       NAMED_CONSTRAINTS_ADD(ff_resample_int16_rounder)\
 );
+
+#define COMMON_CORE_FLT_SSE \
+    x86_reg len= -4*c->filter_length;\
+__asm__ volatile(\
+    "xorps              %%xmm0, %%xmm0     \n\t"\
+    "1:                                    \n\t"\
+    "movups          (%1, %0), %%xmm1      \n\t"\
+    "mulps           (%2, %0), %%xmm1      \n\t"\
+    "addps              %%xmm1, %%xmm0     \n\t"\
+    "add                   $16, %0         \n\t"\
+    " js 1b                                \n\t"\
+    "movhlps            %%xmm0, %%xmm1     \n\t"\
+    "addps              %%xmm1, %%xmm0     \n\t"\
+    "movss              %%xmm0, %%xmm1     \n\t"\
+    "shufps         $1, %%xmm0, %%xmm0     \n\t"\
+    "addps              %%xmm1, %%xmm0     \n\t"\
+    "movss              %%xmm0, (%3)       \n\t"\
+    : "+r" (len)\
+    : "r" (((uint8_t*)(src+sample_index))-len),\
+      "r" (((uint8_t*)filter)-len),\
+      "r" (dst+dst_index)\
+);
-- 
cgit v1.2.1