author    | Erik de Castro Lopo <erikd@mega-nerd.com> | 2013-10-04 01:38:00 +1000
committer | Erik de Castro Lopo <erikd@mega-nerd.com> | 2013-10-04 01:41:48 +1000
commit    | ecd0acba75e7961b60465c5ee3b6876b407803ca (patch)
tree      | b6a1dfa919eab7b35782f5f75ad9a29807873639
parent    | bd6a920e40b33d7640641aafd02b3d48a08fc4b3 (diff)
download  | flac-ecd0acba75e7961b60465c5ee3b6876b407803ca.tar.gz
Improve x86 intrinsic implementation.
* Split lpc_x86intrin.c into lpc_intrin_sse.c and lpc_intrin_sse2.c
* Add FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2()
  function to lpc_intrin_sse2.c
* Add lpc_intrin_sse41.c with two ..._wide_intrin_sse41() functions
  (useful for 24-bit encoding/decoding)
* Add precompute_partition_info_sums_intrin_sse2() / ...ssse3() and
  disable precompute_partition_info_sums_32bit_asm_ia32_().
  The SSE2 version uses 4 SSE2 instructions instead of the 1 SSSE3
  instruction PABSD, so it is slightly slower (sketched below).
Patch-from: lvqcl <lvqcl.mail@gmail.com>
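
For context, a minimal sketch (not from the patch) of the PABSD trade-off noted above: SSSE3 takes per-lane absolute values of four 32-bit integers in a single intrinsic, while plain SSE2 has to synthesize them. The patch's actual instruction sequence may differ.

#include <emmintrin.h>  /* SSE2 */
#include <tmmintrin.h>  /* SSSE3: _mm_abs_epi32 == PABSD */

/* SSSE3: one instruction for |x| on four 32-bit lanes. */
static __m128i abs_epi32_ssse3(__m128i x)
{
	return _mm_abs_epi32(x); /* PABSD */
}

/* SSE2 emulation: build a sign mask, then xor and subtract
   (plus a register copy) -- the "slightly slower" path. */
static __m128i abs_epi32_sse2(__m128i x)
{
	__m128i sign = _mm_srai_epi32(x, 31); /* 0 if x >= 0, -1 if x < 0 */
	x = _mm_xor_si128(x, sign);           /* one's complement of negative lanes */
	return _mm_sub_epi32(x, sign);        /* +1 on negative lanes: two's complement */
}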
-rw-r--r-- | include/share/compat.h                       |    8
-rw-r--r-- | src/libFLAC/Makefile.am                      |    6
-rw-r--r-- | src/libFLAC/Makefile.lite                    |    6
-rw-r--r-- | src/libFLAC/include/private/Makefile.am      |    1
-rw-r--r-- | src/libFLAC/include/private/lpc.h            |   11
-rw-r--r-- | src/libFLAC/include/private/stream_encoder.h |   54
-rw-r--r-- | src/libFLAC/libFLAC_dynamic.vcproj           |   22
-rw-r--r-- | src/libFLAC/libFLAC_static.vcproj            |   22
-rw-r--r-- | src/libFLAC/lpc_intrin_sse.c                 |  246
-rw-r--r-- | src/libFLAC/lpc_intrin_sse2.c                | 1318
-rw-r--r-- | src/libFLAC/lpc_intrin_sse41.c               | 1126
-rw-r--r-- | src/libFLAC/lpc_x86intrin.c                  |  566
-rw-r--r-- | src/libFLAC/stream_decoder.c                 |    6
-rw-r--r-- | src/libFLAC/stream_encoder.c                 |   48
-rw-r--r-- | src/libFLAC/stream_encoder_intrin_sse2.c     |  161
-rw-r--r-- | src/libFLAC/stream_encoder_intrin_ssse3.c    |  161
16 files changed, 3186 insertions, 576 deletions
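
For orientation while reading the diff: the FLAC__lpc_compute_residual_from_qlp_coefficients_* variants below all vectorize one scalar recurrence. A reference sketch (illustrative only, not code from the patch) follows; the plain variants accumulate in 32 bits, the _wide_ SSE4.1 variants in 64 bits for 24-bit input.

/* Illustrative scalar reference; `data` points past `order` warm-up
   samples, as in libFLAC, so data[-1] etc. are valid reads. */
static void lpc_residual_ref(const FLAC__int32 *data, unsigned data_len,
                             const FLAC__int32 qlp_coeff[], unsigned order,
                             int lp_quantization, FLAC__int32 residual[])
{
	unsigned i, j;
	for(i = 0; i < data_len; i++) {
		FLAC__int64 sum = 0; /* the _wide_ variants keep this 64-bit */
		for(j = 0; j < order; j++)
			sum += (FLAC__int64)qlp_coeff[j] * data[(int)i - (int)j - 1];
		residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
	}
}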
diff --git a/include/share/compat.h b/include/share/compat.h
index bd4285bf..a4b48ac9 100644
--- a/include/share/compat.h
+++ b/include/share/compat.h
@@ -195,4 +195,12 @@ int flac_snprintf(char *str, size_t size, const char *fmt, ...);
 };
 #endif
 
+/* SSSE3, SSE4 support: MSVS 2008, GCC 4.3 -- currently disabled, Intel Compiler 10.0 */
+#if ( defined _MSC_VER && _MSC_VER >= 1500 ) \
+ || ( 0 && defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) ) \
+ || ( defined __INTEL_COMPILER && __INTEL_COMPILER >= 1000 )
+#define FLAC__SSSE3_SUPPORTED 1
+#define FLAC__SSE4_SUPPORTED 1
+#endif
+
 #endif /* FLAC__SHARE__COMPAT_H */
diff --git a/src/libFLAC/Makefile.am b/src/libFLAC/Makefile.am
index 669793ba..247e33cd 100644
--- a/src/libFLAC/Makefile.am
+++ b/src/libFLAC/Makefile.am
@@ -125,13 +125,17 @@ libFLAC_sources = \
 	float.c \
 	format.c \
 	lpc.c \
-	lpc_x86intrin.c \
+	lpc_intrin_sse.c \
+	lpc_intrin_sse2.c \
+	lpc_intrin_sse41.c \
 	md5.c \
 	memory.c \
 	metadata_iterators.c \
 	metadata_object.c \
 	stream_decoder.c \
 	stream_encoder.c \
+	stream_encoder_intrin_sse2.c \
+	stream_encoder_intrin_ssse3.c \
 	stream_encoder_framing.c \
 	window.c \
 	$(extra_ogg_sources)
diff --git a/src/libFLAC/Makefile.lite b/src/libFLAC/Makefile.lite
index 608ecc2c..4533138a 100644
--- a/src/libFLAC/Makefile.lite
+++ b/src/libFLAC/Makefile.lite
@@ -88,13 +88,17 @@ SRCS_C = \
 	float.c \
 	format.c \
 	lpc.c \
-	lpc_x86intrin.c \
+	lpc_intrin_sse.c \
+	lpc_intrin_sse2.c \
+	lpc_intrin_sse41.c \
 	md5.c \
 	memory.c \
 	metadata_iterators.c \
 	metadata_object.c \
 	stream_decoder.c \
 	stream_encoder.c \
+	stream_encoder_intrin_sse2.c \
+	stream_encoder_intrin_ssse3.c \
 	stream_encoder_framing.c \
 	window.c \
 	$(OGG_SRCS)
diff --git a/src/libFLAC/include/private/Makefile.am b/src/libFLAC/include/private/Makefile.am
index 5a747e12..a21fb2f2 100644
--- a/src/libFLAC/include/private/Makefile.am
+++ b/src/libFLAC/include/private/Makefile.am
@@ -48,5 +48,6 @@ noinst_HEADERS = \
 	ogg_encoder_aspect.h \
 	ogg_helper.h \
 	ogg_mapping.h \
+	stream_encoder.h \
 	stream_encoder_framing.h \
 	window.h
diff --git a/src/libFLAC/include/private/lpc.h b/src/libFLAC/include/private/lpc.h
index e3b2e389..2e8c4b5e 100644
--- a/src/libFLAC/include/private/lpc.h
+++ b/src/libFLAC/include/private/lpc.h
@@ -156,6 +156,10 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_asm_ia32_mmx(const FLAC__i
 # endif
 # if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && defined FLAC__HAS_X86INTRIN
 void FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_sse2(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[]);
+void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[]);
+# ifdef FLAC__SSE4_SUPPORTED
+void FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[]);
+# endif
 # endif
 #endif
 
@@ -187,7 +191,12 @@ void FLAC__lpc_restore_signal_asm_ia32_mmx(const FLAC__int32 residual[], unsigne
 # elif defined FLAC__CPU_PPC
 void FLAC__lpc_restore_signal_asm_ppc_altivec_16(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[]);
 void FLAC__lpc_restore_signal_asm_ppc_altivec_16_order8(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[]);
-# endif/* FLAC__CPU_IA32 || FLAC__CPU_PPC */
+# endif /* FLAC__CPU_IA32 || FLAC__CPU_PPC */
+# if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && defined FLAC__HAS_X86INTRIN
+# ifdef FLAC__SSE4_SUPPORTED
+void FLAC__lpc_restore_signal_wide_intrin_sse41(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[]);
+# endif
+# endif
 #endif /* FLAC__NO_ASM */
 
 #ifndef FLAC__INTEGER_ONLY_LIBRARY
diff --git a/src/libFLAC/include/private/stream_encoder.h b/src/libFLAC/include/private/stream_encoder.h
new file mode 100644
index 00000000..3b1c82a7
--- /dev/null
+++ b/src/libFLAC/include/private/stream_encoder.h
@@ -0,0 +1,54 @@
+/* libFLAC - Free Lossless Audio Codec library
+ * Copyright (C) 2000-2009  Josh Coalson
+ * Copyright (C) 2011-2013  Xiph.Org Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of the Xiph.org Foundation nor the names of its
+ *   contributors may be used to endorse or promote products derived from
+ *   this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FLAC__PRIVATE__STREAM_ENCODER_H
+#define FLAC__PRIVATE__STREAM_ENCODER_H
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && defined FLAC__HAS_X86INTRIN
+#include "share/compat.h"
+#include "FLAC/format.h"
+
+extern void precompute_partition_info_sums_intrin_sse2(const FLAC__int32 residual[], FLAC__uint64 abs_residual_partition_sums[],
+		unsigned residual_samples, unsigned predictor_order, unsigned min_partition_order, unsigned max_partition_order, unsigned bps);
+
+#ifdef FLAC__SSSE3_SUPPORTED
+extern void precompute_partition_info_sums_intrin_ssse3(const FLAC__int32 residual[], FLAC__uint64 abs_residual_partition_sums[],
+		unsigned residual_samples, unsigned predictor_order, unsigned min_partition_order, unsigned max_partition_order, unsigned bps);
+#endif
+
+#endif
+
+#endif
diff --git a/src/libFLAC/libFLAC_dynamic.vcproj b/src/libFLAC/libFLAC_dynamic.vcproj
index 5b3dcb73..46ec9a5b 100644
--- a/src/libFLAC/libFLAC_dynamic.vcproj
+++ b/src/libFLAC/libFLAC_dynamic.vcproj
@@ -268,6 +268,10 @@
>
</File>
<File
+ RelativePath=".\include\private\stream_encoder.h"
+ >
+ </File>
+ <File
RelativePath=".\include\private\stream_encoder_framing.h"
>
</File>
@@ -318,7 +322,15 @@
>
</File>
<File
- RelativePath=".\lpc_x86intrin.c"
+ RelativePath=".\lpc_intrin_sse.c"
+ >
+ </File>
+ <File
+ RelativePath=".\lpc_intrin_sse2.c"
+ >
+ </File>
+ <File
+ RelativePath=".\lpc_intrin_sse41.c"
>
</File>
<File
@@ -366,6 +378,14 @@
>
</File>
<File
+ RelativePath=".\stream_encoder_intrin_sse2.c"
+ >
+ </File>
+ <File
+ RelativePath=".\stream_encoder_intrin_ssse3.c"
+ >
+ </File>
+ <File
RelativePath=".\window.c"
>
</File>
diff --git a/src/libFLAC/libFLAC_static.vcproj b/src/libFLAC/libFLAC_static.vcproj
index 0b8b7f8c..a173a798 100644
--- a/src/libFLAC/libFLAC_static.vcproj
+++ b/src/libFLAC/libFLAC_static.vcproj
@@ -243,6 +243,10 @@
>
</File>
<File
+ RelativePath=".\include\private\stream_encoder.h"
+ >
+ </File>
+ <File
RelativePath=".\include\private\stream_encoder_framing.h"
>
</File>
@@ -333,7 +337,15 @@
>
</File>
<File
- RelativePath=".\lpc_x86intrin.c"
+ RelativePath=".\lpc_intrin_sse.c"
+ >
+ </File>
+ <File
+ RelativePath=".\lpc_intrin_sse2.c"
+ >
+ </File>
+ <File
+ RelativePath=".\lpc_intrin_sse41.c"
>
</File>
<File
@@ -381,6 +393,14 @@
>
</File>
<File
+ RelativePath=".\stream_encoder_intrin_sse2.c"
+ >
+ </File>
+ <File
+ RelativePath=".\stream_encoder_intrin_ssse3.c"
+ >
+ </File>
+ <File
RelativePath=".\window.c"
>
</File>
diff --git a/src/libFLAC/lpc_intrin_sse.c b/src/libFLAC/lpc_intrin_sse.c new file mode 100644 index 00000000..e8f9f578 --- /dev/null +++ b/src/libFLAC/lpc_intrin_sse.c @@ -0,0 +1,246 @@ +/* libFLAC - Free Lossless Audio Codec library + * Copyright (C) 2000-2009 Josh Coalson + * Copyright (C) 2011-2013 Xiph.Org Foundation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * - Neither the name of the Xiph.org Foundation nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#if HAVE_CONFIG_H +# include <config.h> +#endif + +#ifndef FLAC__INTEGER_ONLY_LIBRARY +#ifndef FLAC__NO_ASM +#if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && defined FLAC__HAS_X86INTRIN + +#include "FLAC/assert.h" +#include "FLAC/format.h" +#include "private/lpc.h" + +#include <xmmintrin.h> /* SSE */ + +void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[]) +{ + __m128 xmm0, xmm2, xmm5; + + (void) lag; + FLAC__ASSERT(lag > 0); + FLAC__ASSERT(lag <= 4); + FLAC__ASSERT(lag <= data_len); + FLAC__ASSERT(data_len > 0); + + xmm5 = _mm_setzero_ps(); + + xmm0 = _mm_load_ss(data++); + xmm2 = xmm0; + xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0); + + xmm0 = _mm_mul_ps(xmm0, xmm2); + xmm5 = _mm_add_ps(xmm5, xmm0); + + data_len--; + + while(data_len) + { + xmm0 = _mm_load1_ps(data++); + + xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3)); + xmm2 = _mm_move_ss(xmm2, xmm0); + xmm0 = _mm_mul_ps(xmm0, xmm2); + xmm5 = _mm_add_ps(xmm5, xmm0); + + data_len--; + } + + _mm_storeu_ps(autoc, xmm5); +} + +void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[]) +{ + __m128 xmm0, xmm1, xmm2, xmm3, xmm5, xmm6; + + (void) lag; + FLAC__ASSERT(lag > 0); + FLAC__ASSERT(lag <= 8); + FLAC__ASSERT(lag <= data_len); + FLAC__ASSERT(data_len > 0); + + xmm5 = _mm_setzero_ps(); + xmm6 = _mm_setzero_ps(); + + xmm0 = _mm_load_ss(data++); + xmm2 = xmm0; + xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0); + xmm3 = _mm_setzero_ps(); + + xmm0 = _mm_mul_ps(xmm0, xmm2); + xmm5 = _mm_add_ps(xmm5, xmm0); + + data_len--; + + while(data_len) + { + xmm0 = _mm_load1_ps(data++); + + xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3)); + xmm3 = _mm_shuffle_ps(xmm3, xmm3, _MM_SHUFFLE(2,1,0,3)); + xmm3 = _mm_move_ss(xmm3, xmm2); + xmm2 = _mm_move_ss(xmm2, xmm0); + + xmm1 = xmm0; + xmm1 = _mm_mul_ps(xmm1, xmm3); + xmm0 = _mm_mul_ps(xmm0, xmm2); + xmm6 = _mm_add_ps(xmm6, xmm1); + xmm5 = _mm_add_ps(xmm5, xmm0); + + data_len--; + } + + _mm_storeu_ps(autoc, xmm5); + _mm_storeu_ps(autoc+4, xmm6); +} + +void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_12(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[]) +{ + __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; + + (void) lag; + FLAC__ASSERT(lag > 0); + FLAC__ASSERT(lag <= 12); + FLAC__ASSERT(lag <= data_len); + FLAC__ASSERT(data_len > 0); + + xmm5 = _mm_setzero_ps(); + xmm6 = _mm_setzero_ps(); + xmm7 = _mm_setzero_ps(); + + xmm0 = _mm_load_ss(data++); + xmm2 = xmm0; + xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0); + xmm3 = _mm_setzero_ps(); + xmm4 = _mm_setzero_ps(); + + xmm0 = _mm_mul_ps(xmm0, xmm2); + xmm5 = _mm_add_ps(xmm5, xmm0); + + data_len--; + + while(data_len) + { + xmm0 = _mm_load1_ps(data++); + + xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3)); + xmm3 = _mm_shuffle_ps(xmm3, xmm3, _MM_SHUFFLE(2,1,0,3)); + xmm4 = _mm_shuffle_ps(xmm4, xmm4, _MM_SHUFFLE(2,1,0,3)); + xmm4 = _mm_move_ss(xmm4, xmm3); + xmm3 = _mm_move_ss(xmm3, xmm2); + xmm2 = _mm_move_ss(xmm2, xmm0); + + xmm1 = xmm0; + xmm1 = _mm_mul_ps(xmm1, xmm2); + xmm5 = _mm_add_ps(xmm5, xmm1); + xmm1 = xmm0; + xmm1 = _mm_mul_ps(xmm1, xmm3); + xmm6 = _mm_add_ps(xmm6, xmm1); + xmm0 = _mm_mul_ps(xmm0, xmm4); + xmm7 = _mm_add_ps(xmm7, xmm0); + + data_len--; + } + + _mm_storeu_ps(autoc, xmm5); + _mm_storeu_ps(autoc+4, xmm6); + _mm_storeu_ps(autoc+8, xmm7); +} + +void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_16(const FLAC__real data[], unsigned 
data_len, unsigned lag, FLAC__real autoc[]) +{ + __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9; + + (void) lag; + FLAC__ASSERT(lag > 0); + FLAC__ASSERT(lag <= 16); + FLAC__ASSERT(lag <= data_len); + FLAC__ASSERT(data_len > 0); + + xmm6 = _mm_setzero_ps(); + xmm7 = _mm_setzero_ps(); + xmm8 = _mm_setzero_ps(); + xmm9 = _mm_setzero_ps(); + + xmm0 = _mm_load_ss(data++); + xmm2 = xmm0; + xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0); + xmm3 = _mm_setzero_ps(); + xmm4 = _mm_setzero_ps(); + xmm5 = _mm_setzero_ps(); + + xmm0 = _mm_mul_ps(xmm0, xmm2); + xmm6 = _mm_add_ps(xmm6, xmm0); + + data_len--; + + while(data_len) + { + xmm0 = _mm_load1_ps(data++); + + /* shift xmm5:xmm4:xmm3:xmm2 left by one float */ + xmm5 = _mm_shuffle_ps(xmm5, xmm5, _MM_SHUFFLE(2,1,0,3)); + xmm4 = _mm_shuffle_ps(xmm4, xmm4, _MM_SHUFFLE(2,1,0,3)); + xmm3 = _mm_shuffle_ps(xmm3, xmm3, _MM_SHUFFLE(2,1,0,3)); + xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3)); + xmm5 = _mm_move_ss(xmm5, xmm4); + xmm4 = _mm_move_ss(xmm4, xmm3); + xmm3 = _mm_move_ss(xmm3, xmm2); + xmm2 = _mm_move_ss(xmm2, xmm0); + + /* xmm9|xmm8|xmm7|xmm6 += xmm0|xmm0|xmm0|xmm0 * xmm5|xmm4|xmm3|xmm2 */ + xmm1 = xmm0; + xmm1 = _mm_mul_ps(xmm1, xmm5); + xmm9 = _mm_add_ps(xmm9, xmm1); + xmm1 = xmm0; + xmm1 = _mm_mul_ps(xmm1, xmm4); + xmm8 = _mm_add_ps(xmm8, xmm1); + xmm1 = xmm0; + xmm1 = _mm_mul_ps(xmm1, xmm3); + xmm7 = _mm_add_ps(xmm7, xmm1); + xmm0 = _mm_mul_ps(xmm0, xmm2); + xmm6 = _mm_add_ps(xmm6, xmm0); + + data_len--; + } + + _mm_storeu_ps(autoc, xmm6); + _mm_storeu_ps(autoc+4, xmm7); + _mm_storeu_ps(autoc+8, xmm8); + _mm_storeu_ps(autoc+12,xmm9); +} + +#endif /* (FLAC__CPU_IA32 || FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN */ +#endif /* FLAC__NO_ASM */ +#endif /* FLAC__INTEGER_ONLY_LIBRARY */ diff --git a/src/libFLAC/lpc_intrin_sse2.c b/src/libFLAC/lpc_intrin_sse2.c new file mode 100644 index 00000000..9311151d --- /dev/null +++ b/src/libFLAC/lpc_intrin_sse2.c @@ -0,0 +1,1318 @@ +/* libFLAC - Free Lossless Audio Codec library + * Copyright (C) 2000-2009 Josh Coalson + * Copyright (C) 2011-2013 Xiph.Org Foundation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * - Neither the name of the Xiph.org Foundation nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#if HAVE_CONFIG_H +# include <config.h> +#endif + +#ifndef FLAC__INTEGER_ONLY_LIBRARY +#ifndef FLAC__NO_ASM +#if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && defined FLAC__HAS_X86INTRIN + +#include "FLAC/assert.h" +#include "FLAC/format.h" +#include "private/lpc.h" + +#include <emmintrin.h> /* SSE2 */ + +void FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_sse2(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[]) +{ + int i; + FLAC__int32 sum; + + FLAC__ASSERT(order > 0); + FLAC__ASSERT(order <= 32); + FLAC__ASSERT(data_len > 0); + + if(order <= 12) { + FLAC__int32 curr; + if(order > 8) { /* order == 9, 10, 11, 12 */ +#ifdef FLAC__CPU_IA32 /* 8 XMM registers available */ + /* can be modified to work with order <= 15 but the subset limit is 12 */ + int r; + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; + xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0)); + xmm6 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4)); + xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+8)); /* read 0 to 3 uninitialized coeffs... */ + switch(order) /* ...and zero them out */ + { + case 9: + xmm1 = _mm_slli_si128(xmm1, 12); xmm1 = _mm_srli_si128(xmm1, 12); break; + case 10: + xmm1 = _mm_slli_si128(xmm1, 8); xmm1 = _mm_srli_si128(xmm1, 8); break; + case 11: + xmm1 = _mm_slli_si128(xmm1, 4); xmm1 = _mm_srli_si128(xmm1, 4); break; + } + xmm2 = _mm_setzero_si128(); + xmm0 = _mm_packs_epi32(xmm0, xmm6); + xmm1 = _mm_packs_epi32(xmm1, xmm2); + + xmm4 = _mm_loadu_si128((const __m128i*)(data-12)); + xmm5 = _mm_loadu_si128((const __m128i*)(data-8)); + xmm3 = _mm_loadu_si128((const __m128i*)(data-4)); + xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(0,1,2,3)); + xmm5 = _mm_shuffle_epi32(xmm5, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3)); + xmm4 = _mm_packs_epi32(xmm4, xmm2); + xmm3 = _mm_packs_epi32(xmm3, xmm5); + + xmm7 = _mm_slli_si128(xmm1, 2); + xmm7 = _mm_or_si128(xmm7, _mm_srli_si128(xmm0, 14)); + xmm2 = _mm_slli_si128(xmm0, 2); + + /* xmm0, xmm1: qlp_coeff + xmm2, xmm7: qlp_coeff << 16 bit + xmm3, xmm4: data */ + + xmm6 = xmm4; + xmm6 = _mm_madd_epi16(xmm6, xmm1); + xmm5 = xmm3; + xmm5 = _mm_madd_epi16(xmm5, xmm0); + xmm6 = _mm_add_epi32(xmm6, xmm5); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + r = data_len % 2; + + if(r) { + xmm4 = _mm_slli_si128(xmm4, 2); + xmm6 = xmm3; + xmm3 = _mm_slli_si128(xmm3, 2); + xmm4 = _mm_or_si128(xmm4, _mm_srli_si128(xmm6, 14)); + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm4; + xmm6 = _mm_madd_epi16(xmm6, xmm1); + xmm5 = xmm3; + xmm5 = _mm_madd_epi16(xmm5, xmm0); + xmm6 = _mm_add_epi32(xmm6, xmm5); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + 
*residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + } + + while(data_len) { /* data_len is a multiple of 2 */ + /* 1 _mm_slli_si128 per data element less but we need shifted qlp_coeff in xmm2:xmm7 */ + xmm4 = _mm_slli_si128(xmm4, 4); + xmm6 = xmm3; + xmm3 = _mm_slli_si128(xmm3, 4); + xmm4 = _mm_or_si128(xmm4, _mm_srli_si128(xmm6, 12)); + xmm3 = _mm_insert_epi16(xmm3, curr, 1); + + xmm6 = xmm4; + xmm6 = _mm_madd_epi16(xmm6, xmm7); + xmm5 = xmm3; + xmm5 = _mm_madd_epi16(xmm5, xmm2); + xmm6 = _mm_add_epi32(xmm6, xmm5); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm4; + xmm6 = _mm_madd_epi16(xmm6, xmm1); + xmm5 = xmm3; + xmm5 = _mm_madd_epi16(xmm5, xmm0); + xmm6 = _mm_add_epi32(xmm6, xmm5); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len-=2; + } +#else /* 16 XMM registers available */ + int r; + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmmA, xmmB; + xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0)); + xmm6 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4)); + xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+8)); /* read 0 to 3 uninitialized coeffs... */ + switch(order) /* ...and zero them out */ + { + case 9: + xmm1 = _mm_slli_si128(xmm1, 12); xmm1 = _mm_srli_si128(xmm1, 12); break; + case 10: + xmm1 = _mm_slli_si128(xmm1, 8); xmm1 = _mm_srli_si128(xmm1, 8); break; + case 11: + xmm1 = _mm_slli_si128(xmm1, 4); xmm1 = _mm_srli_si128(xmm1, 4); break; + } + xmm2 = _mm_setzero_si128(); + xmm0 = _mm_packs_epi32(xmm0, xmm6); + xmm1 = _mm_packs_epi32(xmm1, xmm2); + + xmm4 = _mm_loadu_si128((const __m128i*)(data-12)); + xmm5 = _mm_loadu_si128((const __m128i*)(data-8)); + xmm3 = _mm_loadu_si128((const __m128i*)(data-4)); + xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(0,1,2,3)); + xmm5 = _mm_shuffle_epi32(xmm5, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3)); + xmm4 = _mm_packs_epi32(xmm4, xmm2); + xmm3 = _mm_packs_epi32(xmm3, xmm5); + + xmm7 = _mm_slli_si128(xmm1, 2); + xmm7 = _mm_or_si128(xmm7, _mm_srli_si128(xmm0, 14)); + xmm2 = _mm_slli_si128(xmm0, 2); + + xmm9 = _mm_slli_si128(xmm1, 4); + xmm9 = _mm_or_si128(xmm9, _mm_srli_si128(xmm0, 12)); + xmm8 = _mm_slli_si128(xmm0, 4); + + xmmB = _mm_slli_si128(xmm1, 6); + xmmB = _mm_or_si128(xmmB, _mm_srli_si128(xmm0, 10)); + xmmA = _mm_slli_si128(xmm0, 6); + + /* xmm0, xmm1: qlp_coeff + xmm2, xmm7: qlp_coeff << 16 bit + xmm8, xmm9: qlp_coeff << 2*16 bit + xmmA, xmmB: qlp_coeff << 3*16 bit + xmm3, xmm4: data */ + + xmm6 = xmm4; + xmm6 = _mm_madd_epi16(xmm6, xmm1); + xmm5 = xmm3; + xmm5 = _mm_madd_epi16(xmm5, xmm0); + xmm6 = _mm_add_epi32(xmm6, xmm5); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + r = data_len % 4; + + while(r) { + xmm4 = _mm_slli_si128(xmm4, 2); + xmm6 = xmm3; + xmm3 = _mm_slli_si128(xmm3, 2); + xmm4 = _mm_or_si128(xmm4, _mm_srli_si128(xmm6, 14)); + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm4; + xmm6 = _mm_madd_epi16(xmm6, xmm1); + xmm5 = xmm3; + xmm5 = _mm_madd_epi16(xmm5, xmm0); + xmm6 = 
_mm_add_epi32(xmm6, xmm5); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; r--; + } + + while(data_len) { /* data_len is a multiple of 4 */ + xmm4 = _mm_slli_si128(xmm4, 8); + xmm6 = xmm3; + xmm3 = _mm_slli_si128(xmm3, 8); + xmm4 = _mm_or_si128(xmm4, _mm_srli_si128(xmm6, 8)); + + xmm3 = _mm_insert_epi16(xmm3, curr, 3); + + xmm6 = xmm4; + xmm6 = _mm_madd_epi16(xmm6, xmmB); + xmm5 = xmm3; + xmm5 = _mm_madd_epi16(xmm5, xmmA); + xmm6 = _mm_add_epi32(xmm6, xmm5); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + xmm3 = _mm_insert_epi16(xmm3, curr, 2); + + xmm6 = xmm4; + xmm6 = _mm_madd_epi16(xmm6, xmm9); + xmm5 = xmm3; + xmm5 = _mm_madd_epi16(xmm5, xmm8); + xmm6 = _mm_add_epi32(xmm6, xmm5); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + xmm3 = _mm_insert_epi16(xmm3, curr, 1); + + xmm6 = xmm4; + xmm6 = _mm_madd_epi16(xmm6, xmm7); + xmm5 = xmm3; + xmm5 = _mm_madd_epi16(xmm5, xmm2); + xmm6 = _mm_add_epi32(xmm6, xmm5); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm4; + xmm6 = _mm_madd_epi16(xmm6, xmm1); + xmm5 = xmm3; + xmm5 = _mm_madd_epi16(xmm5, xmm0); + xmm6 = _mm_add_epi32(xmm6, xmm5); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len-=4; + } +#endif + } /* endif(order > 8) */ + else if(order > 4) { /* order == 5, 6, 7, 8 */ + if(order > 6) { /* order == 7, 8 */ + if(order == 8) { + __m128i xmm0, xmm1, xmm3, xmm6; + xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4)); + xmm0 = _mm_packs_epi32(xmm0, xmm1); + + xmm1 = _mm_loadu_si128((const __m128i*)(data-8)); + xmm3 = _mm_loadu_si128((const __m128i*)(data-4)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_packs_epi32(xmm3, xmm1); + + /* xmm0: qlp_coeff + xmm3: data */ + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + + while(data_len) { + xmm3 = _mm_slli_si128(xmm3, 2); + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + } + } + else { /* order == 7 */ + int r; + __m128i xmm0, xmm1, xmm2, xmm3, xmm6; + xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4)); + xmm1 = _mm_slli_si128(xmm1, 4); xmm1 = _mm_srli_si128(xmm1, 4); + xmm0 = 
_mm_packs_epi32(xmm0, xmm1); + + xmm1 = _mm_loadu_si128((const __m128i*)(data-8)); + xmm3 = _mm_loadu_si128((const __m128i*)(data-4)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_packs_epi32(xmm3, xmm1); + xmm2 = _mm_slli_si128(xmm0, 2); + + /* xmm0: qlp_coeff + xmm2: qlp_coeff << 16 bit + xmm3: data */ + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + r = data_len % 2; + + if(r) { + xmm3 = _mm_slli_si128(xmm3, 2); + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + } + + while(data_len) { /* data_len is a multiple of 2 */ + xmm3 = _mm_slli_si128(xmm3, 4); + xmm3 = _mm_insert_epi16(xmm3, curr, 1); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm2); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len-=2; + } + } + } + else { /* order == 5, 6 */ + if(order == 6) { + int r; + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm6; + xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4)); + xmm1 = _mm_slli_si128(xmm1, 8); xmm1 = _mm_srli_si128(xmm1, 8); + xmm0 = _mm_packs_epi32(xmm0, xmm1); + + xmm1 = _mm_loadu_si128((const __m128i*)(data-8)); + xmm3 = _mm_loadu_si128((const __m128i*)(data-4)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_packs_epi32(xmm3, xmm1); + xmm2 = _mm_slli_si128(xmm0, 2); + xmm4 = _mm_slli_si128(xmm0, 4); + + /* xmm0: qlp_coeff + xmm2: qlp_coeff << 16 bit + xmm4: qlp_coeff << 2*16 bit + xmm3: data */ + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + r = data_len % 3; + + while(r) { + xmm3 = _mm_slli_si128(xmm3, 2); + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; r--; + } + + while(data_len) { /* data_len is a multiple of 3 */ + xmm3 = _mm_slli_si128(xmm3, 6); + xmm3 = _mm_insert_epi16(xmm3, curr, 2); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm4); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + xmm3 = 
_mm_insert_epi16(xmm3, curr, 1); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm2); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len-=3; + } + } + else { /* order == 5 */ + int r; + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6; + xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4)); + xmm1 = _mm_slli_si128(xmm1, 12); xmm1 = _mm_srli_si128(xmm1, 12); + xmm0 = _mm_packs_epi32(xmm0, xmm1); + + xmm1 = _mm_loadu_si128((const __m128i*)(data-8)); + xmm3 = _mm_loadu_si128((const __m128i*)(data-4)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_packs_epi32(xmm3, xmm1); + xmm2 = _mm_slli_si128(xmm0, 2); + xmm4 = _mm_slli_si128(xmm0, 4); + xmm5 = _mm_slli_si128(xmm0, 6); + + /* xmm0: qlp_coeff + xmm2: qlp_coeff << 16 bit + xmm4: qlp_coeff << 2*16 bit + xmm4: qlp_coeff << 3*16 bit + xmm3: data */ + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + r = data_len % 4; + + while(r) { + xmm3 = _mm_slli_si128(xmm3, 2); + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; r--; + } + + while(data_len) { /* data_len is a multiple of 4 */ + xmm3 = _mm_slli_si128(xmm3, 8); + xmm3 = _mm_insert_epi16(xmm3, curr, 3); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm5); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + xmm3 = _mm_insert_epi16(xmm3, curr, 2); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm4); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + xmm3 = _mm_insert_epi16(xmm3, curr, 1); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm2); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8)); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len-=4; + } + } + } + } + else { /* order == 1, 2, 3, 4 */ + if(order > 2) { + if(order == 4) { + __m128i xmm0, xmm3, xmm6; + xmm6 = _mm_setzero_si128(); + xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0)); 
+ xmm0 = _mm_packs_epi32(xmm0, xmm6); + + xmm3 = _mm_loadu_si128((const __m128i*)(data-4)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_packs_epi32(xmm3, xmm6); + + /* xmm0: qlp_coeff + xmm3: data */ + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + + while(data_len) { + xmm3 = _mm_slli_si128(xmm3, 2); + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + } + } + else { /* order == 3 */ + int r; + __m128i xmm0, xmm1, xmm3, xmm6; + xmm6 = _mm_setzero_si128(); + xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0)); + xmm0 = _mm_slli_si128(xmm0, 4); xmm0 = _mm_srli_si128(xmm0, 4); + xmm0 = _mm_packs_epi32(xmm0, xmm6); + + xmm3 = _mm_loadu_si128((const __m128i*)(data-4)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_packs_epi32(xmm3, xmm6); + xmm1 = _mm_slli_si128(xmm0, 2); + + /* xmm0: qlp_coeff + xmm1: qlp_coeff << 16 bit + xmm3: data */ + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + r = data_len % 2; + + if(r) { + xmm3 = _mm_slli_si128(xmm3, 2); + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + } + + while(data_len) { /* data_len is a multiple of 2 */ + xmm3 = _mm_slli_si128(xmm3, 4); + + xmm3 = _mm_insert_epi16(xmm3, curr, 1); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm1); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4)); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len-=2; + } + } + } + else { + if(order == 2) { + __m128i xmm0, xmm3, xmm6; + xmm6 = _mm_setzero_si128(); + xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0)); + xmm0 = _mm_slli_si128(xmm0, 8); xmm0 = _mm_srli_si128(xmm0, 8); + xmm0 = _mm_packs_epi32(xmm0, xmm6); + + xmm3 = _mm_loadu_si128((const __m128i*)(data-4)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3)); + xmm3 = _mm_packs_epi32(xmm3, xmm6); + + /* xmm0: qlp_coeff + xmm3: data */ + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + + while(data_len) { + xmm3 = _mm_slli_si128(xmm3, 2); + xmm3 = _mm_insert_epi16(xmm3, curr, 0); + + xmm6 = xmm3; + xmm6 = _mm_madd_epi16(xmm6, xmm0); + + curr = *data++; + *residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization); + + data_len--; + } + } + else { /* order == 1 */ + for(i = 0; i < (int)data_len; i++) + residual[i] = data[i] - ((qlp_coeff[0] * data[i-1]) >> lp_quantization); + } + } + } + } + else { /* order > 12 */ + for(i = 0; i < (int)data_len; i++) { + sum = 0; + switch(order) { + case 32: sum += qlp_coeff[31] * 
data[i-32]; + case 31: sum += qlp_coeff[30] * data[i-31]; + case 30: sum += qlp_coeff[29] * data[i-30]; + case 29: sum += qlp_coeff[28] * data[i-29]; + case 28: sum += qlp_coeff[27] * data[i-28]; + case 27: sum += qlp_coeff[26] * data[i-27]; + case 26: sum += qlp_coeff[25] * data[i-26]; + case 25: sum += qlp_coeff[24] * data[i-25]; + case 24: sum += qlp_coeff[23] * data[i-24]; + case 23: sum += qlp_coeff[22] * data[i-23]; + case 22: sum += qlp_coeff[21] * data[i-22]; + case 21: sum += qlp_coeff[20] * data[i-21]; + case 20: sum += qlp_coeff[19] * data[i-20]; + case 19: sum += qlp_coeff[18] * data[i-19]; + case 18: sum += qlp_coeff[17] * data[i-18]; + case 17: sum += qlp_coeff[16] * data[i-17]; + case 16: sum += qlp_coeff[15] * data[i-16]; + case 15: sum += qlp_coeff[14] * data[i-15]; + case 14: sum += qlp_coeff[13] * data[i-14]; + case 13: sum += qlp_coeff[12] * data[i-13]; + sum += qlp_coeff[11] * data[i-12]; + sum += qlp_coeff[10] * data[i-11]; + sum += qlp_coeff[ 9] * data[i-10]; + sum += qlp_coeff[ 8] * data[i- 9]; + sum += qlp_coeff[ 7] * data[i- 8]; + sum += qlp_coeff[ 6] * data[i- 7]; + sum += qlp_coeff[ 5] * data[i- 6]; + sum += qlp_coeff[ 4] * data[i- 5]; + sum += qlp_coeff[ 3] * data[i- 4]; + sum += qlp_coeff[ 2] * data[i- 3]; + sum += qlp_coeff[ 1] * data[i- 2]; + sum += qlp_coeff[ 0] * data[i- 1]; + } + residual[i] = data[i] - (sum >> lp_quantization); + } + } +} + +#define RESIDUAL_RESULT(xmmN) residual[i] = data[i] - (_mm_cvtsi128_si32(xmmN) >> lp_quantization); + +void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[]) +{ + int i; + + FLAC__ASSERT(order > 0); + FLAC__ASSERT(order <= 32); + + if(order <= 12) { + if(order > 8) { /* order == 9, 10, 11, 12 */ + if(order > 10) { /* order == 11, 12 */ + if(order == 12) { + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); // 0 0 q[1] q[0] + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); // 0 0 q[3] q[2] + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); // 0 0 q[5] q[4] + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); // 0 0 q[7] q[6] + xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8)); // 0 0 q[9] q[8] + xmm5 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+10)); // 0 0 q[11] q[10] + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); // 0 q[1] 0 q[0] + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); // 0 q[3] 0 q[2] + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); // 0 q[5] 0 q[4] + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); // 0 q[7] 0 q[6] + xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0)); // 0 q[9] 0 q[8] + xmm5 = _mm_shuffle_epi32(xmm5, _MM_SHUFFLE(3,1,2,0)); // 0 q[11] 0 q[10] + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[11] * data[i-12]; + //sum += qlp_coeff[10] * data[i-11]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-12)); // 0 0 d[i-11] d[i-12] + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); // 0 d[i-12] 0 d[i-11] + xmm7 = _mm_mul_epu32(xmm7, xmm5); /* we use _unsigned_ multiplication and discard high dword of the result values */ + + //sum += qlp_coeff[9] * data[i-10]; + //sum += qlp_coeff[8] * data[i-9]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-10)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm4); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[7] * 
data[i-8]; + //sum += qlp_coeff[6] * data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm3); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[5] * data[i-6]; + //sum += qlp_coeff[4] * data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm2); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[3] * data[i-4]; + //sum += qlp_coeff[2] * data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm1); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[1] * data[i-2]; + //sum += qlp_coeff[0] * data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm0); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 11 */ + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8)); + xmm5 = _mm_cvtsi32_si128(qlp_coeff[10]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[10] * data[i-11]; + xmm7 = _mm_cvtsi32_si128(data[i-11]); + xmm7 = _mm_mul_epu32(xmm7, xmm5); + + //sum += qlp_coeff[9] * data[i-10]; + //sum += qlp_coeff[8] * data[i-9]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-10)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm4); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[7] * data[i-8]; + //sum += qlp_coeff[6] * data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm3); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[5] * data[i-6]; + //sum += qlp_coeff[4] * data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm2); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[3] * data[i-4]; + //sum += qlp_coeff[2] * data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm1); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[1] * data[i-2]; + //sum += qlp_coeff[0] * data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm0); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + } + else { /* order == 9, 10 */ + if(order == 10) { + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const 
__m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[9] * data[i-10]; + //sum += qlp_coeff[8] * data[i-9]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-10)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epu32(xmm7, xmm4); + + //sum += qlp_coeff[7] * data[i-8]; + //sum += qlp_coeff[6] * data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm3); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[5] * data[i-6]; + //sum += qlp_coeff[4] * data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm2); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[3] * data[i-4]; + //sum += qlp_coeff[2] * data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm1); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[1] * data[i-2]; + //sum += qlp_coeff[0] * data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm0); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 9 */ + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + xmm4 = _mm_cvtsi32_si128(qlp_coeff[8]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[8] * data[i-9]; + xmm7 = _mm_cvtsi32_si128(data[i-9]); + xmm7 = _mm_mul_epu32(xmm7, xmm4); + + //sum += qlp_coeff[7] * data[i-8]; + //sum += qlp_coeff[6] * data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm3); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[5] * data[i-6]; + //sum += qlp_coeff[4] * data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm2); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[3] * data[i-4]; + //sum += qlp_coeff[2] * data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm1); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[1] * data[i-2]; + //sum += qlp_coeff[0] * data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, 
_MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm0); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + } + } + else if(order > 4) { /* order == 5, 6, 7, 8 */ + if(order > 6) { /* order == 7, 8 */ + if(order == 8) { + __m128i xmm0, xmm1, xmm2, xmm3, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[7] * data[i-8]; + //sum += qlp_coeff[6] * data[i-7]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epu32(xmm7, xmm3); + + //sum += qlp_coeff[5] * data[i-6]; + //sum += qlp_coeff[4] * data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm2); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[3] * data[i-4]; + //sum += qlp_coeff[2] * data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm1); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[1] * data[i-2]; + //sum += qlp_coeff[0] * data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm0); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 7 */ + __m128i xmm0, xmm1, xmm2, xmm3, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_cvtsi32_si128(qlp_coeff[6]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[6] * data[i-7]; + xmm7 = _mm_cvtsi32_si128(data[i-7]); + xmm7 = _mm_mul_epu32(xmm7, xmm3); + + //sum += qlp_coeff[5] * data[i-6]; + //sum += qlp_coeff[4] * data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm2); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[3] * data[i-4]; + //sum += qlp_coeff[2] * data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm1); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[1] * data[i-2]; + //sum += qlp_coeff[0] * data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm0); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + } + else { /* order == 5, 6 */ + if(order == 6) { + __m128i xmm0, xmm1, xmm2, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const 
__m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[5] * data[i-6]; + //sum += qlp_coeff[4] * data[i-5]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epu32(xmm7, xmm2); + + //sum += qlp_coeff[3] * data[i-4]; + //sum += qlp_coeff[2] * data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm1); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[1] * data[i-2]; + //sum += qlp_coeff[0] * data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm0); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 5 */ + __m128i xmm0, xmm1, xmm2, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_cvtsi32_si128(qlp_coeff[4]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[4] * data[i-5]; + xmm7 = _mm_cvtsi32_si128(data[i-5]); + xmm7 = _mm_mul_epu32(xmm7, xmm2); + + //sum += qlp_coeff[3] * data[i-4]; + //sum += qlp_coeff[2] * data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm1); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + //sum += qlp_coeff[1] * data[i-2]; + //sum += qlp_coeff[0] * data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm0); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + } + } + else { /* order == 1, 2, 3, 4 */ + if(order > 2) { /* order == 3, 4 */ + if(order == 4) { + __m128i xmm0, xmm1, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[3] * data[i-4]; + //sum += qlp_coeff[2] * data[i-3]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epu32(xmm7, xmm1); + + //sum += qlp_coeff[1] * data[i-2]; + //sum += qlp_coeff[0] * data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm0); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 3 */ + __m128i xmm0, xmm1, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_cvtsi32_si128(qlp_coeff[2]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + 
//sum = qlp_coeff[2] * data[i-3]; + xmm7 = _mm_cvtsi32_si128(data[i-3]); + xmm7 = _mm_mul_epu32(xmm7, xmm1); + + //sum += qlp_coeff[1] * data[i-2]; + //sum += qlp_coeff[0] * data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epu32(xmm6, xmm0); + xmm7 = _mm_add_epi32(xmm7, xmm6); + + xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + } + else { /* order == 1, 2 */ + if(order == 2) { + __m128i xmm0, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[1] * data[i-2]; + //sum += qlp_coeff[0] * data[i-1]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epu32(xmm7, xmm0); + + xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 1 */ + for(i = 0; i < (int)data_len; i++) + residual[i] = data[i] - ((qlp_coeff[0] * data[i-1]) >> lp_quantization); + } + } + } + } + else { /* order > 12 */ + FLAC__int32 sum; + for(i = 0; i < (int)data_len; i++) { + sum = 0; + switch(order) { + case 32: sum += qlp_coeff[31] * data[i-32]; + case 31: sum += qlp_coeff[30] * data[i-31]; + case 30: sum += qlp_coeff[29] * data[i-30]; + case 29: sum += qlp_coeff[28] * data[i-29]; + case 28: sum += qlp_coeff[27] * data[i-28]; + case 27: sum += qlp_coeff[26] * data[i-27]; + case 26: sum += qlp_coeff[25] * data[i-26]; + case 25: sum += qlp_coeff[24] * data[i-25]; + case 24: sum += qlp_coeff[23] * data[i-24]; + case 23: sum += qlp_coeff[22] * data[i-23]; + case 22: sum += qlp_coeff[21] * data[i-22]; + case 21: sum += qlp_coeff[20] * data[i-21]; + case 20: sum += qlp_coeff[19] * data[i-20]; + case 19: sum += qlp_coeff[18] * data[i-19]; + case 18: sum += qlp_coeff[17] * data[i-18]; + case 17: sum += qlp_coeff[16] * data[i-17]; + case 16: sum += qlp_coeff[15] * data[i-16]; + case 15: sum += qlp_coeff[14] * data[i-15]; + case 14: sum += qlp_coeff[13] * data[i-14]; + case 13: sum += qlp_coeff[12] * data[i-13]; + sum += qlp_coeff[11] * data[i-12]; + sum += qlp_coeff[10] * data[i-11]; + sum += qlp_coeff[ 9] * data[i-10]; + sum += qlp_coeff[ 8] * data[i- 9]; + sum += qlp_coeff[ 7] * data[i- 8]; + sum += qlp_coeff[ 6] * data[i- 7]; + sum += qlp_coeff[ 5] * data[i- 6]; + sum += qlp_coeff[ 4] * data[i- 5]; + sum += qlp_coeff[ 3] * data[i- 4]; + sum += qlp_coeff[ 2] * data[i- 3]; + sum += qlp_coeff[ 1] * data[i- 2]; + sum += qlp_coeff[ 0] * data[i- 1]; + } + residual[i] = data[i] - (sum >> lp_quantization); + } + } +} + +#endif /* (FLAC__CPU_IA32 || FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN */ +#endif /* FLAC__NO_ASM */ +#endif /* FLAC__INTEGER_ONLY_LIBRARY */ diff --git a/src/libFLAC/lpc_intrin_sse41.c b/src/libFLAC/lpc_intrin_sse41.c new file mode 100644 index 00000000..ea8eb371 --- /dev/null +++ b/src/libFLAC/lpc_intrin_sse41.c @@ -0,0 +1,1126 @@ +/* libFLAC - Free Lossless Audio Codec library + * Copyright (C) 2000-2009 Josh Coalson + * Copyright (C) 2011-2013 Xiph.Org Foundation + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * - Neither the name of the Xiph.org Foundation nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#if HAVE_CONFIG_H +# include <config.h> +#endif + +#include "share/compat.h" + +#ifndef FLAC__INTEGER_ONLY_LIBRARY +#ifndef FLAC__NO_ASM +#if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && defined FLAC__HAS_X86INTRIN +#ifdef FLAC__SSE4_SUPPORTED + +#include "FLAC/assert.h" +#include "FLAC/format.h" +#include "private/lpc.h" + +#include <smmintrin.h> /* SSE4.1 */ + +#ifdef FLAC__CPU_IA32 +#if defined _MSC_VER || defined __INTEL_COMPILER +#define RESIDUAL_RESULT(xmmN) residual[i] = data[i] - (FLAC__int32)(xmmN.m128i_i64[0] >> lp_quantization); +#define DATA_RESULT(xmmN) data[i] = residual[i] + (FLAC__int32)(xmmN.m128i_i64[0] >> lp_quantization); +#else +#define RESIDUAL_RESULT(xmmN) { \ + FLAC__int64 tmp[2]; \ + _mm_storel_epi64((__m128i *)tmp, xmmN); \ + residual[i] = data[i] - (FLAC__int32)(tmp[0] >> lp_quantization); \ + } +#define DATA_RESULT(xmmN) { \ + FLAC__int64 tmp[2]; \ + _mm_storel_epi64((__m128i *)tmp, xmmN); \ + data[i] = residual[i] + (FLAC__int32)(tmp[0] >> lp_quantization); \ + } +#endif +#else +#define RESIDUAL_RESULT(xmmN) residual[i] = data[i] - (FLAC__int32)(_mm_cvtsi128_si64(xmmN) >> lp_quantization); +#define DATA_RESULT(xmmN) data[i] = residual[i] + (FLAC__int32)(_mm_cvtsi128_si64(xmmN) >> lp_quantization); +#endif + +void FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[]) +{ + int i; + + FLAC__ASSERT(order > 0); + FLAC__ASSERT(order <= 32); + + if(order <= 12) { + if(order > 8) { /* order == 9, 10, 11, 12 */ + if(order > 10) { /* order == 11, 12 */ + if(order == 12) { + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); // 0 0 q[1] q[0] + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); // 0 0 q[3] q[2] + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); // 0 0 q[5] q[4] + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); // 0 0 q[7] q[6] + xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8)); // 0 0 q[9] q[8] + xmm5 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+10)); // 0 0 q[11] q[10] + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); // 0 q[1] 0 q[0] + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); // 0 
q[3] 0 q[2] + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); // 0 q[5] 0 q[4] + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); // 0 q[7] 0 q[6] + xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0)); // 0 q[9] 0 q[8] + xmm5 = _mm_shuffle_epi32(xmm5, _MM_SHUFFLE(3,1,2,0)); // 0 q[11] 0 q[10] + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[11] * (FLAC__int64)data[i-12]; + //sum += qlp_coeff[10] * (FLAC__int64)data[i-11]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-12)); // 0 0 d[i-11] d[i-12] + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); // 0 d[i-12] 0 d[i-11] + xmm7 = _mm_mul_epi32(xmm7, xmm5); + + //sum += qlp_coeff[9] * (FLAC__int64)data[i-10]; + //sum += qlp_coeff[8] * (FLAC__int64)data[i-9]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-10)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm4); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[7] * (FLAC__int64)data[i-8]; + //sum += qlp_coeff[6] * (FLAC__int64)data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm3); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 11 */ + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8)); + xmm5 = _mm_cvtsi32_si128(qlp_coeff[10]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[10] * (FLAC__int64)data[i-11]; + xmm7 = _mm_cvtsi32_si128(data[i-11]); + xmm7 = _mm_mul_epi32(xmm7, xmm5); + + //sum += qlp_coeff[9] * (FLAC__int64)data[i-10]; + //sum += qlp_coeff[8] * (FLAC__int64)data[i-9]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-10)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm4); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[7] * (FLAC__int64)data[i-8]; + //sum += qlp_coeff[6] * (FLAC__int64)data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = 
_mm_mul_epi32(xmm6, xmm3); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + } + else { /* order == 9, 10 */ + if(order == 10) { + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[9] * (FLAC__int64)data[i-10]; + //sum += qlp_coeff[8] * (FLAC__int64)data[i-9]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-10)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epi32(xmm7, xmm4); + + //sum += qlp_coeff[7] * (FLAC__int64)data[i-8]; + //sum += qlp_coeff[6] * (FLAC__int64)data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm3); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 9 */ + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + xmm4 = _mm_cvtsi32_si128(qlp_coeff[8]); 
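/* Editor's note (annotation, not part of the patch): in every odd-order
   branch the leftover coefficient is loaded alone with _mm_cvtsi32_si128,
   which places it in lane 0, already an even lane, so unlike the paired
   coefficients it needs no _MM_SHUFFLE fix-up before _mm_mul_epi32. The
   matching history sample is loaded the same way inside the loop, and
   their single 64-bit product seeds the accumulator; that is why the
   first commented line of each odd-order loop reads "//sum = ..." rather
   than "//sum += ...". A scalar sketch of the seeding step for this
   order-9 case, using the function's own variables:

       FLAC__int64 sum = (FLAC__int64)qlp_coeff[8] * data[i-9]; /* seed with the unpaired tap */
       /* the remaining eight taps are then accumulated two at a time */
*/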
+ + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[8] * (FLAC__int64)data[i-9]; + xmm7 = _mm_cvtsi32_si128(data[i-9]); + xmm7 = _mm_mul_epi32(xmm7, xmm4); + + //sum += qlp_coeff[7] * (FLAC__int64)data[i-8]; + //sum += qlp_coeff[6] * (FLAC__int64)data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm3); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + } + } + else if(order > 4) { /* order == 5, 6, 7, 8 */ + if(order > 6) { /* order == 7, 8 */ + if(order == 8) { + __m128i xmm0, xmm1, xmm2, xmm3, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[7] * (FLAC__int64)data[i-8]; + //sum += qlp_coeff[6] * (FLAC__int64)data[i-7]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epi32(xmm7, xmm3); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 7 */ + __m128i xmm0, xmm1, xmm2, xmm3, xmm6, 
xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_cvtsi32_si128(qlp_coeff[6]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[6] * (FLAC__int64)data[i-7]; + xmm7 = _mm_cvtsi32_si128(data[i-7]); + xmm7 = _mm_mul_epi32(xmm7, xmm3); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + } + else { /* order == 5, 6 */ + if(order == 6) { + __m128i xmm0, xmm1, xmm2, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epi32(xmm7, xmm2); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 5 */ + __m128i xmm0, xmm1, xmm2, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_cvtsi32_si128(qlp_coeff[4]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm7 = _mm_cvtsi32_si128(data[i-5]); + xmm7 = _mm_mul_epi32(xmm7, xmm2); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const 
__m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + } + } + else { /* order == 1, 2, 3, 4 */ + if(order > 2) { /* order == 3, 4 */ + if(order == 4) { + __m128i xmm0, xmm1, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epi32(xmm7, xmm1); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 3 */ + __m128i xmm0, xmm1, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_cvtsi32_si128(qlp_coeff[2]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm7 = _mm_cvtsi32_si128(data[i-3]); + xmm7 = _mm_mul_epi32(xmm7, xmm1); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + } + else { /* order == 1, 2 */ + if(order == 2) { + __m128i xmm0, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epi32(xmm7, xmm0); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + RESIDUAL_RESULT(xmm7); + } + } + else { /* order == 1 */ + for(i = 0; i < (int)data_len; i++) + residual[i] = data[i] - (FLAC__int32)((qlp_coeff[0] * (FLAC__int64)data[i-1]) >> lp_quantization); + } + } + } + } + else { /* order > 12 */ + FLAC__int64 sum; + for(i = 0; i < (int)data_len; i++) { + sum = 0; + switch(order) { + case 32: sum += qlp_coeff[31] * (FLAC__int64)data[i-32]; + case 31: sum += qlp_coeff[30] * (FLAC__int64)data[i-31]; + case 30: sum += qlp_coeff[29] * (FLAC__int64)data[i-30]; + case 29: sum += qlp_coeff[28] * (FLAC__int64)data[i-29]; + case 28: sum += qlp_coeff[27] * (FLAC__int64)data[i-28]; + case 27: sum += qlp_coeff[26] * (FLAC__int64)data[i-27]; + 
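/* Editor's note: every case in this switch falls through deliberately, a
   Duff-style dispatch. Entering at "case order" runs exactly the taps
   order-1 .. 0, equivalent to the plain loop below (sketch using the
   function's own variables, plus a local index j):

       FLAC__int64 sum = 0;
       unsigned j;
       for(j = 0; j < order; j++)
           sum += qlp_coeff[j] * (FLAC__int64)data[i-(int)j-1];
       residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);

   The unrolled switch trades code size for the removal of per-tap loop
   overhead on every sample. */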
case 26: sum += qlp_coeff[25] * (FLAC__int64)data[i-26]; + case 25: sum += qlp_coeff[24] * (FLAC__int64)data[i-25]; + case 24: sum += qlp_coeff[23] * (FLAC__int64)data[i-24]; + case 23: sum += qlp_coeff[22] * (FLAC__int64)data[i-23]; + case 22: sum += qlp_coeff[21] * (FLAC__int64)data[i-22]; + case 21: sum += qlp_coeff[20] * (FLAC__int64)data[i-21]; + case 20: sum += qlp_coeff[19] * (FLAC__int64)data[i-20]; + case 19: sum += qlp_coeff[18] * (FLAC__int64)data[i-19]; + case 18: sum += qlp_coeff[17] * (FLAC__int64)data[i-18]; + case 17: sum += qlp_coeff[16] * (FLAC__int64)data[i-17]; + case 16: sum += qlp_coeff[15] * (FLAC__int64)data[i-16]; + case 15: sum += qlp_coeff[14] * (FLAC__int64)data[i-15]; + case 14: sum += qlp_coeff[13] * (FLAC__int64)data[i-14]; + case 13: sum += qlp_coeff[12] * (FLAC__int64)data[i-13]; + sum += qlp_coeff[11] * (FLAC__int64)data[i-12]; + sum += qlp_coeff[10] * (FLAC__int64)data[i-11]; + sum += qlp_coeff[ 9] * (FLAC__int64)data[i-10]; + sum += qlp_coeff[ 8] * (FLAC__int64)data[i- 9]; + sum += qlp_coeff[ 7] * (FLAC__int64)data[i- 8]; + sum += qlp_coeff[ 6] * (FLAC__int64)data[i- 7]; + sum += qlp_coeff[ 5] * (FLAC__int64)data[i- 6]; + sum += qlp_coeff[ 4] * (FLAC__int64)data[i- 5]; + sum += qlp_coeff[ 3] * (FLAC__int64)data[i- 4]; + sum += qlp_coeff[ 2] * (FLAC__int64)data[i- 3]; + sum += qlp_coeff[ 1] * (FLAC__int64)data[i- 2]; + sum += qlp_coeff[ 0] * (FLAC__int64)data[i- 1]; + } + residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization); + } + } +} + +void FLAC__lpc_restore_signal_wide_intrin_sse41(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[]) +{ + int i; + + FLAC__ASSERT(order > 0); + FLAC__ASSERT(order <= 32); + + if(order <= 12) { + if(order > 8) { /* order == 9, 10, 11, 12 */ + if(order > 10) { /* order == 11, 12 */ + if(order == 12) { + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); // 0 0 q[1] q[0] + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); // 0 0 q[3] q[2] + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); // 0 0 q[5] q[4] + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); // 0 0 q[7] q[6] + xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8)); // 0 0 q[9] q[8] + xmm5 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+10)); // 0 0 q[11] q[10] + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); // 0 q[1] 0 q[0] + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); // 0 q[3] 0 q[2] + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); // 0 q[5] 0 q[4] + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); // 0 q[7] 0 q[6] + xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0)); // 0 q[9] 0 q[8] + xmm5 = _mm_shuffle_epi32(xmm5, _MM_SHUFFLE(3,1,2,0)); // 0 q[11] 0 q[10] + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[11] * (FLAC__int64)data[i-12]; + //sum += qlp_coeff[10] * (FLAC__int64)data[i-11]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-12)); // 0 0 d[i-11] d[i-12] + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); // 0 d[i-12] 0 d[i-11] + xmm7 = _mm_mul_epi32(xmm7, xmm5); + + //sum += qlp_coeff[9] * (FLAC__int64)data[i-10]; + //sum += qlp_coeff[8] * (FLAC__int64)data[i-9]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-10)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm4); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[7] * (FLAC__int64)data[i-8]; + //sum += qlp_coeff[6] * 
(FLAC__int64)data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm3); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + DATA_RESULT(xmm7); + } + } + else { /* order == 11 */ + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8)); + xmm5 = _mm_cvtsi32_si128(qlp_coeff[10]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[10] * (FLAC__int64)data[i-11]; + xmm7 = _mm_cvtsi32_si128(data[i-11]); + xmm7 = _mm_mul_epi32(xmm7, xmm5); + + //sum += qlp_coeff[9] * (FLAC__int64)data[i-10]; + //sum += qlp_coeff[8] * (FLAC__int64)data[i-9]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-10)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm4); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[7] * (FLAC__int64)data[i-8]; + //sum += qlp_coeff[6] * (FLAC__int64)data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm3); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + DATA_RESULT(xmm7); + } + } + } + else { /* order == 
9, 10 */ + if(order == 10) { + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[9] * (FLAC__int64)data[i-10]; + //sum += qlp_coeff[8] * (FLAC__int64)data[i-9]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-10)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epi32(xmm7, xmm4); + + //sum += qlp_coeff[7] * (FLAC__int64)data[i-8]; + //sum += qlp_coeff[6] * (FLAC__int64)data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm3); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + DATA_RESULT(xmm7); + } + } + else { /* order == 9 */ + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + xmm4 = _mm_cvtsi32_si128(qlp_coeff[8]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[8] * (FLAC__int64)data[i-9]; + xmm7 = _mm_cvtsi32_si128(data[i-9]); + xmm7 = _mm_mul_epi32(xmm7, xmm4); + + //sum += qlp_coeff[7] * (FLAC__int64)data[i-8]; + //sum += qlp_coeff[6] * (FLAC__int64)data[i-7]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm3); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += 
qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + DATA_RESULT(xmm7); + } + } + } + } + else if(order > 4) { /* order == 5, 6, 7, 8 */ + if(order > 6) { /* order == 7, 8 */ + if(order == 8) { + __m128i xmm0, xmm1, xmm2, xmm3, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[7] * (FLAC__int64)data[i-8]; + //sum += qlp_coeff[6] * (FLAC__int64)data[i-7]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-8)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epi32(xmm7, xmm3); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + DATA_RESULT(xmm7); + } + } + else { /* order == 7 */ + __m128i xmm0, xmm1, xmm2, xmm3, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + xmm3 = _mm_cvtsi32_si128(qlp_coeff[6]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[6] * (FLAC__int64)data[i-7]; + xmm7 = _mm_cvtsi32_si128(data[i-7]); + xmm7 = _mm_mul_epi32(xmm7, xmm3); + + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm2); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, 
_MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + DATA_RESULT(xmm7); + } + } + } + else { /* order == 5, 6 */ + if(order == 6) { + __m128i xmm0, xmm1, xmm2, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[5] * (FLAC__int64)data[i-6]; + //sum += qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-6)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epi32(xmm7, xmm2); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + DATA_RESULT(xmm7); + } + } + else { /* order == 5 */ + __m128i xmm0, xmm1, xmm2, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + xmm2 = _mm_cvtsi32_si128(qlp_coeff[4]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[4] * (FLAC__int64)data[i-5]; + xmm7 = _mm_cvtsi32_si128(data[i-5]); + xmm7 = _mm_mul_epi32(xmm7, xmm2); + + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm1); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + DATA_RESULT(xmm7); + } + } + } + } + else { /* order == 1, 2, 3, 4 */ + if(order > 2) { /* order == 3, 4 */ + if(order == 4) { + __m128i xmm0, xmm1, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2)); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[3] * (FLAC__int64)data[i-4]; + //sum += qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm7 = 
_mm_loadl_epi64((const __m128i*)(data+i-4)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epi32(xmm7, xmm1); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + DATA_RESULT(xmm7); + } + } + else { /* order == 3 */ + __m128i xmm0, xmm1, xmm6, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm1 = _mm_cvtsi32_si128(qlp_coeff[2]); + + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum = qlp_coeff[2] * (FLAC__int64)data[i-3]; + xmm7 = _mm_cvtsi32_si128(data[i-3]); + xmm7 = _mm_mul_epi32(xmm7, xmm1); + + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1)); + xmm6 = _mm_mul_epi32(xmm6, xmm0); + xmm7 = _mm_add_epi64(xmm7, xmm6); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + DATA_RESULT(xmm7); + } + } + } + else { /* order == 1, 2 */ + if(order == 2) { + __m128i xmm0, xmm7; + xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0)); + xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); + + for(i = 0; i < (int)data_len; i++) { + //sum = 0; + //sum += qlp_coeff[1] * (FLAC__int64)data[i-2]; + //sum += qlp_coeff[0] * (FLAC__int64)data[i-1]; + xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-2)); + xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); + xmm7 = _mm_mul_epi32(xmm7, xmm0); + + xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8)); + DATA_RESULT(xmm7); + } + } + else { /* order == 1 */ + for(i = 0; i < (int)data_len; i++) + data[i] = residual[i] + (FLAC__int32)((qlp_coeff[0] * (FLAC__int64)data[i-1]) >> lp_quantization); + } + } + } + } + else { /* order > 12 */ + FLAC__int64 sum; + for(i = 0; i < (int)data_len; i++) { + sum = 0; + switch(order) { + case 32: sum += qlp_coeff[31] * (FLAC__int64)data[i-32]; + case 31: sum += qlp_coeff[30] * (FLAC__int64)data[i-31]; + case 30: sum += qlp_coeff[29] * (FLAC__int64)data[i-30]; + case 29: sum += qlp_coeff[28] * (FLAC__int64)data[i-29]; + case 28: sum += qlp_coeff[27] * (FLAC__int64)data[i-28]; + case 27: sum += qlp_coeff[26] * (FLAC__int64)data[i-27]; + case 26: sum += qlp_coeff[25] * (FLAC__int64)data[i-26]; + case 25: sum += qlp_coeff[24] * (FLAC__int64)data[i-25]; + case 24: sum += qlp_coeff[23] * (FLAC__int64)data[i-24]; + case 23: sum += qlp_coeff[22] * (FLAC__int64)data[i-23]; + case 22: sum += qlp_coeff[21] * (FLAC__int64)data[i-22]; + case 21: sum += qlp_coeff[20] * (FLAC__int64)data[i-21]; + case 20: sum += qlp_coeff[19] * (FLAC__int64)data[i-20]; + case 19: sum += qlp_coeff[18] * (FLAC__int64)data[i-19]; + case 18: sum += qlp_coeff[17] * (FLAC__int64)data[i-18]; + case 17: sum += qlp_coeff[16] * (FLAC__int64)data[i-17]; + case 16: sum += qlp_coeff[15] * (FLAC__int64)data[i-16]; + case 15: sum += qlp_coeff[14] * (FLAC__int64)data[i-15]; + case 14: sum += qlp_coeff[13] * (FLAC__int64)data[i-14]; + case 13: sum += qlp_coeff[12] * (FLAC__int64)data[i-13]; + sum += qlp_coeff[11] * (FLAC__int64)data[i-12]; + sum += qlp_coeff[10] * (FLAC__int64)data[i-11]; + sum += qlp_coeff[ 9] * (FLAC__int64)data[i-10]; + sum += qlp_coeff[ 8] * (FLAC__int64)data[i- 9]; + sum += qlp_coeff[ 7] * 
(FLAC__int64)data[i- 8]; + sum += qlp_coeff[ 6] * (FLAC__int64)data[i- 7]; + sum += qlp_coeff[ 5] * (FLAC__int64)data[i- 6]; + sum += qlp_coeff[ 4] * (FLAC__int64)data[i- 5]; + sum += qlp_coeff[ 3] * (FLAC__int64)data[i- 4]; + sum += qlp_coeff[ 2] * (FLAC__int64)data[i- 3]; + sum += qlp_coeff[ 1] * (FLAC__int64)data[i- 2]; + sum += qlp_coeff[ 0] * (FLAC__int64)data[i- 1]; + } + data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization); + } + } +} + +#endif /* FLAC__SSE4_SUPPORTED */ +#endif /* (FLAC__CPU_IA32 || FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN */ +#endif /* FLAC__NO_ASM */ +#endif /* FLAC__INTEGER_ONLY_LIBRARY */ diff --git a/src/libFLAC/lpc_x86intrin.c b/src/libFLAC/lpc_x86intrin.c deleted file mode 100644 index ba26847c..00000000 --- a/src/libFLAC/lpc_x86intrin.c +++ /dev/null @@ -1,566 +0,0 @@ -/* libFLAC - Free Lossless Audio Codec library - * Copyright (C) 2000-2009 Josh Coalson - * Copyright (C) 2011-2013 Xiph.Org Foundation - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * - Neither the name of the Xiph.org Foundation nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#if HAVE_CONFIG_H -# include <config.h> -#endif - -#ifndef FLAC__INTEGER_ONLY_LIBRARY -#ifndef FLAC__NO_ASM -#if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && defined FLAC__HAS_X86INTRIN - -#include "FLAC/assert.h" -#include "FLAC/format.h" -#include "private/lpc.h" - -#include <emmintrin.h> /* SSE2 */ - -void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[]) -{ - __m128 xmm0, xmm2, xmm5; - - (void) lag; - FLAC__ASSERT(lag > 0); - FLAC__ASSERT(lag <= 4); - FLAC__ASSERT(lag <= data_len); - FLAC__ASSERT(data_len > 0); - - xmm5 = _mm_setzero_ps(); - - xmm0 = _mm_load_ss(data++); - xmm2 = xmm0; - xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0); - - xmm0 = _mm_mul_ps(xmm0, xmm2); - xmm5 = _mm_add_ps(xmm5, xmm0); - - data_len--; - - while(data_len) - { - xmm0 = _mm_load1_ps(data++); - - xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3)); - xmm2 = _mm_move_ss(xmm2, xmm0); - xmm0 = _mm_mul_ps(xmm0, xmm2); - xmm5 = _mm_add_ps(xmm5, xmm0); - - data_len--; - } - - _mm_storeu_ps(autoc, xmm5); -} - -void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[]) -{ - __m128 xmm0, xmm1, xmm2, xmm3, xmm5, xmm6; - - (void) lag; - FLAC__ASSERT(lag > 0); - FLAC__ASSERT(lag <= 8); - FLAC__ASSERT(lag <= data_len); - FLAC__ASSERT(data_len > 0); - - xmm5 = _mm_setzero_ps(); - xmm6 = _mm_setzero_ps(); - - xmm0 = _mm_load_ss(data++); - xmm2 = xmm0; - xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0); - xmm3 = _mm_setzero_ps(); - - xmm0 = _mm_mul_ps(xmm0, xmm2); - xmm5 = _mm_add_ps(xmm5, xmm0); - - data_len--; - - while(data_len) /* see /src/libFLAC/ia32/lpc_asm.nasm */ - { - xmm0 = _mm_load1_ps(data++); - - xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3)); - xmm3 = _mm_shuffle_ps(xmm3, xmm3, _MM_SHUFFLE(2,1,0,3)); - - xmm3 = _mm_move_ss(xmm3, xmm2); - xmm1 = xmm0; - xmm2 = _mm_move_ss(xmm2, xmm0); - - xmm1 = _mm_mul_ps(xmm1, xmm3); - xmm0 = _mm_mul_ps(xmm0, xmm2); - xmm6 = _mm_add_ps(xmm6, xmm1); - xmm5 = _mm_add_ps(xmm5, xmm0); - - data_len--; - } - - _mm_storeu_ps(autoc, xmm5); - _mm_storeu_ps(autoc+4, xmm6); -} - -void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_12(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[]) -{ - __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; - - (void) lag; - FLAC__ASSERT(lag > 0); - FLAC__ASSERT(lag <= 12); - FLAC__ASSERT(lag <= data_len); - FLAC__ASSERT(data_len > 0); - - xmm5 = _mm_setzero_ps(); - xmm6 = _mm_setzero_ps(); - xmm7 = _mm_setzero_ps(); - - xmm0 = _mm_load_ss(data++); - xmm2 = xmm0; - xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0); - xmm3 = _mm_setzero_ps(); - xmm4 = _mm_setzero_ps(); - - xmm0 = _mm_mul_ps(xmm0, xmm2); - xmm5 = _mm_add_ps(xmm5, xmm0); - - data_len--; - - while(data_len) - { - xmm0 = _mm_load1_ps(data++); - - xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3)); - xmm3 = _mm_shuffle_ps(xmm3, xmm3, _MM_SHUFFLE(2,1,0,3)); - xmm4 = _mm_shuffle_ps(xmm4, xmm4, _MM_SHUFFLE(2,1,0,3)); - xmm4 = _mm_move_ss(xmm4, xmm3); - xmm3 = _mm_move_ss(xmm3, xmm2); - xmm2 = _mm_move_ss(xmm2, xmm0); - - xmm1 = xmm0; - xmm1 = _mm_mul_ps(xmm1, xmm2); - xmm5 = _mm_add_ps(xmm5, xmm1); - xmm1 = xmm0; - xmm1 = _mm_mul_ps(xmm1, xmm3); - xmm6 = _mm_add_ps(xmm6, xmm1); - xmm0 = _mm_mul_ps(xmm0, xmm4); - xmm7 = _mm_add_ps(xmm7, xmm0); - - data_len--; - } - - _mm_storeu_ps(autoc, xmm5); - _mm_storeu_ps(autoc+4, xmm6); - _mm_storeu_ps(autoc+8, xmm7); -} - -void 
FLAC__lpc_compute_autocorrelation_intrin_sse_lag_16(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[]) -{ - __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9; - - (void) lag; - FLAC__ASSERT(lag > 0); - FLAC__ASSERT(lag <= 16); - FLAC__ASSERT(lag <= data_len); - FLAC__ASSERT(data_len > 0); - - xmm6 = _mm_setzero_ps(); - xmm7 = _mm_setzero_ps(); - xmm8 = _mm_setzero_ps(); - xmm9 = _mm_setzero_ps(); - - xmm0 = _mm_load_ss(data++); - xmm2 = xmm0; - xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0); - xmm3 = _mm_setzero_ps(); - xmm4 = _mm_setzero_ps(); - xmm5 = _mm_setzero_ps(); - - xmm0 = _mm_mul_ps(xmm0, xmm2); - xmm6 = _mm_add_ps(xmm6, xmm0); - - data_len--; - - while(data_len) - { - xmm0 = _mm_load1_ps(data++); - - /* shift xmm5:xmm4:xmm3:xmm2 left by one float */ - xmm5 = _mm_shuffle_ps(xmm5, xmm5, _MM_SHUFFLE(2,1,0,3)); - xmm4 = _mm_shuffle_ps(xmm4, xmm4, _MM_SHUFFLE(2,1,0,3)); - xmm3 = _mm_shuffle_ps(xmm3, xmm3, _MM_SHUFFLE(2,1,0,3)); - xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3)); - xmm5 = _mm_move_ss(xmm5, xmm4); - xmm4 = _mm_move_ss(xmm4, xmm3); - xmm3 = _mm_move_ss(xmm3, xmm2); - xmm2 = _mm_move_ss(xmm2, xmm0); - - /* xmm9|xmm8|xmm7|xmm6 += xmm0|xmm0|xmm0|xmm0 * xmm5|xmm4|xmm3|xmm2 */ - xmm1 = xmm0; - xmm1 = _mm_mul_ps(xmm1, xmm5); - xmm9 = _mm_add_ps(xmm9, xmm1); - xmm1 = xmm0; - xmm1 = _mm_mul_ps(xmm1, xmm4); - xmm8 = _mm_add_ps(xmm8, xmm1); - xmm1 = xmm0; - xmm1 = _mm_mul_ps(xmm1, xmm3); - xmm7 = _mm_add_ps(xmm7, xmm1); - xmm0 = _mm_mul_ps(xmm0, xmm2); - xmm6 = _mm_add_ps(xmm6, xmm0); - - data_len--; - } - - _mm_storeu_ps(autoc, xmm6); - _mm_storeu_ps(autoc+4, xmm7); - _mm_storeu_ps(autoc+8, xmm8); - _mm_storeu_ps(autoc+12,xmm9); -} - -void FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_sse2(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[]) -{ - int i; - FLAC__int32 sum; - - FLAC__ASSERT(order > 0); - FLAC__ASSERT(order <= 32); - FLAC__ASSERT(data_len > 0); - - if(order <= 12) { - FLAC__int32 curr; - if(order > 8) { /* order == 9, 10, 11, 12 */ /* can be modified to work with order <= 15 but the subset limit is 12 */ - __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; - xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0)); - xmm6 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4)); - xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+8)); /* read 0 to 3 uninitialized coeffs... 
-void FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_sse2(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[])
-{
-	int i;
-	FLAC__int32 sum;
-
-	FLAC__ASSERT(order > 0);
-	FLAC__ASSERT(order <= 32);
-	FLAC__ASSERT(data_len > 0);
-
-	if(order <= 12) {
-		FLAC__int32 curr;
-		if(order > 8) { /* order == 9, 10, 11, 12 */ /* can be modified to work with order <= 15 but the subset limit is 12 */
-			__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
-			xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-			xmm6 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
-			xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+8)); /* read 0 to 3 uninitialized coeffs... */
-			switch(order) /* ...and zero them out */
-			{
-			case 9:
-				xmm1 = _mm_slli_si128(xmm1, 12); xmm1 = _mm_srli_si128(xmm1, 12);
-				break;
-			case 10:
-				xmm1 = _mm_slli_si128(xmm1, 8); xmm1 = _mm_srli_si128(xmm1, 8);
-				break;
-			case 11:
-				xmm1 = _mm_slli_si128(xmm1, 4); xmm1 = _mm_srli_si128(xmm1, 4);
-				break;
-			}
-			xmm2 = _mm_setzero_si128();
-			xmm0 = _mm_packs_epi32(xmm0, xmm6);
-			xmm1 = _mm_packs_epi32(xmm1, xmm2);
-
-			xmm4 = _mm_loadu_si128((const __m128i*)(data-12));
-			xmm5 = _mm_loadu_si128((const __m128i*)(data-8));
-			xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-			xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(0,1,2,3));
-			xmm5 = _mm_shuffle_epi32(xmm5, _MM_SHUFFLE(0,1,2,3));
-			xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-			xmm4 = _mm_packs_epi32(xmm4, xmm2);
-			xmm3 = _mm_packs_epi32(xmm3, xmm5);
-
-			xmm7 = _mm_slli_si128(xmm1, 2);
-			xmm7 = _mm_or_si128(xmm7, _mm_srli_si128(xmm0, 14));
-			xmm2 = _mm_slli_si128(xmm0, 2);
-
-			/* xmm0, xmm1: qlp_coeff
-			   xmm2, xmm7: qlp_coeff << 16bit
-			   xmm3, xmm4: data */
-
-			xmm5 = xmm4;
-			xmm5 = _mm_madd_epi16(xmm5, xmm1);
-			xmm6 = xmm3;
-			xmm6 = _mm_madd_epi16(xmm6, xmm0);
-			xmm6 = _mm_add_epi32(xmm6, xmm5);
-			xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-			xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-			curr = *data++;
-			*residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization);
-
-			data_len--;
-
-			if(data_len & 1)
-			{
-				xmm4 = _mm_slli_si128(xmm4, 2);
-				xmm6 = xmm3;
-				xmm3 = _mm_slli_si128(xmm3, 2);
-				xmm4 = _mm_or_si128(xmm4, _mm_srli_si128(xmm6, 14));
-				xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-				xmm5 = xmm4;
-				xmm5 = _mm_madd_epi16(xmm5, xmm1);
-				xmm6 = xmm3;
-				xmm6 = _mm_madd_epi16(xmm6, xmm0);
-				xmm6 = _mm_add_epi32(xmm6, xmm5);
-				xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-				xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-				curr = *data++;
-				*residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization);
-
-				data_len--;
-			}
-
-			while(data_len) { /* data_len is even */
-				/* 2 shifts per 2 cycles less + 2x loop unwind, but we need shifted qlp_coeff in xmm2:xmm7 */
-				xmm4 = _mm_slli_si128(xmm4, 4);
-				xmm6 = xmm3;
-				xmm3 = _mm_slli_si128(xmm3, 4);
-				xmm4 = _mm_or_si128(xmm4, _mm_srli_si128(xmm6, 12));
-				xmm3 = _mm_insert_epi16(xmm3, curr, 1);
-
-				xmm5 = xmm4;
-				xmm5 = _mm_madd_epi16(xmm5, xmm7);
-				xmm6 = xmm3;
-				xmm6 = _mm_madd_epi16(xmm6, xmm2);
-				xmm6 = _mm_add_epi32(xmm6, xmm5);
-				xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-				xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-				curr = *data++;
-				*residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization);
-
-				xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-				xmm5 = xmm4;
-				xmm5 = _mm_madd_epi16(xmm5, xmm1);
-				xmm6 = xmm3;
-				xmm6 = _mm_madd_epi16(xmm6, xmm0);
-				xmm6 = _mm_add_epi32(xmm6, xmm5);
-				xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-				xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-				curr = *data++;
-				*residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization);
-
-				data_len-=2;
-			}
-		}
-		else if(order > 4) { /* order == 5, 6, 7, 8 */
-			if(order == 8) {
-				__m128i xmm0, xmm1, xmm3, xmm6;
-				xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-				xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
-				xmm0 = _mm_packs_epi32(xmm0, xmm1);
-
-				xmm1 = _mm_loadu_si128((const __m128i*)(data-8));
-				xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-				xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(0,1,2,3));
-				xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-				xmm3 = _mm_packs_epi32(xmm3, xmm1);
-
-				/* xmm0: qlp_coeff
-				   xmm3: data */
-
-				xmm6 = xmm3;
-				xmm6 = _mm_madd_epi16(xmm6, xmm0);
-				xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-				xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-				curr = *data++;
-				*residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization);
-
-				data_len--;
-
-				while(data_len) {
-					xmm3 = _mm_slli_si128(xmm3, 2);
-					xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-					xmm6 = xmm3;
-					xmm6 = _mm_madd_epi16(xmm6, xmm0);
-					xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-					xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-					curr = *data++;
-					*residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization);
-
-					data_len--;
-				}
-			}
-			else { /* order == 5, 6, 7 */
-				__m128i xmm0, xmm1, xmm2, xmm3, xmm6;
-				xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-				xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
-				switch(order)
-				{
-				case 5:
-					xmm1 = _mm_slli_si128(xmm1, 12); xmm1 = _mm_srli_si128(xmm1, 12);
-					break;
-				case 6:
-					xmm1 = _mm_slli_si128(xmm1, 8); xmm1 = _mm_srli_si128(xmm1, 8);
-					break;
-				case 7:
-					xmm1 = _mm_slli_si128(xmm1, 4); xmm1 = _mm_srli_si128(xmm1, 4);
-					break;
-				}
-				xmm0 = _mm_packs_epi32(xmm0, xmm1);
-
-				xmm1 = _mm_loadu_si128((const __m128i*)(data-8));
-				xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-				xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(0,1,2,3));
-				xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-				xmm3 = _mm_packs_epi32(xmm3, xmm1);
-				xmm2 = _mm_slli_si128(xmm0, 2);
-
-				/* xmm0: qlp_coeff
-				   xmm2: qlp_coeff << 16bit
-				   xmm3: data */
-
-				xmm6 = xmm3;
-				xmm6 = _mm_madd_epi16(xmm6, xmm0);
-				xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-				xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-				curr = *data++;
-				*residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization);
-
-				data_len--;
-
-				if(data_len & 1)
-				{
-					xmm3 = _mm_slli_si128(xmm3, 2);
-					xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-					xmm6 = xmm3;
-					xmm6 = _mm_madd_epi16(xmm6, xmm0);
-					xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-					xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-					curr = *data++;
-					*residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization);
-
-					data_len--;
-				}
-
-				while(data_len) { /* data_len is even */
-					xmm3 = _mm_slli_si128(xmm3, 4);
-					xmm3 = _mm_insert_epi16(xmm3, curr, 1);
-
-					xmm6 = xmm3;
-					xmm6 = _mm_madd_epi16(xmm6, xmm2);
-					xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-					xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-					curr = *data++;
-					*residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization);
-
-					xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-					xmm6 = xmm3;
-					xmm6 = _mm_madd_epi16(xmm6, xmm0);
-					xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-					xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-					curr = *data++;
-					*residual++ = curr - (_mm_cvtsi128_si32(xmm6) >> lp_quantization);
-
-					data_len-=2;
-				}
-			}
-		}
-		else { /* order == 1, 2, 3, 4 */
-			if(order > 2) {
-				if(order == 4) {
-					for(i = 0; i < (int)data_len; i++) {
-						sum = 0;
-						sum += qlp_coeff[3] * data[i-4];
-						sum += qlp_coeff[2] * data[i-3];
-						sum += qlp_coeff[1] * data[i-2];
-						sum += qlp_coeff[0] * data[i-1];
-						residual[i] = data[i] - (sum >> lp_quantization);
-					}
-				}
-				else { /* order == 3 */
-					for(i = 0; i < (int)data_len; i++) {
-						sum = 0;
-						sum += qlp_coeff[2] * data[i-3];
-						sum += qlp_coeff[1] * data[i-2];
-						sum += qlp_coeff[0] * data[i-1];
-						residual[i] = data[i] - (sum >> lp_quantization);
-					}
-				}
-			}
-			else {
-				if(order == 2) {
-					for(i = 0; i < (int)data_len; i++) {
-						sum = 0;
-						sum += qlp_coeff[1] * data[i-2];
-						sum += qlp_coeff[0] * data[i-1];
-						residual[i] = data[i] - (sum >> lp_quantization);
-					}
-				}
-				else { /* order == 1 */
-					for(i = 0; i < (int)data_len; i++)
-						residual[i] = data[i] - ((qlp_coeff[0] * data[i-1]) >> lp_quantization);
-				}
-			}
-		}
-	}
-	else { /* order > 12 */
-		for(i = 0; i < (int)data_len; i++) {
-			sum = 0;
-			switch(order) {
-				case 32: sum += qlp_coeff[31] * data[i-32];
-				case 31: sum += qlp_coeff[30] * data[i-31];
-				case 30: sum += qlp_coeff[29] * data[i-30];
-				case 29: sum += qlp_coeff[28] * data[i-29];
-				case 28: sum += qlp_coeff[27] * data[i-28];
-				case 27: sum += qlp_coeff[26] * data[i-27];
-				case 26: sum += qlp_coeff[25] * data[i-26];
-				case 25: sum += qlp_coeff[24] * data[i-25];
-				case 24: sum += qlp_coeff[23] * data[i-24];
-				case 23: sum += qlp_coeff[22] * data[i-23];
-				case 22: sum += qlp_coeff[21] * data[i-22];
-				case 21: sum += qlp_coeff[20] * data[i-21];
-				case 20: sum += qlp_coeff[19] * data[i-20];
-				case 19: sum += qlp_coeff[18] * data[i-19];
-				case 18: sum += qlp_coeff[17] * data[i-18];
-				case 17: sum += qlp_coeff[16] * data[i-17];
-				case 16: sum += qlp_coeff[15] * data[i-16];
-				case 15: sum += qlp_coeff[14] * data[i-15];
-				case 14: sum += qlp_coeff[13] * data[i-14];
-				case 13: sum += qlp_coeff[12] * data[i-13];
-				         sum += qlp_coeff[11] * data[i-12];
-				         sum += qlp_coeff[10] * data[i-11];
-				         sum += qlp_coeff[ 9] * data[i-10];
-				         sum += qlp_coeff[ 8] * data[i- 9];
-				         sum += qlp_coeff[ 7] * data[i- 8];
-				         sum += qlp_coeff[ 6] * data[i- 7];
-				         sum += qlp_coeff[ 5] * data[i- 6];
-				         sum += qlp_coeff[ 4] * data[i- 5];
-				         sum += qlp_coeff[ 3] * data[i- 4];
-				         sum += qlp_coeff[ 2] * data[i- 3];
-				         sum += qlp_coeff[ 1] * data[i- 2];
-				         sum += qlp_coeff[ 0] * data[i- 1];
-			}
-			residual[i] = data[i] - (sum >> lp_quantization);
-		}
-	}
-}
-
-#endif /* (FLAC__CPU_IA32 || FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN */
-#endif /* FLAC__NO_ASM */
-#endif /* FLAC__INTEGER_ONLY_LIBRARY */
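[Editor's note: the 16-bit residual routine above (carried over into lpc_intrin_sse2.c per the commit message) hinges on packing 32-bit coefficients and history samples into 16-bit lanes with _mm_packs_epi32 so that _mm_madd_epi16 can form eight products and four pairwise 32-bit sums in a single instruction. A self-contained sketch of that dot-product core, assuming all inputs fit in 16 bits; names are illustrative, not from the patch.]

/* Dot product of eight 16-bit terms via PMADDWD plus a horizontal fold. */
#include <emmintrin.h> /* SSE2 */

static int dot8_16bit_sse2(const short coeff[8], const short hist[8])
{
	__m128i c = _mm_loadu_si128((const __m128i*)coeff);
	__m128i h = _mm_loadu_si128((const __m128i*)hist);
	__m128i s = _mm_madd_epi16(h, c);           /* 4 x (h[2k]*c[2k] + h[2k+1]*c[2k+1]) */
	s = _mm_add_epi32(s, _mm_srli_si128(s, 8)); /* fold upper 64 bits into lower */
	s = _mm_add_epi32(s, _mm_srli_si128(s, 4)); /* fold remaining lane */
	return _mm_cvtsi128_si32(s);                /* sum of all eight products */
}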
diff --git a/src/libFLAC/stream_decoder.c b/src/libFLAC/stream_decoder.c
index 8f59e971..be9e2965 100644
--- a/src/libFLAC/stream_decoder.c
+++ b/src/libFLAC/stream_decoder.c
@@ -415,6 +415,12 @@ static FLAC__StreamDecoderInitStatus init_stream_internal_(
 			decoder->private_->local_lpc_restore_signal_16bit_order8 = FLAC__lpc_restore_signal_asm_ia32;
 		}
 #endif
+#ifdef FLAC__HAS_X86INTRIN
+# ifdef FLAC__SSE4_SUPPORTED
+		if(decoder->private_->cpuinfo.ia32.sse41)
+			decoder->private_->local_lpc_restore_signal_64bit = FLAC__lpc_restore_signal_wide_intrin_sse41;
+# endif
+#endif
 #elif defined FLAC__CPU_PPC
 		FLAC__ASSERT(decoder->private_->cpuinfo.type == FLAC__CPUINFO_TYPE_PPC);
 		if(decoder->private_->cpuinfo.ppc.altivec) {
diff --git a/src/libFLAC/stream_encoder.c b/src/libFLAC/stream_encoder.c
index 0800b5b3..53f09b19 100644
--- a/src/libFLAC/stream_encoder.c
+++ b/src/libFLAC/stream_encoder.c
@@ -39,6 +39,7 @@
 #include <stdlib.h> /* for malloc() */
 #include <string.h> /* for memcpy() */
 #include <sys/types.h> /* for off_t */
+#include "share/compat.h"
 #include "FLAC/assert.h"
 #include "FLAC/stream_decoder.h"
 #include "protected/stream_encoder.h"
@@ -56,10 +57,10 @@
 #include "private/ogg_helper.h"
 #include "private/ogg_mapping.h"
 #endif
+#include "private/stream_encoder.h"
 #include "private/stream_encoder_framing.h"
 #include "private/window.h"
 #include "share/alloc.h"
-#include "share/compat.h"
 #include "share/private.h"
 
@@ -344,6 +345,7 @@ typedef struct FLAC__StreamEncoderPrivate {
 	unsigned current_frame_number;
 	FLAC__MD5Context md5context;
 	FLAC__CPUInfo cpuinfo;
+	void (*local_precompute_partition_info_sums)(const FLAC__int32 residual[], FLAC__uint64 abs_residual_partition_sums[], unsigned residual_samples, unsigned predictor_order, unsigned min_partition_order, unsigned max_partition_order, unsigned bps);
 #ifndef FLAC__INTEGER_ONLY_LIBRARY
 	unsigned (*local_fixed_compute_best_predictor)(const FLAC__int32 data[], unsigned data_len, FLAC__float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1]);
 #else
@@ -875,6 +877,7 @@ static FLAC__StreamEncoderInitStatus init_stream_internal_(
 #ifndef FLAC__INTEGER_ONLY_LIBRARY
 	encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation;
 #endif
+	encoder->private_->local_precompute_partition_info_sums = precompute_partition_info_sums_;
 	encoder->private_->local_fixed_compute_best_predictor = FLAC__fixed_compute_best_predictor;
 #ifndef FLAC__INTEGER_ONLY_LIBRARY
 	encoder->private_->local_lpc_compute_residual_from_qlp_coefficients = FLAC__lpc_compute_residual_from_qlp_coefficients;
@@ -915,6 +918,16 @@ static FLAC__StreamEncoderInitStatus init_stream_internal_(
 			if(encoder->private_->cpuinfo.ia32.mmx && encoder->private_->cpuinfo.ia32.cmov)
 				encoder->private_->local_fixed_compute_best_predictor = FLAC__fixed_compute_best_predictor_asm_ia32_mmx_cmov;
 # endif /* FLAC__HAS_NASM */
+# ifdef FLAC__HAS_X86INTRIN
+		if(encoder->private_->cpuinfo.ia32.sse2) {
+			encoder->private_->local_lpc_compute_residual_from_qlp_coefficients = FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2;
+			encoder->private_->local_lpc_compute_residual_from_qlp_coefficients_16bit = FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_sse2;
+		}
+# ifdef FLAC__SSE4_SUPPORTED
+		if(encoder->private_->cpuinfo.ia32.sse41)
+			encoder->private_->local_lpc_compute_residual_from_qlp_coefficients_64bit = FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41;
+# endif
+# endif /* FLAC__HAS_X86INTRIN */
 # elif defined FLAC__CPU_X86_64
 	FLAC__ASSERT(encoder->private_->cpuinfo.type == FLAC__CPUINFO_TYPE_X86_64);
 # ifdef FLAC__HAS_X86INTRIN
@@ -927,12 +940,37 @@ static FLAC__StreamEncoderInitStatus init_stream_internal_(
 		else if(encoder->protected_->max_lpc_order < 16)
 			encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_16;
 
+		encoder->private_->local_lpc_compute_residual_from_qlp_coefficients = FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2;
 		encoder->private_->local_lpc_compute_residual_from_qlp_coefficients_16bit = FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_sse2;
+# ifdef FLAC__SSE4_SUPPORTED
+		if(encoder->private_->cpuinfo.x86_64.sse41)
+			encoder->private_->local_lpc_compute_residual_from_qlp_coefficients_64bit = FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41;
+# endif
 # endif /* FLAC__HAS_X86INTRIN */
 # endif /* FLAC__CPU_... */
 	}
 # endif /* !FLAC__NO_ASM */
 #endif /* !FLAC__INTEGER_ONLY_LIBRARY */
+#if !defined FLAC__NO_ASM && defined FLAC__HAS_X86INTRIN
+	if(encoder->private_->cpuinfo.use_asm) {
+# if defined FLAC__CPU_IA32
+# ifdef FLAC__SSSE3_SUPPORTED
+		if(encoder->private_->cpuinfo.ia32.ssse3)
+			encoder->private_->local_precompute_partition_info_sums = precompute_partition_info_sums_intrin_ssse3;
+		else
+# endif
+		if(encoder->private_->cpuinfo.ia32.sse2)
+			encoder->private_->local_precompute_partition_info_sums = precompute_partition_info_sums_intrin_sse2;
+# elif defined FLAC__CPU_X86_64
+# ifdef FLAC__SSSE3_SUPPORTED
+		if(encoder->private_->cpuinfo.x86_64.ssse3)
+			encoder->private_->local_precompute_partition_info_sums = precompute_partition_info_sums_intrin_ssse3;
+		else
+# endif
+		encoder->private_->local_precompute_partition_info_sums = precompute_partition_info_sums_intrin_sse2;
+# endif /* FLAC__CPU_... */
+	}
+#endif /* !FLAC__NO_ASM && FLAC__HAS_X86INTRIN */
 	/* finally override based on wide-ness if necessary */
 	if(encoder->private_->use_wide_by_block) {
 		encoder->private_->local_fixed_compute_best_predictor = FLAC__fixed_compute_best_predictor_wide;
@@ -3572,7 +3610,7 @@ unsigned evaluate_lpc_subframe_(
 	FLAC__EntropyCodingMethod_PartitionedRiceContents *partitioned_rice_contents
 )
 {
-	FLAC__int32 qlp_coeff[FLAC__MAX_LPC_ORDER]; /* WATCHOUT: the size is important; x86 intrinsic routines need more than 'order' elements */
+	FLAC__int32 qlp_coeff[FLAC__MAX_LPC_ORDER]; /* WATCHOUT: the size is important; some x86 intrinsic routines need more than lpc order elements */
 	unsigned i, residual_bits, estimate;
 	int quantization, ret;
 	const unsigned residual_samples = blocksize - order;
@@ -3687,7 +3725,7 @@ unsigned find_best_partition_order_(
 	max_partition_order = FLAC__format_get_max_rice_partition_order_from_blocksize_limited_max_and_predictor_order(max_partition_order, blocksize, predictor_order);
 	min_partition_order = flac_min(min_partition_order, max_partition_order);
 
-	precompute_partition_info_sums_(residual, abs_residual_partition_sums, residual_samples, predictor_order, min_partition_order, max_partition_order, bps);
+	private_->local_precompute_partition_info_sums(residual, abs_residual_partition_sums, residual_samples, predictor_order, min_partition_order, max_partition_order, bps);
 
 	if(do_escape_coding)
 		precompute_partition_info_escapes_(residual, raw_bits_per_partition, residual_samples, predictor_order, min_partition_order, max_partition_order);
@@ -3759,7 +3797,7 @@ unsigned find_best_partition_order_(
 	return best_residual_bits;
 }
 
-#if defined(FLAC__CPU_IA32) && !defined FLAC__NO_ASM && defined FLAC__HAS_NASM
+#if defined(FLAC__CPU_IA32) && !defined FLAC__NO_ASM && defined FLAC__HAS_NASM && 0
 extern void precompute_partition_info_sums_32bit_asm_ia32_(
 	const FLAC__int32 residual[],
 	FLAC__uint64 abs_residual_partition_sums[],
@@ -3785,7 +3823,7 @@ void precompute_partition_info_sums_(
 
 	FLAC__ASSERT(default_partition_samples > predictor_order);
 
-#if defined(FLAC__CPU_IA32) && !defined FLAC__NO_ASM && defined FLAC__HAS_NASM
+#if defined(FLAC__CPU_IA32) && !defined FLAC__NO_ASM && defined FLAC__HAS_NASM && 0
 	/* slightly pessimistic but still catches all common cases */
 	/* WATCHOUT: "+ bps" is an assumption that the average residual magnitude will not be more than "bps" bits */
 	if(bps <= 16) {
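[Editor's note: all of the wiring above follows one pattern: detect CPU features once at init time, bind the fastest available routine to a function pointer such as local_precompute_partition_info_sums, and make the hot path a plain indirect call with no per-block feature checks. A minimal standalone sketch of that dispatch pattern; all names here are hypothetical, not the library's.]

#include <stdio.h>

typedef void (*sums_fn)(const int residual[], unsigned n);

static void sums_c(const int r[], unsigned n)     { (void)r; (void)n; puts("plain C"); }
static void sums_sse2(const int r[], unsigned n)  { (void)r; (void)n; puts("SSE2"); }
static void sums_ssse3(const int r[], unsigned n) { (void)r; (void)n; puts("SSSE3"); }

/* Bind once at init; prefer SSSE3 (single-instruction PABSD), then SSE2. */
static sums_fn bind_best(int has_sse2, int has_ssse3)
{
	if(has_ssse3)
		return sums_ssse3;
	if(has_sse2)
		return sums_sse2;
	return sums_c;
}

int main(void)
{
	sums_fn f = bind_best(1, 0); /* as if CPUID reported SSE2 only */
	f(NULL, 0);                  /* hot path: indirect call, no checks */
	return 0;
}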
diff --git a/src/libFLAC/stream_encoder_intrin_sse2.c b/src/libFLAC/stream_encoder_intrin_sse2.c
new file mode 100644
index 00000000..406ae733
--- /dev/null
+++ b/src/libFLAC/stream_encoder_intrin_sse2.c
@@ -0,0 +1,161 @@
+/* libFLAC - Free Lossless Audio Codec library
+ * Copyright (C) 2000-2009 Josh Coalson
+ * Copyright (C) 2011-2013 Xiph.Org Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of the Xiph.org Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#ifndef FLAC__NO_ASM
+#if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && defined FLAC__HAS_X86INTRIN
+
+#include <stdlib.h> /* for abs() */
+#include <emmintrin.h> /* SSE2 */
+#include "FLAC/assert.h"
+#include "private/stream_encoder.h"
+
+void precompute_partition_info_sums_intrin_sse2(const FLAC__int32 residual[], FLAC__uint64 abs_residual_partition_sums[],
+		unsigned residual_samples, unsigned predictor_order, unsigned min_partition_order, unsigned max_partition_order, unsigned bps)
+{
+	const unsigned default_partition_samples = (residual_samples + predictor_order) >> max_partition_order;
+	unsigned partitions = 1u << max_partition_order;
+
+	FLAC__ASSERT(default_partition_samples > predictor_order);
+
+	/* first do max_partition_order */
+	{
+		unsigned partition, residual_sample, end = (unsigned)(-(int)predictor_order);
+		unsigned e1, e3;
+		__m128i mm_res, mm_sum, mm_mask;
+
+		if(bps <= 16) {
+			FLAC__uint32 abs_residual_partition_sum;
+
+			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				end += default_partition_samples;
+				abs_residual_partition_sum = 0;
+				mm_sum = _mm_setzero_si128();
+
+				e1 = (residual_sample + 3) & ~3; e3 = end & ~3;
+				if(e1 > end)
+					e1 = end; /* try flac -l 1 -b 16 and you'll be here */
+
+				/* assumption: residual[] is properly aligned so (residual + e1) is properly aligned too and _mm_loadu_si128() is fast */
+				for( ; residual_sample < e1; residual_sample++)
+					abs_residual_partition_sum += abs(residual[residual_sample]); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
+
+				for( ; residual_sample < e3; residual_sample+=4) {
+					mm_res = _mm_loadu_si128((const __m128i*)(residual+residual_sample));
+
+					mm_mask = _mm_srai_epi32(mm_res, 31);
+					mm_res = _mm_xor_si128(mm_res, mm_mask);
+					mm_res = _mm_sub_epi32(mm_res, mm_mask);
+
+					mm_sum = _mm_add_epi32(mm_sum, mm_res);
+				}
+
+				mm_sum = _mm_add_epi32(mm_sum, _mm_srli_si128(mm_sum, 8));
+				mm_sum = _mm_add_epi32(mm_sum, _mm_srli_si128(mm_sum, 4));
+				abs_residual_partition_sum += _mm_cvtsi128_si32(mm_sum);
+
+				for( ; residual_sample < end; residual_sample++)
+					abs_residual_partition_sum += abs(residual[residual_sample]);
+
+				abs_residual_partition_sums[partition] = abs_residual_partition_sum;
+			}
+		}
+		else { /* have to pessimistically use 64 bits for accumulator */
+			FLAC__uint64 abs_residual_partition_sum;
+
+			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				end += default_partition_samples;
+				abs_residual_partition_sum = 0;
+				mm_sum = _mm_setzero_si128();
+
+				e1 = (residual_sample + 1) & ~1; e3 = end & ~1;
+				FLAC__ASSERT(e1 <= end);
+
+				for( ; residual_sample < e1; residual_sample++)
+					abs_residual_partition_sum += abs(residual[residual_sample]);
+
+				for( ; residual_sample < e3; residual_sample+=2) {
+					mm_res = _mm_loadl_epi64((const __m128i*)(residual+residual_sample)); /* 0 0 r1 r0 */
+
+					mm_mask = _mm_srai_epi32(mm_res, 31);
+					mm_res = _mm_xor_si128(mm_res, mm_mask);
+					mm_res = _mm_sub_epi32(mm_res, mm_mask); /* 0 0 |r1| |r0| */
+
+					mm_res = _mm_shuffle_epi32(mm_res, _MM_SHUFFLE(3,1,2,0)); /* 0 |r1| 0 |r0| == |r1_64| |r0_64| */
+					mm_sum = _mm_add_epi64(mm_sum, mm_res);
+				}
+
+				mm_sum = _mm_add_epi64(mm_sum, _mm_srli_si128(mm_sum, 8));
+#ifdef FLAC__CPU_IA32
+#ifdef _MSC_VER
+				abs_residual_partition_sum += mm_sum.m128i_u64[0];
+#else
+				{
+					FLAC__uint64 tmp[2];
+					_mm_storel_epi64((__m128i *)tmp, mm_sum);
+					abs_residual_partition_sum += tmp[0];
+				}
+#endif
+#else
+				abs_residual_partition_sum += _mm_cvtsi128_si64(mm_sum);
+#endif
+
+				for( ; residual_sample < end; residual_sample++)
+					abs_residual_partition_sum += abs(residual[residual_sample]);
+
+				abs_residual_partition_sums[partition] = abs_residual_partition_sum;
+			}
+		}
+	}
+
+	/* now merge partitions for lower orders */
+	{
+		unsigned from_partition = 0, to_partition = partitions;
+		int partition_order;
+		for(partition_order = (int)max_partition_order - 1; partition_order >= (int)min_partition_order; partition_order--) {
+			unsigned i;
+			partitions >>= 1;
+			for(i = 0; i < partitions; i++) {
+				abs_residual_partition_sums[to_partition++] =
+					abs_residual_partition_sums[from_partition  ] +
+					abs_residual_partition_sums[from_partition+1];
+				from_partition += 2;
+			}
+		}
+	}
+}
+
+#endif /* (FLAC__CPU_IA32 || FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN */
+#endif /* FLAC__NO_ASM */
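[Editor's note: lacking SSSE3's PABSD, precompute_partition_info_sums_intrin_sse2() builds |x| per 32-bit lane from an srai/xor/sub sequence, which is why the commit message calls this version slightly slower. The underlying branch-free identity, shown scalar for clarity; this code is illustrative and not part of the patch.]

/* Branch-free absolute value: mask = x >> 31 (0 or -1); |x| = (x ^ mask) - mask.
 * For x >= 0 the mask is 0 and nothing changes; for x < 0 the xor is a bitwise
 * NOT and subtracting -1 adds 1, i.e. two's-complement negation. Like abs(),
 * this overflows for INT32_MIN. */
#include <stdint.h>

static int32_t abs32_branchfree(int32_t x)
{
	int32_t mask = x >> 31; /* right shift of a negative value is implementation-defined in C, but arithmetic on x86 compilers */
	return (x ^ mask) - mask;
}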
diff --git a/src/libFLAC/stream_encoder_intrin_ssse3.c b/src/libFLAC/stream_encoder_intrin_ssse3.c
new file mode 100644
index 00000000..15f9ff01
--- /dev/null
+++ b/src/libFLAC/stream_encoder_intrin_ssse3.c
@@ -0,0 +1,161 @@
+/* libFLAC - Free Lossless Audio Codec library
+ * Copyright (C) 2000-2009 Josh Coalson
+ * Copyright (C) 2011-2013 Xiph.Org Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of the Xiph.org Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "share/compat.h"
+
+#ifndef FLAC__NO_ASM
+#if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && defined FLAC__HAS_X86INTRIN
+#ifdef FLAC__SSSE3_SUPPORTED
+
+#include <stdlib.h> /* for abs() */
+#include <tmmintrin.h> /* SSSE3 */
+#include "FLAC/assert.h"
+#include "private/stream_encoder.h"
+
+void precompute_partition_info_sums_intrin_ssse3(const FLAC__int32 residual[], FLAC__uint64 abs_residual_partition_sums[],
+		unsigned residual_samples, unsigned predictor_order, unsigned min_partition_order, unsigned max_partition_order, unsigned bps)
+{
+	const unsigned default_partition_samples = (residual_samples + predictor_order) >> max_partition_order;
+	unsigned partitions = 1u << max_partition_order;
+
+	FLAC__ASSERT(default_partition_samples > predictor_order);
+
+	/* first do max_partition_order */
+	{
+		unsigned partition, residual_sample, end = (unsigned)(-(int)predictor_order);
+		unsigned e1, e3;
+		__m128i mm_res, mm_sum;
+
+		if(bps <= 16) {
+			FLAC__uint32 abs_residual_partition_sum;
+
+			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				end += default_partition_samples;
+				abs_residual_partition_sum = 0;
+				mm_sum = _mm_setzero_si128();
+
+				e1 = (residual_sample + 3) & ~3; e3 = end & ~3;
+				if(e1 > end)
+					e1 = end; /* try flac -l 1 -b 16 and you'll be here */
+
+				/* assumption: residual[] is properly aligned so (residual + e1) is properly aligned too and _mm_loadu_si128() is fast */
+				for( ; residual_sample < e1; residual_sample++)
+					abs_residual_partition_sum += abs(residual[residual_sample]); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
+
+				for( ; residual_sample < e3; residual_sample+=4) {
+					mm_res = _mm_loadu_si128((const __m128i*)(residual+residual_sample));
+
+					mm_res = _mm_abs_epi32(mm_res);
+
+					mm_sum = _mm_add_epi32(mm_sum, mm_res);
+				}
+
+				mm_sum = _mm_hadd_epi32(mm_sum, mm_sum);
+				mm_sum = _mm_hadd_epi32(mm_sum, mm_sum);
+				abs_residual_partition_sum += _mm_cvtsi128_si32(mm_sum);
+
+				for( ; residual_sample < end; residual_sample++)
+					abs_residual_partition_sum += abs(residual[residual_sample]);
+
+				abs_residual_partition_sums[partition] = abs_residual_partition_sum;
+			}
+		}
+		else { /* have to pessimistically use 64 bits for accumulator */
+			FLAC__uint64 abs_residual_partition_sum;
+
+			for(partition = residual_sample = 0; partition < partitions; partition++) {
+				end += default_partition_samples;
+				abs_residual_partition_sum = 0;
+				mm_sum = _mm_setzero_si128();
+
+				e1 = (residual_sample + 1) & ~1; e3 = end & ~1;
+				FLAC__ASSERT(e1 <= end);
+
+				for( ; residual_sample < e1; residual_sample++)
+					abs_residual_partition_sum += abs(residual[residual_sample]);
+
+				for( ; residual_sample < e3; residual_sample+=2) {
+					mm_res = _mm_loadl_epi64((const __m128i*)(residual+residual_sample)); /* 0 0 r1 r0 */
+
+					mm_res = _mm_abs_epi32(mm_res); /* 0 0 |r1| |r0| */
+
+					mm_res = _mm_shuffle_epi32(mm_res, _MM_SHUFFLE(3,1,2,0)); /* 0 |r1| 0 |r0| == |r1_64| |r0_64| */
+					mm_sum = _mm_add_epi64(mm_sum, mm_res);
+				}
+
+				mm_sum = _mm_add_epi64(mm_sum, _mm_srli_si128(mm_sum, 8));
+#ifdef FLAC__CPU_IA32
+#ifdef _MSC_VER
+				abs_residual_partition_sum += mm_sum.m128i_u64[0];
+#else
+				{
+					FLAC__uint64 tmp[2];
+					_mm_storel_epi64((__m128i *)tmp, mm_sum);
+					abs_residual_partition_sum += tmp[0];
+				}
+#endif
+#else
+				abs_residual_partition_sum += _mm_cvtsi128_si64(mm_sum);
+#endif
+
+				for( ; residual_sample < end; residual_sample++)
+					abs_residual_partition_sum += abs(residual[residual_sample]);
+
+				abs_residual_partition_sums[partition] = abs_residual_partition_sum;
+			}
+		}
+	}
+
+	/* now merge partitions for lower orders */
+	{
+		unsigned from_partition = 0, to_partition = partitions;
+		int partition_order;
+		for(partition_order = (int)max_partition_order - 1; partition_order >= (int)min_partition_order; partition_order--) {
+			unsigned i;
+			partitions >>= 1;
+			for(i = 0; i < partitions; i++) {
+				abs_residual_partition_sums[to_partition++] =
+					abs_residual_partition_sums[from_partition  ] +
+					abs_residual_partition_sums[from_partition+1];
+				from_partition += 2;
+			}
+		}
+	}
+}
+
+#endif /* FLAC__SSSE3_SUPPORTED */
+#endif /* (FLAC__CPU_IA32 || FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN */
+#endif /* FLAC__NO_ASM */
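[Editor's note: the SSSE3 variant differs from the SSE2 one in exactly two spots: _mm_abs_epi32 (PABSD) replaces the three-instruction absolute value, and _mm_hadd_epi32 (PHADDD) folds the accumulator. A condensed sketch of that inner loop and reduction, assuming n is a multiple of 4 and the sum fits in 32 bits; this is illustrative, not the library function.]

#include <tmmintrin.h> /* SSSE3 */

static unsigned sum_abs_ssse3(const int residual[], unsigned n)
{
	__m128i sum = _mm_setzero_si128();
	unsigned i;
	for(i = 0; i < n; i += 4) {
		__m128i r = _mm_loadu_si128((const __m128i*)(residual + i));
		sum = _mm_add_epi32(sum, _mm_abs_epi32(r)); /* one PABSD per vector */
	}
	sum = _mm_hadd_epi32(sum, sum); /* a+b, c+d, a+b, c+d */
	sum = _mm_hadd_epi32(sum, sum); /* total in every lane */
	return (unsigned)_mm_cvtsi128_si32(sum);
}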