-rw-r--r--   CMakeLists.txt                       2
-rw-r--r--   cmake/CheckA64NEON.c.in              6
-rw-r--r--   cmake/CheckA64NEON.cmake            14
-rw-r--r--   cmake/CheckCPUArch.cmake             8
-rw-r--r--   config.cmake.h.in                    3
-rw-r--r--   configure.ac                        30
-rw-r--r--   src/libFLAC/CMakeLists.txt           8
-rw-r--r--   src/libFLAC/Makefile.am              1
-rw-r--r--   src/libFLAC/Makefile.lite            1
-rw-r--r--   src/libFLAC/include/private/lpc.h    6
-rw-r--r--   src/libFLAC/lpc_intrin_neon.c     1249
-rw-r--r--   src/libFLAC/stream_encoder.c         8
12 files changed, 1333 insertions(+), 3 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7e6ae233..4b6d0682 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -104,6 +104,8 @@ enable_testing()
check_include_file("byteswap.h" HAVE_BYTESWAP_H)
check_include_file("inttypes.h" HAVE_INTTYPES_H)
check_include_file("stdint.h" HAVE_STDINT_H)
+check_include_file("arm_neon.h" FLAC__HAS_NEONINTRIN)
+
if(MSVC)
check_include_file("intrin.h" FLAC__HAS_X86INTRIN)
else()
diff --git a/cmake/CheckA64NEON.c.in b/cmake/CheckA64NEON.c.in
new file mode 100644
index 00000000..4d43d4fa
--- /dev/null
+++ b/cmake/CheckA64NEON.c.in
@@ -0,0 +1,6 @@
+#include <arm_neon.h>
+int main (void)
+{
+ float64x2_t tmp;
+ tmp = vdupq_n_f64(0.0f);
+}
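
The probe above relies on float64x2_t and vdupq_n_f64() existing only in the A64 (AArch64) flavour of <arm_neon.h>, so the try_compile driven by CheckA64NEON.cmake fails on 32-bit ARM and on non-ARM targets. A self-contained equivalent of the generated check (not part of the commit; it adds the explicit return and double literal that the template omits):

/* Standalone equivalent of the A64 NEON probe: compiles only where the
 * AArch64 double-precision NEON intrinsics are available. */
#include <arm_neon.h>

int main(void)
{
	float64x2_t tmp = vdupq_n_f64(0.0);	/* A64-only intrinsic */
	(void)tmp;				/* silence unused-variable warnings */
	return 0;
}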
diff --git a/cmake/CheckA64NEON.cmake b/cmake/CheckA64NEON.cmake
new file mode 100644
index 00000000..247b8648
--- /dev/null
+++ b/cmake/CheckA64NEON.cmake
@@ -0,0 +1,14 @@
+macro(CHECK_A64NEON VARIABLE)
+ if(NOT DEFINED HAVE_${VARIABLE})
+ message(STATUS "Check whether A64 NEON can be used")
+ configure_file(${PROJECT_SOURCE_DIR}/cmake/CheckA64NEON.c.in ${PROJECT_BINARY_DIR}/CMakeFiles/CMakeTmp/CheckA64NEON.c @ONLY)
+ try_compile(HAVE_${VARIABLE} "${PROJECT_BINARY_DIR}"
+ "${PROJECT_BINARY_DIR}/CMakeFiles/CMakeTmp/CheckA64NEON.c")
+ if(HAVE_${VARIABLE})
+ message(STATUS "Check whether A64 NEON can be used - yes")
+ set(${VARIABLE} 1 CACHE INTERNAL "Result of CHECK_A64NEON" FORCE)
+ else ()
+ message(STATUS "Check whether A64 NEON can be used - no")
+ endif()
+ endif ()
+endmacro(CHECK_A64NEON)
diff --git a/cmake/CheckCPUArch.cmake b/cmake/CheckCPUArch.cmake
index c9b7a5c3..4c444363 100644
--- a/cmake/CheckCPUArch.cmake
+++ b/cmake/CheckCPUArch.cmake
@@ -7,7 +7,7 @@ macro(_CHECK_CPU_ARCH ARCH ARCH_DEFINES VARIABLE)
"${PROJECT_BINARY_DIR}/CMakeFiles/CMakeTmp/CheckCPUArch.c")
if(HAVE_${VARIABLE})
message(STATUS "Check CPU architecture is ${ARCH} - yes")
- set(${VARIABLE} 1 CACHE INTERNAL "Result of CHECK_CPU_ARCH_X64" FORCE)
+ set(${VARIABLE} 1 CACHE INTERNAL "Result of CHECK_CPU_ARCH" FORCE)
else ()
message(STATUS "Check CPU architecture is ${ARCH} - no")
endif()
@@ -24,4 +24,8 @@ endmacro(CHECK_CPU_ARCH_X86)
macro(CHECK_CPU_ARCH_PPC64 VARIABLE)
_CHECK_CPU_ARCH(ppc64 "defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) ||defined(_ARCH_PPC64)" ${VARIABLE})
-endmacro(CHECK_CPU_ARCH_PPC64)
\ No newline at end of file
+endmacro(CHECK_CPU_ARCH_PPC64)
+
+macro(CHECK_CPU_ARCH_ARM64 VARIABLE)
+ _CHECK_CPU_ARCH(arm64 "defined(__aarch64__) || defined(__arm64__)" ${VARIABLE})
+endmacro(CHECK_CPU_ARCH_ARM64)
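
CHECK_CPU_ARCH_ARM64 reuses the generic _CHECK_CPU_ARCH helper, which try_compiles a small probe generated from CheckCPUArch.c.in with the given preprocessor test. That template is not shown in this diff, but the arm64 check amounts to something like the following hypothetical probe, which only compiles when the target defines __aarch64__ or __arm64__:

/* Hypothetical sketch of the generated arm64 probe (CheckCPUArch.c.in itself
 * is not part of this diff): compilation fails unless the test holds. */
#if !(defined(__aarch64__) || defined(__arm64__))
#error "not arm64"
#endif

int main(void)
{
	return 0;
}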
diff --git a/config.cmake.h.in b/config.cmake.h.in
index 4f424977..3755cd40 100644
--- a/config.cmake.h.in
+++ b/config.cmake.h.in
@@ -40,6 +40,9 @@
/* Set to 1 if <x86intrin.h> is available. */
#cmakedefine01 FLAC__HAS_X86INTRIN
+/* Set to 1 if <arm_neon.h> is available. */
+#cmakedefine01 FLAC__HAS_NEONINTRIN
+
/* define if building for Darwin / MacOS X */
#cmakedefine FLAC__SYS_DARWIN
diff --git a/configure.ac b/configure.ac
index dc302735..8a27028d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -58,7 +58,7 @@ AM_PROG_CC_C_O
AC_C_INLINE
AC_C_TYPEOF
-AC_CHECK_HEADERS([stdint.h inttypes.h byteswap.h sys/param.h sys/ioctl.h termios.h x86intrin.h cpuid.h])
+AC_CHECK_HEADERS([stdint.h inttypes.h byteswap.h sys/param.h sys/ioctl.h termios.h x86intrin.h cpuid.h arm_neon.h])
XIPH_C_BSWAP32
XIPH_C_BSWAP16
@@ -145,6 +145,12 @@ case "$host_cpu" in
AH_TEMPLATE(FLAC__CPU_PPC, [define if building for PowerPC])
asm_optimisation=$asm_opt
;;
+ arm64|aarch64)
+ cpu_arm64=true
+ AC_DEFINE(FLAC__CPU_ARM64)
+ AH_TEMPLATE(FLAC__CPU_ARM64, [define if building for ARM])
+ asm_optimisation=$asm_opt
+ ;;
sparc)
cpu_sparc=true
AC_DEFINE(FLAC__CPU_SPARC)
@@ -156,6 +162,7 @@ AM_CONDITIONAL(FLAC__CPU_X86_64, test "x$cpu_x86_64" = xtrue)
AM_CONDITIONAL(FLaC__CPU_IA32, test "x$cpu_ia32" = xtrue)
AM_CONDITIONAL(FLaC__CPU_PPC, test "x$cpu_ppc" = xtrue)
AM_CONDITIONAL(FLaC__CPU_PPC64, test "x$cpu_ppc64" = xtrue)
+AM_CONDITIONAL(FLAC__CPU_ARM64, test "x$cpu_arm64" = xtrue)
AM_CONDITIONAL(FLaC__CPU_SPARC, test "x$cpu_sparc" = xtrue)
if test "x$ac_cv_header_x86intrin_h" = xyes; then
@@ -164,6 +171,26 @@ else
AC_DEFINE([FLAC__HAS_X86INTRIN], 0)
fi
+neon=no
+if test "x$ac_cv_header_arm_neon_h" = xyes; then
+AC_DEFINE([FLAC__HAS_NEONINTRIN], 1, [Set to 1 if <arm_neon.h> is available.])
+neon=yes
+ AC_MSG_CHECKING([whether arm_neon.h has A64 functions])
+ AC_COMPILE_IFELSE(
+ [AC_LANG_PROGRAM([[#include <arm_neon.h>]],
+ [[float64x2_t sum5; sum5 = vdupq_n_f64(0.0f);]])],
+ [AC_MSG_RESULT([yes])
+ has_a64neon=yes],
+ [AC_MSG_RESULT([no])])
+ if test "x$has_a64neon" = xyes; then
+ AC_DEFINE([FLAC__HAS_A64NEONINTRIN], 1, [Set to 1 if <arm_neon.h> has A64 instructions.])
+ else
+ AC_DEFINE([FLAC__HAS_A64NEONINTRIN], 0)
+ fi
+else
+AC_DEFINE([FLAC__HAS_NEONINTRIN], 0)
+fi
+
if test x"$cpu_ppc64" = xtrue ; then
AC_C_ATTRIBUTE([target("cpu=power8")],
@@ -664,6 +691,7 @@ if test x$ac_cv_c_compiler_gnu = xyes ; then
fi
echo " Compiler is Clang : ....................... ${xiph_cv_c_compiler_clang}"
echo " SSE optimizations : ....................... ${sse_os}"
+ echo " Neon optimizations : ...................... ${neon}"
echo " Asm optimizations : ....................... ${asm_optimisation}"
echo " Ogg/FLAC support : ........................ ${have_ogg}"
echo " Stack protector : ........................ ${enable_stack_smash_protection}"
diff --git a/src/libFLAC/CMakeLists.txt b/src/libFLAC/CMakeLists.txt
index 5395c2ed..56a63452 100644
--- a/src/libFLAC/CMakeLists.txt
+++ b/src/libFLAC/CMakeLists.txt
@@ -10,6 +10,7 @@ include(CheckCSourceCompiles)
include(CheckCPUArch)
include(CheckAttribute)
include(CheckVSX)
+include(CheckA64NEON)
check_cpu_arch_x64(FLAC__CPU_X86_64)
if(NOT FLAC__CPU_X86_64)
@@ -26,9 +27,15 @@ else()
check_attribute_power8(FLAC__HAS_TARGET_POWER8)
check_attribute_power9(FLAC__HAS_TARGET_POWER9)
check_vsx(FLAC__USE_VSX)
+ else()
+ check_cpu_arch_arm64(FLAC__CPU_ARM64)
+ if(FLAC__CPU_ARM64)
+ check_a64neon(FLAC__HAS_A64NEONINTRIN)
+ endif()
endif()
endif()
+
include(CheckLanguage)
check_language(ASM_NASM)
if(CMAKE_ASM_NASM_COMPILER)
@@ -68,6 +75,7 @@ add_library(FLAC
float.c
format.c
lpc.c
+ lpc_intrin_neon.c
lpc_intrin_sse2.c
lpc_intrin_sse41.c
lpc_intrin_avx2.c
diff --git a/src/libFLAC/Makefile.am b/src/libFLAC/Makefile.am
index 0203429a..b089d8cc 100644
--- a/src/libFLAC/Makefile.am
+++ b/src/libFLAC/Makefile.am
@@ -117,6 +117,7 @@ libFLAC_sources = \
lpc_intrin_sse41.c \
lpc_intrin_avx2.c \
lpc_intrin_vsx.c \
+ lpc_intrin_neon.c \
md5.c \
memory.c \
metadata_iterators.c \
diff --git a/src/libFLAC/Makefile.lite b/src/libFLAC/Makefile.lite
index b6b29ca4..9e96f9f1 100644
--- a/src/libFLAC/Makefile.lite
+++ b/src/libFLAC/Makefile.lite
@@ -90,6 +90,7 @@ SRCS_C = \
lpc_intrin_sse2.c \
lpc_intrin_sse41.c \
lpc_intrin_avx2.c \
+ lpc_intrin_neon.c \
md5.c \
memory.c \
metadata_iterators.c \
diff --git a/src/libFLAC/include/private/lpc.h b/src/libFLAC/include/private/lpc.h
index c6fe2f8f..a9076903 100644
--- a/src/libFLAC/include/private/lpc.h
+++ b/src/libFLAC/include/private/lpc.h
@@ -151,7 +151,13 @@ int FLAC__lpc_quantize_coefficients(const FLAC__real lp_coeff[], uint32_t order,
*/
void FLAC__lpc_compute_residual_from_qlp_coefficients(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[]);
void FLAC__lpc_compute_residual_from_qlp_coefficients_wide(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[]);
+
#ifndef FLAC__NO_ASM
+# ifdef FLAC__CPU_ARM64
+void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[]);
+void FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_neon(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[]);
+# endif
+
# ifdef FLAC__CPU_IA32
# ifdef FLAC__HAS_NASM
void FLAC__lpc_compute_residual_from_qlp_coefficients_asm_ia32(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[]);
diff --git a/src/libFLAC/lpc_intrin_neon.c b/src/libFLAC/lpc_intrin_neon.c
new file mode 100644
index 00000000..ab8f71ea
--- /dev/null
+++ b/src/libFLAC/lpc_intrin_neon.c
@@ -0,0 +1,1249 @@
+/* libFLAC - Free Lossless Audio Codec library
+ * Copyright (C) 2000-2009 Josh Coalson
+ * Copyright (C) 2011-2016 Xiph.Org Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of the Xiph.org Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "private/cpu.h"
+
+#ifndef FLAC__INTEGER_ONLY_LIBRARY
+#ifndef FLAC__NO_ASM
+#if defined FLAC__CPU_ARM64 && FLAC__HAS_NEONINTRIN
+#include "private/lpc.h"
+#include "FLAC/assert.h"
+#include "FLAC/format.h"
+#include "private/macros.h"
+#include <arm_neon.h>
+
+
+#define MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_vec, lane) \
+ summ_0 = vmulq_laneq_s32(tmp_vec[0], qlp_coeff_vec, lane); \
+ summ_1 = vmulq_laneq_s32(tmp_vec[4], qlp_coeff_vec, lane); \
+ summ_2 = vmulq_laneq_s32(tmp_vec[8], qlp_coeff_vec, lane);
+
+
+#define MACC_32BIT_LOOP_UNROOL_3(tmp_vec_ind, qlp_coeff_vec, lane) \
+ summ_0 = vmlaq_laneq_s32(summ_0,tmp_vec[tmp_vec_ind] ,qlp_coeff_vec, lane); \
+ summ_1 = vmlaq_laneq_s32(summ_1,tmp_vec[tmp_vec_ind+4] ,qlp_coeff_vec, lane); \
+ summ_2 = vmlaq_laneq_s32(summ_2,tmp_vec[tmp_vec_ind+8] ,qlp_coeff_vec, lane);
+
+void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[])
+{
+ int i;
+ FLAC__int32 sum;
+ FLAC__ASSERT(order > 0);
+ FLAC__ASSERT(order <= 32);
+
+ int32x4_t tmp_vec[20];
+
+	// Reading before data[0] (prologue reads) is valid: the encoder calls this as encoder->private_->local_lpc_compute_residual_from_qlp_coefficients(signal+order, ...), so `order` samples of history precede the block.
+ if(order <= 12) {
+ if(order > 8) {
+ if(order > 10) {
+ if (order == 12) {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
+ int32x4_t qlp_coeff_2 = {qlp_coeff[8], qlp_coeff[9], qlp_coeff[10], qlp_coeff[11]};
+
+ tmp_vec[0] = vld1q_s32(data - 12);
+ tmp_vec[1] = vld1q_s32(data - 11);
+ tmp_vec[2] = vld1q_s32(data - 10);
+ tmp_vec[3] = vld1q_s32(data - 9);
+ tmp_vec[4] = vld1q_s32(data - 8);
+ tmp_vec[5] = vld1q_s32(data - 7);
+ tmp_vec[6] = vld1q_s32(data - 6);
+ tmp_vec[7] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+
+ tmp_vec[8] = vld1q_s32(data + i - 4);
+ tmp_vec[9] = vld1q_s32(data+i-3);
+ tmp_vec[10] = vld1q_s32(data+i-2);
+ tmp_vec[11] = vld1q_s32(data+i-1);
+ tmp_vec[12] = vld1q_s32(data+i);
+ tmp_vec[13] = vld1q_s32(data+i+1);
+ tmp_vec[14] = vld1q_s32(data+i+2);
+ tmp_vec[15] = vld1q_s32(data+i+3);
+ tmp_vec[16] = vld1q_s32(data + i + 4);
+ tmp_vec[17] = vld1q_s32(data + i + 5);
+ tmp_vec[18] = vld1q_s32(data + i + 6);
+ tmp_vec[19] = vld1q_s32(data + i + 7);
+
+ MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_2, 3)
+ MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 2)
+ MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_2, 1)
+ MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_2, 0)
+ MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 3)
+ MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 2)
+ MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_1, 1)
+ MACC_32BIT_LOOP_UNROOL_3(7, qlp_coeff_1, 0)
+ MACC_32BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 3)
+ MACC_32BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 2)
+ MACC_32BIT_LOOP_UNROOL_3(10, qlp_coeff_0, 1)
+ MACC_32BIT_LOOP_UNROOL_3(11, qlp_coeff_0, 0)
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ tmp_vec[3] = tmp_vec[15];
+ tmp_vec[4] = tmp_vec[16];
+ tmp_vec[5] = tmp_vec[17];
+ tmp_vec[6] = tmp_vec[18];
+ tmp_vec[7] = tmp_vec[19];
+ }
+ }
+
+ else { /* order == 11 */
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
+ int32x4_t qlp_coeff_2 = {qlp_coeff[8], qlp_coeff[9], qlp_coeff[10], 0};
+
+ tmp_vec[0] = vld1q_s32(data - 11);
+ tmp_vec[1] = vld1q_s32(data - 10);
+ tmp_vec[2] = vld1q_s32(data - 9);
+ tmp_vec[3] = vld1q_s32(data - 8);
+ tmp_vec[4] = vld1q_s32(data - 7);
+ tmp_vec[5] = vld1q_s32(data - 6);
+ tmp_vec[6] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+ tmp_vec[7] = vld1q_s32(data + i - 4);
+ tmp_vec[8] = vld1q_s32(data + i - 3);
+ tmp_vec[9] = vld1q_s32(data + i - 2);
+ tmp_vec[10] = vld1q_s32(data + i - 1);
+ tmp_vec[11] = vld1q_s32(data + i - 0);
+ tmp_vec[12] = vld1q_s32(data + i + 1);
+ tmp_vec[13] = vld1q_s32(data + i + 2);
+ tmp_vec[14] = vld1q_s32(data + i + 3);
+ tmp_vec[15] = vld1q_s32(data + i + 4);
+ tmp_vec[16] = vld1q_s32(data + i + 5);
+ tmp_vec[17] = vld1q_s32(data + i + 6);
+ tmp_vec[18] = vld1q_s32(data + i + 7);
+
+
+ MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_2, 2)
+ MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 1)
+ MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_2, 0)
+ MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 3)
+ MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 2)
+ MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 1)
+ MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_1, 0)
+ MACC_32BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 3)
+ MACC_32BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 2)
+ MACC_32BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 1)
+ MACC_32BIT_LOOP_UNROOL_3(10, qlp_coeff_0, 0)
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ tmp_vec[3] = tmp_vec[15];
+ tmp_vec[4] = tmp_vec[16];
+ tmp_vec[5] = tmp_vec[17];
+ tmp_vec[6] = tmp_vec[18];
+ }
+ }
+ }
+ else {
+ if(order == 10) {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
+ int32x4_t qlp_coeff_2 = {qlp_coeff[8], qlp_coeff[9], 0, 0};
+
+ tmp_vec[0] = vld1q_s32(data - 10);
+ tmp_vec[1] = vld1q_s32(data - 9);
+ tmp_vec[2] = vld1q_s32(data - 8);
+ tmp_vec[3] = vld1q_s32(data - 7);
+ tmp_vec[4] = vld1q_s32(data - 6);
+ tmp_vec[5] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+ tmp_vec[6] = vld1q_s32(data + i - 4);
+ tmp_vec[7] = vld1q_s32(data + i - 3);
+ tmp_vec[8] = vld1q_s32(data + i - 2);
+ tmp_vec[9] = vld1q_s32(data + i - 1);
+ tmp_vec[10] = vld1q_s32(data + i - 0);
+ tmp_vec[11] = vld1q_s32(data + i + 1);
+ tmp_vec[12] = vld1q_s32(data + i + 2);
+ tmp_vec[13] = vld1q_s32(data + i + 3);
+ tmp_vec[14] = vld1q_s32(data + i + 4);
+ tmp_vec[15] = vld1q_s32(data + i + 5);
+ tmp_vec[16] = vld1q_s32(data + i + 6);
+ tmp_vec[17] = vld1q_s32(data + i + 7);
+
+
+ MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_2, 1)
+ MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 0)
+ MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 3)
+ MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 2)
+ MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 1)
+ MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 0)
+ MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 3)
+ MACC_32BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 2)
+ MACC_32BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 1)
+ MACC_32BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 0)
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ tmp_vec[3] = tmp_vec[15];
+ tmp_vec[4] = tmp_vec[16];
+ tmp_vec[5] = tmp_vec[17];
+ }
+ }
+ else { /* order == 9 */
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
+ int32x4_t qlp_coeff_2 = {qlp_coeff[8], 0, 0, 0};
+
+ tmp_vec[0] = vld1q_s32(data - 9);
+ tmp_vec[1] = vld1q_s32(data - 8);
+ tmp_vec[2] = vld1q_s32(data - 7);
+ tmp_vec[3] = vld1q_s32(data - 6);
+ tmp_vec[4] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+ tmp_vec[5] = vld1q_s32(data + i - 4);
+ tmp_vec[6] = vld1q_s32(data + i - 3);
+ tmp_vec[7] = vld1q_s32(data + i - 2);
+ tmp_vec[8] = vld1q_s32(data + i - 1);
+ tmp_vec[9] = vld1q_s32(data + i - 0);
+ tmp_vec[10] = vld1q_s32(data + i + 1);
+ tmp_vec[11] = vld1q_s32(data + i + 2);
+ tmp_vec[12] = vld1q_s32(data + i + 3);
+ tmp_vec[13] = vld1q_s32(data + i + 4);
+ tmp_vec[14] = vld1q_s32(data + i + 5);
+ tmp_vec[15] = vld1q_s32(data + i + 6);
+ tmp_vec[16] = vld1q_s32(data + i + 7);
+
+ MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_2, 0)
+ MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 3)
+ MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 2)
+ MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 1)
+ MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 0)
+ MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 3)
+ MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 2)
+ MACC_32BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 1)
+ MACC_32BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 0)
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ tmp_vec[3] = tmp_vec[15];
+ tmp_vec[4] = tmp_vec[16];
+ }
+ }
+ }
+ }
+ else if(order > 4) {
+ if(order > 6) {
+ if(order == 8) {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
+
+ tmp_vec[0] = vld1q_s32(data - 8);
+ tmp_vec[1] = vld1q_s32(data - 7);
+ tmp_vec[2] = vld1q_s32(data - 6);
+ tmp_vec[3] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+ tmp_vec[4] = vld1q_s32(data + i - 4);
+ tmp_vec[5] = vld1q_s32(data + i - 3);
+ tmp_vec[6] = vld1q_s32(data + i - 2);
+ tmp_vec[7] = vld1q_s32(data + i - 1);
+ tmp_vec[8] = vld1q_s32(data + i - 0);
+ tmp_vec[9] = vld1q_s32(data + i + 1);
+ tmp_vec[10] = vld1q_s32(data + i + 2);
+ tmp_vec[11] = vld1q_s32(data + i + 3);
+ tmp_vec[12] = vld1q_s32(data + i + 4);
+ tmp_vec[13] = vld1q_s32(data + i + 5);
+ tmp_vec[14] = vld1q_s32(data + i + 6);
+ tmp_vec[15] = vld1q_s32(data + i + 7);
+
+ MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_1, 3)
+ MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 2)
+ MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 1)
+ MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 0)
+ MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 3)
+ MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 2)
+ MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 1)
+ MACC_32BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 0)
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ tmp_vec[3] = tmp_vec[15];
+ }
+ }
+ else { /* order == 7 */
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], 0};
+
+ tmp_vec[0] = vld1q_s32(data - 7);
+ tmp_vec[1] = vld1q_s32(data - 6);
+ tmp_vec[2] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+ tmp_vec[3] = vld1q_s32(data + i - 4);
+ tmp_vec[4] = vld1q_s32(data + i - 3);
+ tmp_vec[5] = vld1q_s32(data + i - 2);
+ tmp_vec[6] = vld1q_s32(data + i - 1);
+ tmp_vec[7] = vld1q_s32(data + i - 0);
+ tmp_vec[8] = vld1q_s32(data + i + 1);
+ tmp_vec[9] = vld1q_s32(data + i + 2);
+ tmp_vec[10] = vld1q_s32(data + i + 3);
+ tmp_vec[11] = vld1q_s32(data + i + 4);
+ tmp_vec[12] = vld1q_s32(data + i + 5);
+ tmp_vec[13] = vld1q_s32(data + i + 6);
+ tmp_vec[14] = vld1q_s32(data + i + 7);
+
+ MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_1, 2)
+ MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 1)
+ MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 0)
+ MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 3)
+ MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 2)
+ MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 1)
+ MACC_32BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 0)
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ }
+ }
+ }
+ else {
+ if(order == 6) {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], 0, 0};
+
+ tmp_vec[0] = vld1q_s32(data - 6);
+ tmp_vec[1] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+ tmp_vec[2] = vld1q_s32(data + i - 4);
+ tmp_vec[3] = vld1q_s32(data + i - 3);
+ tmp_vec[4] = vld1q_s32(data + i - 2);
+ tmp_vec[5] = vld1q_s32(data + i - 1);
+ tmp_vec[6] = vld1q_s32(data + i - 0);
+ tmp_vec[7] = vld1q_s32(data + i + 1);
+ tmp_vec[8] = vld1q_s32(data + i + 2);
+ tmp_vec[9] = vld1q_s32(data + i + 3);
+ tmp_vec[10] = vld1q_s32(data + i + 4);
+ tmp_vec[11] = vld1q_s32(data + i + 5);
+ tmp_vec[12] = vld1q_s32(data + i + 6);
+ tmp_vec[13] = vld1q_s32(data + i + 7);
+
+ MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_1, 1)
+ MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 0)
+ MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 3)
+ MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 2)
+ MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 1)
+ MACC_32BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 0)
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ }
+ }
+ else { /* order == 5 */
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], 0, 0, 0};
+
+ tmp_vec[0] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+
+ tmp_vec[1] = vld1q_s32(data + i - 4);
+ tmp_vec[2] = vld1q_s32(data + i - 3);
+ tmp_vec[3] = vld1q_s32(data + i - 2);
+ tmp_vec[4] = vld1q_s32(data + i - 1);
+ tmp_vec[5] = vld1q_s32(data + i - 0);
+ tmp_vec[6] = vld1q_s32(data + i + 1);
+ tmp_vec[7] = vld1q_s32(data + i + 2);
+ tmp_vec[8] = vld1q_s32(data + i + 3);
+ tmp_vec[9] = vld1q_s32(data + i + 4);
+ tmp_vec[10] = vld1q_s32(data + i + 5);
+ tmp_vec[11] = vld1q_s32(data + i + 6);
+ tmp_vec[12] = vld1q_s32(data + i + 7);
+
+ MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_1, 0)
+ MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 3)
+ MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 2)
+ MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 1)
+ MACC_32BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 0)
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+
+ tmp_vec[0] = tmp_vec[12];
+ }
+ }
+ }
+ }
+ else {
+ if(order > 2) {
+ if(order == 4) {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+ tmp_vec[0] = vld1q_s32(data + i - 4);
+ tmp_vec[1] = vld1q_s32(data + i - 3);
+ tmp_vec[2] = vld1q_s32(data + i - 2);
+ tmp_vec[3] = vld1q_s32(data + i - 1);
+ tmp_vec[4] = vld1q_s32(data + i - 0);
+ tmp_vec[5] = vld1q_s32(data + i + 1);
+ tmp_vec[6] = vld1q_s32(data + i + 2);
+ tmp_vec[7] = vld1q_s32(data + i + 3);
+ tmp_vec[8] = vld1q_s32(data + i + 4);
+ tmp_vec[9] = vld1q_s32(data + i + 5);
+ tmp_vec[10] = vld1q_s32(data + i + 6);
+ tmp_vec[11] = vld1q_s32(data + i + 7);
+
+ MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_0, 3)
+ MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 2)
+ MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 1)
+ MACC_32BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 0)
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+ }
+ }
+ else { /* order == 3 */
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], 0};
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+ tmp_vec[0] = vld1q_s32(data + i - 3);
+ tmp_vec[1] = vld1q_s32(data + i - 2);
+ tmp_vec[2] = vld1q_s32(data + i - 1);
+ tmp_vec[4] = vld1q_s32(data + i + 1);
+ tmp_vec[5] = vld1q_s32(data + i + 2);
+ tmp_vec[6] = vld1q_s32(data + i + 3);
+ tmp_vec[8] = vld1q_s32(data + i + 5);
+ tmp_vec[9] = vld1q_s32(data + i + 6);
+ tmp_vec[10] = vld1q_s32(data + i + 7);
+
+ MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_0, 2)
+ MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 1)
+ MACC_32BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 0)
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+ }
+ }
+ }
+ else {
+ if(order == 2) {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], 0, 0};
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+ tmp_vec[0] = vld1q_s32(data + i - 2);
+ tmp_vec[1] = vld1q_s32(data + i - 1);
+ tmp_vec[4] = vld1q_s32(data + i + 2);
+ tmp_vec[5] = vld1q_s32(data + i + 3);
+ tmp_vec[8] = vld1q_s32(data + i + 6);
+ tmp_vec[9] = vld1q_s32(data + i + 7);
+
+ MUL_32_BIT_LOOP_UNROOL_3(qlp_coeff_0, 1)
+ MACC_32BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 0)
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+ }
+ }
+ else { /* order == 1 */
+ int32x4_t qlp_coeff_0 = vdupq_n_s32(qlp_coeff[0]);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int32x4_t summ_0, summ_1, summ_2;
+ tmp_vec[0] = vld1q_s32(data + i - 1);
+ tmp_vec[4] = vld1q_s32(data + i + 3);
+ tmp_vec[8] = vld1q_s32(data + i + 7);
+
+ summ_0 = vmulq_s32(tmp_vec[0], qlp_coeff_0);
+ summ_1 = vmulq_s32(tmp_vec[4], qlp_coeff_0);
+ summ_2 = vmulq_s32(tmp_vec[8], qlp_coeff_0);
+
+ vst1q_s32(residual+i + 0, vsubq_s32(vld1q_s32(data+i + 0) , vshlq_s32(summ_0,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 4, vsubq_s32(vld1q_s32(data+i + 4) , vshlq_s32(summ_1,vdupq_n_s32(-lp_quantization))));
+ vst1q_s32(residual+i + 8, vsubq_s32(vld1q_s32(data+i + 8) , vshlq_s32(summ_2,vdupq_n_s32(-lp_quantization))));
+ }
+ }
+ }
+ }
+ for(; i < (int)data_len; i++) {
+ sum = 0;
+ switch(order) {
+ case 12: sum += qlp_coeff[11] * data[i-12]; /* Falls through. */
+ case 11: sum += qlp_coeff[10] * data[i-11]; /* Falls through. */
+ case 10: sum += qlp_coeff[ 9] * data[i-10]; /* Falls through. */
+ case 9: sum += qlp_coeff[ 8] * data[i- 9]; /* Falls through. */
+ case 8: sum += qlp_coeff[ 7] * data[i- 8]; /* Falls through. */
+ case 7: sum += qlp_coeff[ 6] * data[i- 7]; /* Falls through. */
+ case 6: sum += qlp_coeff[ 5] * data[i- 6]; /* Falls through. */
+ case 5: sum += qlp_coeff[ 4] * data[i- 5]; /* Falls through. */
+ case 4: sum += qlp_coeff[ 3] * data[i- 4]; /* Falls through. */
+ case 3: sum += qlp_coeff[ 2] * data[i- 3]; /* Falls through. */
+ case 2: sum += qlp_coeff[ 1] * data[i- 2]; /* Falls through. */
+ case 1: sum += qlp_coeff[ 0] * data[i- 1];
+ }
+ residual[i] = data[i] - (sum >> lp_quantization);
+ }
+ }
+ else { /* order > 12 */
+ for(i = 0; i < (int)data_len; i++) {
+ sum = 0;
+ switch(order) {
+ case 32: sum += qlp_coeff[31] * data[i-32]; /* Falls through. */
+ case 31: sum += qlp_coeff[30] * data[i-31]; /* Falls through. */
+ case 30: sum += qlp_coeff[29] * data[i-30]; /* Falls through. */
+ case 29: sum += qlp_coeff[28] * data[i-29]; /* Falls through. */
+ case 28: sum += qlp_coeff[27] * data[i-28]; /* Falls through. */
+ case 27: sum += qlp_coeff[26] * data[i-27]; /* Falls through. */
+ case 26: sum += qlp_coeff[25] * data[i-26]; /* Falls through. */
+ case 25: sum += qlp_coeff[24] * data[i-25]; /* Falls through. */
+ case 24: sum += qlp_coeff[23] * data[i-24]; /* Falls through. */
+ case 23: sum += qlp_coeff[22] * data[i-23]; /* Falls through. */
+ case 22: sum += qlp_coeff[21] * data[i-22]; /* Falls through. */
+ case 21: sum += qlp_coeff[20] * data[i-21]; /* Falls through. */
+ case 20: sum += qlp_coeff[19] * data[i-20]; /* Falls through. */
+ case 19: sum += qlp_coeff[18] * data[i-19]; /* Falls through. */
+ case 18: sum += qlp_coeff[17] * data[i-18]; /* Falls through. */
+ case 17: sum += qlp_coeff[16] * data[i-17]; /* Falls through. */
+ case 16: sum += qlp_coeff[15] * data[i-16]; /* Falls through. */
+ case 15: sum += qlp_coeff[14] * data[i-15]; /* Falls through. */
+ case 14: sum += qlp_coeff[13] * data[i-14]; /* Falls through. */
+ case 13: sum += qlp_coeff[12] * data[i-13];
+ sum += qlp_coeff[11] * data[i-12];
+ sum += qlp_coeff[10] * data[i-11];
+ sum += qlp_coeff[ 9] * data[i-10];
+ sum += qlp_coeff[ 8] * data[i- 9];
+ sum += qlp_coeff[ 7] * data[i- 8];
+ sum += qlp_coeff[ 6] * data[i- 7];
+ sum += qlp_coeff[ 5] * data[i- 6];
+ sum += qlp_coeff[ 4] * data[i- 5];
+ sum += qlp_coeff[ 3] * data[i- 4];
+ sum += qlp_coeff[ 2] * data[i- 3];
+ sum += qlp_coeff[ 1] * data[i- 2];
+ sum += qlp_coeff[ 0] * data[i- 1];
+ }
+ residual[i] = data[i] - (sum >> lp_quantization);
+ }
+ }
+}
+
+
+
+#define MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_vec, lane) \
+ summ_l_0 = vmull_laneq_s32(vget_low_s32(tmp_vec[0]),qlp_coeff_vec, lane); \
+ summ_h_0 = vmull_high_laneq_s32(tmp_vec[0], qlp_coeff_vec, lane);\
+ summ_l_1 = vmull_laneq_s32(vget_low_s32(tmp_vec[4]),qlp_coeff_vec, lane); \
+ summ_h_1 = vmull_high_laneq_s32(tmp_vec[4], qlp_coeff_vec, lane);\
+ summ_l_2 = vmull_laneq_s32(vget_low_s32(tmp_vec[8]),qlp_coeff_vec, lane);\
+ summ_h_2 = vmull_high_laneq_s32(tmp_vec[8], qlp_coeff_vec, lane);
+
+
+#define MACC_64_BIT_LOOP_UNROOL_3(tmp_vec_ind, qlp_coeff_vec, lane) \
+ summ_l_0 = vmlal_laneq_s32(summ_l_0,vget_low_s32(tmp_vec[tmp_vec_ind]),qlp_coeff_vec, lane); \
+ summ_h_0 = vmlal_high_laneq_s32(summ_h_0, tmp_vec[tmp_vec_ind], qlp_coeff_vec, lane); \
+ summ_l_1 = vmlal_laneq_s32(summ_l_1, vget_low_s32(tmp_vec[tmp_vec_ind+4]),qlp_coeff_vec, lane); \
+ summ_h_1 = vmlal_high_laneq_s32(summ_h_1, tmp_vec[tmp_vec_ind+4], qlp_coeff_vec, lane); \
+ summ_l_2 = vmlal_laneq_s32(summ_l_2, vget_low_s32(tmp_vec[tmp_vec_ind+8]),qlp_coeff_vec, lane);\
+ summ_h_2 = vmlal_high_laneq_s32(summ_h_2,tmp_vec[tmp_vec_ind+8], qlp_coeff_vec, lane);
+
+#define SHIFT_SUMS_64BITS_AND_STORE_SUB() \
+ res0 = vuzp1q_s32(vreinterpretq_s32_s64(vshlq_s64(summ_l_0,lp_quantization_vec)), vreinterpretq_s32_s64(vshlq_s64(summ_h_0,lp_quantization_vec))); \
+ res1 = vuzp1q_s32(vreinterpretq_s32_s64(vshlq_s64(summ_l_1,lp_quantization_vec)), vreinterpretq_s32_s64(vshlq_s64(summ_h_1,lp_quantization_vec))); \
+ res2 = vuzp1q_s32(vreinterpretq_s32_s64(vshlq_s64(summ_l_2,lp_quantization_vec)), vreinterpretq_s32_s64(vshlq_s64(summ_h_2,lp_quantization_vec))); \
+ vst1q_s32(residual+i+0, vsubq_s32(vld1q_s32(data+i+0), res0));\
+ vst1q_s32(residual+i+4, vsubq_s32(vld1q_s32(data+i+4), res1));\
+ vst1q_s32(residual+i+8, vsubq_s32(vld1q_s32(data+i+8), res2));
+
+void FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_neon(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[]) {
+ int i;
+ FLAC__int64 sum;
+
+ int32x4_t tmp_vec[20];
+ int32x4_t res0, res1, res2;
+ int64x2_t lp_quantization_vec = vdupq_n_s64(-lp_quantization);
+
+ FLAC__ASSERT(order > 0);
+ FLAC__ASSERT(order <= 32);
+
+	// Reading before data[0] (prologue reads) is valid: the encoder calls this as encoder->private_->local_lpc_compute_residual_from_qlp_coefficients_64bit(signal+order, ...), so `order` samples of history precede the block.
+ if(order <= 12) {
+ if(order > 8) {
+ if(order > 10) {
+ if(order == 12) {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4],qlp_coeff[5],qlp_coeff[6],qlp_coeff[7]};
+ int32x4_t qlp_coeff_2 = {qlp_coeff[8],qlp_coeff[9],qlp_coeff[10],qlp_coeff[11]};
+
+ tmp_vec[0] = vld1q_s32(data - 12);
+ tmp_vec[1] = vld1q_s32(data - 11);
+ tmp_vec[2] = vld1q_s32(data - 10);
+ tmp_vec[3] = vld1q_s32(data - 9);
+ tmp_vec[4] = vld1q_s32(data - 8);
+ tmp_vec[5] = vld1q_s32(data - 7);
+ tmp_vec[6] = vld1q_s32(data - 6);
+ tmp_vec[7] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+
+ tmp_vec[8] = vld1q_s32(data+i-4);
+ tmp_vec[9] = vld1q_s32(data+i-3);
+ tmp_vec[10] = vld1q_s32(data+i-2);
+ tmp_vec[11] = vld1q_s32(data+i-1);
+ tmp_vec[12] = vld1q_s32(data+i);
+ tmp_vec[13] = vld1q_s32(data+i+1);
+ tmp_vec[14] = vld1q_s32(data+i+2);
+ tmp_vec[15] = vld1q_s32(data+i+3);
+ tmp_vec[16] = vld1q_s32(data + i + 4);
+ tmp_vec[17] = vld1q_s32(data + i + 5);
+ tmp_vec[18] = vld1q_s32(data + i + 6);
+ tmp_vec[19] = vld1q_s32(data + i + 7);
+
+ MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_2, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_2, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_2, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_1, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(7, qlp_coeff_1, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(10,qlp_coeff_0, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(11,qlp_coeff_0, 0)
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ tmp_vec[3] = tmp_vec[15];
+ tmp_vec[4] = tmp_vec[16];
+ tmp_vec[5] = tmp_vec[17];
+ tmp_vec[6] = tmp_vec[18];
+ tmp_vec[7] = tmp_vec[19];
+ }
+ }
+ else { /* order == 11 */
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4],qlp_coeff[5],qlp_coeff[6],qlp_coeff[7]};
+ int32x4_t qlp_coeff_2 = {qlp_coeff[8],qlp_coeff[9],qlp_coeff[10],0};
+
+ tmp_vec[0] = vld1q_s32(data - 11);
+ tmp_vec[1] = vld1q_s32(data - 10);
+ tmp_vec[2] = vld1q_s32(data - 9);
+ tmp_vec[3] = vld1q_s32(data - 8);
+ tmp_vec[4] = vld1q_s32(data - 7);
+ tmp_vec[5] = vld1q_s32(data - 6);
+ tmp_vec[6] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+
+ tmp_vec[7] = vld1q_s32(data+i-4);
+ tmp_vec[8] = vld1q_s32(data+i-3);
+ tmp_vec[9] = vld1q_s32(data+i-2);
+ tmp_vec[10] = vld1q_s32(data+i-1);
+ tmp_vec[11] = vld1q_s32(data+i);
+ tmp_vec[12] = vld1q_s32(data+i+1);
+ tmp_vec[13] = vld1q_s32(data+i+2);
+ tmp_vec[14] = vld1q_s32(data+i+3);
+ tmp_vec[15] = vld1q_s32(data + i + 4);
+ tmp_vec[16] = vld1q_s32(data + i + 5);
+ tmp_vec[17] = vld1q_s32(data + i + 6);
+ tmp_vec[18] = vld1q_s32(data + i + 7);
+
+ MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_2, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_2, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_1, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(10,qlp_coeff_0, 0)
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ tmp_vec[3] = tmp_vec[15];
+ tmp_vec[4] = tmp_vec[16];
+ tmp_vec[5] = tmp_vec[17];
+ tmp_vec[6] = tmp_vec[18];
+ }
+ }
+ }
+ else
+ {
+ if (order == 10) {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
+ int32x4_t qlp_coeff_2 = {qlp_coeff[8], qlp_coeff[9], 0, 0};
+
+ tmp_vec[0] = vld1q_s32(data - 10);
+ tmp_vec[1] = vld1q_s32(data - 9);
+ tmp_vec[2] = vld1q_s32(data - 8);
+ tmp_vec[3] = vld1q_s32(data - 7);
+ tmp_vec[4] = vld1q_s32(data - 6);
+ tmp_vec[5] = vld1q_s32(data - 5);
+
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+
+ tmp_vec[6] = vld1q_s32(data + i - 4);
+ tmp_vec[7] = vld1q_s32(data + i - 3);
+ tmp_vec[8] = vld1q_s32(data + i - 2);
+ tmp_vec[9] = vld1q_s32(data + i - 1);
+ tmp_vec[10] = vld1q_s32(data + i - 0);
+ tmp_vec[11] = vld1q_s32(data + i + 1);
+ tmp_vec[12] = vld1q_s32(data + i + 2);
+ tmp_vec[13] = vld1q_s32(data + i + 3);
+ tmp_vec[14] = vld1q_s32(data + i + 4);
+ tmp_vec[15] = vld1q_s32(data + i + 5);
+ tmp_vec[16] = vld1q_s32(data + i + 6);
+ tmp_vec[17] = vld1q_s32(data + i + 7);
+
+ MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_2, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_2, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_1, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(9, qlp_coeff_0, 0)
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ tmp_vec[3] = tmp_vec[15];
+ tmp_vec[4] = tmp_vec[16];
+ tmp_vec[5] = tmp_vec[17];
+ }
+ }
+
+ else /* order == 9 */ {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
+ int32x4_t qlp_coeff_2 = {qlp_coeff[8], 0, 0, 0};
+
+ tmp_vec[0] = vld1q_s32(data - 9);
+ tmp_vec[1] = vld1q_s32(data - 8);
+ tmp_vec[2] = vld1q_s32(data - 7);
+ tmp_vec[3] = vld1q_s32(data - 6);
+ tmp_vec[4] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+
+ tmp_vec[5] = vld1q_s32(data + i - 4);
+ tmp_vec[6] = vld1q_s32(data + i - 3);
+ tmp_vec[7] = vld1q_s32(data + i - 2);
+ tmp_vec[8] = vld1q_s32(data + i - 1);
+ tmp_vec[9] = vld1q_s32(data + i - 0);
+ tmp_vec[10] = vld1q_s32(data + i + 1);
+ tmp_vec[11] = vld1q_s32(data + i + 2);
+ tmp_vec[12] = vld1q_s32(data + i + 3);
+ tmp_vec[13] = vld1q_s32(data + i + 4);
+ tmp_vec[14] = vld1q_s32(data + i + 5);
+ tmp_vec[15] = vld1q_s32(data + i + 6);
+ tmp_vec[16] = vld1q_s32(data + i + 7);
+
+ MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_2, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_1, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(8, qlp_coeff_0, 0)
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ tmp_vec[3] = tmp_vec[15];
+ tmp_vec[4] = tmp_vec[16];
+ }
+ }
+ }
+ }
+ else if (order > 4)
+ {
+ if (order > 6)
+ {
+ if (order == 8)
+ {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], qlp_coeff[7]};
+
+ tmp_vec[0] = vld1q_s32(data - 8);
+ tmp_vec[1] = vld1q_s32(data - 7);
+ tmp_vec[2] = vld1q_s32(data - 6);
+ tmp_vec[3] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+
+ tmp_vec[4] = vld1q_s32(data + i - 4);
+ tmp_vec[5] = vld1q_s32(data + i - 3);
+ tmp_vec[6] = vld1q_s32(data + i - 2);
+ tmp_vec[7] = vld1q_s32(data + i - 1);
+ tmp_vec[8] = vld1q_s32(data + i - 0);
+ tmp_vec[9] = vld1q_s32(data + i + 1);
+ tmp_vec[10] = vld1q_s32(data + i + 2);
+ tmp_vec[11] = vld1q_s32(data + i + 3);
+ tmp_vec[12] = vld1q_s32(data + i + 4);
+ tmp_vec[13] = vld1q_s32(data + i + 5);
+ tmp_vec[14] = vld1q_s32(data + i + 6);
+ tmp_vec[15] = vld1q_s32(data + i + 7);
+
+
+ MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_1, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_1, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(7, qlp_coeff_0, 0)
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ tmp_vec[3] = tmp_vec[15];
+ }
+ }
+ else /* order == 7 */
+ {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], qlp_coeff[6], 0};
+
+ tmp_vec[0] = vld1q_s32(data - 7);
+ tmp_vec[1] = vld1q_s32(data - 6);
+ tmp_vec[2] = vld1q_s32(data - 5);
+
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+ tmp_vec[3] = vld1q_s32(data +i - 4);
+ tmp_vec[4] = vld1q_s32(data + i - 3);
+ tmp_vec[5] = vld1q_s32(data + i - 2);
+ tmp_vec[6] = vld1q_s32(data + i - 1);
+ tmp_vec[7] = vld1q_s32(data + i - 0);
+ tmp_vec[8] = vld1q_s32(data + i + 1);
+ tmp_vec[9] = vld1q_s32(data + i + 2);
+ tmp_vec[10] = vld1q_s32(data + i + 3);
+ tmp_vec[11] = vld1q_s32(data + i + 4);
+ tmp_vec[12] = vld1q_s32(data + i + 5);
+ tmp_vec[13] = vld1q_s32(data + i + 6);
+ tmp_vec[14] = vld1q_s32(data + i + 7);
+
+
+ MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_1, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_1, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(6, qlp_coeff_0, 0)
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ tmp_vec[2] = tmp_vec[14];
+ }
+ }
+ }
+ else
+ {
+ if (order == 6) {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], qlp_coeff[5], 0, 0};
+
+ tmp_vec[0] = vld1q_s32(data - 6);
+ tmp_vec[1] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+
+ tmp_vec[2] = vld1q_s32(data + i - 4);
+ tmp_vec[3] = vld1q_s32(data + i - 3);
+ tmp_vec[4] = vld1q_s32(data + i - 2);
+ tmp_vec[5] = vld1q_s32(data + i - 1);
+ tmp_vec[6] = vld1q_s32(data + i - 0);
+ tmp_vec[7] = vld1q_s32(data + i + 1);
+ tmp_vec[8] = vld1q_s32(data + i + 2);
+ tmp_vec[9] = vld1q_s32(data + i + 3);
+ tmp_vec[10] = vld1q_s32(data + i + 4);
+ tmp_vec[11] = vld1q_s32(data + i + 5);
+ tmp_vec[12] = vld1q_s32(data + i + 6);
+ tmp_vec[13] = vld1q_s32(data + i + 7);
+
+
+ MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_1, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_1, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(5, qlp_coeff_0, 0)
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+
+ tmp_vec[0] = tmp_vec[12];
+ tmp_vec[1] = tmp_vec[13];
+ }
+ }
+
+ else
+ { /* order == 5 */
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+ int32x4_t qlp_coeff_1 = {qlp_coeff[4], 0, 0, 0};
+
+ tmp_vec[0] = vld1q_s32(data - 5);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+ tmp_vec[1] = vld1q_s32(data + i - 4);
+ tmp_vec[2] = vld1q_s32(data + i - 3);
+ tmp_vec[3] = vld1q_s32(data + i - 2);
+ tmp_vec[4] = vld1q_s32(data + i - 1);
+ tmp_vec[5] = vld1q_s32(data + i - 0);
+ tmp_vec[6] = vld1q_s32(data + i + 1);
+ tmp_vec[7] = vld1q_s32(data + i + 2);
+ tmp_vec[8] = vld1q_s32(data + i + 3);
+ tmp_vec[9] = vld1q_s32(data + i + 4);
+ tmp_vec[10] = vld1q_s32(data + i + 5);
+ tmp_vec[11] = vld1q_s32(data + i + 6);
+ tmp_vec[12] = vld1q_s32(data + i + 7);
+
+ MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_1, 0)
+ MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(4, qlp_coeff_0, 0)
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+
+ tmp_vec[0] = tmp_vec[12];
+ }
+ }
+ }
+ }
+ else
+ {
+ if (order > 2)
+ {
+ if (order == 4)
+ {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], qlp_coeff[3]};
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+ tmp_vec[0] = vld1q_s32(data + i - 4);
+ tmp_vec[1] = vld1q_s32(data + i - 3);
+ tmp_vec[2] = vld1q_s32(data + i - 2);
+ tmp_vec[3] = vld1q_s32(data + i - 1);
+ tmp_vec[4] = vld1q_s32(data + i - 0);
+ tmp_vec[5] = vld1q_s32(data + i + 1);
+ tmp_vec[6] = vld1q_s32(data + i + 2);
+ tmp_vec[7] = vld1q_s32(data + i + 3);
+ tmp_vec[8] = vld1q_s32(data + i + 4);
+ tmp_vec[9] = vld1q_s32(data + i + 5);
+ tmp_vec[10] = vld1q_s32(data + i + 6);
+ tmp_vec[11] = vld1q_s32(data + i + 7);
+
+ MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_0, 3)
+ MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(3, qlp_coeff_0, 0)
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+ }
+ }
+ else
+ { /* order == 3 */
+
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], qlp_coeff[2], 0};
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+ tmp_vec[0] = vld1q_s32(data + i - 3);
+ tmp_vec[1] = vld1q_s32(data + i - 2);
+ tmp_vec[2] = vld1q_s32(data + i - 1);
+ tmp_vec[4] = vld1q_s32(data + i + 1);
+ tmp_vec[5] = vld1q_s32(data + i + 2);
+ tmp_vec[6] = vld1q_s32(data + i + 3);
+ tmp_vec[8] = vld1q_s32(data + i + 5);
+ tmp_vec[9] = vld1q_s32(data + i + 6);
+ tmp_vec[10] = vld1q_s32(data + i + 7);
+
+ MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_0, 2)
+ MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(2, qlp_coeff_0, 0)
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+ }
+ }
+ }
+ else
+ {
+ if (order == 2)
+ {
+ int32x4_t qlp_coeff_0 = {qlp_coeff[0], qlp_coeff[1], 0, 0};
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+ tmp_vec[0] = vld1q_s32(data + i - 2);
+ tmp_vec[1] = vld1q_s32(data + i - 1);
+ tmp_vec[4] = vld1q_s32(data + i + 2);
+ tmp_vec[5] = vld1q_s32(data + i + 3);
+ tmp_vec[8] = vld1q_s32(data + i + 6);
+ tmp_vec[9] = vld1q_s32(data + i + 7);
+
+ MUL_64_BIT_LOOP_UNROOL_3(qlp_coeff_0, 1)
+ MACC_64_BIT_LOOP_UNROOL_3(1, qlp_coeff_0, 0)
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+ }
+ }
+
+ else
+ { /* order == 1 */
+
+ int32x2_t qlp_coeff_0_2 = vdup_n_s32(qlp_coeff[0]);
+ int32x4_t qlp_coeff_0_4 = vdupq_n_s32(qlp_coeff[0]);
+
+ for (i = 0; i < (int)data_len - 11; i += 12)
+ {
+ int64x2_t summ_l_0, summ_h_0, summ_l_1, summ_h_1, summ_l_2, summ_h_2;
+ tmp_vec[0] = vld1q_s32(data + i - 1);
+ tmp_vec[4] = vld1q_s32(data + i + 3);
+ tmp_vec[8] = vld1q_s32(data + i + 7);
+
+ summ_l_0 = vmull_s32(vget_low_s32(tmp_vec[0]), qlp_coeff_0_2);
+ summ_h_0 = vmull_high_s32(tmp_vec[0], qlp_coeff_0_4);
+
+ summ_l_1 = vmull_s32(vget_low_s32(tmp_vec[4]), qlp_coeff_0_2);
+ summ_h_1 = vmull_high_s32(tmp_vec[4], qlp_coeff_0_4);
+
+ summ_l_2 = vmull_s32(vget_low_s32(tmp_vec[8]), qlp_coeff_0_2);
+ summ_h_2 = vmull_high_s32(tmp_vec[8], qlp_coeff_0_4);
+
+ SHIFT_SUMS_64BITS_AND_STORE_SUB()
+ }
+ }
+ }
+ }
+ for (; i < (int)data_len; i++)
+ {
+ sum = 0;
+ switch (order)
+ {
+ case 12:
+ sum += qlp_coeff[11] * (FLAC__int64)data[i - 12]; /* Falls through. */
+ case 11:
+ sum += qlp_coeff[10] * (FLAC__int64)data[i - 11]; /* Falls through. */
+ case 10:
+ sum += qlp_coeff[9] * (FLAC__int64)data[i - 10]; /* Falls through. */
+ case 9:
+ sum += qlp_coeff[8] * (FLAC__int64)data[i - 9]; /* Falls through. */
+ case 8:
+ sum += qlp_coeff[7] * (FLAC__int64)data[i - 8]; /* Falls through. */
+ case 7:
+ sum += qlp_coeff[6] * (FLAC__int64)data[i - 7]; /* Falls through. */
+ case 6:
+ sum += qlp_coeff[5] * (FLAC__int64)data[i - 6]; /* Falls through. */
+ case 5:
+ sum += qlp_coeff[4] * (FLAC__int64)data[i - 5]; /* Falls through. */
+ case 4:
+ sum += qlp_coeff[3] * (FLAC__int64)data[i - 4]; /* Falls through. */
+ case 3:
+ sum += qlp_coeff[2] * (FLAC__int64)data[i - 3]; /* Falls through. */
+ case 2:
+ sum += qlp_coeff[1] * (FLAC__int64)data[i - 2]; /* Falls through. */
+ case 1:
+ sum += qlp_coeff[0] * (FLAC__int64)data[i - 1];
+ }
+ residual[i] = data[i] - (sum >> lp_quantization);
+ }
+ }
+ else
+ { /* order > 12 */
+ for (i = 0; i < (int)data_len; i++)
+ {
+ sum = 0;
+ switch (order)
+ {
+ case 32:
+ sum += qlp_coeff[31] * (FLAC__int64)data[i - 32]; /* Falls through. */
+ case 31:
+ sum += qlp_coeff[30] * (FLAC__int64)data[i - 31]; /* Falls through. */
+ case 30:
+ sum += qlp_coeff[29] * (FLAC__int64)data[i - 30]; /* Falls through. */
+ case 29:
+ sum += qlp_coeff[28] * (FLAC__int64)data[i - 29]; /* Falls through. */
+ case 28:
+ sum += qlp_coeff[27] * (FLAC__int64)data[i - 28]; /* Falls through. */
+ case 27:
+ sum += qlp_coeff[26] * (FLAC__int64)data[i - 27]; /* Falls through. */
+ case 26:
+ sum += qlp_coeff[25] * (FLAC__int64)data[i - 26]; /* Falls through. */
+ case 25:
+ sum += qlp_coeff[24] * (FLAC__int64)data[i - 25]; /* Falls through. */
+ case 24:
+ sum += qlp_coeff[23] * (FLAC__int64)data[i - 24]; /* Falls through. */
+ case 23:
+ sum += qlp_coeff[22] * (FLAC__int64)data[i - 23]; /* Falls through. */
+ case 22:
+ sum += qlp_coeff[21] * (FLAC__int64)data[i - 22]; /* Falls through. */
+ case 21:
+ sum += qlp_coeff[20] * (FLAC__int64)data[i - 21]; /* Falls through. */
+ case 20:
+ sum += qlp_coeff[19] * (FLAC__int64)data[i - 20]; /* Falls through. */
+ case 19:
+ sum += qlp_coeff[18] * (FLAC__int64)data[i - 19]; /* Falls through. */
+ case 18:
+ sum += qlp_coeff[17] * (FLAC__int64)data[i - 18]; /* Falls through. */
+ case 17:
+ sum += qlp_coeff[16] * (FLAC__int64)data[i - 17]; /* Falls through. */
+ case 16:
+ sum += qlp_coeff[15] * (FLAC__int64)data[i - 16]; /* Falls through. */
+ case 15:
+ sum += qlp_coeff[14] * (FLAC__int64)data[i - 15]; /* Falls through. */
+ case 14:
+ sum += qlp_coeff[13] * (FLAC__int64)data[i - 14]; /* Falls through. */
+ case 13:
+ sum += qlp_coeff[12] * (FLAC__int64)data[i - 13];
+ sum += qlp_coeff[11] * (FLAC__int64)data[i - 12];
+ sum += qlp_coeff[10] * (FLAC__int64)data[i - 11];
+ sum += qlp_coeff[9] * (FLAC__int64)data[i - 10];
+ sum += qlp_coeff[8] * (FLAC__int64)data[i - 9];
+ sum += qlp_coeff[7] * (FLAC__int64)data[i - 8];
+ sum += qlp_coeff[6] * (FLAC__int64)data[i - 7];
+ sum += qlp_coeff[5] * (FLAC__int64)data[i - 6];
+ sum += qlp_coeff[4] * (FLAC__int64)data[i - 5];
+ sum += qlp_coeff[3] * (FLAC__int64)data[i - 4];
+ sum += qlp_coeff[2] * (FLAC__int64)data[i - 3];
+ sum += qlp_coeff[1] * (FLAC__int64)data[i - 2];
+ sum += qlp_coeff[0] * (FLAC__int64)data[i - 1];
+ }
+ residual[i] = data[i] - (sum >> lp_quantization);
+ }
+ }
+
+ return;
+}
+
+#endif /* FLAC__CPU_ARM64 && FLAC__HAS_NEONINTRIN */
+#endif /* FLAC__NO_ASM */
+#endif /* FLAC__INTEGER_ONLY_LIBRARY */
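
Both new routines share the same structure: twelve residuals are produced per iteration in three int32x4_t accumulators (pairs of int64x2_t accumulators in the wide variant), the preloaded coefficient vectors are applied one lane at a time via vmlaq_laneq_s32 / vmlal_laneq_s32, and any leftover samples plus every order above 12 fall back to the scalar switch. Each SIMD iteration is a vectorised form of the scalar recurrence below, a sketch of the model the intrinsics mirror (not code from the commit; plain <stdint.h> types stand in for the FLAC__int32/FLAC__int64 typedefs, and the 64-bit accumulator matches the wide routine, whereas the narrow routine keeps a 32-bit one):

#include <stdint.h>

/* Scalar model of the residual computation vectorised above: predict each
 * sample from `order` previous samples using the quantized LPC coefficients,
 * shift the accumulated product down by lp_quantization, and subtract. */
static void lpc_residual_scalar(const int32_t *data, uint32_t data_len,
                                const int32_t qlp_coeff[], uint32_t order,
                                int lp_quantization, int32_t residual[])
{
	int i;
	uint32_t j;
	for (i = 0; i < (int)data_len; i++) {
		int64_t sum = 0;
		for (j = 0; j < order; j++)
			sum += (int64_t)qlp_coeff[j] * data[i - 1 - (int)j]; /* history reads before data[0] */
		residual[i] = data[i] - (int32_t)(sum >> lp_quantization);
	}
}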
diff --git a/src/libFLAC/stream_encoder.c b/src/libFLAC/stream_encoder.c
index 811250a8..38b19486 100644
--- a/src/libFLAC/stream_encoder.c
+++ b/src/libFLAC/stream_encoder.c
@@ -1003,8 +1003,16 @@ static FLAC__StreamEncoderInitStatus init_stream_internal_(
# endif
# endif /* FLAC__HAS_X86INTRIN */
# endif /* FLAC__CPU_... */
+
+ #if defined FLAC__CPU_ARM64
+ encoder->private_->local_lpc_compute_residual_from_qlp_coefficients_16bit = FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon;
+ encoder->private_->local_lpc_compute_residual_from_qlp_coefficients = FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon;
+ encoder->private_->local_lpc_compute_residual_from_qlp_coefficients_64bit = FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_neon;
+ # endif
+
}
# endif /* !FLAC__NO_ASM */
+
#endif /* !FLAC__INTEGER_ONLY_LIBRARY */
#if !defined FLAC__NO_ASM && FLAC__HAS_X86INTRIN
if(encoder->private_->cpuinfo.use_asm) {
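
With the stream_encoder.c hunk above, ARM64 builds route all three residual function pointers to the NEON implementations. A hypothetical standalone call is sketched below (illustrative only, not taken from the encoder; the buffer sizes and coefficients are made up, and it builds in-tree on an AArch64 target because the prototype lives in the private libFLAC header):

#include "private/lpc.h"	/* in-tree private header declaring the NEON routines */

static void neon_residual_example(void)
{
	/* `order` warm-up samples of history, followed by a 16-sample block */
	FLAC__int32 signal[2 + 16] = {0};
	FLAC__int32 residual[16];
	const FLAC__int32 qlp_coeff[2] = { 3, -1 };	/* hypothetical order-2 predictor */

	/* The encoder passes signal+order, which is why the implementation may
	 * read `order` samples before index 0 (the prologue reads noted above). */
	FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_neon(
		signal + 2, 16, qlp_coeff, 2, /*lp_quantization=*/1, residual);
}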