-rw-r--r--  src/libFLAC/CMakeLists.txt                        1
-rw-r--r--  src/libFLAC/Makefile.am                           1
-rw-r--r--  src/libFLAC/Makefile.lite                         1
-rw-r--r--  src/libFLAC/ia32/lpc_asm-unrolled.nasm          785
-rw-r--r--  src/libFLAC/ia32/lpc_asm.nasm                   667
-rw-r--r--  src/libFLAC/include/private/lpc.h                40
-rw-r--r--  src/libFLAC/libFLAC_dynamic.vcproj                4
-rw-r--r--  src/libFLAC/libFLAC_dynamic.vcxproj               1
-rw-r--r--  src/libFLAC/libFLAC_dynamic.vcxproj.filters       5
-rw-r--r--  src/libFLAC/libFLAC_static.vcproj                 4
-rw-r--r--  src/libFLAC/libFLAC_static.vcxproj                1
-rw-r--r--  src/libFLAC/libFLAC_static.vcxproj.filters        5
-rw-r--r--  src/libFLAC/lpc.c                                10
-rw-r--r--  src/libFLAC/lpc_intrin_sse.c                    454
-rw-r--r--  src/libFLAC/lpc_intrin_sse2.c                   166
-rw-r--r--  src/libFLAC/lpc_intrin_vsx.c                   1198
-rw-r--r--  src/libFLAC/stream_encoder.c                    101
17 files changed, 682 insertions, 2762 deletions
diff --git a/src/libFLAC/CMakeLists.txt b/src/libFLAC/CMakeLists.txt
index 6fffeb42..5cbd0fd4 100644
--- a/src/libFLAC/CMakeLists.txt
+++ b/src/libFLAC/CMakeLists.txt
@@ -58,7 +58,6 @@ add_library(FLAC
float.c
format.c
lpc.c
- lpc_intrin_sse.c
lpc_intrin_sse2.c
lpc_intrin_sse41.c
lpc_intrin_avx2.c
diff --git a/src/libFLAC/Makefile.am b/src/libFLAC/Makefile.am
index 468939d5..fbba34ed 100644
--- a/src/libFLAC/Makefile.am
+++ b/src/libFLAC/Makefile.am
@@ -111,7 +111,6 @@ libFLAC_sources = \
float.c \
format.c \
lpc.c \
- lpc_intrin_sse.c \
lpc_intrin_sse2.c \
lpc_intrin_sse41.c \
lpc_intrin_avx2.c \
diff --git a/src/libFLAC/Makefile.lite b/src/libFLAC/Makefile.lite
index aa3e1759..b6b29ca4 100644
--- a/src/libFLAC/Makefile.lite
+++ b/src/libFLAC/Makefile.lite
@@ -87,7 +87,6 @@ SRCS_C = \
float.c \
format.c \
lpc.c \
- lpc_intrin_sse.c \
lpc_intrin_sse2.c \
lpc_intrin_sse41.c \
lpc_intrin_avx2.c \
diff --git a/src/libFLAC/ia32/lpc_asm-unrolled.nasm b/src/libFLAC/ia32/lpc_asm-unrolled.nasm
deleted file mode 100644
index 02c0363c..00000000
--- a/src/libFLAC/ia32/lpc_asm-unrolled.nasm
+++ /dev/null
@@ -1,785 +0,0 @@
-; vim:filetype=nasm ts=8
-
-; libFLAC - Free Lossless Audio Codec library
-; Copyright (C) 2001-2009 Josh Coalson
-; Copyright (C) 2011-2016 Xiph.Org Foundation
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; - Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-;
-; - Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in the
-; documentation and/or other materials provided with the distribution.
-;
-; - Neither the name of the Xiph.org Foundation nor the names of its
-; contributors may be used to endorse or promote products derived from
-; this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
-; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-; [CR] is a note to flag that the instruction can be easily reordered
-
-%include "nasm.h"
-
- data_section
-
-cglobal FLAC__lpc_compute_autocorrelation_asm
-
- code_section
-
-; **********************************************************************
-;
-; void FLAC__lpc_compute_autocorrelation_asm(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[])
-; {
-; FLAC__real d;
-; unsigned sample, coeff;
-; const unsigned limit = data_len - lag;
-;
-; assert(lag > 0);
-; assert(lag <= data_len);
-;
-; for(coeff = 0; coeff < lag; coeff++)
-; autoc[coeff] = 0.0;
-; for(sample = 0; sample <= limit; sample++){
-; d = data[sample];
-; for(coeff = 0; coeff < lag; coeff++)
-; autoc[coeff] += d * data[sample+coeff];
-; }
-; for(; sample < data_len; sample++){
-; d = data[sample];
-; for(coeff = 0; coeff < data_len - sample; coeff++)
-; autoc[coeff] += d * data[sample+coeff];
-; }
-; }
-;
-FLAC__lpc_compute_autocorrelation_asm:
-
- push ebp
- lea ebp, [esp + 8]
- push ebx
- push esi
- push edi
-
- mov edx, [ebp + 8] ; edx == lag
- mov ecx, [ebp + 4] ; ecx == data_len
- mov esi, [ebp] ; esi == data
- mov edi, [ebp + 12] ; edi == autoc
-
- cmp edx, 1
- ja short .lag_above_1
-.lag_eq_1:
- fldz ; will accumulate autoc[0]
- ALIGN 16
-.lag_1_loop:
- fld dword [esi]
- add esi, byte 4 ; sample++
- fmul st0, st0
- faddp st1, st0
- dec ecx
- jnz .lag_1_loop
- fstp dword [edi]
- jmp .end
-
-.lag_above_1:
- cmp edx, 2
- ja short .lag_above_2
-.lag_eq_2:
- fldz ; will accumulate autoc[1]
- dec ecx
- fldz ; will accumulate autoc[0]
- fld dword [esi]
- ALIGN 16
-.lag_2_loop:
- add esi, byte 4 ; [CR] sample++
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi]
- fmul st1, st0
- fxch
- faddp st3, st0 ; add to autoc[1]
- dec ecx
- jnz .lag_2_loop
- ; clean up the leftovers
- fmul st0, st0
- faddp st1, st0 ; add to autoc[0]
- fstp dword [edi]
- fstp dword [edi + 4]
- jmp .end
-
-.lag_above_2:
- cmp edx, 3
- ja short .lag_above_3
-.lag_eq_3:
- fldz ; will accumulate autoc[2]
- dec ecx
- fldz ; will accumulate autoc[1]
- dec ecx
- fldz ; will accumulate autoc[0]
- ALIGN 16
-.lag_3_loop:
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[1]
- fld dword [esi + 8]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st3, st0 ; add to autoc[2]
- dec ecx
- jnz .lag_3_loop
- ; clean up the leftovers
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st1, st0
- fxch
- faddp st3, st0 ; add to autoc[1]
- fmul st0, st0
- faddp st1, st0 ; add to autoc[0]
- fstp dword [edi]
- fstp dword [edi + 4]
- fstp dword [edi + 8]
- jmp .end
-
-.lag_above_3:
- cmp edx, 4
- ja near .lag_above_4
-.lag_eq_4:
- fldz ; will accumulate autoc[3]
- dec ecx
- fldz ; will accumulate autoc[2]
- dec ecx
- fldz ; will accumulate autoc[1]
- dec ecx
- fldz ; will accumulate autoc[0]
- ALIGN 16
-.lag_4_loop:
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[1]
- fld dword [esi + 8]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[2]
- fld dword [esi + 12]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st4, st0 ; add to autoc[3]
- dec ecx
- jnz .lag_4_loop
- ; clean up the leftovers
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[1]
- fld dword [esi + 8]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st3, st0 ; add to autoc[2]
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st1, st0
- fxch
- faddp st3, st0 ; add to autoc[1]
- fmul st0, st0
- faddp st1, st0 ; add to autoc[0]
- fstp dword [edi]
- fstp dword [edi + 4]
- fstp dword [edi + 8]
- fstp dword [edi + 12]
- jmp .end
-
-.lag_above_4:
- cmp edx, 5
- ja near .lag_above_5
-.lag_eq_5:
- fldz ; will accumulate autoc[4]
- fldz ; will accumulate autoc[3]
- fldz ; will accumulate autoc[2]
- fldz ; will accumulate autoc[1]
- fldz ; will accumulate autoc[0]
- sub ecx, byte 4
- ALIGN 16
-.lag_5_loop:
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[1]
- fld dword [esi + 8]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[2]
- fld dword [esi + 12]
- fmul st0, st1
- faddp st5, st0 ; add to autoc[3]
- fld dword [esi + 16]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st5, st0 ; add to autoc[4]
- dec ecx
- jnz .lag_5_loop
- ; clean up the leftovers
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[1]
- fld dword [esi + 8]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[2]
- fld dword [esi + 12]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st4, st0 ; add to autoc[3]
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[1]
- fld dword [esi + 8]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st3, st0 ; add to autoc[2]
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st1, st0
- fxch
- faddp st3, st0 ; add to autoc[1]
- fmul st0, st0
- faddp st1, st0 ; add to autoc[0]
- fstp dword [edi]
- fstp dword [edi + 4]
- fstp dword [edi + 8]
- fstp dword [edi + 12]
- fstp dword [edi + 16]
- jmp .end
-
-.lag_above_5:
- cmp edx, 6
- ja .lag_above_6
-.lag_eq_6:
- fldz ; will accumulate autoc[5]
- fldz ; will accumulate autoc[4]
- fldz ; will accumulate autoc[3]
- fldz ; will accumulate autoc[2]
- fldz ; will accumulate autoc[1]
- fldz ; will accumulate autoc[0]
- sub ecx, byte 5
- ALIGN 16
-.lag_6_loop:
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[1]
- fld dword [esi + 8]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[2]
- fld dword [esi + 12]
- fmul st0, st1
- faddp st5, st0 ; add to autoc[3]
- fld dword [esi + 16]
- fmul st0, st1
- faddp st6, st0 ; add to autoc[4]
- fld dword [esi + 20]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st6, st0 ; add to autoc[5]
- dec ecx
- jnz .lag_6_loop
- ; clean up the leftovers
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[1]
- fld dword [esi + 8]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[2]
- fld dword [esi + 12]
- fmul st0, st1
- faddp st5, st0 ; add to autoc[3]
- fld dword [esi + 16]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st5, st0 ; add to autoc[4]
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[1]
- fld dword [esi + 8]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[2]
- fld dword [esi + 12]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st4, st0 ; add to autoc[3]
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[1]
- fld dword [esi + 8]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st3, st0 ; add to autoc[2]
- fld dword [esi]
- fld st0
- fmul st0, st0
- faddp st2, st0 ; add to autoc[0]
- fld dword [esi + 4]
- fmul st1, st0
- fxch
- faddp st3, st0 ; add to autoc[1]
- fmul st0, st0
- faddp st1, st0 ; add to autoc[0]
- fstp dword [edi]
- fstp dword [edi + 4]
- fstp dword [edi + 8]
- fstp dword [edi + 12]
- fstp dword [edi + 16]
- fstp dword [edi + 20]
- jmp .end
-
-.lag_above_6:
- ; for(coeff = 0; coeff < lag; coeff++)
- ; autoc[coeff] = 0.0;
- lea ecx, [edx * 2] ; ecx = # of dwords of 0 to write
- xor eax, eax
- rep stosd
- mov ecx, [ebp + 4] ; ecx == data_len
- mov edi, [ebp + 12] ; edi == autoc
- ; const unsigned limit = data_len - lag;
- sub ecx, edx
- inc ecx ; we are looping <= limit so we add one to the counter
- ; for(sample = 0; sample <= limit; sample++){
- ; d = data[sample];
- ; for(coeff = 0; coeff < lag; coeff++)
- ; autoc[coeff] += d * data[sample+coeff];
- ; }
- xor eax, eax ; eax == sample <- 0
- ALIGN 16
-.outer_loop:
- push eax ; save sample
- fld dword [esi + eax * 4] ; ST = d <- data[sample]
- mov ebx, eax ; ebx == sample+coeff <- sample
- mov edx, [ebp + 8] ; edx <- lag
- xor eax, eax ; eax == coeff <- 0
- ALIGN 16
-.inner_loop:
- fld st0 ; ST = d d
- fmul dword [esi + ebx * 4] ; ST = d*data[sample+coeff] d
- fadd dword [edi + eax * 4] ; ST = autoc[coeff]+d*data[sample+coeff] d
- fstp dword [edi + eax * 4] ; autoc[coeff]+=d*data[sample+coeff] ST = d
- inc ebx ; (sample+coeff)++
- inc eax ; coeff++
- dec edx
- jnz .inner_loop
- pop eax ; restore sample
- fstp st0 ; pop d, ST = empty
- inc eax ; sample++
- loop .outer_loop
- ; for(; sample < data_len; sample++){
- ; d = data[sample];
- ; for(coeff = 0; coeff < data_len - sample; coeff++)
- ; autoc[coeff] += d * data[sample+coeff];
- ; }
- mov ecx, [ebp + 8] ; ecx <- lag
- dec ecx ; ecx <- lag - 1
- jz .outer_end ; skip loop if 0
-.outer_loop2:
- push eax ; save sample
- fld dword [esi + eax * 4] ; ST = d <- data[sample]
- mov ebx, eax ; ebx == sample+coeff <- sample
- mov edx, [ebp + 4] ; edx <- data_len
- sub edx, eax ; edx <- data_len-sample
- xor eax, eax ; eax == coeff <- 0
-.inner_loop2:
- fld st0 ; ST = d d
- fmul dword [esi + ebx * 4] ; ST = d*data[sample+coeff] d
- fadd dword [edi + eax * 4] ; ST = autoc[coeff]+d*data[sample+coeff] d
- fstp dword [edi + eax * 4] ; autoc[coeff]+=d*data[sample+coeff] ST = d
- inc ebx ; (sample+coeff)++
- inc eax ; coeff++
- dec edx
- jnz .inner_loop2
- pop eax ; restore sample
- fstp st0 ; pop d, ST = empty
- inc eax ; sample++
- loop .outer_loop2
-.outer_end:
- jmp .end
-
-.lag_eq_6_plus_1:
- mov ecx, [ebp + 4] ; ecx == data_len
- mov esi, [ebp] ; esi == data
- mov edi, [ebp + 12] ; edi == autoc
- fldz ; will accumulate autoc[6]
- sub ecx, byte 6
- ALIGN 16
-.lag_6_1_loop:
- fld dword [esi]
- fld dword [esi + 24]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st1, st0 ; add to autoc[6]
- dec ecx
- jnz .lag_6_1_loop
- fstp dword [edi + 24]
- jmp .end
-
-.lag_eq_6_plus_2:
- mov ecx, [ebp + 4] ; ecx == data_len
- mov esi, [ebp] ; esi == data
- mov edi, [ebp + 12] ; edi == autoc
- fldz ; will accumulate autoc[7]
- fldz ; will accumulate autoc[6]
- sub ecx, byte 7
- ALIGN 16
-.lag_6_2_loop:
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st2, st0 ; add to autoc[7]
- dec ecx
- jnz .lag_6_2_loop
- ; clean up the leftovers
- fld dword [esi]
- fld dword [esi + 24]
- fmulp st1, st0
- faddp st1, st0 ; add to autoc[6]
- fstp dword [edi + 24]
- fstp dword [edi + 28]
- jmp .end
-
-.lag_eq_6_plus_3:
- mov ecx, [ebp + 4] ; ecx == data_len
- mov esi, [ebp] ; esi == data
- mov edi, [ebp + 12] ; edi == autoc
- fldz ; will accumulate autoc[8]
- fldz ; will accumulate autoc[7]
- fldz ; will accumulate autoc[6]
- sub ecx, byte 8
- ALIGN 16
-.lag_6_3_loop:
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[7]
- fld dword [esi + 32]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st3, st0 ; add to autoc[8]
- dec ecx
- jnz .lag_6_3_loop
- ; clean up the leftovers
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st2, st0 ; add to autoc[7]
- fld dword [esi]
- fld dword [esi + 24]
- fmulp st1, st0
- faddp st1, st0 ; add to autoc[6]
- fstp dword [edi + 24]
- fstp dword [edi + 28]
- fstp dword [edi + 32]
- jmp .end
-
-.lag_eq_6_plus_4:
- mov ecx, [ebp + 4] ; ecx == data_len
- mov esi, [ebp] ; esi == data
- mov edi, [ebp + 12] ; edi == autoc
- fldz ; will accumulate autoc[9]
- fldz ; will accumulate autoc[8]
- fldz ; will accumulate autoc[7]
- fldz ; will accumulate autoc[6]
- sub ecx, byte 9
- ALIGN 16
-.lag_6_4_loop:
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[7]
- fld dword [esi + 32]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[8]
- fld dword [esi + 36]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st4, st0 ; add to autoc[9]
- dec ecx
- jnz .lag_6_4_loop
- ; clean up the leftovers
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[7]
- fld dword [esi + 32]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st3, st0 ; add to autoc[8]
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st2, st0 ; add to autoc[7]
- fld dword [esi]
- fld dword [esi + 24]
- fmulp st1, st0
- faddp st1, st0 ; add to autoc[6]
- fstp dword [edi + 24]
- fstp dword [edi + 28]
- fstp dword [edi + 32]
- fstp dword [edi + 36]
- jmp .end
-
-.lag_eq_6_plus_5:
- mov ecx, [ebp + 4] ; ecx == data_len
- mov esi, [ebp] ; esi == data
- mov edi, [ebp + 12] ; edi == autoc
- fldz ; will accumulate autoc[10]
- fldz ; will accumulate autoc[9]
- fldz ; will accumulate autoc[8]
- fldz ; will accumulate autoc[7]
- fldz ; will accumulate autoc[6]
- sub ecx, byte 10
- ALIGN 16
-.lag_6_5_loop:
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[7]
- fld dword [esi + 32]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[8]
- fld dword [esi + 36]
- fmul st0, st1
- faddp st5, st0 ; add to autoc[9]
- fld dword [esi + 40]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st5, st0 ; add to autoc[10]
- dec ecx
- jnz .lag_6_5_loop
- ; clean up the leftovers
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[7]
- fld dword [esi + 32]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[8]
- fld dword [esi + 36]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st4, st0 ; add to autoc[9]
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[7]
- fld dword [esi + 32]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st3, st0 ; add to autoc[8]
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st2, st0 ; add to autoc[7]
- fld dword [esi]
- fld dword [esi + 24]
- fmulp st1, st0
- faddp st1, st0 ; add to autoc[6]
- fstp dword [edi + 24]
- fstp dword [edi + 28]
- fstp dword [edi + 32]
- fstp dword [edi + 36]
- fstp dword [edi + 40]
- jmp .end
-
-.lag_eq_6_plus_6:
- mov ecx, [ebp + 4] ; ecx == data_len
- mov esi, [ebp] ; esi == data
- mov edi, [ebp + 12] ; edi == autoc
- fldz ; will accumulate autoc[11]
- fldz ; will accumulate autoc[10]
- fldz ; will accumulate autoc[9]
- fldz ; will accumulate autoc[8]
- fldz ; will accumulate autoc[7]
- fldz ; will accumulate autoc[6]
- sub ecx, byte 11
- ALIGN 16
-.lag_6_6_loop:
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[7]
- fld dword [esi + 32]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[8]
- fld dword [esi + 36]
- fmul st0, st1
- faddp st5, st0 ; add to autoc[9]
- fld dword [esi + 40]
- fmul st0, st1
- faddp st6, st0 ; add to autoc[10]
- fld dword [esi + 44]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st6, st0 ; add to autoc[11]
- dec ecx
- jnz .lag_6_6_loop
- ; clean up the leftovers
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[7]
- fld dword [esi + 32]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[8]
- fld dword [esi + 36]
- fmul st0, st1
- faddp st5, st0 ; add to autoc[9]
- fld dword [esi + 40]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st5, st0 ; add to autoc[10]
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[7]
- fld dword [esi + 32]
- fmul st0, st1
- faddp st4, st0 ; add to autoc[8]
- fld dword [esi + 36]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st4, st0 ; add to autoc[9]
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmul st0, st1
- faddp st3, st0 ; add to autoc[7]
- fld dword [esi + 32]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st3, st0 ; add to autoc[8]
- fld dword [esi]
- fld dword [esi + 24]
- fmul st0, st1
- faddp st2, st0 ; add to autoc[6]
- fld dword [esi + 28]
- fmulp st1, st0
- add esi, byte 4 ; [CR] sample++
- faddp st2, st0 ; add to autoc[7]
- fld dword [esi]
- fld dword [esi + 24]
- fmulp st1, st0
- faddp st1, st0 ; add to autoc[6]
- fstp dword [edi + 24]
- fstp dword [edi + 28]
- fstp dword [edi + 32]
- fstp dword [edi + 36]
- fstp dword [edi + 40]
- fstp dword [edi + 44]
- jmp .end
-
-.end:
- pop edi
- pop esi
- pop ebx
- pop ebp
- ret
-
-; end
diff --git a/src/libFLAC/ia32/lpc_asm.nasm b/src/libFLAC/ia32/lpc_asm.nasm
index 272fb7bd..b6117605 100644
--- a/src/libFLAC/ia32/lpc_asm.nasm
+++ b/src/libFLAC/ia32/lpc_asm.nasm
@@ -35,11 +35,6 @@
data_section
-cglobal FLAC__lpc_compute_autocorrelation_asm_ia32
-cglobal FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_4_old
-cglobal FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_8_old
-cglobal FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_12_old
-cglobal FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_16_old
cglobal FLAC__lpc_compute_residual_from_qlp_coefficients_asm_ia32
cglobal FLAC__lpc_compute_residual_from_qlp_coefficients_asm_ia32_mmx
cglobal FLAC__lpc_compute_residual_from_qlp_coefficients_wide_asm_ia32
@@ -49,668 +44,6 @@ cglobal FLAC__lpc_restore_signal_wide_asm_ia32
code_section
-; **********************************************************************
-;
-; void FLAC__lpc_compute_autocorrelation_asm(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[])
-; {
-; FLAC__real d;
-; unsigned sample, coeff;
-; const unsigned limit = data_len - lag;
-;
-; FLAC__ASSERT(lag > 0);
-; FLAC__ASSERT(lag <= data_len);
-;
-; for(coeff = 0; coeff < lag; coeff++)
-; autoc[coeff] = 0.0;
-; for(sample = 0; sample <= limit; sample++) {
-; d = data[sample];
-; for(coeff = 0; coeff < lag; coeff++)
-; autoc[coeff] += d * data[sample+coeff];
-; }
-; for(; sample < data_len; sample++) {
-; d = data[sample];
-; for(coeff = 0; coeff < data_len - sample; coeff++)
-; autoc[coeff] += d * data[sample+coeff];
-; }
-; }
-;
- ALIGN 16
-cident FLAC__lpc_compute_autocorrelation_asm_ia32
- ;[esp + 28] == autoc[]
- ;[esp + 24] == lag
- ;[esp + 20] == data_len
- ;[esp + 16] == data[]
-
- ;ASSERT(lag > 0)
- ;ASSERT(lag <= 33)
- ;ASSERT(lag <= data_len)
-
-.begin:
- push esi
- push edi
- push ebx
-
- ; for(coeff = 0; coeff < lag; coeff++)
- ; autoc[coeff] = 0.0;
- mov edi, [esp + 28] ; edi == autoc
- mov ecx, [esp + 24] ; ecx = # of dwords (=lag) of 0 to write
- xor eax, eax
- rep stosd
-
- ; const unsigned limit = data_len - lag;
- mov eax, [esp + 24] ; eax == lag
- mov ecx, [esp + 20]
- sub ecx, eax ; ecx == limit
-
- mov edi, [esp + 28] ; edi == autoc
- mov esi, [esp + 16] ; esi == data
- inc ecx ; we are looping <= limit so we add one to the counter
-
- ; for(sample = 0; sample <= limit; sample++) {
- ; d = data[sample];
- ; for(coeff = 0; coeff < lag; coeff++)
- ; autoc[coeff] += d * data[sample+coeff];
- ; }
- fld dword [esi] ; ST = d <- data[sample]
- ; each iteration is 11 bytes so we need (-eax)*11, so we do (-12*eax + eax)
- lea edx, [eax + eax*2]
- neg edx
- lea edx, [eax + edx*4 + .jumper1_0 - .get_eip1]
- call .mov_eip_to_ebx
-.get_eip1:
- add edx, ebx
- inc edx ; compensate for the shorter opcode on the last iteration
- inc edx ; compensate for the shorter opcode on the last iteration
- inc edx ; compensate for the shorter opcode on the last iteration
- cmp eax, 33
- jne .loop1_start
- sub edx, byte 9 ; compensate for the longer opcodes on the first iteration
-.loop1_start:
- jmp edx
-
-.mov_eip_to_ebx:
- mov ebx, [esp]
- ret
-
- fld st0 ; ST = d d
- fmul dword [esi + (32*4)] ; ST = d*data[sample+32] d WATCHOUT: not a byte displacement here!
- fadd dword [edi + (32*4)] ; ST = autoc[32]+d*data[sample+32] d WATCHOUT: not a byte displacement here!
- fstp dword [edi + (32*4)] ; autoc[32]+=d*data[sample+32] ST = d WATCHOUT: not a byte displacement here!
- fld st0 ; ST = d d
- fmul dword [esi + (31*4)] ; ST = d*data[sample+31] d
- fadd dword [edi + (31*4)] ; ST = autoc[31]+d*data[sample+31] d
- fstp dword [edi + (31*4)] ; autoc[31]+=d*data[sample+31] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (30*4)] ; ST = d*data[sample+30] d
- fadd dword [edi + (30*4)] ; ST = autoc[30]+d*data[sample+30] d
- fstp dword [edi + (30*4)] ; autoc[30]+=d*data[sample+30] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (29*4)] ; ST = d*data[sample+29] d
- fadd dword [edi + (29*4)] ; ST = autoc[29]+d*data[sample+29] d
- fstp dword [edi + (29*4)] ; autoc[29]+=d*data[sample+29] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (28*4)] ; ST = d*data[sample+28] d
- fadd dword [edi + (28*4)] ; ST = autoc[28]+d*data[sample+28] d
- fstp dword [edi + (28*4)] ; autoc[28]+=d*data[sample+28] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (27*4)] ; ST = d*data[sample+27] d
- fadd dword [edi + (27*4)] ; ST = autoc[27]+d*data[sample+27] d
- fstp dword [edi + (27*4)] ; autoc[27]+=d*data[sample+27] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (26*4)] ; ST = d*data[sample+26] d
- fadd dword [edi + (26*4)] ; ST = autoc[26]+d*data[sample+26] d
- fstp dword [edi + (26*4)] ; autoc[26]+=d*data[sample+26] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (25*4)] ; ST = d*data[sample+25] d
- fadd dword [edi + (25*4)] ; ST = autoc[25]+d*data[sample+25] d
- fstp dword [edi + (25*4)] ; autoc[25]+=d*data[sample+25] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (24*4)] ; ST = d*data[sample+24] d
- fadd dword [edi + (24*4)] ; ST = autoc[24]+d*data[sample+24] d
- fstp dword [edi + (24*4)] ; autoc[24]+=d*data[sample+24] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (23*4)] ; ST = d*data[sample+23] d
- fadd dword [edi + (23*4)] ; ST = autoc[23]+d*data[sample+23] d
- fstp dword [edi + (23*4)] ; autoc[23]+=d*data[sample+23] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (22*4)] ; ST = d*data[sample+22] d
- fadd dword [edi + (22*4)] ; ST = autoc[22]+d*data[sample+22] d
- fstp dword [edi + (22*4)] ; autoc[22]+=d*data[sample+22] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (21*4)] ; ST = d*data[sample+21] d
- fadd dword [edi + (21*4)] ; ST = autoc[21]+d*data[sample+21] d
- fstp dword [edi + (21*4)] ; autoc[21]+=d*data[sample+21] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (20*4)] ; ST = d*data[sample+20] d
- fadd dword [edi + (20*4)] ; ST = autoc[20]+d*data[sample+20] d
- fstp dword [edi + (20*4)] ; autoc[20]+=d*data[sample+20] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (19*4)] ; ST = d*data[sample+19] d
- fadd dword [edi + (19*4)] ; ST = autoc[19]+d*data[sample+19] d
- fstp dword [edi + (19*4)] ; autoc[19]+=d*data[sample+19] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (18*4)] ; ST = d*data[sample+18] d
- fadd dword [edi + (18*4)] ; ST = autoc[18]+d*data[sample+18] d
- fstp dword [edi + (18*4)] ; autoc[18]+=d*data[sample+18] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (17*4)] ; ST = d*data[sample+17] d
- fadd dword [edi + (17*4)] ; ST = autoc[17]+d*data[sample+17] d
- fstp dword [edi + (17*4)] ; autoc[17]+=d*data[sample+17] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (16*4)] ; ST = d*data[sample+16] d
- fadd dword [edi + (16*4)] ; ST = autoc[16]+d*data[sample+16] d
- fstp dword [edi + (16*4)] ; autoc[16]+=d*data[sample+16] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (15*4)] ; ST = d*data[sample+15] d
- fadd dword [edi + (15*4)] ; ST = autoc[15]+d*data[sample+15] d
- fstp dword [edi + (15*4)] ; autoc[15]+=d*data[sample+15] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (14*4)] ; ST = d*data[sample+14] d
- fadd dword [edi + (14*4)] ; ST = autoc[14]+d*data[sample+14] d
- fstp dword [edi + (14*4)] ; autoc[14]+=d*data[sample+14] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (13*4)] ; ST = d*data[sample+13] d
- fadd dword [edi + (13*4)] ; ST = autoc[13]+d*data[sample+13] d
- fstp dword [edi + (13*4)] ; autoc[13]+=d*data[sample+13] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (12*4)] ; ST = d*data[sample+12] d
- fadd dword [edi + (12*4)] ; ST = autoc[12]+d*data[sample+12] d
- fstp dword [edi + (12*4)] ; autoc[12]+=d*data[sample+12] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (11*4)] ; ST = d*data[sample+11] d
- fadd dword [edi + (11*4)] ; ST = autoc[11]+d*data[sample+11] d
- fstp dword [edi + (11*4)] ; autoc[11]+=d*data[sample+11] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (10*4)] ; ST = d*data[sample+10] d
- fadd dword [edi + (10*4)] ; ST = autoc[10]+d*data[sample+10] d
- fstp dword [edi + (10*4)] ; autoc[10]+=d*data[sample+10] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 9*4)] ; ST = d*data[sample+9] d
- fadd dword [edi + ( 9*4)] ; ST = autoc[9]+d*data[sample+9] d
- fstp dword [edi + ( 9*4)] ; autoc[9]+=d*data[sample+9] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 8*4)] ; ST = d*data[sample+8] d
- fadd dword [edi + ( 8*4)] ; ST = autoc[8]+d*data[sample+8] d
- fstp dword [edi + ( 8*4)] ; autoc[8]+=d*data[sample+8] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 7*4)] ; ST = d*data[sample+7] d
- fadd dword [edi + ( 7*4)] ; ST = autoc[7]+d*data[sample+7] d
- fstp dword [edi + ( 7*4)] ; autoc[7]+=d*data[sample+7] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 6*4)] ; ST = d*data[sample+6] d
- fadd dword [edi + ( 6*4)] ; ST = autoc[6]+d*data[sample+6] d
- fstp dword [edi + ( 6*4)] ; autoc[6]+=d*data[sample+6] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 5*4)] ; ST = d*data[sample+4] d
- fadd dword [edi + ( 5*4)] ; ST = autoc[4]+d*data[sample+4] d
- fstp dword [edi + ( 5*4)] ; autoc[4]+=d*data[sample+4] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 4*4)] ; ST = d*data[sample+4] d
- fadd dword [edi + ( 4*4)] ; ST = autoc[4]+d*data[sample+4] d
- fstp dword [edi + ( 4*4)] ; autoc[4]+=d*data[sample+4] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 3*4)] ; ST = d*data[sample+3] d
- fadd dword [edi + ( 3*4)] ; ST = autoc[3]+d*data[sample+3] d
- fstp dword [edi + ( 3*4)] ; autoc[3]+=d*data[sample+3] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 2*4)] ; ST = d*data[sample+2] d
- fadd dword [edi + ( 2*4)] ; ST = autoc[2]+d*data[sample+2] d
- fstp dword [edi + ( 2*4)] ; autoc[2]+=d*data[sample+2] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 1*4)] ; ST = d*data[sample+1] d
- fadd dword [edi + ( 1*4)] ; ST = autoc[1]+d*data[sample+1] d
- fstp dword [edi + ( 1*4)] ; autoc[1]+=d*data[sample+1] ST = d
- fld st0 ; ST = d d
- fmul dword [esi] ; ST = d*data[sample] d WATCHOUT: no displacement byte here!
- fadd dword [edi] ; ST = autoc[0]+d*data[sample] d WATCHOUT: no displacement byte here!
- fstp dword [edi] ; autoc[0]+=d*data[sample] ST = d WATCHOUT: no displacement byte here!
-.jumper1_0:
-
- fstp st0 ; pop d, ST = empty
- add esi, byte 4 ; sample++
- dec ecx
- jz .loop1_end
- fld dword [esi] ; ST = d <- data[sample]
- jmp edx
-.loop1_end:
-
- ; for(; sample < data_len; sample++) {
- ; d = data[sample];
- ; for(coeff = 0; coeff < data_len - sample; coeff++)
- ; autoc[coeff] += d * data[sample+coeff];
- ; }
- mov ecx, [esp + 24] ; ecx <- lag
- dec ecx ; ecx <- lag - 1
- jz near .end ; skip loop if 0 (i.e. lag == 1)
-
- fld dword [esi] ; ST = d <- data[sample]
- mov eax, ecx ; eax <- lag - 1 == data_len - sample the first time through
- ; each iteration is 11 bytes so we need (-eax)*11, so we do (-12*eax + eax)
- lea edx, [eax + eax*2]
- neg edx
- lea edx, [eax + edx*4 + .jumper2_0 - .get_eip2]
- call .mov_eip_to_ebx
-.get_eip2:
- add edx, ebx
- inc edx ; compensate for the shorter opcode on the last iteration
- inc edx ; compensate for the shorter opcode on the last iteration
- inc edx ; compensate for the shorter opcode on the last iteration
- jmp edx
-
- fld st0 ; ST = d d
- fmul dword [esi + (31*4)] ; ST = d*data[sample+31] d
- fadd dword [edi + (31*4)] ; ST = autoc[31]+d*data[sample+31] d
- fstp dword [edi + (31*4)] ; autoc[31]+=d*data[sample+31] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (30*4)] ; ST = d*data[sample+30] d
- fadd dword [edi + (30*4)] ; ST = autoc[30]+d*data[sample+30] d
- fstp dword [edi + (30*4)] ; autoc[30]+=d*data[sample+30] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (29*4)] ; ST = d*data[sample+29] d
- fadd dword [edi + (29*4)] ; ST = autoc[29]+d*data[sample+29] d
- fstp dword [edi + (29*4)] ; autoc[29]+=d*data[sample+29] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (28*4)] ; ST = d*data[sample+28] d
- fadd dword [edi + (28*4)] ; ST = autoc[28]+d*data[sample+28] d
- fstp dword [edi + (28*4)] ; autoc[28]+=d*data[sample+28] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (27*4)] ; ST = d*data[sample+27] d
- fadd dword [edi + (27*4)] ; ST = autoc[27]+d*data[sample+27] d
- fstp dword [edi + (27*4)] ; autoc[27]+=d*data[sample+27] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (26*4)] ; ST = d*data[sample+26] d
- fadd dword [edi + (26*4)] ; ST = autoc[26]+d*data[sample+26] d
- fstp dword [edi + (26*4)] ; autoc[26]+=d*data[sample+26] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (25*4)] ; ST = d*data[sample+25] d
- fadd dword [edi + (25*4)] ; ST = autoc[25]+d*data[sample+25] d
- fstp dword [edi + (25*4)] ; autoc[25]+=d*data[sample+25] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (24*4)] ; ST = d*data[sample+24] d
- fadd dword [edi + (24*4)] ; ST = autoc[24]+d*data[sample+24] d
- fstp dword [edi + (24*4)] ; autoc[24]+=d*data[sample+24] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (23*4)] ; ST = d*data[sample+23] d
- fadd dword [edi + (23*4)] ; ST = autoc[23]+d*data[sample+23] d
- fstp dword [edi + (23*4)] ; autoc[23]+=d*data[sample+23] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (22*4)] ; ST = d*data[sample+22] d
- fadd dword [edi + (22*4)] ; ST = autoc[22]+d*data[sample+22] d
- fstp dword [edi + (22*4)] ; autoc[22]+=d*data[sample+22] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (21*4)] ; ST = d*data[sample+21] d
- fadd dword [edi + (21*4)] ; ST = autoc[21]+d*data[sample+21] d
- fstp dword [edi + (21*4)] ; autoc[21]+=d*data[sample+21] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (20*4)] ; ST = d*data[sample+20] d
- fadd dword [edi + (20*4)] ; ST = autoc[20]+d*data[sample+20] d
- fstp dword [edi + (20*4)] ; autoc[20]+=d*data[sample+20] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (19*4)] ; ST = d*data[sample+19] d
- fadd dword [edi + (19*4)] ; ST = autoc[19]+d*data[sample+19] d
- fstp dword [edi + (19*4)] ; autoc[19]+=d*data[sample+19] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (18*4)] ; ST = d*data[sample+18] d
- fadd dword [edi + (18*4)] ; ST = autoc[18]+d*data[sample+18] d
- fstp dword [edi + (18*4)] ; autoc[18]+=d*data[sample+18] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (17*4)] ; ST = d*data[sample+17] d
- fadd dword [edi + (17*4)] ; ST = autoc[17]+d*data[sample+17] d
- fstp dword [edi + (17*4)] ; autoc[17]+=d*data[sample+17] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (16*4)] ; ST = d*data[sample+16] d
- fadd dword [edi + (16*4)] ; ST = autoc[16]+d*data[sample+16] d
- fstp dword [edi + (16*4)] ; autoc[16]+=d*data[sample+16] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (15*4)] ; ST = d*data[sample+15] d
- fadd dword [edi + (15*4)] ; ST = autoc[15]+d*data[sample+15] d
- fstp dword [edi + (15*4)] ; autoc[15]+=d*data[sample+15] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (14*4)] ; ST = d*data[sample+14] d
- fadd dword [edi + (14*4)] ; ST = autoc[14]+d*data[sample+14] d
- fstp dword [edi + (14*4)] ; autoc[14]+=d*data[sample+14] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (13*4)] ; ST = d*data[sample+13] d
- fadd dword [edi + (13*4)] ; ST = autoc[13]+d*data[sample+13] d
- fstp dword [edi + (13*4)] ; autoc[13]+=d*data[sample+13] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (12*4)] ; ST = d*data[sample+12] d
- fadd dword [edi + (12*4)] ; ST = autoc[12]+d*data[sample+12] d
- fstp dword [edi + (12*4)] ; autoc[12]+=d*data[sample+12] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (11*4)] ; ST = d*data[sample+11] d
- fadd dword [edi + (11*4)] ; ST = autoc[11]+d*data[sample+11] d
- fstp dword [edi + (11*4)] ; autoc[11]+=d*data[sample+11] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + (10*4)] ; ST = d*data[sample+10] d
- fadd dword [edi + (10*4)] ; ST = autoc[10]+d*data[sample+10] d
- fstp dword [edi + (10*4)] ; autoc[10]+=d*data[sample+10] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 9*4)] ; ST = d*data[sample+9] d
- fadd dword [edi + ( 9*4)] ; ST = autoc[9]+d*data[sample+9] d
- fstp dword [edi + ( 9*4)] ; autoc[9]+=d*data[sample+9] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 8*4)] ; ST = d*data[sample+8] d
- fadd dword [edi + ( 8*4)] ; ST = autoc[8]+d*data[sample+8] d
- fstp dword [edi + ( 8*4)] ; autoc[8]+=d*data[sample+8] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 7*4)] ; ST = d*data[sample+7] d
- fadd dword [edi + ( 7*4)] ; ST = autoc[7]+d*data[sample+7] d
- fstp dword [edi + ( 7*4)] ; autoc[7]+=d*data[sample+7] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 6*4)] ; ST = d*data[sample+6] d
- fadd dword [edi + ( 6*4)] ; ST = autoc[6]+d*data[sample+6] d
- fstp dword [edi + ( 6*4)] ; autoc[6]+=d*data[sample+6] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 5*4)] ; ST = d*data[sample+4] d
- fadd dword [edi + ( 5*4)] ; ST = autoc[4]+d*data[sample+4] d
- fstp dword [edi + ( 5*4)] ; autoc[4]+=d*data[sample+4] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 4*4)] ; ST = d*data[sample+4] d
- fadd dword [edi + ( 4*4)] ; ST = autoc[4]+d*data[sample+4] d
- fstp dword [edi + ( 4*4)] ; autoc[4]+=d*data[sample+4] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 3*4)] ; ST = d*data[sample+3] d
- fadd dword [edi + ( 3*4)] ; ST = autoc[3]+d*data[sample+3] d
- fstp dword [edi + ( 3*4)] ; autoc[3]+=d*data[sample+3] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 2*4)] ; ST = d*data[sample+2] d
- fadd dword [edi + ( 2*4)] ; ST = autoc[2]+d*data[sample+2] d
- fstp dword [edi + ( 2*4)] ; autoc[2]+=d*data[sample+2] ST = d
- fld st0 ; ST = d d
- fmul dword [esi + ( 1*4)] ; ST = d*data[sample+1] d
- fadd dword [edi + ( 1*4)] ; ST = autoc[1]+d*data[sample+1] d
- fstp dword [edi + ( 1*4)] ; autoc[1]+=d*data[sample+1] ST = d
- fld st0 ; ST = d d
- fmul dword [esi] ; ST = d*data[sample] d WATCHOUT: no displacement byte here!
- fadd dword [edi] ; ST = autoc[0]+d*data[sample] d WATCHOUT: no displacement byte here!
- fstp dword [edi] ; autoc[0]+=d*data[sample] ST = d WATCHOUT: no displacement byte here!
-.jumper2_0:
-
- fstp st0 ; pop d, ST = empty
- add esi, byte 4 ; sample++
- dec ecx
- jz .loop2_end
- add edx, byte 11 ; adjust our inner loop counter by adjusting the jump target
- fld dword [esi] ; ST = d <- data[sample]
- jmp edx
-.loop2_end:
-
-.end:
- pop ebx
- pop edi
- pop esi
- ret
-
- ALIGN 16
-cident FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_4_old
- ;[esp + 16] == autoc[]
- ;[esp + 12] == lag
- ;[esp + 8] == data_len
- ;[esp + 4] == data[]
-
- ;ASSERT(lag > 0)
- ;ASSERT(lag <= 4)
- ;ASSERT(lag <= data_len)
-
- ; for(coeff = 0; coeff < lag; coeff++)
- ; autoc[coeff] = 0.0;
- xorps xmm5, xmm5
-
- mov edx, [esp + 8] ; edx == data_len
- mov eax, [esp + 4] ; eax == &data[sample] <- &data[0]
-
- movss xmm0, [eax] ; xmm0 = 0,0,0,data[0]
- add eax, 4
- movaps xmm2, xmm0 ; xmm2 = 0,0,0,data[0]
- shufps xmm0, xmm0, 0 ; xmm0 == data[sample],data[sample],data[sample],data[sample] = data[0],data[0],data[0],data[0]
-.warmup: ; xmm2 == data[sample-3],data[sample-2],data[sample-1],data[sample]
- mulps xmm0, xmm2 ; xmm0 = xmm0 * xmm2
- addps xmm5, xmm0 ; xmm5 += xmm0 * xmm2
- dec edx
- jz .loop_end
- ALIGN 16
-.loop_start:
- ; start by reading the next sample
- movss xmm0, [eax] ; xmm0 = 0,0,0,data[sample]
- add eax, 4
- shufps xmm0, xmm0, 0 ; xmm0 = data[sample],data[sample],data[sample],data[sample]
- shufps xmm2, xmm2, 93h ; 93h=2-1-0-3 => xmm2 gets rotated left by one float
- movss xmm2, xmm0
- mulps xmm0, xmm2 ; xmm0 = xmm0 * xmm2
- addps xmm5, xmm0 ; xmm5 += xmm0 * xmm2
- dec edx
- jnz .loop_start
-.loop_end:
- ; store autoc
- mov edx, [esp + 16] ; edx == autoc
- movups [edx], xmm5
-
-.end:
- ret
-
- ALIGN 16
-cident FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_8_old
- ;[esp + 16] == autoc[]
- ;[esp + 12] == lag
- ;[esp + 8] == data_len
- ;[esp + 4] == data[]
-
- ;ASSERT(lag > 0)
- ;ASSERT(lag <= 8)
- ;ASSERT(lag <= data_len)
-
- ; for(coeff = 0; coeff < lag; coeff++)
- ; autoc[coeff] = 0.0;
- xorps xmm5, xmm5
- xorps xmm6, xmm6
-
- mov edx, [esp + 8] ; edx == data_len
- mov eax, [esp + 4] ; eax == &data[sample] <- &data[0]
-
- movss xmm0, [eax] ; xmm0 = 0,0,0,data[0]
- add eax, 4
- movaps xmm2, xmm0 ; xmm2 = 0,0,0,data[0]
- shufps xmm0, xmm0, 0 ; xmm0 == data[sample],data[sample],data[sample],data[sample] = data[0],data[0],data[0],data[0]
- movaps xmm1, xmm0 ; xmm1 == data[sample],data[sample],data[sample],data[sample] = data[0],data[0],data[0],data[0]
- xorps xmm3, xmm3 ; xmm3 = 0,0,0,0
-.warmup: ; xmm3:xmm2 == data[sample-7],data[sample-6],...,data[sample]
- mulps xmm0, xmm2
- mulps xmm1, xmm3 ; xmm1:xmm0 = xmm1:xmm0 * xmm3:xmm2
- addps xmm5, xmm0
- addps xmm6, xmm1 ; xmm6:xmm5 += xmm1:xmm0 * xmm3:xmm2
- dec edx
- jz .loop_end
- ALIGN 16
-.loop_start:
- ; start by reading the next sample
- movss xmm0, [eax] ; xmm0 = 0,0,0,data[sample]
- ; here we reorder the instructions; see the (#) indexes for a logical order
- shufps xmm2, xmm2, 93h ; (3) 93h=2-1-0-3 => xmm2 gets rotated left by one float
- add eax, 4 ; (0)
- shufps xmm3, xmm3, 93h ; (4) 93h=2-1-0-3 => xmm3 gets rotated left by one float
- shufps xmm0, xmm0, 0 ; (1) xmm0 = data[sample],data[sample],data[sample],data[sample]
- movss xmm3, xmm2 ; (5)
- movaps xmm1, xmm0 ; (2) xmm1 = data[sample],data[sample],data[sample],data[sample]
- movss xmm2, xmm0 ; (6)
- mulps xmm1, xmm3 ; (8)
- mulps xmm0, xmm2 ; (7) xmm1:xmm0 = xmm1:xmm0 * xmm3:xmm2
- addps xmm6, xmm1 ; (10)
- addps xmm5, xmm0 ; (9) xmm6:xmm5 += xmm1:xmm0 * xmm3:xmm2
- dec edx
- jnz .loop_start
-.loop_end:
- ; store autoc
- mov edx, [esp + 16] ; edx == autoc
- movups [edx], xmm5
- movups [edx + 16], xmm6
-
-.end:
- ret
-
- ALIGN 16
-cident FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_12_old
- ;[esp + 16] == autoc[]
- ;[esp + 12] == lag
- ;[esp + 8] == data_len
- ;[esp + 4] == data[]
-
- ;ASSERT(lag > 0)
- ;ASSERT(lag <= 12)
- ;ASSERT(lag <= data_len)
-
- ; for(coeff = 0; coeff < lag; coeff++)
- ; autoc[coeff] = 0.0;
- xorps xmm5, xmm5
- xorps xmm6, xmm6
- xorps xmm7, xmm7
-
- mov edx, [esp + 8] ; edx == data_len
- mov eax, [esp + 4] ; eax == &data[sample] <- &data[0]
-
- movss xmm0, [eax] ; xmm0 = 0,0,0,data[0]
- add eax, 4
- movaps xmm2, xmm0 ; xmm2 = 0,0,0,data[0]
- shufps xmm0, xmm0, 0 ; xmm0 == data[sample],data[sample],data[sample],data[sample] = data[0],data[0],data[0],data[0]
- xorps xmm3, xmm3 ; xmm3 = 0,0,0,0
- xorps xmm4, xmm4 ; xmm4 = 0,0,0,0
-.warmup: ; xmm3:xmm2 == data[sample-7],data[sample-6],...,data[sample]
- movaps xmm1, xmm0
- mulps xmm1, xmm2
- addps xmm5, xmm1
- movaps xmm1, xmm0
- mulps xmm1, xmm3
- addps xmm6, xmm1
- mulps xmm0, xmm4
- addps xmm7, xmm0 ; xmm7:xmm6:xmm5 += xmm0:xmm0:xmm0 * xmm4:xmm3:xmm2
- dec edx
- jz .loop_end
- ALIGN 16
-.loop_start:
- ; start by reading the next sample
- movss xmm0, [eax] ; xmm0 = 0,0,0,data[sample]
- add eax, 4
- shufps xmm0, xmm0, 0 ; xmm0 = data[sample],data[sample],data[sample],data[sample]
-
- ; shift xmm4:xmm3:xmm2 left by one float
- shufps xmm2, xmm2, 93h ; 93h=2-1-0-3 => xmm2 gets rotated left by one float
- shufps xmm3, xmm3, 93h ; 93h=2-1-0-3 => xmm3 gets rotated left by one float
- shufps xmm4, xmm4, 93h ; 93h=2-1-0-3 => xmm4 gets rotated left by one float
- movss xmm4, xmm3
- movss xmm3, xmm2
- movss xmm2, xmm0
-
- ; xmm7:xmm6:xmm5 += xmm0:xmm0:xmm0 * xmm4:xmm3:xmm2
- movaps xmm1, xmm0
- mulps xmm1, xmm2
- addps xmm5, xmm1
- movaps xmm1, xmm0
- mulps xmm1, xmm3
- addps xmm6, xmm1
- mulps xmm0, xmm4
- addps xmm7, xmm0
-
- dec edx
- jnz .loop_start
-.loop_end:
- ; store autoc
- mov edx, [esp + 16] ; edx == autoc
- movups [edx], xmm5
- movups [edx + 16], xmm6
- movups [edx + 32], xmm7
-
-.end:
- ret
-
- ALIGN 16
-cident FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_16_old
- ;[ebp + 20] == autoc[]
- ;[ebp + 16] == lag
- ;[ebp + 12] == data_len
- ;[ebp + 8] == data[]
- ;[esp] == __m128
- ;[esp + 16] == __m128
-
- push ebp
- mov ebp, esp
- and esp, -16 ; stack realign for SSE instructions 'movaps' and 'addps'
- sub esp, 32
-
- ;ASSERT(lag > 0)
- ;ASSERT(lag <= 16)
- ;ASSERT(lag <= data_len)
- ;ASSERT(data_len > 0)
-
- ; for(coeff = 0; coeff < lag; coeff++)
- ; autoc[coeff] = 0.0;
- xorps xmm5, xmm5
- xorps xmm6, xmm6
- movaps [esp], xmm5
- movaps [esp + 16], xmm6
-
- mov edx, [ebp + 12] ; edx == data_len
- mov eax, [ebp + 8] ; eax == &data[sample] <- &data[0]
-
- movss xmm0, [eax] ; xmm0 = 0,0,0,data[0]
- add eax, 4
- movaps xmm1, xmm0 ; xmm1 = 0,0,0,data[0]
- shufps xmm0, xmm0, 0 ; xmm0 == data[sample],data[sample],data[sample],data[sample] = data[0],data[0],data[0],data[0]
- xorps xmm2, xmm2 ; xmm2 = 0,0,0,0
- xorps xmm3, xmm3 ; xmm3 = 0,0,0,0
- xorps xmm4, xmm4 ; xmm4 = 0,0,0,0
- movaps xmm7, xmm0
- mulps xmm7, xmm1
- addps xmm5, xmm7
- dec edx
- jz .loop_end
- ALIGN 16
-.loop_start:
- ; start by reading the next sample
- movss xmm0, [eax] ; xmm0 = 0,0,0,data[sample]
- add eax, 4
- shufps xmm0, xmm0, 0 ; xmm0 = data[sample],data[sample],data[sample],data[sample]
-
- ; shift xmm4:xmm3:xmm2:xmm1 left by one float
- shufps xmm1, xmm1, 93h
- shufps xmm2, xmm2, 93h
- shufps xmm3, xmm3, 93h
- shufps xmm4, xmm4, 93h
- movss xmm4, xmm3
- movss xmm3, xmm2
- movss xmm2, xmm1
- movss xmm1, xmm0
-
- ; xmmB:xmmA:xmm6:xmm5 += xmm0:xmm0:xmm0:xmm0 * xmm4:xmm3:xmm2:xmm1
- movaps xmm7, xmm0
- mulps xmm7, xmm1
- addps xmm5, xmm7
- movaps xmm7, xmm0
- mulps xmm7, xmm2
- addps xmm6, xmm7
- movaps xmm7, xmm0
- mulps xmm7, xmm3
- mulps xmm0, xmm4
- addps xmm7, [esp]
- addps xmm0, [esp + 16]
- movaps [esp], xmm7
- movaps [esp + 16], xmm0
-
- dec edx
- jnz .loop_start
-.loop_end:
- ; store autoc
- mov edx, [ebp + 20] ; edx == autoc
- movups [edx], xmm5
- movups [edx + 16], xmm6
- movaps xmm5, [esp]
- movaps xmm6, [esp + 16]
- movups [edx + 32], xmm5
- movups [edx + 48], xmm6
-.end:
- mov esp, ebp
- pop ebp
- ret
-
;void FLAC__lpc_compute_residual_from_qlp_coefficients(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[])
;
; for(i = 0; i < data_len; i++) {
diff --git a/src/libFLAC/include/private/lpc.h b/src/libFLAC/include/private/lpc.h
index 64dfd1f8..01624149 100644
--- a/src/libFLAC/include/private/lpc.h
+++ b/src/libFLAC/include/private/lpc.h
@@ -68,41 +68,25 @@ void FLAC__lpc_window_data(const FLAC__int32 in[], const FLAC__real window[], FL
* IN 0 < lag <= data_len
* OUT autoc[0,lag-1]
*/
-void FLAC__lpc_compute_autocorrelation(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
+void FLAC__lpc_compute_autocorrelation(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[]);
#ifndef FLAC__NO_ASM
-# ifdef FLAC__CPU_IA32
-# ifdef FLAC__HAS_NASM
-void FLAC__lpc_compute_autocorrelation_asm_ia32(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_4_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_8_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_12_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_16_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-# endif
-# endif
# if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN
-# ifdef FLAC__SSE_SUPPORTED
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_12_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_16_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_new(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_new(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_12_new(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_16_new(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
+# ifdef FLAC__SSE2_SUPPORTED
+void FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_8(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[]);
+void FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_10(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[]);
+void FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_14(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[]);
# endif
# endif
#if defined(FLAC__CPU_PPC64) && defined(FLAC__USE_VSX)
#ifdef FLAC__HAS_TARGET_POWER9
-void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_4(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_8(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_12(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_16(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
+void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_8(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[]);
+void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_12(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[]);
+void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_14(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[]);
#endif
#ifdef FLAC__HAS_TARGET_POWER8
-void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_4(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_8(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_12(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
-void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_16(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
+void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_8(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[]);
+void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_12(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[]);
+void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_14(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[]);
#endif
#endif
#endif
@@ -128,7 +112,7 @@ void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_16(const FLAC__real
* in lp_coeff[8][0,8], the LP coefficients for order 8 will be
* in lp_coeff[7][0,7], etc.
*/
-void FLAC__lpc_compute_lp_coefficients(const FLAC__real autoc[], uint32_t *max_order, FLAC__real lp_coeff[][FLAC__MAX_LPC_ORDER], double error[]);
+void FLAC__lpc_compute_lp_coefficients(const double autoc[], uint32_t *max_order, FLAC__real lp_coeff[][FLAC__MAX_LPC_ORDER], double error[]);
/*
* FLAC__lpc_quantize_coefficients()
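For orientation, a minimal caller sketch against the updated prototypes above: the autocorrelation buffer becomes double while the windowed input and the LP coefficients stay FLAC__real. This is an illustrative fragment only, not code from this commit; the function name, buffer sizes and the order value are assumptions, and the FLAC__lpc_window_data signature is assumed to match the declaration earlier in this header.

/* Hypothetical usage sketch of the double-precision autoc[] interface;
 * names and sizes here are illustrative, not taken from stream_encoder.c. */
#include <stdint.h>
#include "private/lpc.h"   /* declares the prototypes and the FLAC__real type */

void example_lpc_analysis(const FLAC__int32 samples[], const FLAC__real window[],
                          FLAC__real windowed[], uint32_t blocksize)
{
	double autoc[FLAC__MAX_LPC_ORDER + 1];  /* double since this change */
	double error[FLAC__MAX_LPC_ORDER];
	FLAC__real lp_coeff[FLAC__MAX_LPC_ORDER][FLAC__MAX_LPC_ORDER];
	uint32_t max_order = 8;                 /* illustrative order */

	FLAC__lpc_window_data(samples, window, windowed, blocksize);
	FLAC__lpc_compute_autocorrelation(windowed, blocksize, max_order + 1, autoc);
	FLAC__lpc_compute_lp_coefficients(autoc, &max_order, lp_coeff, error);
}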
diff --git a/src/libFLAC/libFLAC_dynamic.vcproj b/src/libFLAC/libFLAC_dynamic.vcproj
index d88765d7..0bd5f084 100644
--- a/src/libFLAC/libFLAC_dynamic.vcproj
+++ b/src/libFLAC/libFLAC_dynamic.vcproj
@@ -334,10 +334,6 @@
>
</File>
<File
- RelativePath=".\lpc_intrin_sse.c"
- >
- </File>
- <File
RelativePath=".\lpc_intrin_sse2.c"
>
</File>
diff --git a/src/libFLAC/libFLAC_dynamic.vcxproj b/src/libFLAC/libFLAC_dynamic.vcxproj
index 056df9c0..97f7b750 100644
--- a/src/libFLAC/libFLAC_dynamic.vcxproj
+++ b/src/libFLAC/libFLAC_dynamic.vcxproj
@@ -234,7 +234,6 @@
<AdditionalOptions Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">/arch:AVX %(AdditionalOptions)</AdditionalOptions>
<AdditionalOptions Condition="'$(Configuration)|$(Platform)'=='Release|x64'">/arch:AVX %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
- <ClCompile Include="lpc_intrin_sse.c" />
<ClCompile Include="lpc_intrin_sse2.c" />
<ClCompile Include="lpc_intrin_sse41.c" />
<ClCompile Include="md5.c" />
diff --git a/src/libFLAC/libFLAC_dynamic.vcxproj.filters b/src/libFLAC/libFLAC_dynamic.vcxproj.filters
index 875c0216..af105da4 100644
--- a/src/libFLAC/libFLAC_dynamic.vcxproj.filters
+++ b/src/libFLAC/libFLAC_dynamic.vcxproj.filters
@@ -169,9 +169,6 @@
<ClCompile Include="lpc.c">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="lpc_intrin_sse.c">
- <Filter>Source Files</Filter>
- </ClCompile>
<ClCompile Include="lpc_intrin_sse2.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -235,4 +232,4 @@
<CustomBuild Include="ia32\fixed_asm.nasm" />
<CustomBuild Include="ia32\lpc_asm.nasm" />
</ItemGroup>
-</Project> \ No newline at end of file
+</Project>
diff --git a/src/libFLAC/libFLAC_static.vcproj b/src/libFLAC/libFLAC_static.vcproj
index 16cdf89a..38e597be 100644
--- a/src/libFLAC/libFLAC_static.vcproj
+++ b/src/libFLAC/libFLAC_static.vcproj
@@ -375,10 +375,6 @@
>
</File>
<File
- RelativePath=".\lpc_intrin_sse.c"
- >
- </File>
- <File
RelativePath=".\lpc_intrin_sse2.c"
>
</File>
diff --git a/src/libFLAC/libFLAC_static.vcxproj b/src/libFLAC/libFLAC_static.vcxproj
index 7b1aadb7..063c3e73 100644
--- a/src/libFLAC/libFLAC_static.vcxproj
+++ b/src/libFLAC/libFLAC_static.vcxproj
@@ -194,7 +194,6 @@
<AdditionalOptions Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">/arch:AVX %(AdditionalOptions)</AdditionalOptions>
<AdditionalOptions Condition="'$(Configuration)|$(Platform)'=='Release|x64'">/arch:AVX %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
- <ClCompile Include="lpc_intrin_sse.c" />
<ClCompile Include="lpc_intrin_sse2.c" />
<ClCompile Include="lpc_intrin_sse41.c" />
<ClCompile Include="md5.c" />
diff --git a/src/libFLAC/libFLAC_static.vcxproj.filters b/src/libFLAC/libFLAC_static.vcxproj.filters
index a600678a..492e2e80 100644
--- a/src/libFLAC/libFLAC_static.vcxproj.filters
+++ b/src/libFLAC/libFLAC_static.vcxproj.filters
@@ -169,9 +169,6 @@
<ClCompile Include="lpc.c">
<Filter>Source Files</Filter>
</ClCompile>
- <ClCompile Include="lpc_intrin_sse.c">
- <Filter>Source Files</Filter>
- </ClCompile>
<ClCompile Include="lpc_intrin_sse2.c">
<Filter>Source Files</Filter>
</ClCompile>
@@ -235,4 +232,4 @@
<CustomBuild Include="ia32\fixed_asm.nasm" />
<CustomBuild Include="ia32\lpc_asm.nasm" />
</ItemGroup>
-</Project> \ No newline at end of file
+</Project>
diff --git a/src/libFLAC/lpc.c b/src/libFLAC/lpc.c
index 9b800363..0c8052bd 100644
--- a/src/libFLAC/lpc.c
+++ b/src/libFLAC/lpc.c
@@ -70,11 +70,11 @@ void FLAC__lpc_window_data(const FLAC__int32 in[], const FLAC__real window[], FL
out[i] = in[i] * window[i];
}
-void FLAC__lpc_compute_autocorrelation(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
+void FLAC__lpc_compute_autocorrelation(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
{
/* a readable, but slower, version */
#if 0
- FLAC__real d;
+ double d;
uint32_t i;
FLAC__ASSERT(lag > 0);
@@ -89,7 +89,7 @@ void FLAC__lpc_compute_autocorrelation(const FLAC__real data[], uint32_t data_le
*/
while(lag--) {
for(i = lag, d = 0.0; i < data_len; i++)
- d += data[i] * data[i - lag];
+ d += data[i] * (double)data[i - lag];
autoc[lag] = d;
}
#endif
@@ -98,7 +98,7 @@ void FLAC__lpc_compute_autocorrelation(const FLAC__real data[], uint32_t data_le
* this version tends to run faster because of better data locality
* ('data_len' is usually much larger than 'lag')
*/
- FLAC__real d;
+ double d;
uint32_t sample, coeff;
const uint32_t limit = data_len - lag;
@@ -119,7 +119,7 @@ void FLAC__lpc_compute_autocorrelation(const FLAC__real data[], uint32_t data_le
}
}
-void FLAC__lpc_compute_lp_coefficients(const FLAC__real autoc[], uint32_t *max_order, FLAC__real lp_coeff[][FLAC__MAX_LPC_ORDER], double error[])
+void FLAC__lpc_compute_lp_coefficients(const double autoc[], uint32_t *max_order, FLAC__real lp_coeff[][FLAC__MAX_LPC_ORDER], double error[])
{
uint32_t i, j;
double r, err, lpc[FLAC__MAX_LPC_ORDER];
diff --git a/src/libFLAC/lpc_intrin_sse.c b/src/libFLAC/lpc_intrin_sse.c
deleted file mode 100644
index 8c7902f9..00000000
--- a/src/libFLAC/lpc_intrin_sse.c
+++ /dev/null
@@ -1,454 +0,0 @@
-/* libFLAC - Free Lossless Audio Codec library
- * Copyright (C) 2000-2009 Josh Coalson
- * Copyright (C) 2011-2016 Xiph.Org Foundation
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * - Neither the name of the Xiph.org Foundation nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifdef HAVE_CONFIG_H
-# include <config.h>
-#endif
-
-#include "private/cpu.h"
-
-#ifndef FLAC__INTEGER_ONLY_LIBRARY
-#ifndef FLAC__NO_ASM
-#if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN
-#include "private/lpc.h"
-#ifdef FLAC__SSE_SUPPORTED
-#include "FLAC/assert.h"
-#include "FLAC/format.h"
-
-#include <xmmintrin.h> /* SSE */
-
-/* new routines: more unaligned loads, less shuffle
- * old routines: less unaligned loads, more shuffle
- * these *_old routines are equivalent to the ASM routines in ia32/lpc_asm.nasm
- */
-
-/* new routines: faster on current Intel (starting from Core i aka Nehalem) and all AMD CPUs */
-
-FLAC__SSE_TARGET("sse")
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_new(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
-{
- int i;
- int limit = data_len - 4;
- __m128 sum0;
-
- (void) lag;
- FLAC__ASSERT(lag <= 4);
- FLAC__ASSERT(lag <= data_len);
-
- sum0 = _mm_setzero_ps();
-
- for(i = 0; i <= limit; i++) {
- __m128 d, d0;
- d0 = _mm_loadu_ps(data+i);
- d = _mm_shuffle_ps(d0, d0, 0);
- sum0 = _mm_add_ps(sum0, _mm_mul_ps(d0, d));
- }
-
- {
- __m128 d0 = _mm_setzero_ps();
- limit++; if(limit < 0) limit = 0;
-
- for(i = data_len-1; i >= limit; i--) {
- __m128 d;
- d = _mm_load_ss(data+i); d = _mm_shuffle_ps(d, d, 0);
- d0 = _mm_shuffle_ps(d0, d0, _MM_SHUFFLE(2,1,0,3));
- d0 = _mm_move_ss(d0, d);
- sum0 = _mm_add_ps(sum0, _mm_mul_ps(d, d0));
- }
- }
-
- _mm_storeu_ps(autoc, sum0);
-}
-
-FLAC__SSE_TARGET("sse")
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_new(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
-{
- int i;
- int limit = data_len - 8;
- __m128 sum0, sum1;
-
- (void) lag;
- FLAC__ASSERT(lag <= 8);
- FLAC__ASSERT(lag <= data_len);
-
- sum0 = _mm_setzero_ps();
- sum1 = _mm_setzero_ps();
-
- for(i = 0; i <= limit; i++) {
- __m128 d, d0, d1;
- d0 = _mm_loadu_ps(data+i);
- d1 = _mm_loadu_ps(data+i+4);
- d = _mm_shuffle_ps(d0, d0, 0);
- sum0 = _mm_add_ps(sum0, _mm_mul_ps(d0, d));
- sum1 = _mm_add_ps(sum1, _mm_mul_ps(d1, d));
- }
-
- {
- __m128 d0 = _mm_setzero_ps();
- __m128 d1 = _mm_setzero_ps();
- limit++; if(limit < 0) limit = 0;
-
- for(i = data_len-1; i >= limit; i--) {
- __m128 d;
- d = _mm_load_ss(data+i); d = _mm_shuffle_ps(d, d, 0);
- d1 = _mm_shuffle_ps(d1, d1, _MM_SHUFFLE(2,1,0,3));
- d0 = _mm_shuffle_ps(d0, d0, _MM_SHUFFLE(2,1,0,3));
- d1 = _mm_move_ss(d1, d0);
- d0 = _mm_move_ss(d0, d);
- sum1 = _mm_add_ps(sum1, _mm_mul_ps(d, d1));
- sum0 = _mm_add_ps(sum0, _mm_mul_ps(d, d0));
- }
- }
-
- _mm_storeu_ps(autoc, sum0);
- _mm_storeu_ps(autoc+4, sum1);
-}
-
-FLAC__SSE_TARGET("sse")
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_12_new(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
-{
- int i;
- int limit = data_len - 12;
- __m128 sum0, sum1, sum2;
-
- (void) lag;
- FLAC__ASSERT(lag <= 12);
- FLAC__ASSERT(lag <= data_len);
-
- sum0 = _mm_setzero_ps();
- sum1 = _mm_setzero_ps();
- sum2 = _mm_setzero_ps();
-
- for(i = 0; i <= limit; i++) {
- __m128 d, d0, d1, d2;
- d0 = _mm_loadu_ps(data+i);
- d1 = _mm_loadu_ps(data+i+4);
- d2 = _mm_loadu_ps(data+i+8);
- d = _mm_shuffle_ps(d0, d0, 0);
- sum0 = _mm_add_ps(sum0, _mm_mul_ps(d0, d));
- sum1 = _mm_add_ps(sum1, _mm_mul_ps(d1, d));
- sum2 = _mm_add_ps(sum2, _mm_mul_ps(d2, d));
- }
-
- {
- __m128 d0 = _mm_setzero_ps();
- __m128 d1 = _mm_setzero_ps();
- __m128 d2 = _mm_setzero_ps();
- limit++; if(limit < 0) limit = 0;
-
- for(i = data_len-1; i >= limit; i--) {
- __m128 d;
- d = _mm_load_ss(data+i); d = _mm_shuffle_ps(d, d, 0);
- d2 = _mm_shuffle_ps(d2, d2, _MM_SHUFFLE(2,1,0,3));
- d1 = _mm_shuffle_ps(d1, d1, _MM_SHUFFLE(2,1,0,3));
- d0 = _mm_shuffle_ps(d0, d0, _MM_SHUFFLE(2,1,0,3));
- d2 = _mm_move_ss(d2, d1);
- d1 = _mm_move_ss(d1, d0);
- d0 = _mm_move_ss(d0, d);
- sum2 = _mm_add_ps(sum2, _mm_mul_ps(d, d2));
- sum1 = _mm_add_ps(sum1, _mm_mul_ps(d, d1));
- sum0 = _mm_add_ps(sum0, _mm_mul_ps(d, d0));
- }
- }
-
- _mm_storeu_ps(autoc, sum0);
- _mm_storeu_ps(autoc+4, sum1);
- _mm_storeu_ps(autoc+8, sum2);
-}
-
-FLAC__SSE_TARGET("sse")
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_16_new(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
-{
- int i;
- int limit = data_len - 16;
- __m128 sum0, sum1, sum2, sum3;
-
- (void) lag;
- FLAC__ASSERT(lag <= 16);
- FLAC__ASSERT(lag <= data_len);
-
- sum0 = _mm_setzero_ps();
- sum1 = _mm_setzero_ps();
- sum2 = _mm_setzero_ps();
- sum3 = _mm_setzero_ps();
-
- for(i = 0; i <= limit; i++) {
- __m128 d, d0, d1, d2, d3;
- d0 = _mm_loadu_ps(data+i);
- d1 = _mm_loadu_ps(data+i+4);
- d2 = _mm_loadu_ps(data+i+8);
- d3 = _mm_loadu_ps(data+i+12);
- d = _mm_shuffle_ps(d0, d0, 0);
- sum0 = _mm_add_ps(sum0, _mm_mul_ps(d0, d));
- sum1 = _mm_add_ps(sum1, _mm_mul_ps(d1, d));
- sum2 = _mm_add_ps(sum2, _mm_mul_ps(d2, d));
- sum3 = _mm_add_ps(sum3, _mm_mul_ps(d3, d));
- }
-
- {
- __m128 d0 = _mm_setzero_ps();
- __m128 d1 = _mm_setzero_ps();
- __m128 d2 = _mm_setzero_ps();
- __m128 d3 = _mm_setzero_ps();
- limit++; if(limit < 0) limit = 0;
-
- for(i = data_len-1; i >= limit; i--) {
- __m128 d;
- d = _mm_load_ss(data+i); d = _mm_shuffle_ps(d, d, 0);
- d3 = _mm_shuffle_ps(d3, d3, _MM_SHUFFLE(2,1,0,3));
- d2 = _mm_shuffle_ps(d2, d2, _MM_SHUFFLE(2,1,0,3));
- d1 = _mm_shuffle_ps(d1, d1, _MM_SHUFFLE(2,1,0,3));
- d0 = _mm_shuffle_ps(d0, d0, _MM_SHUFFLE(2,1,0,3));
- d3 = _mm_move_ss(d3, d2);
- d2 = _mm_move_ss(d2, d1);
- d1 = _mm_move_ss(d1, d0);
- d0 = _mm_move_ss(d0, d);
- sum3 = _mm_add_ps(sum3, _mm_mul_ps(d, d3));
- sum2 = _mm_add_ps(sum2, _mm_mul_ps(d, d2));
- sum1 = _mm_add_ps(sum1, _mm_mul_ps(d, d1));
- sum0 = _mm_add_ps(sum0, _mm_mul_ps(d, d0));
- }
- }
-
- _mm_storeu_ps(autoc, sum0);
- _mm_storeu_ps(autoc+4, sum1);
- _mm_storeu_ps(autoc+8, sum2);
- _mm_storeu_ps(autoc+12,sum3);
-}
-
-/* old routines: faster on older Intel CPUs (up to Core 2) */
-
-FLAC__SSE_TARGET("sse")
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
-{
- __m128 xmm0, xmm2, xmm5;
-
- (void) lag;
- FLAC__ASSERT(lag > 0);
- FLAC__ASSERT(lag <= 4);
- FLAC__ASSERT(lag <= data_len);
- FLAC__ASSERT(data_len > 0);
-
- xmm5 = _mm_setzero_ps();
-
- xmm0 = _mm_load_ss(data++);
- xmm2 = xmm0;
- xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0);
-
- xmm0 = _mm_mul_ps(xmm0, xmm2);
- xmm5 = _mm_add_ps(xmm5, xmm0);
-
- data_len--;
-
- while(data_len)
- {
- xmm0 = _mm_load1_ps(data++);
-
- xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3));
- xmm2 = _mm_move_ss(xmm2, xmm0);
- xmm0 = _mm_mul_ps(xmm0, xmm2);
- xmm5 = _mm_add_ps(xmm5, xmm0);
-
- data_len--;
- }
-
- _mm_storeu_ps(autoc, xmm5);
-}
-
-FLAC__SSE_TARGET("sse")
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
-{
- __m128 xmm0, xmm1, xmm2, xmm3, xmm5, xmm6;
-
- (void) lag;
- FLAC__ASSERT(lag > 0);
- FLAC__ASSERT(lag <= 8);
- FLAC__ASSERT(lag <= data_len);
- FLAC__ASSERT(data_len > 0);
-
- xmm5 = _mm_setzero_ps();
- xmm6 = _mm_setzero_ps();
-
- xmm0 = _mm_load_ss(data++);
- xmm2 = xmm0;
- xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0);
- xmm3 = _mm_setzero_ps();
-
- xmm0 = _mm_mul_ps(xmm0, xmm2);
- xmm5 = _mm_add_ps(xmm5, xmm0);
-
- data_len--;
-
- while(data_len)
- {
- xmm0 = _mm_load1_ps(data++);
-
- xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3));
- xmm3 = _mm_shuffle_ps(xmm3, xmm3, _MM_SHUFFLE(2,1,0,3));
- xmm3 = _mm_move_ss(xmm3, xmm2);
- xmm2 = _mm_move_ss(xmm2, xmm0);
-
- xmm1 = xmm0;
- xmm1 = _mm_mul_ps(xmm1, xmm3);
- xmm0 = _mm_mul_ps(xmm0, xmm2);
- xmm6 = _mm_add_ps(xmm6, xmm1);
- xmm5 = _mm_add_ps(xmm5, xmm0);
-
- data_len--;
- }
-
- _mm_storeu_ps(autoc, xmm5);
- _mm_storeu_ps(autoc+4, xmm6);
-}
-
-FLAC__SSE_TARGET("sse")
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_12_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
-{
- __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
-
- (void) lag;
- FLAC__ASSERT(lag > 0);
- FLAC__ASSERT(lag <= 12);
- FLAC__ASSERT(lag <= data_len);
- FLAC__ASSERT(data_len > 0);
-
- xmm5 = _mm_setzero_ps();
- xmm6 = _mm_setzero_ps();
- xmm7 = _mm_setzero_ps();
-
- xmm0 = _mm_load_ss(data++);
- xmm2 = xmm0;
- xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0);
- xmm3 = _mm_setzero_ps();
- xmm4 = _mm_setzero_ps();
-
- xmm0 = _mm_mul_ps(xmm0, xmm2);
- xmm5 = _mm_add_ps(xmm5, xmm0);
-
- data_len--;
-
- while(data_len)
- {
- xmm0 = _mm_load1_ps(data++);
-
- xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3));
- xmm3 = _mm_shuffle_ps(xmm3, xmm3, _MM_SHUFFLE(2,1,0,3));
- xmm4 = _mm_shuffle_ps(xmm4, xmm4, _MM_SHUFFLE(2,1,0,3));
- xmm4 = _mm_move_ss(xmm4, xmm3);
- xmm3 = _mm_move_ss(xmm3, xmm2);
- xmm2 = _mm_move_ss(xmm2, xmm0);
-
- xmm1 = xmm0;
- xmm1 = _mm_mul_ps(xmm1, xmm2);
- xmm5 = _mm_add_ps(xmm5, xmm1);
- xmm1 = xmm0;
- xmm1 = _mm_mul_ps(xmm1, xmm3);
- xmm6 = _mm_add_ps(xmm6, xmm1);
- xmm0 = _mm_mul_ps(xmm0, xmm4);
- xmm7 = _mm_add_ps(xmm7, xmm0);
-
- data_len--;
- }
-
- _mm_storeu_ps(autoc, xmm5);
- _mm_storeu_ps(autoc+4, xmm6);
- _mm_storeu_ps(autoc+8, xmm7);
-}
-
-FLAC__SSE_TARGET("sse")
-void FLAC__lpc_compute_autocorrelation_intrin_sse_lag_16_old(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
-{
- __m128 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9;
-
- (void) lag;
- FLAC__ASSERT(lag > 0);
- FLAC__ASSERT(lag <= 16);
- FLAC__ASSERT(lag <= data_len);
- FLAC__ASSERT(data_len > 0);
-
- xmm6 = _mm_setzero_ps();
- xmm7 = _mm_setzero_ps();
- xmm8 = _mm_setzero_ps();
- xmm9 = _mm_setzero_ps();
-
- xmm0 = _mm_load_ss(data++);
- xmm2 = xmm0;
- xmm0 = _mm_shuffle_ps(xmm0, xmm0, 0);
- xmm3 = _mm_setzero_ps();
- xmm4 = _mm_setzero_ps();
- xmm5 = _mm_setzero_ps();
-
- xmm0 = _mm_mul_ps(xmm0, xmm2);
- xmm6 = _mm_add_ps(xmm6, xmm0);
-
- data_len--;
-
- while(data_len)
- {
- xmm0 = _mm_load1_ps(data++);
-
- /* shift xmm5:xmm4:xmm3:xmm2 left by one float */
- xmm5 = _mm_shuffle_ps(xmm5, xmm5, _MM_SHUFFLE(2,1,0,3));
- xmm4 = _mm_shuffle_ps(xmm4, xmm4, _MM_SHUFFLE(2,1,0,3));
- xmm3 = _mm_shuffle_ps(xmm3, xmm3, _MM_SHUFFLE(2,1,0,3));
- xmm2 = _mm_shuffle_ps(xmm2, xmm2, _MM_SHUFFLE(2,1,0,3));
- xmm5 = _mm_move_ss(xmm5, xmm4);
- xmm4 = _mm_move_ss(xmm4, xmm3);
- xmm3 = _mm_move_ss(xmm3, xmm2);
- xmm2 = _mm_move_ss(xmm2, xmm0);
-
- /* xmm9|xmm8|xmm7|xmm6 += xmm0|xmm0|xmm0|xmm0 * xmm5|xmm4|xmm3|xmm2 */
- xmm1 = xmm0;
- xmm1 = _mm_mul_ps(xmm1, xmm5);
- xmm9 = _mm_add_ps(xmm9, xmm1);
- xmm1 = xmm0;
- xmm1 = _mm_mul_ps(xmm1, xmm4);
- xmm8 = _mm_add_ps(xmm8, xmm1);
- xmm1 = xmm0;
- xmm1 = _mm_mul_ps(xmm1, xmm3);
- xmm7 = _mm_add_ps(xmm7, xmm1);
- xmm0 = _mm_mul_ps(xmm0, xmm2);
- xmm6 = _mm_add_ps(xmm6, xmm0);
-
- data_len--;
- }
-
- _mm_storeu_ps(autoc, xmm6);
- _mm_storeu_ps(autoc+4, xmm7);
- _mm_storeu_ps(autoc+8, xmm8);
- _mm_storeu_ps(autoc+12,xmm9);
-}
-
-#endif /* FLAC__SSE_SUPPORTED */
-#endif /* (FLAC__CPU_IA32 || FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN */
-#endif /* FLAC__NO_ASM */
-#endif /* FLAC__INTEGER_ONLY_LIBRARY */
diff --git a/src/libFLAC/lpc_intrin_sse2.c b/src/libFLAC/lpc_intrin_sse2.c
index ae00f4bd..2b320161 100644
--- a/src/libFLAC/lpc_intrin_sse2.c
+++ b/src/libFLAC/lpc_intrin_sse2.c
@@ -50,6 +50,172 @@
#define RESIDUAL32_RESULT(xmmN) residual[i] = data[i] - (_mm_cvtsi128_si32(xmmN) >> lp_quantization);
#define DATA32_RESULT(xmmN) data[i] = residual[i] + (_mm_cvtsi128_si32(xmmN) >> lp_quantization);
+
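+/* The three lag_N routines below share one scheme: the autocorrelation is
+ * accumulated in double precision, and each two-wide vector sumN holds the
+ * pair autoc[2N] and autoc[2N+1], i.e. the running sums of
+ * data[i]*data[i+2N] and data[i]*data[i+2N+1]. Samples past the end of the
+ * block contribute zero because the window vectors start out zeroed.
+ */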
+FLAC__SSE_TARGET("sse2")
+void FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_8(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
+{
+ // This function calculates autocorrelation with SSE2
+	// vector functions up to a lag of 8 (or max LPC order of 7)
+ int i;
+ __m128d sum0, sum1, sum2, sum3;
+ __m128d d0, d1, d2, d3;
+
+ (void) lag;
+ FLAC__ASSERT(lag <= 8);
+
+ // Initialize all sum vectors with zero
+ sum0 = _mm_setzero_pd();
+ sum1 = _mm_setzero_pd();
+ sum2 = _mm_setzero_pd();
+ sum3 = _mm_setzero_pd();
+ d0 = _mm_setzero_pd();
+ d1 = _mm_setzero_pd();
+ d2 = _mm_setzero_pd();
+ d3 = _mm_setzero_pd();
+
+	// Loop backwards through the samples, from data_len-1 down to 0
+ for(i = data_len-1; i >= 0; i--) {
+ __m128d d = _mm_set1_pd(data[i]); // both elements of d are set to data[i]
+
+ // The next lines of code work like a queue. The queue
+ // is spread over vectors d0..d3. All items are shifted
+	// one position, the last item (data[i+8]) is dequeued
+ // and a new first item is added (data[i])
+ d3 = _mm_shuffle_pd(d2, d3, _MM_SHUFFLE(0,0,0,1)); // d3 is made of second element of d2 and first element of d3
+ d2 = _mm_shuffle_pd(d1, d2, _MM_SHUFFLE(0,0,0,1)); // d2 is made of second element of d1 and first element of d2
+ d1 = _mm_shuffle_pd(d0, d1, _MM_SHUFFLE(0,0,0,1)); // d1 is made of second element of d0 and first element of d1
+ d0 = _mm_shuffle_pd(d, d0, _MM_SHUFFLE(0,0,0,1)); // d0 is made of second element of d and first element of d0
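+		// After the shift the window holds, in order,
+		// d0 = { data[i],   data[i+1] }, d1 = { data[i+2], data[i+3] },
+		// d2 = { data[i+4], data[i+5] }, d3 = { data[i+6], data[i+7] },
+		// with entries past the end of the block still zero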
+
+ // sumn += d*dn
+ sum0 = _mm_add_pd(sum0, _mm_mul_pd(d, d0));
+ sum1 = _mm_add_pd(sum1, _mm_mul_pd(d, d1));
+ sum2 = _mm_add_pd(sum2, _mm_mul_pd(d, d2));
+ sum3 = _mm_add_pd(sum3, _mm_mul_pd(d, d3));
+ }
+
+	// Store sum0..sum3 in autoc[0..7]
+ _mm_storeu_pd(autoc, sum0);
+ _mm_storeu_pd(autoc+2, sum1);
+ _mm_storeu_pd(autoc+4, sum2);
+	_mm_storeu_pd(autoc+6, sum3);
+}
+
+FLAC__SSE_TARGET("sse2")
+void FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_10(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
+{
+ // This function calculates autocorrelation with SSE2
+ // vector functions up to a lag of 10 (or max LPC order of 9)
+ int i;
+ __m128d sum0, sum1, sum2, sum3, sum4;
+ __m128d d0, d1, d2, d3, d4;
+
+ (void) lag;
+ FLAC__ASSERT(lag <= 10);
+
+ // Initialize all sum vectors with zero
+ sum0 = _mm_setzero_pd();
+ sum1 = _mm_setzero_pd();
+ sum2 = _mm_setzero_pd();
+ sum3 = _mm_setzero_pd();
+ sum4 = _mm_setzero_pd();
+ d0 = _mm_setzero_pd();
+ d1 = _mm_setzero_pd();
+ d2 = _mm_setzero_pd();
+ d3 = _mm_setzero_pd();
+ d4 = _mm_setzero_pd();
+
+	// Loop backwards through the samples, from data_len-1 down to 0
+ for(i = data_len-1; i >= 0; i--) {
+ __m128d d = _mm_set1_pd(data[i]);
+
+ // The next lines of code work like a queue. For more
+ // information see the lag8 version of this function
+ d4 = _mm_shuffle_pd(d3, d4, _MM_SHUFFLE(0,0,0,1));
+ d3 = _mm_shuffle_pd(d2, d3, _MM_SHUFFLE(0,0,0,1));
+ d2 = _mm_shuffle_pd(d1, d2, _MM_SHUFFLE(0,0,0,1));
+ d1 = _mm_shuffle_pd(d0, d1, _MM_SHUFFLE(0,0,0,1));
+ d0 = _mm_shuffle_pd(d, d0, _MM_SHUFFLE(0,0,0,1));
+
+ // sumn += d*dn
+ sum0 = _mm_add_pd(sum0, _mm_mul_pd(d, d0));
+ sum1 = _mm_add_pd(sum1, _mm_mul_pd(d, d1));
+ sum2 = _mm_add_pd(sum2, _mm_mul_pd(d, d2));
+ sum3 = _mm_add_pd(sum3, _mm_mul_pd(d, d3));
+ sum4 = _mm_add_pd(sum4, _mm_mul_pd(d, d4));
+ }
+
+	// Store sum0..sum4 in autoc[0..9]
+ _mm_storeu_pd(autoc, sum0);
+ _mm_storeu_pd(autoc+2, sum1);
+ _mm_storeu_pd(autoc+4, sum2);
+	_mm_storeu_pd(autoc+6, sum3);
+ _mm_storeu_pd(autoc+8, sum4);
+}
+
+FLAC__SSE_TARGET("sse2")
+void FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_14(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
+{
+ // This function calculates autocorrelation with SSE2
+ // vector functions up to a lag of 14 (or max LPC order of 13)
+ int i;
+ __m128d sum0, sum1, sum2, sum3, sum4, sum5, sum6;
+ __m128d d0, d1, d2, d3, d4, d5, d6;
+
+ (void) lag;
+ FLAC__ASSERT(lag <= 14);
+
+ // Initialize all sum vectors with zero
+ sum0 = _mm_setzero_pd();
+ sum1 = _mm_setzero_pd();
+ sum2 = _mm_setzero_pd();
+ sum3 = _mm_setzero_pd();
+ sum4 = _mm_setzero_pd();
+ sum5 = _mm_setzero_pd();
+ sum6 = _mm_setzero_pd();
+ d0 = _mm_setzero_pd();
+ d1 = _mm_setzero_pd();
+ d2 = _mm_setzero_pd();
+ d3 = _mm_setzero_pd();
+ d4 = _mm_setzero_pd();
+ d5 = _mm_setzero_pd();
+ d6 = _mm_setzero_pd();
+
+	// Loop backwards through the samples, from data_len-1 down to 0
+ for(i = data_len-1; i >= 0; i--) {
+ __m128d d = _mm_set1_pd(data[i]);
+
+ // The next lines of code work like a queue. For more
+ // information see the lag8 version of this function
+ d6 = _mm_shuffle_pd(d5, d6, _MM_SHUFFLE(0,0,0,1));
+ d5 = _mm_shuffle_pd(d4, d5, _MM_SHUFFLE(0,0,0,1));
+ d4 = _mm_shuffle_pd(d3, d4, _MM_SHUFFLE(0,0,0,1));
+ d3 = _mm_shuffle_pd(d2, d3, _MM_SHUFFLE(0,0,0,1));
+ d2 = _mm_shuffle_pd(d1, d2, _MM_SHUFFLE(0,0,0,1));
+ d1 = _mm_shuffle_pd(d0, d1, _MM_SHUFFLE(0,0,0,1));
+ d0 = _mm_shuffle_pd(d, d0, _MM_SHUFFLE(0,0,0,1));
+
+ // sumn += d*dn
+ sum0 = _mm_add_pd(sum0, _mm_mul_pd(d, d0));
+ sum1 = _mm_add_pd(sum1, _mm_mul_pd(d, d1));
+ sum2 = _mm_add_pd(sum2, _mm_mul_pd(d, d2));
+ sum3 = _mm_add_pd(sum3, _mm_mul_pd(d, d3));
+ sum4 = _mm_add_pd(sum4, _mm_mul_pd(d, d4));
+ sum5 = _mm_add_pd(sum5, _mm_mul_pd(d, d5));
+ sum6 = _mm_add_pd(sum6, _mm_mul_pd(d, d6));
+	}
+
+	// Store sum0..sum6 in autoc[0..13]
+ _mm_storeu_pd(autoc, sum0);
+ _mm_storeu_pd(autoc+2, sum1);
+ _mm_storeu_pd(autoc+4, sum2);
+	_mm_storeu_pd(autoc+6, sum3);
+ _mm_storeu_pd(autoc+8, sum4);
+ _mm_storeu_pd(autoc+10,sum5);
+ _mm_storeu_pd(autoc+12,sum6);
+}
+
FLAC__SSE_TARGET("sse2")
void FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_sse2(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[])
{
diff --git a/src/libFLAC/lpc_intrin_vsx.c b/src/libFLAC/lpc_intrin_vsx.c
index 48c82182..40dfa35b 100644
--- a/src/libFLAC/lpc_intrin_vsx.c
+++ b/src/libFLAC/lpc_intrin_vsx.c
@@ -47,131 +47,129 @@
#ifdef FLAC__HAS_TARGET_POWER8
__attribute__((target("cpu=power8")))
-void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_16(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
+void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_14(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
{
+ // This function calculates autocorrelation with POWERPC-specific
+ // vector functions up to a lag of 14 (or max LPC order of 13)
long i;
- long limit = (long)data_len - 16;
+ long limit = (long)data_len - 14;
const FLAC__real *base;
- vector float sum0 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum1 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum2 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum3 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum10 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum11 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum12 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum13 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum20 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum21 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum22 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum23 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum30 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum31 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum32 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum33 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float d0, d1, d2, d3, d4;
+ vector double sum0 = { 0.0f, 0.0f};
+ vector double sum1 = { 0.0f, 0.0f};
+ vector double sum2 = { 0.0f, 0.0f};
+ vector double sum3 = { 0.0f, 0.0f};
+ vector double sum4 = { 0.0f, 0.0f};
+ vector double sum5 = { 0.0f, 0.0f};
+ vector double sum6 = { 0.0f, 0.0f};
+ vector double sum10 = { 0.0f, 0.0f};
+ vector double sum11 = { 0.0f, 0.0f};
+ vector double sum12 = { 0.0f, 0.0f};
+ vector double sum13 = { 0.0f, 0.0f};
+ vector double sum14 = { 0.0f, 0.0f};
+ vector double sum15 = { 0.0f, 0.0f};
+ vector double sum16 = { 0.0f, 0.0f};
+ vector float dtemp;
+ vector double d0, d1, d2, d3, d4, d5, d6;
#if WORDS_BIGENDIAN
- vector unsigned int vsel1 = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };
- vector unsigned int vsel2 = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vsel3 = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vperm1 = { 0x04050607, 0x08090A0B, 0x0C0D0E0F, 0x10111213 };
- vector unsigned int vperm2 = { 0x08090A0B, 0x0C0D0E0F, 0x10111213, 0x14151617 };
- vector unsigned int vperm3 = { 0x0C0D0E0F, 0x10111213, 0x14151617, 0x18191A1B };
+ vector unsigned long long vperm = { 0x08090A0B0C0D0E0F, 0x1011121314151617 };
+ vector unsigned long long vsel = { 0x0000000000000000, 0xFFFFFFFFFFFFFFFF };
#else
- vector unsigned int vsel1 = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 };
- vector unsigned int vsel2 = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 };
- vector unsigned int vsel3 = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
- vector unsigned int vperm1 = { 0x07060504, 0x0B0A0908, 0x0F0E0D0C, 0x13121110 };
- vector unsigned int vperm2 = { 0x0B0A0908, 0x0F0E0D0C, 0x13121110, 0x17161514 };
- vector unsigned int vperm3 = { 0x0F0E0D0C, 0x13121110, 0x17161514, 0x1B1A1918 };
+ vector unsigned long long vperm = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };
+ vector unsigned long long vsel = { 0xFFFFFFFFFFFFFFFF, 0x0000000000000000 };
#endif
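+	// vsel is used to splice the first newly loaded sample into the d0 window
+	// vector while keeping the element that still holds data[i+1]; vperm pairs
+	// one element of an odd-offset accumulator with one element of the next so
+	// they line back up with sum0..sum6. The constants differ per endianness
+	// so that both layouts give the same logical result.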
(void) lag;
- FLAC__ASSERT(lag <= 16);
- FLAC__ASSERT(lag <= data_len);
+ FLAC__ASSERT(lag <= 14);
base = data;
- d0 = vec_vsx_ld(0, base);
- d1 = vec_vsx_ld(16, base);
- d2 = vec_vsx_ld(32, base);
- d3 = vec_vsx_ld(48, base);
-
- base += 16;
-
- for (i = 0; i <= (limit-4); i += 4) {
- vector float d, d0_orig = d0;
-
- d4 = vec_vsx_ld(0, base);
- base += 4;
-
- d = vec_splat(d0_orig, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
- sum2 += d2 * d;
- sum3 += d3 * d;
-
- d = vec_splat(d0_orig, 1);
- d0 = vec_sel(d0_orig, d4, vsel1);
- sum10 += d0 * d;
- sum11 += d1 * d;
- sum12 += d2 * d;
- sum13 += d3 * d;
-
- d = vec_splat(d0_orig, 2);
- d0 = vec_sel(d0_orig, d4, vsel2);
- sum20 += d0 * d;
- sum21 += d1 * d;
- sum22 += d2 * d;
- sum23 += d3 * d;
-
- d = vec_splat(d0_orig, 3);
- d0 = vec_sel(d0_orig, d4, vsel3);
- sum30 += d0 * d;
- sum31 += d1 * d;
- sum32 += d2 * d;
- sum33 += d3 * d;
-
- d0 = d1;
- d1 = d2;
- d2 = d3;
- d3 = d4;
- }
-
- sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm1);
- sum1 += vec_perm(sum11, sum12, (vector unsigned char)vperm1);
- sum2 += vec_perm(sum12, sum13, (vector unsigned char)vperm1);
- sum3 += vec_perm(sum13, sum10, (vector unsigned char)vperm1);
-
- sum0 += vec_perm(sum20, sum21, (vector unsigned char)vperm2);
- sum1 += vec_perm(sum21, sum22, (vector unsigned char)vperm2);
- sum2 += vec_perm(sum22, sum23, (vector unsigned char)vperm2);
- sum3 += vec_perm(sum23, sum20, (vector unsigned char)vperm2);
-
- sum0 += vec_perm(sum30, sum31, (vector unsigned char)vperm3);
- sum1 += vec_perm(sum31, sum32, (vector unsigned char)vperm3);
- sum2 += vec_perm(sum32, sum33, (vector unsigned char)vperm3);
- sum3 += vec_perm(sum33, sum30, (vector unsigned char)vperm3);
-
- for (; i <= limit; i++) {
- vector float d;
-
- d0 = vec_vsx_ld(0, data+i);
- d1 = vec_vsx_ld(16, data+i);
- d2 = vec_vsx_ld(32, data+i);
- d3 = vec_vsx_ld(48, data+i);
-
- d = vec_splat(d0, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
- sum2 += d2 * d;
- sum3 += d3 * d;
+ // First, check whether it is possible to load
+ // 16 elements at once
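+	// (limit > 2 means data_len > 16, so the four 16-byte loads below, which
+	// read data[0] through data[15], stay within the buffer)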
+ if(limit > 2){
+ // Convert all floats to doubles
+ dtemp = vec_vsx_ld(0, base);
+ d0 = vec_doubleh(dtemp);
+ d1 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(16, base);
+ d2 = vec_doubleh(dtemp);
+ d3 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(32, base);
+ d4 = vec_doubleh(dtemp);
+ d5 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(48, base);
+ d6 = vec_doubleh(dtemp);
+
+ base += 14;
+
+ // Loop until nearing data_len
+ for (i = 0; i <= (limit-2); i += 2) {
+ vector double d, d7;
+
+ // Load next 2 datapoints and convert to double
+ // data[i+14] and data[i+15]
+ dtemp = vec_vsx_ld(0, base);
+ d7 = vec_doubleh(dtemp);
+ base += 2;
+
+ // Create vector d with both elements set to the first
+			// element of d0, so both elements equal data[i]
+ d = vec_splat(d0, 0);
+ sum0 += d0 * d; // Multiply data[i] with data[i] and data[i+1]
+ sum1 += d1 * d; // Multiply data[i] with data[i+2] and data[i+3]
+ sum2 += d2 * d; // Multiply data[i] with data[i+4] and data[i+5]
+ sum3 += d3 * d; // Multiply data[i] with data[i+6] and data[i+7]
+ sum4 += d4 * d; // Multiply data[i] with data[i+8] and data[i+9]
+ sum5 += d5 * d; // Multiply data[i] with data[i+10] and data[i+11]
+ sum6 += d6 * d; // Multiply data[i] with data[i+12] and data[i+13]
+
+ // Set both elements of d to data[i+1]
+ d = vec_splat(d0, 1);
+
+ // Set d0 to data[i+14] and data[i+1]
+ d0 = vec_sel(d0, d7, vsel);
+ sum10 += d0 * d; // Multiply data[i+1] with data[i+14] and data[i+1]
+ sum11 += d1 * d; // Multiply data[i+1] with data[i+2] and data[i+3]
+ sum12 += d2 * d;
+ sum13 += d3 * d;
+ sum14 += d4 * d;
+ sum15 += d5 * d;
+ sum16 += d6 * d; // Multiply data[i+1] with data[i+12] and data[i+13]
+
+ // Shift all loaded values one vector (2 elements) so the next
+			// iteration aligns again
+ d0 = d1;
+ d1 = d2;
+ d2 = d3;
+ d3 = d4;
+ d4 = d5;
+ d5 = d6;
+ d6 = d7;
+ }
+
+ // Because the values in sum10..sum16 do not align with
+ // the values in sum0..sum6, these need to be 'left-rotated'
+ // before adding them to sum0..sum6
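+		// Each sum1k (k = 1..6) holds the lag 2k-1 product sum in one element
+		// and the lag 2k product sum in the other, while sum10 holds lag 0 and
+		// lag 13. Each vec_perm picks the even-lag element of one accumulator
+		// and the odd-lag element of the next, rebuilding the { lag 2k, lag 2k+1 }
+		// pairs of sum0..sum6 before the addition.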
+ sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm);
+ sum1 += vec_perm(sum11, sum12, (vector unsigned char)vperm);
+ sum2 += vec_perm(sum12, sum13, (vector unsigned char)vperm);
+ sum3 += vec_perm(sum13, sum14, (vector unsigned char)vperm);
+ sum4 += vec_perm(sum14, sum15, (vector unsigned char)vperm);
+ sum5 += vec_perm(sum15, sum16, (vector unsigned char)vperm);
+ sum6 += vec_perm(sum16, sum10, (vector unsigned char)vperm);
+ }else{
+ i = 0;
}
+ // Store result
vec_vsx_st(sum0, 0, autoc);
vec_vsx_st(sum1, 16, autoc);
vec_vsx_st(sum2, 32, autoc);
vec_vsx_st(sum3, 48, autoc);
+ vec_vsx_st(sum4, 64, autoc);
+ vec_vsx_st(sum5, 80, autoc);
+ vec_vsx_st(sum6, 96, autoc);
+ // Process remainder of samples in a non-VSX way
for (; i < (long)data_len; i++) {
uint32_t coeff;
@@ -182,114 +180,101 @@ void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_16(const FLAC__real
}
__attribute__((target("cpu=power8")))
-void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_12(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
+void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_12(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
{
+ // This function calculates autocorrelation with POWERPC-specific
+ // vector functions up to a lag of 12 (or max LPC order of 11)
+ // For explanation, please see the lag_14 version of this function
long i;
long limit = (long)data_len - 12;
const FLAC__real *base;
- vector float sum0 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum1 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum2 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum10 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum11 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum12 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum20 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum21 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum22 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum30 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum31 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum32 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float d0, d1, d2, d3;
+ vector double sum0 = { 0.0f, 0.0f};
+ vector double sum1 = { 0.0f, 0.0f};
+ vector double sum2 = { 0.0f, 0.0f};
+ vector double sum3 = { 0.0f, 0.0f};
+ vector double sum4 = { 0.0f, 0.0f};
+ vector double sum5 = { 0.0f, 0.0f};
+ vector double sum10 = { 0.0f, 0.0f};
+ vector double sum11 = { 0.0f, 0.0f};
+ vector double sum12 = { 0.0f, 0.0f};
+ vector double sum13 = { 0.0f, 0.0f};
+ vector double sum14 = { 0.0f, 0.0f};
+ vector double sum15 = { 0.0f, 0.0f};
+ vector float dtemp;
+ vector double d0, d1, d2, d3, d4, d5;
#if WORDS_BIGENDIAN
- vector unsigned int vsel1 = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };
- vector unsigned int vsel2 = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vsel3 = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vperm1 = { 0x04050607, 0x08090A0B, 0x0C0D0E0F, 0x10111213 };
- vector unsigned int vperm2 = { 0x08090A0B, 0x0C0D0E0F, 0x10111213, 0x14151617 };
- vector unsigned int vperm3 = { 0x0C0D0E0F, 0x10111213, 0x14151617, 0x18191A1B };
+ vector unsigned long long vperm = { 0x08090A0B0C0D0E0F, 0x1011121314151617 };
+ vector unsigned long long vsel = { 0x0000000000000000, 0xFFFFFFFFFFFFFFFF };
#else
- vector unsigned int vsel1 = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 };
- vector unsigned int vsel2 = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 };
- vector unsigned int vsel3 = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
- vector unsigned int vperm1 = { 0x07060504, 0x0B0A0908, 0x0F0E0D0C, 0x13121110 };
- vector unsigned int vperm2 = { 0x0B0A0908, 0x0F0E0D0C, 0x13121110, 0x17161514 };
- vector unsigned int vperm3 = { 0x0F0E0D0C, 0x13121110, 0x17161514, 0x1B1A1918 };
+ vector unsigned long long vperm = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };
+ vector unsigned long long vsel = { 0xFFFFFFFFFFFFFFFF, 0x0000000000000000 };
#endif
(void) lag;
FLAC__ASSERT(lag <= 12);
- FLAC__ASSERT(lag <= data_len);
base = data;
-
- d0 = vec_vsx_ld(0, base);
- d1 = vec_vsx_ld(16, base);
- d2 = vec_vsx_ld(32, base);
-
- base += 12;
-
- for (i = 0; i <= (limit-3); i += 4) {
- vector float d, d0_orig = d0;
-
- d3 = vec_vsx_ld(0, base);
- base += 4;
-
- d = vec_splat(d0_orig, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
- sum2 += d2 * d;
-
- d = vec_splat(d0_orig, 1);
- d0 = vec_sel(d0_orig, d3, vsel1);
- sum10 += d0 * d;
- sum11 += d1 * d;
- sum12 += d2 * d;
-
- d = vec_splat(d0_orig, 2);
- d0 = vec_sel(d0_orig, d3, vsel2);
- sum20 += d0 * d;
- sum21 += d1 * d;
- sum22 += d2 * d;
-
- d = vec_splat(d0_orig, 3);
- d0 = vec_sel(d0_orig, d3, vsel3);
- sum30 += d0 * d;
- sum31 += d1 * d;
- sum32 += d2 * d;
-
- d0 = d1;
- d1 = d2;
- d2 = d3;
- }
-
- sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm1);
- sum1 += vec_perm(sum11, sum12, (vector unsigned char)vperm1);
- sum2 += vec_perm(sum12, sum10, (vector unsigned char)vperm1);
-
- sum0 += vec_perm(sum20, sum21, (vector unsigned char)vperm2);
- sum1 += vec_perm(sum21, sum22, (vector unsigned char)vperm2);
- sum2 += vec_perm(sum22, sum20, (vector unsigned char)vperm2);
-
- sum0 += vec_perm(sum30, sum31, (vector unsigned char)vperm3);
- sum1 += vec_perm(sum31, sum32, (vector unsigned char)vperm3);
- sum2 += vec_perm(sum32, sum30, (vector unsigned char)vperm3);
-
- for (; i <= limit; i++) {
- vector float d;
-
- d0 = vec_vsx_ld(0, data+i);
- d1 = vec_vsx_ld(16, data+i);
- d2 = vec_vsx_ld(32, data+i);
-
- d = vec_splat(d0, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
- sum2 += d2 * d;
+ if(limit > 0){
+ dtemp = vec_vsx_ld(0, base);
+ d0 = vec_doubleh(dtemp);
+ d1 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(16, base);
+ d2 = vec_doubleh(dtemp);
+ d3 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(32, base);
+ d4 = vec_doubleh(dtemp);
+ d5 = vec_doublel(dtemp);
+
+ base += 12;
+
+ for (i = 0; i <= (limit-2); i += 2) {
+ vector double d, d6;
+
+ dtemp = vec_vsx_ld(0, base);
+ d6 = vec_doubleh(dtemp);
+ base += 2;
+
+ d = vec_splat(d0, 0);
+ sum0 += d0 * d;
+ sum1 += d1 * d;
+ sum2 += d2 * d;
+ sum3 += d3 * d;
+ sum4 += d4 * d;
+ sum5 += d5 * d;
+
+ d = vec_splat(d0, 1);
+ d0 = vec_sel(d0, d6, vsel);
+ sum10 += d0 * d;
+ sum11 += d1 * d;
+ sum12 += d2 * d;
+ sum13 += d3 * d;
+ sum14 += d4 * d;
+ sum15 += d5 * d;
+
+ d0 = d1;
+ d1 = d2;
+ d2 = d3;
+ d3 = d4;
+ d4 = d5;
+ d5 = d6;
+ }
+
+ sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm);
+ sum1 += vec_perm(sum11, sum12, (vector unsigned char)vperm);
+ sum2 += vec_perm(sum12, sum13, (vector unsigned char)vperm);
+ sum3 += vec_perm(sum13, sum14, (vector unsigned char)vperm);
+ sum4 += vec_perm(sum14, sum15, (vector unsigned char)vperm);
+ sum5 += vec_perm(sum15, sum10, (vector unsigned char)vperm);
+ }else{
+ i = 0;
}
vec_vsx_st(sum0, 0, autoc);
vec_vsx_st(sum1, 16, autoc);
vec_vsx_st(sum2, 32, autoc);
+ vec_vsx_st(sum3, 48, autoc);
+ vec_vsx_st(sum4, 64, autoc);
+ vec_vsx_st(sum5, 80, autoc);
for (; i < (long)data_len; i++) {
uint32_t coeff;
@@ -301,185 +286,85 @@ void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_12(const FLAC__real
}
__attribute__((target("cpu=power8")))
-void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_8(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
+void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_8(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
{
+ // This function calculates autocorrelation with POWERPC-specific
+ // vector functions up to a lag of 8 (or max LPC order of 7)
+ // For explanation, please see the lag_14 version of this function
long i;
long limit = (long)data_len - 8;
const FLAC__real *base;
- vector float sum0 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum1 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum10 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum11 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum20 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum21 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum30 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum31 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float d0, d1, d2;
+ vector double sum0 = { 0.0f, 0.0f};
+ vector double sum1 = { 0.0f, 0.0f};
+ vector double sum2 = { 0.0f, 0.0f};
+ vector double sum3 = { 0.0f, 0.0f};
+ vector double sum10 = { 0.0f, 0.0f};
+ vector double sum11 = { 0.0f, 0.0f};
+ vector double sum12 = { 0.0f, 0.0f};
+ vector double sum13 = { 0.0f, 0.0f};
+ vector float dtemp;
+ vector double d0, d1, d2, d3;
#if WORDS_BIGENDIAN
- vector unsigned int vsel1 = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };
- vector unsigned int vsel2 = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vsel3 = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vperm1 = { 0x04050607, 0x08090A0B, 0x0C0D0E0F, 0x10111213 };
- vector unsigned int vperm2 = { 0x08090A0B, 0x0C0D0E0F, 0x10111213, 0x14151617 };
- vector unsigned int vperm3 = { 0x0C0D0E0F, 0x10111213, 0x14151617, 0x18191A1B };
+ vector unsigned long long vperm = { 0x08090A0B0C0D0E0F, 0x1011121314151617 };
+ vector unsigned long long vsel = { 0x0000000000000000, 0xFFFFFFFFFFFFFFFF };
#else
- vector unsigned int vsel1 = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 };
- vector unsigned int vsel2 = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 };
- vector unsigned int vsel3 = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
- vector unsigned int vperm1 = { 0x07060504, 0x0B0A0908, 0x0F0E0D0C, 0x13121110 };
- vector unsigned int vperm2 = { 0x0B0A0908, 0x0F0E0D0C, 0x13121110, 0x17161514 };
- vector unsigned int vperm3 = { 0x0F0E0D0C, 0x13121110, 0x17161514, 0x1B1A1918 };
+ vector unsigned long long vperm = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };
+ vector unsigned long long vsel = { 0xFFFFFFFFFFFFFFFF, 0x0000000000000000 };
#endif
(void) lag;
FLAC__ASSERT(lag <= 8);
- FLAC__ASSERT(lag <= data_len);
base = data;
-
- d0 = vec_vsx_ld(0, base);
- d1 = vec_vsx_ld(16, base);
-
- base += 8;
-
- for (i = 0; i <= (limit-2); i += 4) {
- vector float d, d0_orig = d0;
-
- d2 = vec_vsx_ld(0, base);
- base += 4;
-
- d = vec_splat(d0_orig, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
-
- d = vec_splat(d0_orig, 1);
- d0 = vec_sel(d0_orig, d2, vsel1);
- sum10 += d0 * d;
- sum11 += d1 * d;
-
- d = vec_splat(d0_orig, 2);
- d0 = vec_sel(d0_orig, d2, vsel2);
- sum20 += d0 * d;
- sum21 += d1 * d;
-
- d = vec_splat(d0_orig, 3);
- d0 = vec_sel(d0_orig, d2, vsel3);
- sum30 += d0 * d;
- sum31 += d1 * d;
-
- d0 = d1;
- d1 = d2;
- }
-
- sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm1);
- sum1 += vec_perm(sum11, sum10, (vector unsigned char)vperm1);
-
- sum0 += vec_perm(sum20, sum21, (vector unsigned char)vperm2);
- sum1 += vec_perm(sum21, sum20, (vector unsigned char)vperm2);
-
- sum0 += vec_perm(sum30, sum31, (vector unsigned char)vperm3);
- sum1 += vec_perm(sum31, sum30, (vector unsigned char)vperm3);
-
- for (; i <= limit; i++) {
- vector float d;
-
- d0 = vec_vsx_ld(0, data+i);
- d1 = vec_vsx_ld(16, data+i);
-
- d = vec_splat(d0, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
+ if(limit > 0){
+ dtemp = vec_vsx_ld(0, base);
+ d0 = vec_doubleh(dtemp);
+ d1 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(16, base);
+ d2 = vec_doubleh(dtemp);
+ d3 = vec_doublel(dtemp);
+
+ base += 8;
+
+ for (i = 0; i <= (limit-2); i += 2) {
+ vector double d, d4;
+
+ dtemp = vec_vsx_ld(0, base);
+ d4 = vec_doubleh(dtemp);
+ base += 2;
+
+ d = vec_splat(d0, 0);
+ sum0 += d0 * d;
+ sum1 += d1 * d;
+ sum2 += d2 * d;
+ sum3 += d3 * d;
+
+ d = vec_splat(d0, 1);
+ d0 = vec_sel(d0, d4, vsel);
+ sum10 += d0 * d;
+ sum11 += d1 * d;
+ sum12 += d2 * d;
+ sum13 += d3 * d;
+
+ d0 = d1;
+ d1 = d2;
+ d2 = d3;
+ d3 = d4;
+ }
+
+ sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm);
+ sum1 += vec_perm(sum11, sum12, (vector unsigned char)vperm);
+ sum2 += vec_perm(sum12, sum13, (vector unsigned char)vperm);
+ sum3 += vec_perm(sum13, sum10, (vector unsigned char)vperm);
+
+ }else{
+ i = 0;
}
vec_vsx_st(sum0, 0, autoc);
vec_vsx_st(sum1, 16, autoc);
-
- for (; i < (long)data_len; i++) {
- uint32_t coeff;
-
- FLAC__real d = data[i];
- for (coeff = 0; coeff < data_len - i; coeff++)
- autoc[coeff] += d * data[i+coeff];
- }
-}
-
-__attribute__((target("cpu=power8")))
-void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_4(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
-{
- long i;
- long limit = (long)data_len - 4;
- const FLAC__real *base;
- vector float sum0 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum10 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum20 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum30 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float d0, d1;
-#if WORDS_BIGENDIAN
- vector unsigned int vsel1 = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };
- vector unsigned int vsel2 = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vsel3 = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vperm1 = { 0x04050607, 0x08090A0B, 0x0C0D0E0F, 0x10111213 };
- vector unsigned int vperm2 = { 0x08090A0B, 0x0C0D0E0F, 0x10111213, 0x14151617 };
- vector unsigned int vperm3 = { 0x0C0D0E0F, 0x10111213, 0x14151617, 0x18191A1B };
-#else
- vector unsigned int vsel1 = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 };
- vector unsigned int vsel2 = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 };
- vector unsigned int vsel3 = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
- vector unsigned int vperm1 = { 0x07060504, 0x0B0A0908, 0x0F0E0D0C, 0x13121110 };
- vector unsigned int vperm2 = { 0x0B0A0908, 0x0F0E0D0C, 0x13121110, 0x17161514 };
- vector unsigned int vperm3 = { 0x0F0E0D0C, 0x13121110, 0x17161514, 0x1B1A1918 };
-#endif
-
- (void) lag;
- FLAC__ASSERT(lag <= 4);
- FLAC__ASSERT(lag <= data_len);
-
- base = data;
-
- d0 = vec_vsx_ld(0, base);
-
- base += 4;
-
- for (i = 0; i <= (limit-1); i += 4) {
- vector float d, d0_orig = d0;
-
- d1 = vec_vsx_ld(0, base);
- base += 4;
-
- d = vec_splat(d0_orig, 0);
- sum0 += d0 * d;
-
- d = vec_splat(d0_orig, 1);
- d0 = vec_sel(d0_orig, d1, vsel1);
- sum10 += d0 * d;
-
- d = vec_splat(d0_orig, 2);
- d0 = vec_sel(d0_orig, d1, vsel2);
- sum20 += d0 * d;
-
- d = vec_splat(d0_orig, 3);
- d0 = vec_sel(d0_orig, d1, vsel3);
- sum30 += d0 * d;
-
- d0 = d1;
- }
-
- sum0 += vec_perm(sum10, sum10, (vector unsigned char)vperm1);
-
- sum0 += vec_perm(sum20, sum20, (vector unsigned char)vperm2);
-
- sum0 += vec_perm(sum30, sum30, (vector unsigned char)vperm3);
-
- for (; i <= limit; i++) {
- vector float d;
-
- d0 = vec_vsx_ld(0, data+i);
-
- d = vec_splat(d0, 0);
- sum0 += d0 * d;
- }
-
- vec_vsx_st(sum0, 0, autoc);
+ vec_vsx_st(sum2, 32, autoc);
+ vec_vsx_st(sum3, 48, autoc);
for (; i < (long)data_len; i++) {
uint32_t coeff;
@@ -493,130 +378,110 @@ void FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_4(const FLAC__real
#ifdef FLAC__HAS_TARGET_POWER9
__attribute__((target("cpu=power9")))
-void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_16(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
+void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_14(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
{
+ // This function calculates autocorrelation with POWERPC-specific
+ // vector functions up to a lag of 14 (or max LPC order of 13)
+ // For explanation, please see the power8 version of this function
long i;
- long limit = (long)data_len - 16;
+ long limit = (long)data_len - 14;
const FLAC__real *base;
- vector float sum0 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum1 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum2 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum3 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum10 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum11 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum12 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum13 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum20 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum21 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum22 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum23 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum30 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum31 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum32 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum33 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float d0, d1, d2, d3, d4;
+ vector double sum0 = { 0.0f, 0.0f};
+ vector double sum1 = { 0.0f, 0.0f};
+ vector double sum2 = { 0.0f, 0.0f};
+ vector double sum3 = { 0.0f, 0.0f};
+ vector double sum4 = { 0.0f, 0.0f};
+ vector double sum5 = { 0.0f, 0.0f};
+ vector double sum6 = { 0.0f, 0.0f};
+ vector double sum10 = { 0.0f, 0.0f};
+ vector double sum11 = { 0.0f, 0.0f};
+ vector double sum12 = { 0.0f, 0.0f};
+ vector double sum13 = { 0.0f, 0.0f};
+ vector double sum14 = { 0.0f, 0.0f};
+ vector double sum15 = { 0.0f, 0.0f};
+ vector double sum16 = { 0.0f, 0.0f};
+ vector float dtemp;
+ vector double d0, d1, d2, d3, d4, d5, d6;
#if WORDS_BIGENDIAN
- vector unsigned int vsel1 = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };
- vector unsigned int vsel2 = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vsel3 = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vperm1 = { 0x04050607, 0x08090A0B, 0x0C0D0E0F, 0x10111213 };
- vector unsigned int vperm2 = { 0x08090A0B, 0x0C0D0E0F, 0x10111213, 0x14151617 };
- vector unsigned int vperm3 = { 0x0C0D0E0F, 0x10111213, 0x14151617, 0x18191A1B };
+ vector unsigned long long vperm = { 0x08090A0B0C0D0E0F, 0x1011121314151617 };
+ vector unsigned long long vsel = { 0x0000000000000000, 0xFFFFFFFFFFFFFFFF };
#else
- vector unsigned int vsel1 = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 };
- vector unsigned int vsel2 = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 };
- vector unsigned int vsel3 = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
- vector unsigned int vperm1 = { 0x07060504, 0x0B0A0908, 0x0F0E0D0C, 0x13121110 };
- vector unsigned int vperm2 = { 0x0B0A0908, 0x0F0E0D0C, 0x13121110, 0x17161514 };
- vector unsigned int vperm3 = { 0x0F0E0D0C, 0x13121110, 0x17161514, 0x1B1A1918 };
+ vector unsigned long long vperm = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };
+ vector unsigned long long vsel = { 0xFFFFFFFFFFFFFFFF, 0x0000000000000000 };
#endif
(void) lag;
- FLAC__ASSERT(lag <= 16);
- FLAC__ASSERT(lag <= data_len);
+ FLAC__ASSERT(lag <= 14);
base = data;
-
- d0 = vec_vsx_ld(0, base);
- d1 = vec_vsx_ld(16, base);
- d2 = vec_vsx_ld(32, base);
- d3 = vec_vsx_ld(48, base);
-
- base += 16;
-
- for (i = 0; i <= (limit-4); i += 4) {
- vector float d, d0_orig = d0;
-
- d4 = vec_vsx_ld(0, base);
- base += 4;
-
- d = vec_splat(d0_orig, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
- sum2 += d2 * d;
- sum3 += d3 * d;
-
- d = vec_splat(d0_orig, 1);
- d0 = vec_sel(d0_orig, d4, vsel1);
- sum10 += d0 * d;
- sum11 += d1 * d;
- sum12 += d2 * d;
- sum13 += d3 * d;
-
- d = vec_splat(d0_orig, 2);
- d0 = vec_sel(d0_orig, d4, vsel2);
- sum20 += d0 * d;
- sum21 += d1 * d;
- sum22 += d2 * d;
- sum23 += d3 * d;
-
- d = vec_splat(d0_orig, 3);
- d0 = vec_sel(d0_orig, d4, vsel3);
- sum30 += d0 * d;
- sum31 += d1 * d;
- sum32 += d2 * d;
- sum33 += d3 * d;
-
- d0 = d1;
- d1 = d2;
- d2 = d3;
- d3 = d4;
- }
-
- sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm1);
- sum1 += vec_perm(sum11, sum12, (vector unsigned char)vperm1);
- sum2 += vec_perm(sum12, sum13, (vector unsigned char)vperm1);
- sum3 += vec_perm(sum13, sum10, (vector unsigned char)vperm1);
-
- sum0 += vec_perm(sum20, sum21, (vector unsigned char)vperm2);
- sum1 += vec_perm(sum21, sum22, (vector unsigned char)vperm2);
- sum2 += vec_perm(sum22, sum23, (vector unsigned char)vperm2);
- sum3 += vec_perm(sum23, sum20, (vector unsigned char)vperm2);
-
- sum0 += vec_perm(sum30, sum31, (vector unsigned char)vperm3);
- sum1 += vec_perm(sum31, sum32, (vector unsigned char)vperm3);
- sum2 += vec_perm(sum32, sum33, (vector unsigned char)vperm3);
- sum3 += vec_perm(sum33, sum30, (vector unsigned char)vperm3);
-
- for (; i <= limit; i++) {
- vector float d;
-
- d0 = vec_vsx_ld(0, data+i);
- d1 = vec_vsx_ld(16, data+i);
- d2 = vec_vsx_ld(32, data+i);
- d3 = vec_vsx_ld(48, data+i);
-
- d = vec_splat(d0, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
- sum2 += d2 * d;
- sum3 += d3 * d;
+ if(limit > 2){
+ dtemp = vec_vsx_ld(0, base);
+ d0 = vec_doubleh(dtemp);
+ d1 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(16, base);
+ d2 = vec_doubleh(dtemp);
+ d3 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(32, base);
+ d4 = vec_doubleh(dtemp);
+ d5 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(48, base);
+ d6 = vec_doubleh(dtemp);
+
+ base += 14;
+
+ for (i = 0; i <= (limit-2); i += 2) {
+ vector double d, d7;
+
+ dtemp = vec_vsx_ld(0, base);
+ d7 = vec_doubleh(dtemp);
+ base += 2;
+
+ d = vec_splat(d0, 0);
+ sum0 += d0 * d;
+ sum1 += d1 * d;
+ sum2 += d2 * d;
+ sum3 += d3 * d;
+ sum4 += d4 * d;
+ sum5 += d5 * d;
+ sum6 += d6 * d;
+
+ d = vec_splat(d0, 1);
+ d0 = vec_sel(d0, d7, vsel);
+ sum10 += d0 * d;
+ sum11 += d1 * d;
+ sum12 += d2 * d;
+ sum13 += d3 * d;
+ sum14 += d4 * d;
+ sum15 += d5 * d;
+ sum16 += d6 * d;
+
+ d0 = d1;
+ d1 = d2;
+ d2 = d3;
+ d3 = d4;
+ d4 = d5;
+ d5 = d6;
+ d6 = d7;
+ }
+
+ sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm);
+ sum1 += vec_perm(sum11, sum12, (vector unsigned char)vperm);
+ sum2 += vec_perm(sum12, sum13, (vector unsigned char)vperm);
+ sum3 += vec_perm(sum13, sum14, (vector unsigned char)vperm);
+ sum4 += vec_perm(sum14, sum15, (vector unsigned char)vperm);
+ sum5 += vec_perm(sum15, sum16, (vector unsigned char)vperm);
+ sum6 += vec_perm(sum16, sum10, (vector unsigned char)vperm);
+ }else{
+ i = 0;
}
vec_vsx_st(sum0, 0, autoc);
vec_vsx_st(sum1, 16, autoc);
vec_vsx_st(sum2, 32, autoc);
vec_vsx_st(sum3, 48, autoc);
+ vec_vsx_st(sum4, 64, autoc);
+ vec_vsx_st(sum5, 80, autoc);
+ vec_vsx_st(sum6, 96, autoc);
for (; i < (long)data_len; i++) {
uint32_t coeff;
@@ -628,114 +493,101 @@ void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_16(const FLAC__real
}
__attribute__((target("cpu=power9")))
-void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_12(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
+void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_12(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
{
+ // This function calculates autocorrelation with POWERPC-specific
+ // vector functions up to a lag of 12 (or max LPC order of 11)
+ // For explanation, please see the power9, lag_14 version of this function
long i;
long limit = (long)data_len - 12;
const FLAC__real *base;
- vector float sum0 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum1 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum2 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum10 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum11 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum12 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum20 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum21 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum22 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum30 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum31 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum32 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float d0, d1, d2, d3;
+ vector double sum0 = { 0.0f, 0.0f};
+ vector double sum1 = { 0.0f, 0.0f};
+ vector double sum2 = { 0.0f, 0.0f};
+ vector double sum3 = { 0.0f, 0.0f};
+ vector double sum4 = { 0.0f, 0.0f};
+ vector double sum5 = { 0.0f, 0.0f};
+ vector double sum10 = { 0.0f, 0.0f};
+ vector double sum11 = { 0.0f, 0.0f};
+ vector double sum12 = { 0.0f, 0.0f};
+ vector double sum13 = { 0.0f, 0.0f};
+ vector double sum14 = { 0.0f, 0.0f};
+ vector double sum15 = { 0.0f, 0.0f};
+ vector float dtemp;
+ vector double d0, d1, d2, d3, d4, d5;
#if WORDS_BIGENDIAN
- vector unsigned int vsel1 = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };
- vector unsigned int vsel2 = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vsel3 = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vperm1 = { 0x04050607, 0x08090A0B, 0x0C0D0E0F, 0x10111213 };
- vector unsigned int vperm2 = { 0x08090A0B, 0x0C0D0E0F, 0x10111213, 0x14151617 };
- vector unsigned int vperm3 = { 0x0C0D0E0F, 0x10111213, 0x14151617, 0x18191A1B };
+ vector unsigned long long vperm = { 0x08090A0B0C0D0E0F, 0x1011121314151617 };
+ vector unsigned long long vsel = { 0x0000000000000000, 0xFFFFFFFFFFFFFFFF };
#else
- vector unsigned int vsel1 = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 };
- vector unsigned int vsel2 = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 };
- vector unsigned int vsel3 = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
- vector unsigned int vperm1 = { 0x07060504, 0x0B0A0908, 0x0F0E0D0C, 0x13121110 };
- vector unsigned int vperm2 = { 0x0B0A0908, 0x0F0E0D0C, 0x13121110, 0x17161514 };
- vector unsigned int vperm3 = { 0x0F0E0D0C, 0x13121110, 0x17161514, 0x1B1A1918 };
+ vector unsigned long long vperm = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };
+ vector unsigned long long vsel = { 0xFFFFFFFFFFFFFFFF, 0x0000000000000000 };
#endif
(void) lag;
FLAC__ASSERT(lag <= 12);
- FLAC__ASSERT(lag <= data_len);
base = data;
-
- d0 = vec_vsx_ld(0, base);
- d1 = vec_vsx_ld(16, base);
- d2 = vec_vsx_ld(32, base);
-
- base += 12;
-
- for (i = 0; i <= (limit-3); i += 4) {
- vector float d, d0_orig = d0;
-
- d3 = vec_vsx_ld(0, base);
- base += 4;
-
- d = vec_splat(d0_orig, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
- sum2 += d2 * d;
-
- d = vec_splat(d0_orig, 1);
- d0 = vec_sel(d0_orig, d3, vsel1);
- sum10 += d0 * d;
- sum11 += d1 * d;
- sum12 += d2 * d;
-
- d = vec_splat(d0_orig, 2);
- d0 = vec_sel(d0_orig, d3, vsel2);
- sum20 += d0 * d;
- sum21 += d1 * d;
- sum22 += d2 * d;
-
- d = vec_splat(d0_orig, 3);
- d0 = vec_sel(d0_orig, d3, vsel3);
- sum30 += d0 * d;
- sum31 += d1 * d;
- sum32 += d2 * d;
-
- d0 = d1;
- d1 = d2;
- d2 = d3;
- }
-
- sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm1);
- sum1 += vec_perm(sum11, sum12, (vector unsigned char)vperm1);
- sum2 += vec_perm(sum12, sum10, (vector unsigned char)vperm1);
-
- sum0 += vec_perm(sum20, sum21, (vector unsigned char)vperm2);
- sum1 += vec_perm(sum21, sum22, (vector unsigned char)vperm2);
- sum2 += vec_perm(sum22, sum20, (vector unsigned char)vperm2);
-
- sum0 += vec_perm(sum30, sum31, (vector unsigned char)vperm3);
- sum1 += vec_perm(sum31, sum32, (vector unsigned char)vperm3);
- sum2 += vec_perm(sum32, sum30, (vector unsigned char)vperm3);
-
- for (; i <= limit; i++) {
- vector float d;
-
- d0 = vec_vsx_ld(0, data+i);
- d1 = vec_vsx_ld(16, data+i);
- d2 = vec_vsx_ld(32, data+i);
-
- d = vec_splat(d0, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
- sum2 += d2 * d;
+ if(limit > 0){
+ dtemp = vec_vsx_ld(0, base);
+ d0 = vec_doubleh(dtemp);
+ d1 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(16, base);
+ d2 = vec_doubleh(dtemp);
+ d3 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(32, base);
+ d4 = vec_doubleh(dtemp);
+ d5 = vec_doublel(dtemp);
+
+ base += 12;
+
+ for (i = 0; i <= (limit-2); i += 2) {
+ vector double d, d6;
+
+ dtemp = vec_vsx_ld(0, base);
+ d6 = vec_doubleh(dtemp);
+ base += 2;
+
+ d = vec_splat(d0, 0);
+ sum0 += d0 * d;
+ sum1 += d1 * d;
+ sum2 += d2 * d;
+ sum3 += d3 * d;
+ sum4 += d4 * d;
+ sum5 += d5 * d;
+
+ d = vec_splat(d0, 1);
+ d0 = vec_sel(d0, d6, vsel);
+ sum10 += d0 * d;
+ sum11 += d1 * d;
+ sum12 += d2 * d;
+ sum13 += d3 * d;
+ sum14 += d4 * d;
+ sum15 += d5 * d;
+
+ d0 = d1;
+ d1 = d2;
+ d2 = d3;
+ d3 = d4;
+ d4 = d5;
+ d5 = d6;
+ }
+
+ sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm);
+ sum1 += vec_perm(sum11, sum12, (vector unsigned char)vperm);
+ sum2 += vec_perm(sum12, sum13, (vector unsigned char)vperm);
+ sum3 += vec_perm(sum13, sum14, (vector unsigned char)vperm);
+ sum4 += vec_perm(sum14, sum15, (vector unsigned char)vperm);
+ sum5 += vec_perm(sum15, sum10, (vector unsigned char)vperm);
+ }else{
+ i = 0;
}
vec_vsx_st(sum0, 0, autoc);
vec_vsx_st(sum1, 16, autoc);
vec_vsx_st(sum2, 32, autoc);
+ vec_vsx_st(sum3, 48, autoc);
+ vec_vsx_st(sum4, 64, autoc);
+ vec_vsx_st(sum5, 80, autoc);
for (; i < (long)data_len; i++) {
uint32_t coeff;
@@ -747,185 +599,85 @@ void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_12(const FLAC__real
}
__attribute__((target("cpu=power9")))
-void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_8(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
+void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_8(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[])
{
+ // This function calculates autocorrelation with POWERPC-specific
+ // vector functions up to a lag of 8 (or max LPC order of 7)
+ // For explanation, please see the power9, lag_14 version of this function
long i;
long limit = (long)data_len - 8;
const FLAC__real *base;
- vector float sum0 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum1 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum10 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum11 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum20 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum21 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum30 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum31 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float d0, d1, d2;
+ vector double sum0 = { 0.0f, 0.0f};
+ vector double sum1 = { 0.0f, 0.0f};
+ vector double sum2 = { 0.0f, 0.0f};
+ vector double sum3 = { 0.0f, 0.0f};
+ vector double sum10 = { 0.0f, 0.0f};
+ vector double sum11 = { 0.0f, 0.0f};
+ vector double sum12 = { 0.0f, 0.0f};
+ vector double sum13 = { 0.0f, 0.0f};
+ vector float dtemp;
+ vector double d0, d1, d2, d3;
#if WORDS_BIGENDIAN
- vector unsigned int vsel1 = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };
- vector unsigned int vsel2 = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vsel3 = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vperm1 = { 0x04050607, 0x08090A0B, 0x0C0D0E0F, 0x10111213 };
- vector unsigned int vperm2 = { 0x08090A0B, 0x0C0D0E0F, 0x10111213, 0x14151617 };
- vector unsigned int vperm3 = { 0x0C0D0E0F, 0x10111213, 0x14151617, 0x18191A1B };
+ vector unsigned long long vperm = { 0x08090A0B0C0D0E0F, 0x1011121314151617 };
+ vector unsigned long long vsel = { 0x0000000000000000, 0xFFFFFFFFFFFFFFFF };
#else
- vector unsigned int vsel1 = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 };
- vector unsigned int vsel2 = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 };
- vector unsigned int vsel3 = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
- vector unsigned int vperm1 = { 0x07060504, 0x0B0A0908, 0x0F0E0D0C, 0x13121110 };
- vector unsigned int vperm2 = { 0x0B0A0908, 0x0F0E0D0C, 0x13121110, 0x17161514 };
- vector unsigned int vperm3 = { 0x0F0E0D0C, 0x13121110, 0x17161514, 0x1B1A1918 };
+ vector unsigned long long vperm = { 0x0F0E0D0C0B0A0908, 0x1716151413121110 };
+ vector unsigned long long vsel = { 0xFFFFFFFFFFFFFFFF, 0x0000000000000000 };
#endif
(void) lag;
FLAC__ASSERT(lag <= 8);
- FLAC__ASSERT(lag <= data_len);
base = data;
-
- d0 = vec_vsx_ld(0, base);
- d1 = vec_vsx_ld(16, base);
-
- base += 8;
-
- for (i = 0; i <= (limit-2); i += 4) {
- vector float d, d0_orig = d0;
-
- d2 = vec_vsx_ld(0, base);
- base += 4;
-
- d = vec_splat(d0_orig, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
-
- d = vec_splat(d0_orig, 1);
- d0 = vec_sel(d0_orig, d2, vsel1);
- sum10 += d0 * d;
- sum11 += d1 * d;
-
- d = vec_splat(d0_orig, 2);
- d0 = vec_sel(d0_orig, d2, vsel2);
- sum20 += d0 * d;
- sum21 += d1 * d;
-
- d = vec_splat(d0_orig, 3);
- d0 = vec_sel(d0_orig, d2, vsel3);
- sum30 += d0 * d;
- sum31 += d1 * d;
-
- d0 = d1;
- d1 = d2;
- }
-
- sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm1);
- sum1 += vec_perm(sum11, sum10, (vector unsigned char)vperm1);
-
- sum0 += vec_perm(sum20, sum21, (vector unsigned char)vperm2);
- sum1 += vec_perm(sum21, sum20, (vector unsigned char)vperm2);
-
- sum0 += vec_perm(sum30, sum31, (vector unsigned char)vperm3);
- sum1 += vec_perm(sum31, sum30, (vector unsigned char)vperm3);
-
- for (; i <= limit; i++) {
- vector float d;
-
- d0 = vec_vsx_ld(0, data+i);
- d1 = vec_vsx_ld(16, data+i);
-
- d = vec_splat(d0, 0);
- sum0 += d0 * d;
- sum1 += d1 * d;
+ if(limit > 0){
+ dtemp = vec_vsx_ld(0, base);
+ d0 = vec_doubleh(dtemp);
+ d1 = vec_doublel(dtemp);
+ dtemp = vec_vsx_ld(16, base);
+ d2 = vec_doubleh(dtemp);
+ d3 = vec_doublel(dtemp);
+
+ base += 8;
+
+ for (i = 0; i <= (limit-2); i += 2) {
+ vector double d, d4;
+
+ dtemp = vec_vsx_ld(0, base);
+ d4 = vec_doubleh(dtemp);
+ base += 2;
+
+ d = vec_splat(d0, 0);
+ sum0 += d0 * d;
+ sum1 += d1 * d;
+ sum2 += d2 * d;
+ sum3 += d3 * d;
+
+ d = vec_splat(d0, 1);
+ d0 = vec_sel(d0, d4, vsel);
+ sum10 += d0 * d;
+ sum11 += d1 * d;
+ sum12 += d2 * d;
+ sum13 += d3 * d;
+
+ d0 = d1;
+ d1 = d2;
+ d2 = d3;
+ d3 = d4;
+ }
+
+ sum0 += vec_perm(sum10, sum11, (vector unsigned char)vperm);
+ sum1 += vec_perm(sum11, sum12, (vector unsigned char)vperm);
+ sum2 += vec_perm(sum12, sum13, (vector unsigned char)vperm);
+ sum3 += vec_perm(sum13, sum10, (vector unsigned char)vperm);
+
+ }else{
+ i = 0;
}
vec_vsx_st(sum0, 0, autoc);
vec_vsx_st(sum1, 16, autoc);
-
- for (; i < (long)data_len; i++) {
- uint32_t coeff;
-
- FLAC__real d = data[i];
- for (coeff = 0; coeff < data_len - i; coeff++)
- autoc[coeff] += d * data[i+coeff];
- }
-}
-
-__attribute__((target("cpu=power9")))
-void FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_4(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[])
-{
- long i;
- long limit = (long)data_len - 4;
- const FLAC__real *base;
- vector float sum0 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum10 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum20 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float sum30 = { 0.0f, 0.0f, 0.0f, 0.0f};
- vector float d0, d1;
-#if WORDS_BIGENDIAN
- vector unsigned int vsel1 = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };
- vector unsigned int vsel2 = { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vsel3 = { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
- vector unsigned int vperm1 = { 0x04050607, 0x08090A0B, 0x0C0D0E0F, 0x10111213 };
- vector unsigned int vperm2 = { 0x08090A0B, 0x0C0D0E0F, 0x10111213, 0x14151617 };
- vector unsigned int vperm3 = { 0x0C0D0E0F, 0x10111213, 0x14151617, 0x18191A1B };
-#else
- vector unsigned int vsel1 = { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 };
- vector unsigned int vsel2 = { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 };
- vector unsigned int vsel3 = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 };
- vector unsigned int vperm1 = { 0x07060504, 0x0B0A0908, 0x0F0E0D0C, 0x13121110 };
- vector unsigned int vperm2 = { 0x0B0A0908, 0x0F0E0D0C, 0x13121110, 0x17161514 };
- vector unsigned int vperm3 = { 0x0F0E0D0C, 0x13121110, 0x17161514, 0x1B1A1918 };
-#endif
-
- (void) lag;
- FLAC__ASSERT(lag <= 4);
- FLAC__ASSERT(lag <= data_len);
-
- base = data;
-
- d0 = vec_vsx_ld(0, base);
-
- base += 4;
-
- for (i = 0; i <= (limit-1); i += 4) {
- vector float d, d0_orig = d0;
-
- d1 = vec_vsx_ld(0, base);
- base += 4;
-
- d = vec_splat(d0_orig, 0);
- sum0 += d0 * d;
-
- d = vec_splat(d0_orig, 1);
- d0 = vec_sel(d0_orig, d1, vsel1);
- sum10 += d0 * d;
-
- d = vec_splat(d0_orig, 2);
- d0 = vec_sel(d0_orig, d1, vsel2);
- sum20 += d0 * d;
-
- d = vec_splat(d0_orig, 3);
- d0 = vec_sel(d0_orig, d1, vsel3);
- sum30 += d0 * d;
-
- d0 = d1;
- }
-
- sum0 += vec_perm(sum10, sum10, (vector unsigned char)vperm1);
-
- sum0 += vec_perm(sum20, sum20, (vector unsigned char)vperm2);
-
- sum0 += vec_perm(sum30, sum30, (vector unsigned char)vperm3);
-
- for (; i <= limit; i++) {
- vector float d;
-
- d0 = vec_vsx_ld(0, data+i);
-
- d = vec_splat(d0, 0);
- sum0 += d0 * d;
- }
-
- vec_vsx_st(sum0, 0, autoc);
+ vec_vsx_st(sum2, 32, autoc);
+ vec_vsx_st(sum3, 48, autoc);
for (; i < (long)data_len; i++) {
uint32_t coeff;
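
What all of these lag_N routines compute is the plain autocorrelation autoc[k] = sum over i of data[i] * data[i+k]; the VSX bodies above only vectorize that sum, widening each four-float vec_vsx_ld load into two vector doubles via vec_doubleh/vec_doublel and finishing the last few samples with the scalar tail loops kept as context here. A minimal standalone sketch of the same computation, assuming FLAC__real is float; the function name below is illustrative and not a libFLAC symbol:

#include <stdint.h>

/* Illustrative scalar autocorrelation reference; not a libFLAC routine. */
static void autocorrelation_ref(const float data[], uint32_t data_len,
                                uint32_t lag, double autoc[])
{
	uint32_t i, k;

	for (k = 0; k < lag; k++)
		autoc[k] = 0.0;

	for (i = 0; i < data_len; i++) {
		const double d = data[i];
		/* near the end of the block only the shorter lags remain,
		   exactly like the "for (; i < (long)data_len; i++)" tails above */
		const uint32_t kmax = (data_len - i < lag) ? (data_len - i) : lag;
		for (k = 0; k < kmax; k++)
			autoc[k] += d * data[i + k];
	}
}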
diff --git a/src/libFLAC/stream_encoder.c b/src/libFLAC/stream_encoder.c
index 8152e33e..0ac8cd60 100644
--- a/src/libFLAC/stream_encoder.c
+++ b/src/libFLAC/stream_encoder.c
@@ -356,7 +356,7 @@ typedef struct FLAC__StreamEncoderPrivate {
uint32_t (*local_fixed_compute_best_predictor_wide)(const FLAC__int32 data[], uint32_t data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1]);
#endif
#ifndef FLAC__INTEGER_ONLY_LIBRARY
- void (*local_lpc_compute_autocorrelation)(const FLAC__real data[], uint32_t data_len, uint32_t lag, FLAC__real autoc[]);
+ void (*local_lpc_compute_autocorrelation)(const FLAC__real data[], uint32_t data_len, uint32_t lag, double autoc[]);
void (*local_lpc_compute_residual_from_qlp_coefficients)(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[]);
void (*local_lpc_compute_residual_from_qlp_coefficients_64bit)(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[]);
void (*local_lpc_compute_residual_from_qlp_coefficients_16bit)(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[]);
@@ -885,27 +885,23 @@ static FLAC__StreamEncoderInitStatus init_stream_internal_(
#ifdef FLAC__HAS_TARGET_POWER8
#ifdef FLAC__HAS_TARGET_POWER9
if (encoder->private_->cpuinfo.ppc.arch_3_00) {
- if(encoder->protected_->max_lpc_order < 4)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_4;
- else if(encoder->protected_->max_lpc_order < 8)
+ if(encoder->protected_->max_lpc_order < 8)
encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_8;
else if(encoder->protected_->max_lpc_order < 12)
encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_12;
- else if(encoder->protected_->max_lpc_order < 16)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_16;
+ else if(encoder->protected_->max_lpc_order < 14)
+ encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_power9_vsx_lag_14;
else
encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation;
} else
#endif
if (encoder->private_->cpuinfo.ppc.arch_2_07) {
- if(encoder->protected_->max_lpc_order < 4)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_4;
- else if(encoder->protected_->max_lpc_order < 8)
+ if(encoder->protected_->max_lpc_order < 8)
encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_8;
else if(encoder->protected_->max_lpc_order < 12)
encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_12;
- else if(encoder->protected_->max_lpc_order < 16)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_16;
+ else if(encoder->protected_->max_lpc_order < 14)
+ encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_power8_vsx_lag_14;
else
encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation;
}
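
The thresholds in the power8/power9 hunks above follow from the fact that LP analysis at order p needs autocorrelation lags 0..p, i.e. p+1 values, so a lag-N routine covers any max_lpc_order up to N-1 (the SSE2 hunks further down apply the same rule, with a lag-10 variant in place of lag-12). A hedged sketch of that selection rule; the function-pointer type and helper names are illustrative, not libFLAC symbols:

#include <stdint.h>

typedef void (*autoc_fn)(const float *, uint32_t, uint32_t, double *);

/* Illustrative only: pick the narrowest routine whose lag covers the order. */
static autoc_fn pick_autoc(uint32_t max_lpc_order,
                           autoc_fn lag8, autoc_fn lag12, autoc_fn lag14,
                           autoc_fn generic)
{
	if (max_lpc_order < 8)
		return lag8;    /* needs lags 0..7  -> 8 values  */
	if (max_lpc_order < 12)
		return lag12;   /* needs lags 0..11 -> 12 values */
	if (max_lpc_order < 14)
		return lag14;   /* needs lags 0..13 -> 14 values */
	return generic;         /* higher orders keep the generic C routine */
}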
@@ -915,21 +911,6 @@ static FLAC__StreamEncoderInitStatus init_stream_internal_(
# ifdef FLAC__CPU_IA32
FLAC__ASSERT(encoder->private_->cpuinfo.type == FLAC__CPUINFO_TYPE_IA32);
# ifdef FLAC__HAS_NASM
- if (encoder->private_->cpuinfo.x86.sse) {
- if(encoder->protected_->max_lpc_order < 4)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_4_old;
- else if(encoder->protected_->max_lpc_order < 8)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_8_old;
- else if(encoder->protected_->max_lpc_order < 12)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_12_old;
- else if(encoder->protected_->max_lpc_order < 16)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_asm_ia32_sse_lag_16_old;
- else
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_asm_ia32;
- }
- else
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_asm_ia32;
-
encoder->private_->local_lpc_compute_residual_from_qlp_coefficients_64bit = FLAC__lpc_compute_residual_from_qlp_coefficients_wide_asm_ia32; /* OPT_IA32: was really necessary for GCC < 4.9 */
if (encoder->private_->cpuinfo.x86.mmx) {
encoder->private_->local_lpc_compute_residual_from_qlp_coefficients = FLAC__lpc_compute_residual_from_qlp_coefficients_asm_ia32;
@@ -944,37 +925,15 @@ static FLAC__StreamEncoderInitStatus init_stream_internal_(
encoder->private_->local_fixed_compute_best_predictor = FLAC__fixed_compute_best_predictor_asm_ia32_mmx_cmov;
# endif /* FLAC__HAS_NASM */
# if FLAC__HAS_X86INTRIN
-# if defined FLAC__SSE_SUPPORTED
- if (encoder->private_->cpuinfo.x86.sse) {
- if (encoder->private_->cpuinfo.x86.sse42 || !encoder->private_->cpuinfo.x86.intel) { /* use new autocorrelation functions */
- if(encoder->protected_->max_lpc_order < 4)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_new;
- else if(encoder->protected_->max_lpc_order < 8)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_new;
- else if(encoder->protected_->max_lpc_order < 12)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_12_new;
- else if(encoder->protected_->max_lpc_order < 16)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_16_new;
- else
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation;
- }
- else { /* use old autocorrelation functions */
- if(encoder->protected_->max_lpc_order < 4)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_old;
- else if(encoder->protected_->max_lpc_order < 8)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_old;
- else if(encoder->protected_->max_lpc_order < 12)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_12_old;
- else if(encoder->protected_->max_lpc_order < 16)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_16_old;
- else
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation;
- }
- }
-# endif
-
# ifdef FLAC__SSE2_SUPPORTED
if (encoder->private_->cpuinfo.x86.sse2) {
+ if(encoder->protected_->max_lpc_order < 8)
+ encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_8;
+ else if(encoder->protected_->max_lpc_order < 10)
+ encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_10;
+ else if(encoder->protected_->max_lpc_order < 14)
+ encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_14;
+
encoder->private_->local_lpc_compute_residual_from_qlp_coefficients = FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2;
encoder->private_->local_lpc_compute_residual_from_qlp_coefficients_16bit = FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_sse2;
}
@@ -1009,30 +968,14 @@ static FLAC__StreamEncoderInitStatus init_stream_internal_(
# elif defined FLAC__CPU_X86_64
FLAC__ASSERT(encoder->private_->cpuinfo.type == FLAC__CPUINFO_TYPE_X86_64);
# if FLAC__HAS_X86INTRIN
-# ifdef FLAC__SSE_SUPPORTED
- if(encoder->private_->cpuinfo.x86.sse42 || !encoder->private_->cpuinfo.x86.intel) { /* use new autocorrelation functions */
- if(encoder->protected_->max_lpc_order < 4)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_new;
- else if(encoder->protected_->max_lpc_order < 8)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_new;
- else if(encoder->protected_->max_lpc_order < 12)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_12_new;
- else if(encoder->protected_->max_lpc_order < 16)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_16_new;
- }
- else {
- if(encoder->protected_->max_lpc_order < 4)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_4_old;
- else if(encoder->protected_->max_lpc_order < 8)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_8_old;
- else if(encoder->protected_->max_lpc_order < 12)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_12_old;
- else if(encoder->protected_->max_lpc_order < 16)
- encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse_lag_16_old;
- }
-# endif
-
# ifdef FLAC__SSE2_SUPPORTED
+ if(encoder->protected_->max_lpc_order < 8)
+ encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_8;
+ else if(encoder->protected_->max_lpc_order < 10)
+ encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_10;
+ else if(encoder->protected_->max_lpc_order < 14)
+ encoder->private_->local_lpc_compute_autocorrelation = FLAC__lpc_compute_autocorrelation_intrin_sse2_lag_14;
+
encoder->private_->local_lpc_compute_residual_from_qlp_coefficients_16bit = FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_sse2;
# endif
# ifdef FLAC__SSE4_1_SUPPORTED
@@ -3427,7 +3370,7 @@ FLAC__bool process_subframe_(
#endif
#ifndef FLAC__INTEGER_ONLY_LIBRARY
double lpc_residual_bits_per_sample;
- FLAC__real autoc[FLAC__MAX_LPC_ORDER+1]; /* WATCHOUT: the size is important even though encoder->protected_->max_lpc_order might be less; some asm and x86 intrinsic routines need all the space */
+ double autoc[FLAC__MAX_LPC_ORDER+1]; /* WATCHOUT: the size is important even though encoder->protected_->max_lpc_order might be less; some asm and x86 intrinsic routines need all the space */
double lpc_error[FLAC__MAX_LPC_ORDER];
uint32_t min_lpc_order, max_lpc_order, lpc_order;
uint32_t min_qlp_coeff_precision, max_qlp_coeff_precision, qlp_coeff_precision;
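
The switch of autoc[] here, and of the local_lpc_compute_autocorrelation signature earlier in this file, from FLAC__real to double presumably trades a little store bandwidth for accumulator precision: the sums run over a whole block, and a float accumulator starts dropping low-order bits once it grows large relative to each term. A small self-contained illustration of that effect, unrelated to libFLAC itself:

#include <stdio.h>

int main(void)
{
	float  acc_f = 0.0f;
	double acc_d = 0.0;
	long   i;

	/* 2^25 additions of 1.0: the float accumulator stalls at 2^24
	   (16777216), where the spacing between representable floats
	   exceeds the term being added; the double stays exact. */
	for (i = 0; i < (1L << 25); i++) {
		acc_f += 1.0f;
		acc_d += 1.0;
	}
	printf("float  accumulator: %.1f\n", acc_f);  /* 16777216.0 */
	printf("double accumulator: %.1f\n", acc_d);  /* 33554432.0 */
	return 0;
}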