author     Rhishikesh Agashe <Rhishikesh.Agashe@imgtec.com>   2013-11-29 15:47:58 -0500
committer  Jean-Marc Valin <jmvalin@jmvalin.ca>                2013-11-29 15:47:58 -0500
commit     aa0375cd65736b5e83cbddcf1f9e2ff23308ba79 (patch)
tree       8bc81d7a1af3524923128a6cbe6a9aa29ed1f42d
parent     7a0b68233f61b48e1980ca3aa1268f91955806e8 (diff)
download   opus-aa0375cd65736b5e83cbddcf1f9e2ff23308ba79.tar.gz

Simulates the non-bitexact changes in exp_mips_opt (not for merging)

Signed-off-by: Jean-Marc Valin <jmvalin@jmvalin.ca>
-rw-r--r--  celt/celt.c                              |  57
-rw-r--r--  celt/fixed_generic.h                     |  66
-rw-r--r--  celt/kiss_fft.c                          | 123
-rw-r--r--  celt/mdct.c                              |  28
-rw-r--r--  celt/pitch.h                             | 123
-rw-r--r--  celt/vq.c                                |  36
-rw-r--r--  silk/NSQ_del_dec.c                       | 112
-rw-r--r--  silk/fixed/warped_autocorrelation_FIX.c  | 101
-rw-r--r--  silk/macros.h                            |  39

9 files changed, 538 insertions(+), 147 deletions(-)
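
The patch replaces Opus's split 16x16/16x32 fixed-point macros with plain 64-bit (long long) multiply-accumulate arithmetic, so that a generic build reproduces the results of the MIPS DSP accumulator code in the exp_mips_opt branch. The output is not bit-exact with the reference macros mainly because multi-term expressions now keep full 64-bit products and shift once at the end, instead of truncating after every multiply. A minimal sketch of that difference (stand-alone illustration, not code from the patch; the lower-case helper is a stand-in that matches the reference MULT16_32_Q15 for in-range operands):

    /* Each MULT16_32_Q15() in the reference code truncates its own product at
       >>15 before the terms are added; the accumulator form keeps the full
       64-bit products and truncates once at the end. */
    #include <stdio.h>

    static int mult16_32_q15(short a, int b)
    {
        return (int)(((long long)a * b) >> 15);
    }

    int main(void)
    {
        short g = 1000;      /* a Q15 gain      */
        int   x = 99999;     /* a 32-bit sample */

        /* Reference style: truncate each term, then add. */
        int per_term = mult16_32_q15(g, x) + mult16_32_q15(g, x) + mult16_32_q15(g, x);

        /* Patch / MIPS accumulator style: add full products first, shift once. */
        long long acc = (long long)g * x + (long long)g * x + (long long)g * x;

        printf("per-term: %d  accumulated: %d\n", per_term, (int)(acc >> 15));
        return 0;
    }
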
diff --git a/celt/celt.c b/celt/celt.c
index 3e0ce6e6..0597f0a2 100644
--- a/celt/celt.c
+++ b/celt/celt.c
@@ -144,22 +144,38 @@ void comb_filter(opus_val32 *y, opus_val32 *x, int T0, int T1, int N,
for (i=0;i<overlap;i++)
{
opus_val16 f;
- x0=x[i-T1+2];
+ opus_val32 res;
f = MULT16_16_Q15(window[i],window[i]);
- y[i] = x[i]
- + MULT16_32_Q15(MULT16_16_Q15((Q15ONE-f),g00),x[i-T0])
- + MULT16_32_Q15(MULT16_16_Q15((Q15ONE-f),g01),ADD32(x[i-T0+1],x[i-T0-1]))
- + MULT16_32_Q15(MULT16_16_Q15((Q15ONE-f),g02),ADD32(x[i-T0+2],x[i-T0-2]))
- + MULT16_32_Q15(MULT16_16_Q15(f,g10),x2)
- + MULT16_32_Q15(MULT16_16_Q15(f,g11),ADD32(x1,x3))
- + MULT16_32_Q15(MULT16_16_Q15(f,g12),ADD32(x0,x4));
+ x0= x[i-T1+2];
+
+ {
+ long long ac1 = 0;
+ ac1 = ((long long)MULT16_16_Q15((Q15ONE-f),g00)) * ((long long )x[i-T0]);
+ ac1 += ( ((long long)MULT16_16_Q15((Q15ONE-f),g01)) * ((long long)ADD32(x[i-T0-1],x[i-T0+1])) );
+ ac1 += ( ((long long)MULT16_16_Q15((Q15ONE-f),g02)) * ((long long)ADD32(x[i-T0-2],x[i-T0+2])) );
+ ac1 += ( ((long long)MULT16_16_Q15(f,g10)) * ((long long)x2) );
+ ac1 += ( ((long long)MULT16_16_Q15(f,g11)) * ((long long)ADD32(x3,x1)) );
+ ac1 += ( ((long long)MULT16_16_Q15(f,g12)) * ((long long)ADD32(x4,x0)) );
+
+ ac1 = ac1 >> 15;
+ res = ac1;
+ }
+
+ y[i] = x[i] + res;
+
x4=x3;
x3=x2;
x2=x1;
x1=x0;
-
}
- if (g1==0)
+
+
+ x4 = x[i-T1-2];
+ x3 = x[i-T1-1];
+ x2 = x[i-T1];
+ x1 = x[i-T1+1];
+
+ if (g1==0)
{
/* OPT: Happens to work without the OPUS_MOVE(), but only because the current encoder already copies x to y */
if (x!=y)
@@ -167,8 +183,25 @@ void comb_filter(opus_val32 *y, opus_val32 *x, int T0, int T1, int N,
return;
}
- /* Compute the part with the constant filter. */
- comb_filter_const(y+i, x+i, T1, N-i, g10, g11, g12);
+ for (i=overlap;i<N;i++) {
+
+ opus_val32 res;
+ x0=x[i-T1+2];
+ {
+ long long ac1 = 0;
+ ac1 = ( ((long long)g10) * ((long long)x2) );
+ ac1 += ( ((long long)g11) * ((long long)ADD32(x3,x1)) );
+ ac1 += ( ((long long)g12) * ((long long)ADD32(x4,x0)));
+ ac1 = ac1 >> 15;
+ res = ac1;
+ }
+
+ y[i] = x[i] + res;
+ x4=x3;
+ x3=x2;
+ x2=x1;
+ x1=x0;
+ }
}
const signed char tf_select_table[4][8] = {
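
Note on the celt/celt.c hunks above: both the windowed overlap section and the constant-gain section that used to be delegated to comb_filter_const() are rewritten as one 64-bit accumulation followed by a single >>15. Stripped of the rolling x0..x4 registers, the constant-gain loop computes a symmetric three-tap comb around the pitch lag; a simplified sketch with an illustrative name, assuming x[] carries at least T+2 samples of history before index 0:

    static void comb_filter_const_sketch(int *y, const int *x, int T, int N,
                                         short g10, short g11, short g12)
    {
        int i;
        for (i = 0; i < N; i++)
        {
            long long acc;
            /* Q15 gains, one 64-bit MAC and one shift per output sample */
            acc  = (long long)g10 * x[i - T];
            acc += (long long)g11 * ((long long)x[i - T - 1] + x[i - T + 1]);
            acc += (long long)g12 * ((long long)x[i - T - 2] + x[i - T + 2]);
            y[i] = x[i] + (int)(acc >> 15);
        }
    }
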
diff --git a/celt/fixed_generic.h b/celt/fixed_generic.h
index 657e67c8..e8b90f49 100644
--- a/celt/fixed_generic.h
+++ b/celt/fixed_generic.h
@@ -33,20 +33,68 @@
#ifndef FIXED_GENERIC_H
#define FIXED_GENERIC_H
+static inline int MULT16_16_Q15_ADD(int a, int b, int c, int d) {
+ int m;
+ long long ac1 = ((long long)a * (long long)b);
+ long long ac2 = ((long long)c * (long long)d);
+ ac1 += ac2;
+ ac1 = ac1>>15;
+ m = (int )(ac1);
+ return m;
+}
+
+static inline int MULT16_16_Q15_SUB(int a, int b, int c, int d) {
+ int m;
+ long long ac1 = ((long long)a * (long long)b);
+ long long ac2 = ((long long)c * (long long)d);
+ ac1 -= ac2;
+ ac1 = ac1>>15;
+ m = (int )(ac1);
+ return m;
+}
+
/** Multiply a 16-bit signed value by a 16-bit unsigned value. The result is a 32-bit signed value */
#define MULT16_16SU(a,b) ((opus_val32)(opus_val16)(a)*(opus_val32)(opus_uint16)(b))
/** 16x32 multiplication, followed by a 16-bit shift right. Result fits in 32 bits */
-#define MULT16_32_Q16(a,b) ADD32(MULT16_16((a),SHR((b),16)), SHR(MULT16_16SU((a),((b)&0x0000ffff)),16))
+static inline int MULT16_32_Q16(int a, int b)
+{
+ int c;
+ long long ac1 = ((long long)a * (long long)b);
+ ac1 = ac1>>16;
+ c =(int)(ac1);
+ return c;
+}
/** 16x32 multiplication, followed by a 16-bit shift right (round-to-nearest). Result fits in 32 bits */
-#define MULT16_32_P16(a,b) ADD32(MULT16_16((a),SHR((b),16)), PSHR(MULT16_16((a),((b)&0x0000ffff)),16))
+static inline int MULT16_32_P16(int a, int b)
+{
+ int c;
+ long long ac1 = ((long long)a * (long long)b);
+ ac1 = ac1>>16;
+ c =(int)(ac1);
+ return c;
+}
/** 16x32 multiplication, followed by a 15-bit shift right. Result fits in 32 bits */
-#define MULT16_32_Q15(a,b) ADD32(SHL(MULT16_16((a),SHR((b),16)),1), SHR(MULT16_16SU((a),((b)&0x0000ffff)),15))
+static inline int MULT16_32_Q15(int a, int b)
+{
+ int c;
+ long long ac1 = ((long long)a * (long long)b);
+ ac1 = ac1>>15;
+ c =(int)(ac1);
+ return c;
+}
/** 32x32 multiplication, followed by a 31-bit shift right. Result fits in 32 bits */
-#define MULT32_32_Q31(a,b) ADD32(ADD32(SHL(MULT16_16(SHR((a),16),SHR((b),16)),1), SHR(MULT16_16SU(SHR((a),16),((b)&0x0000ffff)),15)), SHR(MULT16_16SU(SHR((b),16),((a)&0x0000ffff)),15))
+static inline int MULT32_32_Q31(int a, int b)
+{
+ int c;
+ long long ac1 = ((long long)a * (long long)b);
+ ac1 = ac1>>31;
+ c =(int)(ac1);
+ return c;
+}
/** Compile-time conversion of float constant to 16-bit value */
#define QCONST16(x,bits) ((opus_val16)(.5+(x)*(((opus_val32)1)<<(bits))))
@@ -123,7 +171,15 @@
#define MULT16_16_P13(a,b) (SHR(ADD32(4096,MULT16_16((a),(b))),13))
#define MULT16_16_P14(a,b) (SHR(ADD32(8192,MULT16_16((a),(b))),14))
-#define MULT16_16_P15(a,b) (SHR(ADD32(16384,MULT16_16((a),(b))),15))
+static inline int MULT16_16_P15(int a, int b)
+{
+ int r;
+ int ac1 = a*b;
+ ac1 += 16384;
+ ac1 = ac1 >> 15;
+ r = (int)(ac1);
+ return r;
+}
/** Divide a 32-bit value by a 16-bit value. Result fits in 16 bits */
#define DIV32_16(a,b) ((opus_val16)(((opus_val32)(a))/((opus_val16)(b))))
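
In celt/fixed_generic.h each replaced macro becomes one 64-bit multiply plus one shift. One deliberate non-bitexact simplification: MULT16_32_P16 is documented as round-to-nearest, but its replacement above truncates exactly like MULT16_32_Q16. For comparison only (an assumption, not part of the patch), a rounding 64-bit variant would add half an LSB before the 16-bit shift:

    static inline int MULT16_32_P16_rounded(int a, int b)
    {
        long long ac = (long long)a * (long long)b;
        return (int)((ac + 32768) >> 16);   /* +2^15 rounds the >>16 to nearest */
    }
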
diff --git a/celt/kiss_fft.c b/celt/kiss_fft.c
index ad706c73..b0227069 100644
--- a/celt/kiss_fft.c
+++ b/celt/kiss_fft.c
@@ -41,6 +41,95 @@
#include "mathops.h"
#include "stack_alloc.h"
+#ifdef FIXED_POINT
+
+#define S_MUL_ADD(a, b, c, d) (S_MUL(a,b)+S_MUL(c,d))
+#define S_MUL_SUB(a, b, c, d) (S_MUL(a,b)-S_MUL(c,d))
+
+#undef S_MUL_ADD
+static inline int S_MUL_ADD(int a, int b, int c, int d) {
+ int m;
+ long long ac1 = ((long long)a * (long long)b);
+ long long ac2 = ((long long)c * (long long)d);
+ ac1 += ac2;
+ ac1 = ac1>>15;
+ m = (int )(ac1);
+ return m;
+}
+
+
+#undef S_MUL_SUB
+static inline int S_MUL_SUB(int a, int b, int c, int d) {
+ int m;
+ long long ac1 = ((long long)a * (long long)b);
+ long long ac2 = ((long long)c * (long long)d);
+ ac1 -= ac2;
+ ac1 = ac1>>15;
+ m = (int )(ac1);
+ return m;
+}
+
+
+#undef C_MUL
+# define C_MUL(m,a,b) (m=C_MUL_fun(a,b))
+static inline kiss_fft_cpx C_MUL_fun(kiss_fft_cpx a, kiss_twiddle_cpx b) {
+ kiss_fft_cpx m;
+ long long ac1 = ((long long)a.r * (long long)b.r);
+ long long ac2 = ((long long)a.i * (long long)b.i);
+ ac1 = ac1 - ac2;
+ ac1 = ac1 >> 15;
+ m.r = ac1;
+
+ ac1 = ((long long)a.r * (long long)b.i);
+ ac2 = ((long long)a.i * (long long)b.r);
+ ac1 = ac1 + ac2;
+ ac1 = ac1 >> 15;
+ m.i = ac1;
+
+ return m;
+}
+
+
+#undef C_MUL4
+# define C_MUL4(m,a,b) (m=C_MUL4_fun(a,b))
+static inline kiss_fft_cpx C_MUL4_fun(kiss_fft_cpx a, kiss_twiddle_cpx b) {
+ kiss_fft_cpx m;
+ long long ac1 = ((long long)a.r * (long long)b.r);
+ long long ac2 = ((long long)a.i * (long long)b.i);
+ ac1 = ac1 - ac2;
+ ac1 = ac1 >> 17;
+ m.r = ac1;
+
+ ac1 = ((long long)a.r * (long long)b.i);
+ ac2 = ((long long)a.i * (long long)b.r);
+ ac1 = ac1 + ac2;
+ ac1 = ac1 >> 17;
+ m.i = ac1;
+
+ return m;
+}
+
+
+#undef C_MULC
+# define C_MULC(m,a,b) (m=C_MULC_fun(a,b))
+static inline kiss_fft_cpx C_MULC_fun(kiss_fft_cpx a, kiss_twiddle_cpx b) {
+ kiss_fft_cpx m;
+
+ long long ac1 = ((long long)a.r * (long long)b.r);
+ long long ac2 = ((long long)a.i * (long long)b.i);
+ ac1 = ac1 + ac2;
+ ac1 = ac1 >> 15;
+ m.r = ac1;
+
+ ac1 = ((long long)a.i * (long long)b.r);
+ ac2 = ((long long)a.r * (long long)b.i);
+ ac1 = ac1 - ac2;
+ ac1 = ac1 >> 15;
+ m.i = ac1;
+
+ return m;
+}
+#endif /* FIXED_POINT */
/* The guts header contains all the multiplication and addition macros that are defined for
complex numbers. It also declares the kf_ internal functions.
*/
@@ -352,19 +441,20 @@ static void kf_bfly5(
Fout0->r += scratch[7].r + scratch[8].r;
Fout0->i += scratch[7].i + scratch[8].i;
- scratch[5].r = scratch[0].r + S_MUL(scratch[7].r,ya.r) + S_MUL(scratch[8].r,yb.r);
- scratch[5].i = scratch[0].i + S_MUL(scratch[7].i,ya.r) + S_MUL(scratch[8].i,yb.r);
+ scratch[5].r = scratch[0].r + S_MUL_ADD(scratch[7].r,ya.r,scratch[8].r,yb.r);
+ scratch[5].i = scratch[0].i + S_MUL_ADD(scratch[7].i,ya.r,scratch[8].i,yb.r);
- scratch[6].r = S_MUL(scratch[10].i,ya.i) + S_MUL(scratch[9].i,yb.i);
- scratch[6].i = -S_MUL(scratch[10].r,ya.i) - S_MUL(scratch[9].r,yb.i);
+ scratch[6].r = S_MUL_ADD(scratch[10].i,ya.i,scratch[9].i,yb.i);
+ scratch[6].i = -S_MUL_ADD(scratch[10].r,ya.i,scratch[9].r,yb.i);
C_SUB(*Fout1,scratch[5],scratch[6]);
C_ADD(*Fout4,scratch[5],scratch[6]);
- scratch[11].r = scratch[0].r + S_MUL(scratch[7].r,yb.r) + S_MUL(scratch[8].r,ya.r);
- scratch[11].i = scratch[0].i + S_MUL(scratch[7].i,yb.r) + S_MUL(scratch[8].i,ya.r);
- scratch[12].r = - S_MUL(scratch[10].i,yb.i) + S_MUL(scratch[9].i,ya.i);
- scratch[12].i = S_MUL(scratch[10].r,yb.i) - S_MUL(scratch[9].r,ya.i);
+ scratch[11].r = scratch[0].r + S_MUL_ADD(scratch[7].r,yb.r,scratch[8].r,ya.r);
+ scratch[11].i = scratch[0].i + S_MUL_ADD(scratch[7].i,yb.r,scratch[8].i,ya.r);
+
+ scratch[12].r = S_MUL_SUB(scratch[9].i,ya.i,scratch[10].i,yb.i);
+ scratch[12].i = S_MUL_SUB(scratch[10].r,yb.i,scratch[9].r,ya.i);
C_ADD(*Fout2,scratch[11],scratch[12]);
C_SUB(*Fout3,scratch[11],scratch[12]);
@@ -420,19 +510,20 @@ static void ki_bfly5(
Fout0->r += scratch[7].r + scratch[8].r;
Fout0->i += scratch[7].i + scratch[8].i;
- scratch[5].r = scratch[0].r + S_MUL(scratch[7].r,ya.r) + S_MUL(scratch[8].r,yb.r);
- scratch[5].i = scratch[0].i + S_MUL(scratch[7].i,ya.r) + S_MUL(scratch[8].i,yb.r);
+ scratch[5].r = scratch[0].r + S_MUL_ADD(scratch[7].r,ya.r,scratch[8].r,yb.r);
+ scratch[5].i = scratch[0].i + S_MUL_ADD(scratch[7].i,ya.r,scratch[8].i,yb.r);
- scratch[6].r = -S_MUL(scratch[10].i,ya.i) - S_MUL(scratch[9].i,yb.i);
- scratch[6].i = S_MUL(scratch[10].r,ya.i) + S_MUL(scratch[9].r,yb.i);
+ scratch[6].r = -S_MUL_ADD(scratch[10].i,ya.i,scratch[9].i,yb.i);
+ scratch[6].i = S_MUL_ADD(scratch[10].r,ya.i,scratch[9].r,yb.i);
C_SUB(*Fout1,scratch[5],scratch[6]);
C_ADD(*Fout4,scratch[5],scratch[6]);
- scratch[11].r = scratch[0].r + S_MUL(scratch[7].r,yb.r) + S_MUL(scratch[8].r,ya.r);
- scratch[11].i = scratch[0].i + S_MUL(scratch[7].i,yb.r) + S_MUL(scratch[8].i,ya.r);
- scratch[12].r = S_MUL(scratch[10].i,yb.i) - S_MUL(scratch[9].i,ya.i);
- scratch[12].i = -S_MUL(scratch[10].r,yb.i) + S_MUL(scratch[9].r,ya.i);
+ scratch[11].r = scratch[0].r + S_MUL_ADD(scratch[7].r,yb.r,scratch[8].r,ya.r);
+ scratch[11].i = scratch[0].i + S_MUL_ADD(scratch[7].i,yb.r,scratch[8].i,ya.r);
+
+ scratch[12].r = S_MUL_SUB(scratch[10].i,yb.i,scratch[9].i,ya.i);
+ scratch[12].i = S_MUL_SUB(scratch[9].r,ya.i,scratch[10].r,yb.i);
C_ADD(*Fout2,scratch[11],scratch[12]);
C_SUB(*Fout3,scratch[11],scratch[12]);
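
In celt/kiss_fft.c the complex twiddle multiplies become fused 64-bit dot products: the twiddles are Q15, so each output component is two products summed and shifted right by 15 (C_MULC_fun conjugates the twiddle, and C_MUL4_fun shifts by 17 instead of 15 to fold in a scaling of 1/4). A self-contained sketch with stand-in types, not the kiss_fft ones:

    #include <stdio.h>

    typedef struct { int r, i; } cpx;

    static cpx cmul_q15(cpx a, cpx b)
    {
        cpx m;
        m.r = (int)(((long long)a.r * b.r - (long long)a.i * b.i) >> 15);
        m.i = (int)(((long long)a.r * b.i + (long long)a.i * b.r) >> 15);
        return m;
    }

    int main(void)
    {
        cpx x = { 20000, -5000 };
        cpx w = { 23170, 23170 };    /* roughly (cos 45deg, sin 45deg) in Q15 */
        cpx y = cmul_q15(x, w);
        printf("%d %d\n", y.r, y.i); /* x rotated by about 45 degrees */
        return 0;
    }
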
diff --git a/celt/mdct.c b/celt/mdct.c
index 90a214ad..cce474a8 100644
--- a/celt/mdct.c
+++ b/celt/mdct.c
@@ -136,8 +136,8 @@ void clt_mdct_forward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar
for(i=0;i<((overlap+3)>>2);i++)
{
/* Real part arranged as -d-cR, Imag part arranged as -b+aR*/
- *yp++ = MULT16_32_Q15(*wp2, xp1[N2]) + MULT16_32_Q15(*wp1,*xp2);
- *yp++ = MULT16_32_Q15(*wp1, *xp1) - MULT16_32_Q15(*wp2, xp2[-N2]);
+ *yp++ = S_MUL_ADD(*wp2, xp1[N2],*wp1,*xp2);
+ *yp++ = S_MUL_SUB(*wp1, *xp1,*wp2, xp2[-N2]);
xp1+=2;
xp2-=2;
wp1+=2;
@@ -156,8 +156,8 @@ void clt_mdct_forward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar
for(;i<N4;i++)
{
/* Real part arranged as a-bR, Imag part arranged as -c-dR */
- *yp++ = -MULT16_32_Q15(*wp1, xp1[-N2]) + MULT16_32_Q15(*wp2, *xp2);
- *yp++ = MULT16_32_Q15(*wp2, *xp1) + MULT16_32_Q15(*wp1, xp2[N2]);
+ *yp++ = S_MUL_SUB(*wp2, *xp2, *wp1, xp1[-N2]);
+ *yp++ = S_MUL_ADD(*wp2, *xp1, *wp1, xp2[N2]);
xp1+=2;
xp2-=2;
wp1+=2;
@@ -173,8 +173,8 @@ void clt_mdct_forward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar
kiss_fft_scalar re, im, yr, yi;
re = yp[0];
im = yp[1];
- yr = -S_MUL(re,t[i<<shift]) - S_MUL(im,t[(N4-i)<<shift]);
- yi = -S_MUL(im,t[i<<shift]) + S_MUL(re,t[(N4-i)<<shift]);
+ yr = -S_MUL_ADD(re,t[i<<shift],im,t[(N4-i)<<shift]);
+ yi = S_MUL_SUB(re,t[(N4-i)<<shift],im,t[i<<shift]);
/* works because the cos is nearly one */
*yp++ = yr + S_MUL(yi,sine);
*yp++ = yi - S_MUL(yr,sine);
@@ -195,8 +195,8 @@ void clt_mdct_forward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar
for(i=0;i<N4;i++)
{
kiss_fft_scalar yr, yi;
- yr = S_MUL(fp[1],t[(N4-i)<<shift]) + S_MUL(fp[0],t[i<<shift]);
- yi = S_MUL(fp[0],t[(N4-i)<<shift]) - S_MUL(fp[1],t[i<<shift]);
+ yr = S_MUL_ADD(fp[1],t[(N4-i)<<shift],fp[0],t[i<<shift]);
+ yi = S_MUL_SUB(fp[0],t[(N4-i)<<shift],fp[1],t[i<<shift]);
/* works because the cos is nearly one */
*yp1 = yr - S_MUL(yi,sine);
*yp2 = yi + S_MUL(yr,sine);;
@@ -238,8 +238,8 @@ void clt_mdct_backward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scala
for(i=0;i<N4;i++)
{
kiss_fft_scalar yr, yi;
- yr = -S_MUL(*xp2, t[i<<shift]) + S_MUL(*xp1,t[(N4-i)<<shift]);
- yi = -S_MUL(*xp2, t[(N4-i)<<shift]) - S_MUL(*xp1,t[i<<shift]);
+ yr = S_MUL_SUB(*xp1,t[(N4-i)<<shift],*xp2, t[i<<shift]);
+ yi = -S_MUL_ADD(*xp2, t[(N4-i)<<shift],*xp1,t[i<<shift]);
/* works because the cos is nearly one */
*yp++ = yr - S_MUL(yi,sine);
*yp++ = yi + S_MUL(yr,sine);
@@ -268,8 +268,8 @@ void clt_mdct_backward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scala
t0 = t[i<<shift];
t1 = t[(N4-i)<<shift];
/* We'd scale up by 2 here, but instead it's done when mixing the windows */
- yr = S_MUL(re,t0) - S_MUL(im,t1);
- yi = S_MUL(im,t0) + S_MUL(re,t1);
+ yr = S_MUL_SUB(re,t0,im,t1);
+ yi = S_MUL_ADD(im,t0,re,t1);
re = yp1[0];
im = yp1[1];
/* works because the cos is nearly one */
@@ -279,8 +279,8 @@ void clt_mdct_backward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scala
t0 = t[(N4-i-1)<<shift];
t1 = t[(i+1)<<shift];
/* We'd scale up by 2 here, but instead it's done when mixing the windows */
- yr = S_MUL(re,t0) - S_MUL(im,t1);
- yi = S_MUL(im,t0) + S_MUL(re,t1);
+ yr = S_MUL_SUB(re,t0,im,t1);
+ yi = S_MUL_ADD(im,t0,re,t1);
/* works because the cos is nearly one */
yp1[0] = -(yr - S_MUL(yi,sine));
yp0[1] = yi + S_MUL(yr,sine);
diff --git a/celt/pitch.h b/celt/pitch.h
index a1074ef2..86bf8a58 100644
--- a/celt/pitch.h
+++ b/celt/pitch.h
@@ -61,65 +61,89 @@ static OPUS_INLINE void xcorr_kernel(const opus_val16 * x, const opus_val16 * y,
{
int j;
opus_val16 y_0, y_1, y_2, y_3;
- y_3=0; /* gcc doesn't realize that y_3 can't be used uninitialized */
- y_0=*y++;
- y_1=*y++;
- y_2=*y++;
- for (j=0;j<len-3;j+=4)
- {
- opus_val16 tmp;
- tmp = *x++;
- y_3=*y++;
- sum[0] = MAC16_16(sum[0],tmp,y_0);
- sum[1] = MAC16_16(sum[1],tmp,y_1);
- sum[2] = MAC16_16(sum[2],tmp,y_2);
- sum[3] = MAC16_16(sum[3],tmp,y_3);
- tmp=*x++;
- y_0=*y++;
- sum[0] = MAC16_16(sum[0],tmp,y_1);
- sum[1] = MAC16_16(sum[1],tmp,y_2);
- sum[2] = MAC16_16(sum[2],tmp,y_3);
- sum[3] = MAC16_16(sum[3],tmp,y_0);
- tmp=*x++;
- y_1=*y++;
- sum[0] = MAC16_16(sum[0],tmp,y_2);
- sum[1] = MAC16_16(sum[1],tmp,y_3);
- sum[2] = MAC16_16(sum[2],tmp,y_0);
- sum[3] = MAC16_16(sum[3],tmp,y_1);
+
+ opus_int64 sum_0, sum_1, sum_2, sum_3;
+ sum_0 = (opus_int64)sum[0];
+ sum_1 = (opus_int64)sum[1];
+ sum_2 = (opus_int64)sum[2];
+ sum_3 = (opus_int64)sum[3];
+
+ y_3=0; /* gcc doesn't realize that y_3 can't be used uninitialized */
+ y_0=*y++;
+ y_1=*y++;
+ y_2=*y++;
+ for (j=0;j<len-3;j+=4)
+ {
+ opus_val16 tmp;
+ tmp = *x++;
+ y_3=*y++;
+
+ sum_0 += ( ((long long)tmp) * ((long long)y_0) );
+ sum_1 += ( ((long long)tmp) * ((long long)y_1) );
+ sum_2 += ( ((long long)tmp) * ((long long)y_2) );
+ sum_3 += ( ((long long)tmp) * ((long long)y_3) );
+
+ tmp=*x++;
+ y_0=*y++;
+
+ sum_0 += ( ((long long)tmp) * ((long long)y_1) );
+ sum_1 += ( ((long long)tmp) * ((long long)y_2) );
+ sum_2 += ( ((long long)tmp) * ((long long)y_3) );
+ sum_3 += ( ((long long)tmp) * ((long long)y_0) );
+
+ tmp=*x++;
+ y_1=*y++;
+
+ sum_0 += ( ((long long)tmp) * ((long long)y_2) );
+ sum_1 += ( ((long long)tmp) * ((long long)y_3) );
+ sum_2 += ( ((long long)tmp) * ((long long)y_0) );
+ sum_3 += ( ((long long)tmp) * ((long long)y_1) );
+
+
tmp=*x++;
y_2=*y++;
- sum[0] = MAC16_16(sum[0],tmp,y_3);
- sum[1] = MAC16_16(sum[1],tmp,y_0);
- sum[2] = MAC16_16(sum[2],tmp,y_1);
- sum[3] = MAC16_16(sum[3],tmp,y_2);
+
+ sum_0 += ( ((long long)tmp) * ((long long)y_3) );
+ sum_1 += ( ((long long)tmp) * ((long long)y_0) );
+ sum_2 += ( ((long long)tmp) * ((long long)y_1) );
+ sum_3 += ( ((long long)tmp) * ((long long)y_2) );
+
}
if (j++<len)
{
opus_val16 tmp = *x++;
y_3=*y++;
- sum[0] = MAC16_16(sum[0],tmp,y_0);
- sum[1] = MAC16_16(sum[1],tmp,y_1);
- sum[2] = MAC16_16(sum[2],tmp,y_2);
- sum[3] = MAC16_16(sum[3],tmp,y_3);
+
+ sum_0 += ( ((long long)tmp) * ((long long)y_0) );
+ sum_1 += ( ((long long)tmp) * ((long long)y_1) );
+ sum_2 += ( ((long long)tmp) * ((long long)y_2) );
+ sum_3 += ( ((long long)tmp) * ((long long)y_3) );
}
if (j++<len)
{
opus_val16 tmp=*x++;
y_0=*y++;
- sum[0] = MAC16_16(sum[0],tmp,y_1);
- sum[1] = MAC16_16(sum[1],tmp,y_2);
- sum[2] = MAC16_16(sum[2],tmp,y_3);
- sum[3] = MAC16_16(sum[3],tmp,y_0);
+
+ sum_0 += ( ((long long)tmp) * ((long long)y_1) );
+ sum_1 += ( ((long long)tmp) * ((long long)y_2) );
+ sum_2 += ( ((long long)tmp) * ((long long)y_3) );
+ sum_3 += ( ((long long)tmp) * ((long long)y_0) );
}
if (j<len)
{
opus_val16 tmp=*x++;
y_1=*y++;
- sum[0] = MAC16_16(sum[0],tmp,y_2);
- sum[1] = MAC16_16(sum[1],tmp,y_3);
- sum[2] = MAC16_16(sum[2],tmp,y_0);
- sum[3] = MAC16_16(sum[3],tmp,y_1);
+
+ sum_0 += ( ((long long)tmp) * ((long long)y_2) );
+ sum_1 += ( ((long long)tmp) * ((long long)y_3) );
+ sum_2 += ( ((long long)tmp) * ((long long)y_0) );
+ sum_3 += ( ((long long)tmp) * ((long long)y_1) );
}
+
+ sum[0] = (opus_val32)sum_0;
+ sum[1] = (opus_val32)sum_1;
+ sum[2] = (opus_val32)sum_2;
+ sum[3] = (opus_val32)sum_3;
}
#endif /* OVERRIDE_XCORR_KERNEL */
@@ -127,14 +151,23 @@ static OPUS_INLINE void xcorr_kernel(const opus_val16 * x, const opus_val16 * y,
static OPUS_INLINE void dual_inner_prod(const opus_val16 *x, const opus_val16 *y01, const opus_val16 *y02,
int N, opus_val32 *xy1, opus_val32 *xy2)
{
- int i;
+ int j;
opus_val32 xy01=0;
opus_val32 xy02=0;
- for (i=0;i<N;i++)
+ long long ac1 = 0;
+ long long ac2 = 0;
+
+   /* Accumulate both inner products in 64-bit accumulators, two samples per iteration */
+ for (j=0;j<N;j++)
{
- xy01 = MAC16_16(xy01, x[i], y01[i]);
- xy02 = MAC16_16(xy02, x[i], y02[i]);
+ ac1 += ( ((long long)x[j]) * ((long long)y01[j]) );
+ ac2 += ( ((long long)x[j]) * ((long long)y02[j]) );
+ ++j;
+ ac1 += ( ((long long)x[j]) * ((long long)y01[j]) );
+ ac2 += ( ((long long)x[j]) * ((long long)y02[j]) );
}
+ xy01 = ac1;
+ xy02 = ac2;
*xy1 = xy01;
*xy2 = xy02;
}
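
In celt/pitch.h, xcorr_kernel() and dual_inner_prod() keep their unrolling but widen the running sums to 64-bit accumulators, narrowing only on the final store. Without the unrolling, the kernel computes four correlations of x against y at lags 0 to 3; a simplified sketch for the fixed-point build (illustrative name, keeping the contract that sum[] holds partial results on entry):

    static void xcorr_kernel_sketch(const short *x, const short *y, int *sum, int len)
    {
        int j, k;
        long long acc[4];
        for (k = 0; k < 4; k++)
            acc[k] = sum[k];
        for (j = 0; j < len; j++)
            for (k = 0; k < 4; k++)
                acc[k] += (long long)x[j] * y[j + k];
        for (k = 0; k < 4; k++)
            sum[k] = (int)acc[k];
    }
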
diff --git a/celt/vq.c b/celt/vq.c
index 98a0f36c..fe7e564b 100644
--- a/celt/vq.c
+++ b/celt/vq.c
@@ -46,19 +46,21 @@ static void exp_rotation1(celt_norm *X, int len, int stride, opus_val16 c, opus_
for (i=0;i<len-stride;i++)
{
celt_norm x1, x2;
+
x1 = Xptr[0];
x2 = Xptr[stride];
- Xptr[stride] = EXTRACT16(SHR32(MULT16_16(c,x2) + MULT16_16(s,x1), 15));
- *Xptr++ = EXTRACT16(SHR32(MULT16_16(c,x1) - MULT16_16(s,x2), 15));
+ Xptr[stride] = EXTRACT16(MULT16_16_Q15_ADD(c,x2,s,x1));
+ *Xptr++ = EXTRACT16(MULT16_16_Q15_SUB(c,x1,s,x2));
}
Xptr = &X[len-2*stride-1];
for (i=len-2*stride-1;i>=0;i--)
{
celt_norm x1, x2;
+
x1 = Xptr[0];
x2 = Xptr[stride];
- Xptr[stride] = EXTRACT16(SHR32(MULT16_16(c,x2) + MULT16_16(s,x1), 15));
- *Xptr-- = EXTRACT16(SHR32(MULT16_16(c,x1) - MULT16_16(s,x2), 15));
+ Xptr[stride] = EXTRACT16(MULT16_16_Q15_ADD(c,x2,s,x1));
+ *Xptr-- = EXTRACT16(MULT16_16_Q15_SUB(c,x1,s,x2));
}
}
@@ -324,6 +326,7 @@ unsigned alg_unquant(celt_norm *X, int N, int K, int spread, int B,
{
int i;
opus_val32 Ryy;
+ int X0;
unsigned collapse_mask;
VARDECL(int, iy);
SAVE_STACK;
@@ -334,9 +337,14 @@ unsigned alg_unquant(celt_norm *X, int N, int K, int spread, int B,
decode_pulses(iy, N, K, dec);
Ryy = 0;
i=0;
+ {
+ long long ac1 = 0;
do {
- Ryy = MAC16_16(Ryy, iy[i], iy[i]);
+ X0 = (int)iy[i];
+ ac1 += ( ((long long)X0) * ((long long)X0) );
} while (++i < N);
+ Ryy = ac1;
+ }
normalise_residual(iy, X, N, Ryy, gain);
exp_rotation(X, N, -1, B, K, spread);
collapse_mask = extract_collapse_mask(iy, N, B);
@@ -354,11 +362,23 @@ void renormalise_vector(celt_norm *X, int N, opus_val16 gain)
opus_val16 g;
opus_val32 t;
celt_norm *xptr = X;
- for (i=0;i<N;i++)
+
+ int X0, X2, X3, X1;
{
- E = MAC16_16(E, *xptr, *xptr);
- xptr++;
+ long long ac1 = ((long long)E);
+ /*if(N %4)
+ printf("error");*/
+ for (i=0;i<N;i+=2)
+ {
+ X0 = (int)*xptr++;
+ ac1 += ( ((long long)X0) * ((long long)X0) );
+
+ X1 = (int)*xptr++;
+ ac1 += ( ((long long)X1) * ((long long)X1) );
}
+ E = ac1;
+ }
+
#ifdef FIXED_POINT
k = celt_ilog2(E)>>1;
#endif
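
In celt/vq.c, exp_rotation1() now rotates each (x1, x2) pair with the fused MULT16_16_Q15_ADD/_SUB helpers, and the energy sums in alg_unquant() and renormalise_vector() move to 64-bit accumulators (the renormalise_vector loop is unrolled by two, so it assumes an even N, as the commented-out check hints). A sketch of one rotation step, assuming c and s are the Q15 cosine and sine of the spreading angle (illustrative helper, not the patched code):

    static void rotate_pair(short *px1, short *px2, short c, short s)
    {
        int x1 = *px1, x2 = *px2;                 /* read both before writing */
        *px2 = (short)(((long long)c * x2 + (long long)s * x1) >> 15);
        *px1 = (short)(((long long)c * x1 - (long long)s * x2) >> 15);
    }
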
diff --git a/silk/NSQ_del_dec.c b/silk/NSQ_del_dec.c
index 522be406..0e3c6aa2 100644
--- a/silk/NSQ_del_dec.c
+++ b/silk/NSQ_del_dec.c
@@ -339,13 +339,44 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec(
opus_int32 q1_Q0, q1_Q10, q2_Q10, exc_Q14, LPC_exc_Q14, xq_Q14, Gain_Q10;
opus_int32 tmp1, tmp2, sLF_AR_shp_Q14;
opus_int32 *pred_lag_ptr, *shp_lag_ptr, *psLPC_Q14;
- VARDECL( NSQ_sample_pair, psSampleState );
+ NSQ_sample_struct psSampleState[ MAX_DEL_DEC_STATES ][ 2 ];
NSQ_del_dec_struct *psDD;
NSQ_sample_struct *psSS;
- SAVE_STACK;
+ opus_int16 b_Q14_0, b_Q14_1, b_Q14_2, b_Q14_3, b_Q14_4;
+ opus_int16 a_Q12_0, a_Q12_1, a_Q12_2, a_Q12_3, a_Q12_4, a_Q12_5, a_Q12_6;
+ opus_int16 a_Q12_7, a_Q12_8, a_Q12_9, a_Q12_10, a_Q12_11, a_Q12_12, a_Q12_13;
+ opus_int16 a_Q12_14, a_Q12_15;
+
+ opus_int32 cur, prev, next;
+
+    //Initialize b_Q14 variables
+ b_Q14_0 = b_Q14[ 0 ];
+ b_Q14_1 = b_Q14[ 1 ];
+ b_Q14_2 = b_Q14[ 2 ];
+ b_Q14_3 = b_Q14[ 3 ];
+ b_Q14_4 = b_Q14[ 4 ];
+
+    //Initialize a_Q12 variables
+ a_Q12_0 = a_Q12[0];
+ a_Q12_1 = a_Q12[1];
+ a_Q12_2 = a_Q12[2];
+ a_Q12_3 = a_Q12[3];
+ a_Q12_4 = a_Q12[4];
+ a_Q12_5 = a_Q12[5];
+ a_Q12_6 = a_Q12[6];
+ a_Q12_7 = a_Q12[7];
+ a_Q12_8 = a_Q12[8];
+ a_Q12_9 = a_Q12[9];
+ a_Q12_10 = a_Q12[10];
+ a_Q12_11 = a_Q12[11];
+ a_Q12_12 = a_Q12[12];
+ a_Q12_13 = a_Q12[13];
+ a_Q12_14 = a_Q12[14];
+ a_Q12_15 = a_Q12[15];
+
+ long long temp64;
silk_assert( nStatesDelayedDecision > 0 );
- ALLOC( psSampleState, nStatesDelayedDecision, NSQ_sample_pair );
shp_lag_ptr = &NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - lag + HARM_SHAPE_FIR_TAPS / 2 ];
pred_lag_ptr = &sLTP_Q15[ NSQ->sLTP_buf_idx - lag + LTP_ORDER / 2 ];
@@ -358,12 +389,14 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec(
if( signalType == TYPE_VOICED ) {
/* Unrolled loop */
/* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */
- LTP_pred_Q14 = 2;
- LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ 0 ], b_Q14[ 0 ] );
- LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -1 ], b_Q14[ 1 ] );
- LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -2 ], b_Q14[ 2 ] );
- LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -3 ], b_Q14[ 3 ] );
- LTP_pred_Q14 = silk_SMLAWB( LTP_pred_Q14, pred_lag_ptr[ -4 ], b_Q14[ 4 ] );
+ temp64 = ( ((long long)pred_lag_ptr[ 0 ]) * ((long long)b_Q14_0) );
+ temp64 += ( ((long long)pred_lag_ptr[ -1 ]) * ((long long)b_Q14_1) );
+ temp64 += ( ((long long)pred_lag_ptr[ -2 ]) * ((long long)b_Q14_2) );
+ temp64 += ( ((long long)pred_lag_ptr[ -3 ]) * ((long long)b_Q14_3) );
+ temp64 += ( ((long long)pred_lag_ptr[ -4 ]) * ((long long)b_Q14_4) );
+ temp64 = temp64 >> 16;
+ LTP_pred_Q14 = temp64;
+ LTP_pred_Q14 += 2;
LTP_pred_Q14 = silk_LSHIFT( LTP_pred_Q14, 1 ); /* Q13 -> Q14 */
pred_lag_ptr++;
} else {
@@ -395,26 +428,28 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec(
psLPC_Q14 = &psDD->sLPC_Q14[ NSQ_LPC_BUF_LENGTH - 1 + i ];
/* Short-term prediction */
silk_assert( predictLPCOrder == 10 || predictLPCOrder == 16 );
- /* Avoids introducing a bias because silk_SMLAWB() always rounds to -inf */
- LPC_pred_Q14 = silk_RSHIFT( predictLPCOrder, 1 );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ 0 ], a_Q12[ 0 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -1 ], a_Q12[ 1 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -2 ], a_Q12[ 2 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -3 ], a_Q12[ 3 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -4 ], a_Q12[ 4 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -5 ], a_Q12[ 5 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -6 ], a_Q12[ 6 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -7 ], a_Q12[ 7 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -8 ], a_Q12[ 8 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -9 ], a_Q12[ 9 ] );
+ temp64 = ( ((long long)psLPC_Q14[ 0 ]) * ((long long)a_Q12_0) );
+ temp64 += ( ((long long)psLPC_Q14[ -1 ]) * ((long long)a_Q12_1) );
+ temp64 += ( ((long long)psLPC_Q14[ -2 ]) * ((long long)a_Q12_2) );
+ temp64 += ( ((long long)psLPC_Q14[ -3 ]) * ((long long)a_Q12_3) );
+ temp64 += ( ((long long)psLPC_Q14[ -4 ]) * ((long long)a_Q12_4) );
+ temp64 += ( ((long long)psLPC_Q14[ -5 ]) * ((long long)a_Q12_5) );
+ temp64 += ( ((long long)psLPC_Q14[ -6 ]) * ((long long)a_Q12_6) );
+ temp64 += ( ((long long)psLPC_Q14[ -7 ]) * ((long long)a_Q12_7) );
+ temp64 += ( ((long long)psLPC_Q14[ -8 ]) * ((long long)a_Q12_8) );
+ temp64 += ( ((long long)psLPC_Q14[ -9 ]) * ((long long)a_Q12_9) );
if( predictLPCOrder == 16 ) {
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -10 ], a_Q12[ 10 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -11 ], a_Q12[ 11 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -12 ], a_Q12[ 12 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -13 ], a_Q12[ 13 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -14 ], a_Q12[ 14 ] );
- LPC_pred_Q14 = silk_SMLAWB( LPC_pred_Q14, psLPC_Q14[ -15 ], a_Q12[ 15 ] );
+ temp64 += ( ((long long)psLPC_Q14[ -10 ]) * ((long long)a_Q12_10) );
+ temp64 += ( ((long long)psLPC_Q14[ -11 ]) * ((long long)a_Q12_11) );
+ temp64 += ( ((long long)psLPC_Q14[ -12 ]) * ((long long)a_Q12_12) );
+ temp64 += ( ((long long)psLPC_Q14[ -13 ]) * ((long long)a_Q12_13) );
+ temp64 += ( ((long long)psLPC_Q14[ -14 ]) * ((long long)a_Q12_14) );
+ temp64 += ( ((long long)psLPC_Q14[ -15 ]) * ((long long)a_Q12_15) );
}
+ temp64 = temp64 >> 16;
+ LPC_pred_Q14 = temp64;
+
+ LPC_pred_Q14 = LPC_pred_Q14 + silk_RSHIFT( predictLPCOrder, 1 );
LPC_pred_Q14 = silk_LSHIFT( LPC_pred_Q14, 4 ); /* Q10 -> Q14 */
/* Noise shape feedback */
@@ -424,22 +459,31 @@ static OPUS_INLINE void silk_noise_shape_quantizer_del_dec(
/* Output of allpass section */
tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ 0 ], psDD->sAR2_Q14[ 1 ] - tmp2, warping_Q16 );
psDD->sAR2_Q14[ 0 ] = tmp2;
- n_AR_Q14 = silk_RSHIFT( shapingLPCOrder, 1 );
- n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp2, AR_shp_Q13[ 0 ] );
+
+ temp64 = ( ((long long)tmp2) * ((long long)AR_shp_Q13[ 0 ]) );
+
+ prev = psDD->sAR2_Q14[ 1 ];
+
/* Loop over allpass sections */
for( j = 2; j < shapingLPCOrder; j += 2 ) {
+ cur = psDD->sAR2_Q14[ j ];
+ next = psDD->sAR2_Q14[ j+1 ];
/* Output of allpass section */
- tmp2 = silk_SMLAWB( psDD->sAR2_Q14[ j - 1 ], psDD->sAR2_Q14[ j + 0 ] - tmp1, warping_Q16 );
+ tmp2 = silk_SMLAWB( prev, cur - tmp1, warping_Q16 );
psDD->sAR2_Q14[ j - 1 ] = tmp1;
- n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp1, AR_shp_Q13[ j - 1 ] );
+ temp64 += ( ((long long)tmp1) * ((long long)AR_shp_Q13[ j - 1 ]) );
+ temp64 += ( ((long long)tmp2) * ((long long)AR_shp_Q13[ j ]) );
/* Output of allpass section */
- tmp1 = silk_SMLAWB( psDD->sAR2_Q14[ j + 0 ], psDD->sAR2_Q14[ j + 1 ] - tmp2, warping_Q16 );
+ tmp1 = silk_SMLAWB( cur, next - tmp2, warping_Q16 );
psDD->sAR2_Q14[ j + 0 ] = tmp2;
- n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp2, AR_shp_Q13[ j ] );
+ prev = next;
}
psDD->sAR2_Q14[ shapingLPCOrder - 1 ] = tmp1;
- n_AR_Q14 = silk_SMLAWB( n_AR_Q14, tmp1, AR_shp_Q13[ shapingLPCOrder - 1 ] );
+ temp64 += ( ((long long)tmp1) * ((long long)AR_shp_Q13[ shapingLPCOrder - 1 ]) );
+ temp64 = temp64 >> 16;
+ n_AR_Q14 = temp64;
+ n_AR_Q14 += silk_RSHIFT( shapingLPCOrder, 1 );
n_AR_Q14 = silk_LSHIFT( n_AR_Q14, 1 ); /* Q11 -> Q12 */
n_AR_Q14 = silk_SMLAWB( n_AR_Q14, psDD->LF_AR_Q14, Tilt_Q14 ); /* Q12 */
n_AR_Q14 = silk_LSHIFT( n_AR_Q14, 2 ); /* Q12 -> Q14 */
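
In silk/NSQ_del_dec.c the LTP, LPC and noise-shaping predictions switch from chains of silk_SMLAWB() to one 64-bit accumulation with a single >>16, and the bias terms (the initial 2 for the LTP prediction, predictLPCOrder/2 and shapingLPCOrder/2 for the others) are added after that shift; psSampleState also becomes a fixed on-stack array instead of an ALLOC(). The per-term versus single truncation is where the SILK-side differences come from; a stand-alone sketch (not code from the patch, example values only):

    #include <stdio.h>

    static int smulwb(int a, short b)             /* (a * b) >> 16, full precision */
    {
        return (int)(((long long)a * b) >> 16);
    }

    int main(void)
    {
        int   s[3] = { 100000, -70000, 35000 };   /* example Q14 state values */
        short a[3] = { 1500,   -900,   400   };   /* example Q12 coefficients */
        int chained = 0, k;
        long long acc = 0;

        for (k = 0; k < 3; k++) {
            chained += smulwb(s[k], a[k]);        /* truncate at every step   */
            acc     += (long long)s[k] * a[k];    /* keep full precision      */
        }
        printf("chained: %d  fused: %d\n", chained, (int)(acc >> 16));
        return 0;
    }
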
diff --git a/silk/fixed/warped_autocorrelation_FIX.c b/silk/fixed/warped_autocorrelation_FIX.c
index a4a579b1..5f7f910b 100644
--- a/silk/fixed/warped_autocorrelation_FIX.c
+++ b/silk/fixed/warped_autocorrelation_FIX.c
@@ -45,43 +45,126 @@ void silk_warped_autocorrelation_FIX(
)
{
opus_int n, i, lsh;
- opus_int32 tmp1_QS, tmp2_QS;
+ opus_int32 tmp1_QS, tmp2_QS, tmp3_QS, tmp4_QS, tmp5_QS, tmp6_QS, tmp7_QS, tmp8_QS,start_1, start_2, start_3;
opus_int32 state_QS[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0 };
opus_int64 corr_QC[ MAX_SHAPE_LPC_ORDER + 1 ] = { 0 };
+ opus_int64 temp64;
+ long long temp2_64=0;
+
+ opus_int32 val;
+ val = 2 * QS - QC;
+
/* Order must be even */
silk_assert( ( order & 1 ) == 0 );
silk_assert( 2 * QS - QC >= 0 );
/* Loop over samples */
- for( n = 0; n < length; n++ ) {
+ for( n = 0; n < length; n=n+4 ) {
+
+ tmp1_QS = silk_LSHIFT32( (opus_int32)input[ n ], QS );
+ start_1 = tmp1_QS;
+ tmp3_QS = silk_LSHIFT32( (opus_int32)input[ n+1], QS );
+ start_2 = tmp3_QS;
+ tmp5_QS = silk_LSHIFT32( (opus_int32)input[ n+2], QS );
+ start_3 = tmp5_QS;
+ tmp7_QS = silk_LSHIFT32( (opus_int32)input[ n+3], QS );
+
+
+ /* Loop over allpass sections */
+ for( i = 0; i < order; i += 2 ) {
+
+ /* Output of allpass section */
+ tmp2_QS = silk_SMLAWB( state_QS[ i ], state_QS[ i + 1 ] - tmp1_QS, warping_Q16 );
+ corr_QC[ i ] += ( ((long long)tmp1_QS) * ((long long)start_1) );
+ tmp4_QS = silk_SMLAWB( tmp1_QS, tmp2_QS - tmp3_QS, warping_Q16 );
+ corr_QC[ i ] += ( ((long long)tmp3_QS) * ((long long)start_2) );
+
+ tmp6_QS = silk_SMLAWB( tmp3_QS, tmp4_QS - tmp5_QS, warping_Q16 );
+ corr_QC[ i ] += ( ((long long)tmp5_QS) * ((long long)start_3) );
+
+ tmp8_QS = silk_SMLAWB( tmp5_QS, tmp6_QS - tmp7_QS, warping_Q16 );
+ state_QS[ i ] = tmp7_QS;
+ corr_QC[ i ] += ( ((long long)tmp7_QS) * ((long long)state_QS[0]) );
+
+ /* Output of allpass section */
+ tmp1_QS = silk_SMLAWB( state_QS[ i + 1 ], state_QS[ i + 2 ] - tmp2_QS, warping_Q16 );
+ corr_QC[ i+1 ] += ( ((long long)tmp2_QS) * ((long long)start_1) );
+
+ tmp3_QS = silk_SMLAWB( tmp2_QS, tmp1_QS - tmp4_QS, warping_Q16 );
+ corr_QC[ i+1 ] += ( ((long long)tmp4_QS) * ((long long)start_2) );
+
+ tmp5_QS = silk_SMLAWB( tmp4_QS, tmp3_QS - tmp6_QS, warping_Q16 );
+ corr_QC[ i+1 ] += ( ((long long)tmp6_QS) * ((long long)start_3) );
+
+ tmp7_QS = silk_SMLAWB( tmp6_QS, tmp5_QS - tmp8_QS, warping_Q16 );
+ state_QS[ i + 1 ] = tmp8_QS;
+ corr_QC[ i+1 ] += ( ((long long)tmp8_QS) * ((long long)state_QS[ 0 ]) );
+
+ }
+ state_QS[ order ] = tmp7_QS;
+ corr_QC[ order ] += ( ((long long)tmp1_QS) * ((long long)start_1) );
+ corr_QC[ order ] += ( ((long long)tmp3_QS) * ((long long)start_2) );
+ corr_QC[ order ] += ( ((long long)tmp5_QS) * ((long long)start_3) );
+ corr_QC[ order ] += ( ((long long)tmp7_QS) * ((long long)state_QS[ 0 ]) );
+ }
+
+ for(;n< length; n++ ) {
tmp1_QS = silk_LSHIFT32( (opus_int32)input[ n ], QS );
/* Loop over allpass sections */
for( i = 0; i < order; i += 2 ) {
/* Output of allpass section */
tmp2_QS = silk_SMLAWB( state_QS[ i ], state_QS[ i + 1 ] - tmp1_QS, warping_Q16 );
- state_QS[ i ] = tmp1_QS;
- corr_QC[ i ] += silk_RSHIFT64( silk_SMULL( tmp1_QS, state_QS[ 0 ] ), 2 * QS - QC );
+ state_QS[ i ] = tmp1_QS;
+ corr_QC[ i ] += ( ((long long)tmp1_QS) * ((long long)state_QS[ 0 ]) );
+
/* Output of allpass section */
tmp1_QS = silk_SMLAWB( state_QS[ i + 1 ], state_QS[ i + 2 ] - tmp2_QS, warping_Q16 );
state_QS[ i + 1 ] = tmp2_QS;
- corr_QC[ i + 1 ] += silk_RSHIFT64( silk_SMULL( tmp2_QS, state_QS[ 0 ] ), 2 * QS - QC );
+ corr_QC[ i+1 ] += ( ((long long)tmp2_QS) * ((long long)state_QS[ 0 ]) );
}
state_QS[ order ] = tmp1_QS;
- corr_QC[ order ] += silk_RSHIFT64( silk_SMULL( tmp1_QS, state_QS[ 0 ] ), 2 * QS - QC );
+ corr_QC[ order ] += ( ((long long)tmp1_QS) * ((long long)state_QS[ 0 ]) );
}
- lsh = silk_CLZ64( corr_QC[ 0 ] ) - 35;
+ temp64 = corr_QC[ 0 ];
+ if(val >= 0)
+ temp64 = temp64 >> val;
+ else
+ temp64 = temp64 << (-val);
+
+ lsh = silk_CLZ64( temp64 ) - 35;
lsh = silk_LIMIT( lsh, -12 - QC, 30 - QC );
*scale = -( QC + lsh );
silk_assert( *scale >= -30 && *scale <= 12 );
if( lsh >= 0 ) {
for( i = 0; i < order + 1; i++ ) {
- corr[ i ] = (opus_int32)silk_CHECK_FIT32( silk_LSHIFT64( corr_QC[ i ], lsh ) );
+ temp64 = corr_QC[ i ];
+ if(val >= 0)
+ temp64 = temp64 >> val;
+ else
+ temp64 = temp64 << (-val);
+
+ if( (-lsh) >= 0)
+ temp2_64 = temp64 >> (-lsh);
+ else
+ temp2_64 = temp64 << (lsh);
+
+ corr[ i ] = (opus_int32)silk_CHECK_FIT32( temp2_64 );
}
} else {
for( i = 0; i < order + 1; i++ ) {
- corr[ i ] = (opus_int32)silk_CHECK_FIT32( silk_RSHIFT64( corr_QC[ i ], -lsh ) );
+ temp64 = corr_QC[ i ];
+ if(val >= 0)
+ temp64 = temp64 >> val;
+ else
+ temp64 = temp64 << (-val);
+
+ if( (-lsh) >= 0)
+ temp2_64 = temp64 >> (-lsh);
+ else
+ temp2_64 = temp64 << (lsh);
+ corr[ i ] = (opus_int32)silk_CHECK_FIT32( temp2_64 );
}
}
silk_assert( corr_QC[ 0 ] >= 0 ); /* If breaking, decrease QC*/
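
In silk/fixed/warped_autocorrelation_FIX.c the main loop now processes four input samples per pass through the allpass chain and keeps corr_QC[] at full 2*QS precision; the (2*QS - QC) shift (val) is applied only when the correlations are read out, together with the lsh normalization shift. A small sketch of the deferred-shift idea, with hypothetical names:

    static long long warped_corr_term(const int *state, const int *in, int n, int shift)
    {
        long long acc = 0;
        int k;
        for (k = 0; k < n; k++)
            acc += (long long)state[k] * in[k];   /* products scaled by 2*QS */
        /* shift once at the end instead of once per product */
        return shift >= 0 ? acc >> shift : acc << -shift;
    }
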
diff --git a/silk/macros.h b/silk/macros.h
index a84e5a5d..25506e15 100644
--- a/silk/macros.h
+++ b/silk/macros.h
@@ -38,10 +38,20 @@ POSSIBILITY OF SUCH DAMAGE.
/* This is an OPUS_INLINE header file for general platform. */
/* (a32 * (opus_int32)((opus_int16)(b32))) >> 16 output have to be 32bit int */
-#define silk_SMULWB(a32, b32) ((((a32) >> 16) * (opus_int32)((opus_int16)(b32))) + ((((a32) & 0x0000FFFF) * (opus_int32)((opus_int16)(b32))) >> 16))
+static inline int silk_SMULWB(int a, int b)
+{
+ long long ac;
+ int c;
+
+ ac = ((long long) a * (long long)(opus_int16)b);
+ ac = ac >> 16;
+ c = ac;
+
+ return c;
+}
/* a32 + (b32 * (opus_int32)((opus_int16)(c32))) >> 16 output have to be 32bit int */
-#define silk_SMLAWB(a32, b32, c32) ((a32) + ((((b32) >> 16) * (opus_int32)((opus_int16)(c32))) + ((((b32) & 0x0000FFFF) * (opus_int32)((opus_int16)(c32))) >> 16)))
+#define silk_SMLAWB(a32, b32, c32) ((a32) + silk_SMULWB(b32, c32))
/* (a32 * (b32 >> 16)) >> 16 */
#define silk_SMULWT(a32, b32) (((a32) >> 16) * ((b32) >> 16) + ((((a32) & 0x0000FFFF) * ((b32) >> 16)) >> 16))
@@ -65,10 +75,31 @@ POSSIBILITY OF SUCH DAMAGE.
#define silk_SMLAL(a64, b32, c32) (silk_ADD64((a64), ((opus_int64)(b32) * (opus_int64)(c32))))
/* (a32 * b32) >> 16 */
-#define silk_SMULWW(a32, b32) silk_MLA(silk_SMULWB((a32), (b32)), (a32), silk_RSHIFT_ROUND((b32), 16))
+static inline int silk_SMULWW(int a, int b)
+{
+ long long ac;
+ int c;
+
+ ac = ((long long) a * (long long)b);
+ ac = ac >> 16;
+ c = ac;
+
+ return c;
+}
/* a32 + ((b32 * c32) >> 16) */
-#define silk_SMLAWW(a32, b32, c32) silk_MLA(silk_SMLAWB((a32), (b32), (c32)), (b32), silk_RSHIFT_ROUND((c32), 16))
+static inline int silk_SMLAWW(int a, int b, int c)
+{
+ long long ac;
+ int res;
+
+ ac = ((long long)b * (long long)c);
+ ac = ac >> 16;
+ res = ac;
+ res += a;
+
+ return res;
+}
/* add/subtract with output saturated */
#define silk_ADD_SAT32(a, b) ((((opus_uint32)(a) + (opus_uint32)(b)) & 0x80000000) == 0 ? \