author | Josh Coalson <jcoalson@users.sourceforge.net> | 2004-11-09 01:34:01 +0000 |
---|---|---|
committer | Josh Coalson <jcoalson@users.sourceforge.net> | 2004-11-09 01:34:01 +0000 |
commit | 5f2b46d0314085189627e8b47453fd99ee172cb0 (patch) | |
tree | 67b4bb725c7c99f6908933922e06ea50cdc5072a /src/libFLAC/fixed.c | |
parent | 8f64899d97009613d9c20e7cea821b9c1a40639a (diff) | |
download | flac-5f2b46d0314085189627e8b47453fd99ee172cb0.tar.gz | |
finished integerized flavor of library; sections with floating point are either #ifdef'd out or written in fixed-point
Diffstat (limited to 'src/libFLAC/fixed.c')
-rw-r--r-- | src/libFLAC/fixed.c | 191 |
1 file changed, 191 insertions, 0 deletions
diff --git a/src/libFLAC/fixed.c b/src/libFLAC/fixed.c
index abf74d1d..1c46c331 100644
--- a/src/libFLAC/fixed.c
+++ b/src/libFLAC/fixed.c
@@ -30,6 +30,7 @@
  */
 
 #include <math.h>
+#include "private/bitmath.h"
 #include "private/fixed.h"
 #include "FLAC/assert.h"
 
@@ -48,7 +49,177 @@
 #endif
 
 #define local_abs(x) ((unsigned)((x)<0? -(x) : (x)))
 
+#ifdef FLAC__INTEGER_ONLY_LIBRARY
+/* rbps stands for residual bits per sample
+ *
+ *             (ln(2) * err)
+ * rbps = log  (-----------)
+ *           2 (     n     )
+ */
+static FLAC__fixedpoint local__compute_rbps_integerized(FLAC__uint32 err, FLAC__uint32 n)
+{
+	FLAC__uint32 rbps;
+	unsigned bits; /* the number of bits required to represent a number */
+	int fracbits; /* the number of bits of rbps that comprise the fractional part */
+
+	FLAC__ASSERT(sizeof(rbps) == sizeof(FLAC__fixedpoint));
+	FLAC__ASSERT(err > 0);
+	FLAC__ASSERT(n > 0);
+
+	FLAC__ASSERT(n <= FLAC__MAX_BLOCK_SIZE);
+	if(err <= n)
+		return 0;
+	/*
+	 * The above two things tell us 1) n fits in 16 bits; 2) err/n > 1.
+	 * These allow us later to know we won't lose too much precision in the
+	 * fixed-point division (err<<fracbits)/n.
+	 */
+
+	fracbits = (8*sizeof(err)) - (FLAC__bitmath_ilog2(err)+1);
+
+	err <<= fracbits;
+	err /= n;
+	/* err now holds err/n with fracbits fractional bits */
+
+	/*
+	 * Whittle err down to 16 bits max. 16 significant bits is enough for
+	 * our purposes.
+	 */
+	FLAC__ASSERT(err > 0);
+	bits = FLAC__bitmath_ilog2(err)+1;
+	if(bits > 16) {
+		err >>= (bits-16);
+		fracbits -= (bits-16);
+	}
+	rbps = (FLAC__uint32)err;
+
+	/* Multiply by fixed-point version of ln(2), with 16 fractional bits */
+	rbps *= FLAC__FP_LN2;
+	fracbits += 16;
+	FLAC__ASSERT(fracbits >= 0);
+
+	/* FLAC__fixedpoint_log2 requires fracbits%4 to be 0 */
+	{
+		const int f = fracbits & 3;
+		if(f) {
+			rbps >>= f;
+			fracbits -= f;
+		}
+	}
+
+	rbps = FLAC__fixedpoint_log2(rbps, fracbits, (unsigned)(-1));
+
+	if(rbps == 0)
+		return 0;
+
+	/*
+	 * The return value must have 16 fractional bits. Since the whole part
+	 * of the base-2 log of a 32 bit number must fit in 5 bits, and fracbits
+	 * must be >= -3, these assertion allows us to be able to shift rbps
+	 * left if necessary to get 16 fracbits without losing any bits of the
+	 * whole part of rbps.
+	 *
+	 * There is a slight chance due to accumulated error that the whole part
+	 * will require 6 bits, so we use 6 in the assertion. Really though as
+	 * long as it fits in 13 bits (32 - (16 - (-3))) we are fine.
+	 */
+	FLAC__ASSERT((int)FLAC__bitmath_ilog2(rbps)+1 <= fracbits + 6);
+	FLAC__ASSERT(fracbits >= -3);
+
+	/* now shift the decimal point into place */
+	if(fracbits < 16)
+		return rbps << (16-fracbits);
+	else if(fracbits > 16)
+		return rbps >> (fracbits-16);
+	else
+		return rbps;
+}
+
+static FLAC__fixedpoint local__compute_rbps_wide_integerized(FLAC__uint64 err, FLAC__uint32 n)
+{
+	FLAC__uint32 rbps;
+	unsigned bits; /* the number of bits required to represent a number */
+	int fracbits; /* the number of bits of rbps that comprise the fractional part */
+
+	FLAC__ASSERT(sizeof(rbps) == sizeof(FLAC__fixedpoint));
+	FLAC__ASSERT(err > 0);
+	FLAC__ASSERT(n > 0);
+
+	FLAC__ASSERT(n <= FLAC__MAX_BLOCK_SIZE);
+	if(err <= n)
+		return 0;
+	/*
+	 * The above two things tell us 1) n fits in 16 bits; 2) err/n > 1.
+	 * These allow us later to know we won't lose too much precision in the
+	 * fixed-point division (err<<fracbits)/n.
+	 */
+
+	fracbits = (8*sizeof(err)) - (FLAC__bitmath_ilog2_wide(err)+1);
+
+	err <<= fracbits;
+	err /= n;
+	/* err now holds err/n with fracbits fractional bits */
+
+	/*
+	 * Whittle err down to 16 bits max. 16 significant bits is enough for
+	 * our purposes.
+	 */
+	FLAC__ASSERT(err > 0);
+	bits = FLAC__bitmath_ilog2_wide(err)+1;
+	if(bits > 16) {
+		err >>= (bits-16);
+		fracbits -= (bits-16);
+	}
+	rbps = (FLAC__uint32)err;
+
+	/* Multiply by fixed-point version of ln(2), with 16 fractional bits */
+	rbps *= FLAC__FP_LN2;
+	fracbits += 16;
+	FLAC__ASSERT(fracbits >= 0);
+
+	/* FLAC__fixedpoint_log2 requires fracbits%4 to be 0 */
+	{
+		const int f = fracbits & 3;
+		if(f) {
+			rbps >>= f;
+			fracbits -= f;
+		}
+	}
+
+	rbps = FLAC__fixedpoint_log2(rbps, fracbits, (unsigned)(-1));
+
+	if(rbps == 0)
+		return 0;
+
+	/*
+	 * The return value must have 16 fractional bits. Since the whole part
+	 * of the base-2 log of a 32 bit number must fit in 5 bits, and fracbits
+	 * must be >= -3, these assertion allows us to be able to shift rbps
+	 * left if necessary to get 16 fracbits without losing any bits of the
+	 * whole part of rbps.
+	 *
+	 * There is a slight chance due to accumulated error that the whole part
+	 * will require 6 bits, so we use 6 in the assertion. Really though as
+	 * long as it fits in 13 bits (32 - (16 - (-3))) we are fine.
+	 */
+	FLAC__ASSERT((int)FLAC__bitmath_ilog2(rbps)+1 <= fracbits + 6);
+	FLAC__ASSERT(fracbits >= -3);
+
+	/* now shift the decimal point into place */
+	if(fracbits < 16)
+		return rbps << (16-fracbits);
+	else if(fracbits > 16)
+		return rbps >> (fracbits-16);
+	else
+		return rbps;
+}
+#endif
+
+#ifndef FLAC__INTEGER_ONLY_LIBRARY
 unsigned FLAC__fixed_compute_best_predictor(const FLAC__int32 data[], unsigned data_len, FLAC__float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
+#else
+unsigned FLAC__fixed_compute_best_predictor(const FLAC__int32 data[], unsigned data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
+#endif
 {
 	FLAC__int32 last_error_0 = data[-1];
 	FLAC__int32 last_error_1 = data[-1] - data[-2];
@@ -85,16 +256,28 @@ unsigned FLAC__fixed_compute_best_predictor(const FLAC__int32 data[], unsigned d
 	FLAC__ASSERT(data_len > 0 || total_error_2 == 0);
 	FLAC__ASSERT(data_len > 0 || total_error_3 == 0);
 	FLAC__ASSERT(data_len > 0 || total_error_4 == 0);
+#ifndef FLAC__INTEGER_ONLY_LIBRARY
 	residual_bits_per_sample[0] = (FLAC__float)((total_error_0 > 0) ? log(M_LN2 * (FLAC__double)total_error_0 / (FLAC__double)data_len) / M_LN2 : 0.0);
 	residual_bits_per_sample[1] = (FLAC__float)((total_error_1 > 0) ? log(M_LN2 * (FLAC__double)total_error_1 / (FLAC__double)data_len) / M_LN2 : 0.0);
 	residual_bits_per_sample[2] = (FLAC__float)((total_error_2 > 0) ? log(M_LN2 * (FLAC__double)total_error_2 / (FLAC__double)data_len) / M_LN2 : 0.0);
 	residual_bits_per_sample[3] = (FLAC__float)((total_error_3 > 0) ? log(M_LN2 * (FLAC__double)total_error_3 / (FLAC__double)data_len) / M_LN2 : 0.0);
 	residual_bits_per_sample[4] = (FLAC__float)((total_error_4 > 0) ? log(M_LN2 * (FLAC__double)total_error_4 / (FLAC__double)data_len) / M_LN2 : 0.0);
+#else
+	residual_bits_per_sample[0] = (total_error_0 > 0) ? local__compute_rbps_integerized(total_error_0, data_len) : 0;
+	residual_bits_per_sample[1] = (total_error_1 > 0) ? local__compute_rbps_integerized(total_error_1, data_len) : 0;
+	residual_bits_per_sample[2] = (total_error_2 > 0) ? local__compute_rbps_integerized(total_error_2, data_len) : 0;
+	residual_bits_per_sample[3] = (total_error_3 > 0) ? local__compute_rbps_integerized(total_error_3, data_len) : 0;
+	residual_bits_per_sample[4] = (total_error_4 > 0) ? local__compute_rbps_integerized(total_error_4, data_len) : 0;
+#endif
 
 	return order;
 }
 
+#ifndef FLAC__INTEGER_ONLY_LIBRARY
 unsigned FLAC__fixed_compute_best_predictor_wide(const FLAC__int32 data[], unsigned data_len, FLAC__float residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
+#else
+unsigned FLAC__fixed_compute_best_predictor_wide(const FLAC__int32 data[], unsigned data_len, FLAC__fixedpoint residual_bits_per_sample[FLAC__MAX_FIXED_ORDER+1])
+#endif
 {
 	FLAC__int32 last_error_0 = data[-1];
 	FLAC__int32 last_error_1 = data[-1] - data[-2];
@@ -135,6 +318,7 @@ unsigned FLAC__fixed_compute_best_predictor_wide(const FLAC__int32 data[], unsig
 	FLAC__ASSERT(data_len > 0 || total_error_2 == 0);
 	FLAC__ASSERT(data_len > 0 || total_error_3 == 0);
 	FLAC__ASSERT(data_len > 0 || total_error_4 == 0);
+#ifndef FLAC__INTEGER_ONLY_LIBRARY
 #if defined _MSC_VER || defined __MINGW32__
 	/* with VC++ you have to spoon feed it the casting */
 	residual_bits_per_sample[0] = (FLAC__float)((total_error_0 > 0) ? log(M_LN2 * (FLAC__double)(FLAC__int64)total_error_0 / (FLAC__double)data_len) / M_LN2 : 0.0);
@@ -149,6 +333,13 @@ unsigned FLAC__fixed_compute_best_predictor_wide(const FLAC__int32 data[], unsig
 	residual_bits_per_sample[3] = (FLAC__float)((total_error_3 > 0) ? log(M_LN2 * (FLAC__double)total_error_3 / (FLAC__double)data_len) / M_LN2 : 0.0);
 	residual_bits_per_sample[4] = (FLAC__float)((total_error_4 > 0) ? log(M_LN2 * (FLAC__double)total_error_4 / (FLAC__double)data_len) / M_LN2 : 0.0);
 #endif
+#else
+	residual_bits_per_sample[0] = (total_error_0 > 0) ? local__compute_rbps_wide_integerized(total_error_0, data_len) : 0;
+	residual_bits_per_sample[1] = (total_error_1 > 0) ? local__compute_rbps_wide_integerized(total_error_1, data_len) : 0;
+	residual_bits_per_sample[2] = (total_error_2 > 0) ? local__compute_rbps_wide_integerized(total_error_2, data_len) : 0;
+	residual_bits_per_sample[3] = (total_error_3 > 0) ? local__compute_rbps_wide_integerized(total_error_3, data_len) : 0;
+	residual_bits_per_sample[4] = (total_error_4 > 0) ? local__compute_rbps_wide_integerized(total_error_4, data_len) : 0;
+#endif
 
 	return order;
 }
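
Below is a rough standalone sketch (not part of the commit) of the same idea the patch implements: estimating residual bits per sample as log2((ln 2 · err) / n) entirely in integer arithmetic, returning a Q16.16 fixed-point value. The helper names (`ilog2_u32`, `log2_q16`, `rbps_q16`) are invented for illustration, a generic square-and-shift base-2 log stands in for FLAC's own `FLAC__fixedpoint_log2()`, and `FP_LN2_Q16` plays the role of `FLAC__FP_LN2`; the real code above additionally rescales `err` to at most 16 significant bits and tracks a variable number of fractional bits instead of clamping as done here.

```c
#include <math.h>    /* only for the floating-point reference in main() */
#include <stdint.h>
#include <stdio.h>

#ifndef M_LN2
#define M_LN2 0.69314718055994530942
#endif

#define FP_LN2_Q16 45426u  /* round(ln(2) * 2^16); same role as FLAC__FP_LN2 */

/* floor(log2(v)) for v > 0; stands in for FLAC__bitmath_ilog2() */
static unsigned ilog2_u32(uint32_t v)
{
	unsigned l = 0;
	while(v >>= 1)
		l++;
	return l;
}

/* log2 of a Q16.16 value, returned as Q16.16 (negative if the input is < 1.0).
 * Generic square-and-shift method, used here instead of FLAC__fixedpoint_log2(). */
static int32_t log2_q16(uint32_t x_q16)
{
	int b = (int)ilog2_u32(x_q16);
	int32_t result = (int32_t)(b - 16) * 65536;  /* integer part of the log */
	/* mantissa in [1,2), kept in Q2.30 for headroom while squaring */
	uint64_t m = (b <= 30) ? ((uint64_t)x_q16 << (30 - b)) : ((uint64_t)x_q16 >> (b - 30));
	for(int i = 15; i >= 0; i--) {
		m = (m * m) >> 30;            /* square: value now in [1,4) */
		if(m >= (2ull << 30)) {       /* >= 2.0 means this fractional bit is 1 */
			m >>= 1;
			result += 1 << i;
		}
	}
	return result;
}

/* rbps ~= log2((ln 2 * err) / n) in Q16.16; 0 when err <= n, as in the patch */
static uint32_t rbps_q16(uint32_t err, uint32_t n)
{
	uint64_t x_q16;

	if(err == 0 || err <= n)
		return 0;
	/* (err * ln2) / n as Q16.16; fits in 64 bits for any 32-bit err and n >= 1 */
	x_q16 = ((uint64_t)err * FP_LN2_Q16) / n;
	if(x_q16 > 0xFFFFFFFFu)       /* crude clamp; FLAC rescales err instead */
		x_q16 = 0xFFFFFFFFu;
	{
		int32_t l = log2_q16((uint32_t)x_q16);
		return l > 0 ? (uint32_t)l : 0;  /* the ln(2) factor can pull the log below 0 */
	}
}

int main(void)
{
	uint32_t err = 123456, n = 4096;
	double reference = log2(M_LN2 * (double)err / (double)n);
	printf("fixed-point rbps = %.4f  (float reference = %.4f)\n",
	       rbps_q16(err, n) / 65536.0, reference);
	return 0;
}
```

Built with something like `cc sketch.c -lm`, the fixed-point result should closely track the floating-point reference used in the non-integerized path, which is ample precision for ranking the five fixed predictor orders.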