author     Stefano Sabatini <stefano.sabatini-lala@poste.it>   2010-09-04 09:59:08 +0000
committer  Stefano Sabatini <stefano.sabatini-lala@poste.it>   2010-09-04 09:59:08 +0000
commit     7160bb716be07254bde383b1b2298eeff6a3e641 (patch)
tree       f01c7a2125ba5e8bdc866a34b401bc4b9724911c /libavcodec
parent     55127e7b494a0221d79501867e9b81ce185ec7b8 (diff)
download   ffmpeg-7160bb716be07254bde383b1b2298eeff6a3e641.tar.gz
Rename FF_MM_ symbols related to CPU feature flags to AV_CPU_FLAG_ symbols, and move them from libavcodec/avcodec.h to libavutil/cpu.h.

Originally committed as revision 25040 to svn://svn.ffmpeg.org/ffmpeg/trunk
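The new names are drop-in replacements for the old FF_MM_ values; only the header providing them changes. A minimal, illustrative sketch of a DSP init routine after this commit (the function itself is hypothetical; mm_support() and the headers are the ones used throughout this tree):

    #include "libavutil/cpu.h"        /* AV_CPU_FLAG_* now live here */
    #include "libavcodec/dsputil.h"   /* DSPContext, mm_support() */

    void example_dsp_init(DSPContext *c, AVCodecContext *avctx)
    {
        int mm_flags = mm_support();         /* returns AV_CPU_FLAG_* bits */

        if (mm_flags & AV_CPU_FLAG_MMX2) {   /* formerly FF_MM_MMX2 */
            /* install MMX2-optimized function pointers in *c */
        }
        if (mm_flags & AV_CPU_FLAG_SSE2) {   /* formerly FF_MM_SSE2 */
            /* install SSE2-optimized function pointers in *c */
        }
    }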
Diffstat (limited to 'libavcodec')
-rw-r--r--  libavcodec/arm/dsputil_init_arm.c     |  2
-rw-r--r--  libavcodec/arm/dsputil_iwmmxt.c       |  7
-rw-r--r--  libavcodec/arm/mpegvideo_iwmmxt.c     |  3
-rw-r--r--  libavcodec/audioconvert.h             |  3
-rw-r--r--  libavcodec/avcodec.h                  | 42
-rw-r--r--  libavcodec/dct-test.c                 | 22
-rw-r--r--  libavcodec/h263dec.c                  |  2
-rw-r--r--  libavcodec/motion-test.c              |  6
-rw-r--r--  libavcodec/ppc/dsputil_ppc.c          |  2
-rw-r--r--  libavcodec/x86/cavsdsp_mmx.c          |  4
-rw-r--r--  libavcodec/x86/cpuid.c                | 52
-rw-r--r--  libavcodec/x86/dnxhd_mmx.c            |  2
-rw-r--r--  libavcodec/x86/dsputil_mmx.c          | 53
-rw-r--r--  libavcodec/x86/dsputilenc_mmx.c       | 18
-rw-r--r--  libavcodec/x86/fft.c                  |  8
-rw-r--r--  libavcodec/x86/h264_intrapred_init.c  | 10
-rw-r--r--  libavcodec/x86/h264dsp_mmx.c          | 12
-rw-r--r--  libavcodec/x86/motion_est_mmx.c       |  6
-rw-r--r--  libavcodec/x86/mpegaudiodec_mmx.c     |  2
-rw-r--r--  libavcodec/x86/mpegvideo_mmx.c        | 10
-rw-r--r--  libavcodec/x86/snowdsp_mmx.c          |  7
-rw-r--r--  libavcodec/x86/vc1dsp_mmx.c           | 12
-rw-r--r--  libavcodec/x86/vp56dsp_init.c         |  4
-rw-r--r--  libavcodec/x86/vp8dsp-init.c          | 14
24 files changed, 154 insertions(+), 149 deletions(-)
diff --git a/libavcodec/arm/dsputil_init_arm.c b/libavcodec/arm/dsputil_init_arm.c
index c9c335112d..27b94613ef 100644
--- a/libavcodec/arm/dsputil_init_arm.c
+++ b/libavcodec/arm/dsputil_init_arm.c
@@ -75,7 +75,7 @@ static void simple_idct_arm_add(uint8_t *dest, int line_size, DCTELEM *block)
int mm_support(void)
{
- return HAVE_IWMMXT * FF_MM_IWMMXT;
+ return HAVE_IWMMXT * AV_CPU_FLAG_IWMMXT;
}
void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx)
diff --git a/libavcodec/arm/dsputil_iwmmxt.c b/libavcodec/arm/dsputil_iwmmxt.c
index a23ccfc58f..758d9cbfea 100644
--- a/libavcodec/arm/dsputil_iwmmxt.c
+++ b/libavcodec/arm/dsputil_iwmmxt.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/cpu.h"
#include "libavcodec/dsputil.h"
#define DEF(x, y) x ## _no_rnd_ ## y ##_iwmmxt
@@ -153,16 +154,16 @@ static void nop(uint8_t *block, const uint8_t *pixels, int line_size, int h)
void ff_dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx)
{
- int mm_flags = FF_MM_IWMMXT; /* multimedia extension flags */
+ int mm_flags = AV_CPU_FLAG_IWMMXT; /* multimedia extension flags */
if (avctx->dsp_mask) {
- if (avctx->dsp_mask & FF_MM_FORCE)
+ if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
mm_flags |= (avctx->dsp_mask & 0xffff);
else
mm_flags &= ~(avctx->dsp_mask & 0xffff);
}
- if (!(mm_flags & FF_MM_IWMMXT)) return;
+ if (!(mm_flags & AV_CPU_FLAG_IWMMXT)) return;
c->add_pixels_clamped = add_pixels_clamped_iwmmxt;
diff --git a/libavcodec/arm/mpegvideo_iwmmxt.c b/libavcodec/arm/mpegvideo_iwmmxt.c
index 9e3878fa57..0a288630ab 100644
--- a/libavcodec/arm/mpegvideo_iwmmxt.c
+++ b/libavcodec/arm/mpegvideo_iwmmxt.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/mpegvideo.h"
@@ -111,7 +112,7 @@ static void dct_unquantize_h263_inter_iwmmxt(MpegEncContext *s,
void MPV_common_init_iwmmxt(MpegEncContext *s)
{
- if (!(mm_flags & FF_MM_IWMMXT)) return;
+ if (!(mm_flags & AV_CPU_FLAG_IWMMXT)) return;
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_iwmmxt;
#if 0
diff --git a/libavcodec/audioconvert.h b/libavcodec/audioconvert.h
index 1f93b5c336..349065edc2 100644
--- a/libavcodec/audioconvert.h
+++ b/libavcodec/audioconvert.h
@@ -29,6 +29,7 @@
*/
+#include "libavutil/cpu.h"
#include "avcodec.h"
@@ -93,7 +94,7 @@ typedef struct AVAudioConvert AVAudioConvert;
* @param in_fmt Input sample format
* @param in_channels Number of input channels
* @param[in] matrix Channel mixing matrix (of dimension in_channel*out_channels). Set to NULL to ignore.
- * @param flags See FF_MM_xx
+ * @param flags See AV_CPU_FLAG_xx
* @return NULL on error
*/
AVAudioConvert *av_audio_convert_alloc(enum SampleFormat out_fmt, int out_channels,
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index b4b0cecc88..935321079c 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -28,10 +28,11 @@
#include <errno.h>
#include "libavutil/avutil.h"
+#include "libavutil/cpu.h"
#define LIBAVCODEC_VERSION_MAJOR 52
#define LIBAVCODEC_VERSION_MINOR 87
-#define LIBAVCODEC_VERSION_MICRO 0
+#define LIBAVCODEC_VERSION_MICRO 1
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \
@@ -50,6 +51,7 @@
#ifndef FF_API_PALETTE_CONTROL
#define FF_API_PALETTE_CONTROL (LIBAVCODEC_VERSION_MAJOR < 54)
#endif
+#define FF_API_MM_FLAGS (LIBAVCODEC_VERSION_MAJOR < 53)
#define AV_NOPTS_VALUE INT64_C(0x8000000000000000)
#define AV_TIME_BASE 1000000
@@ -1657,27 +1659,25 @@ typedef struct AVCodecContext {
* result into program crash.)
*/
unsigned dsp_mask;
-#define FF_MM_FORCE 0x80000000 /* Force usage of selected flags (OR) */
- /* lower 16 bits - CPU features */
-#define FF_MM_MMX 0x0001 ///< standard MMX
-#define FF_MM_3DNOW 0x0004 ///< AMD 3DNOW
-#if LIBAVCODEC_VERSION_MAJOR < 53
-#define FF_MM_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext
+
+#if FF_API_MM_FLAGS
+#define FF_MM_FORCE AV_CPU_FLAG_FORCE
+#define FF_MM_MMX AV_CPU_FLAG_MMX
+#define FF_MM_3DNOW AV_CPU_FLAG_3DNOW
+#define FF_MM_MMXEXT AV_CPU_FLAG_MMX2
+#define FF_MM_MMX2 AV_CPU_FLAG_MMX2
+#define FF_MM_SSE AV_CPU_FLAG_SSE
+#define FF_MM_SSE2 AV_CPU_FLAG_SSE2
+#define FF_MM_SSE2SLOW AV_CPU_FLAG_SSE2SLOW
+#define FF_MM_3DNOWEXT AV_CPU_FLAG_3DNOWEXT
+#define FF_MM_SSE3 AV_CPU_FLAG_SSE3
+#define FF_MM_SSE3SLOW AV_CPU_FLAG_SSE3SLOW
+#define FF_MM_SSSE3 AV_CPU_FLAG_SSSE3
+#define FF_MM_SSE4 AV_CPU_FLAG_SSE4
+#define FF_MM_SSE42 AV_CPU_FLAG_SSE42
+#define FF_MM_IWMMXT AV_CPU_FLAG_IWMMXT
+#define FF_MM_ALTIVEC AV_CPU_FLAG_ALTIVEC
#endif
-#define FF_MM_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
-#define FF_MM_SSE 0x0008 ///< SSE functions
-#define FF_MM_SSE2 0x0010 ///< PIV SSE2 functions
-#define FF_MM_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster
- ///< than regular MMX/SSE (e.g. Core1)
-#define FF_MM_3DNOWEXT 0x0020 ///< AMD 3DNowExt
-#define FF_MM_SSE3 0x0040 ///< Prescott SSE3 functions
-#define FF_MM_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster
- ///< than regular MMX/SSE (e.g. Core1)
-#define FF_MM_SSSE3 0x0080 ///< Conroe SSSE3 functions
-#define FF_MM_SSE4 0x0100 ///< Penryn SSE4.1 functions
-#define FF_MM_SSE42 0x0200 ///< Nehalem SSE4.2 functions
-#define FF_MM_IWMMXT 0x0100 ///< XScale IWMMXT
-#define FF_MM_ALTIVEC 0x0001 ///< standard AltiVec
/**
* bits per sample/pixel from the demuxer (needed for huffyuv).
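For callers outside libavcodec that still use the old names, the compatibility block above keeps the FF_MM_* macros defined as aliases while FF_API_MM_FLAGS holds (LIBAVCODEC_VERSION_MAJOR < 53). A hedged sketch of how an application might force a feature set through dsp_mask using the new names (the helper function is hypothetical; the dsp_mask usage mirrors motion-test.c further down in this diff):

    #include "libavcodec/avcodec.h"   /* as of this commit also pulls in libavutil/cpu.h */

    /* Restrict DSP function selection to MMX2 only; while FF_API_MM_FLAGS is
     * enabled the same value could still be spelled FF_MM_FORCE | FF_MM_MMX2. */
    static void restrict_to_mmx2(AVCodecContext *avctx)
    {
        avctx->dsp_mask = AV_CPU_FLAG_FORCE | AV_CPU_FLAG_MMX2;
    }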
diff --git a/libavcodec/dct-test.c b/libavcodec/dct-test.c
index 4f0a0c6996..b2dfcdf39a 100644
--- a/libavcodec/dct-test.c
+++ b/libavcodec/dct-test.c
@@ -94,24 +94,24 @@ struct algo algos[] = {
{"SIMPLE-C", 1, ff_simple_idct, ff_ref_idct, NO_PERM},
#if HAVE_MMX
- {"MMX", 0, ff_fdct_mmx, ff_ref_fdct, NO_PERM, FF_MM_MMX},
+ {"MMX", 0, ff_fdct_mmx, ff_ref_fdct, NO_PERM, AV_CPU_FLAG_MMX},
#if HAVE_MMX2
- {"MMX2", 0, ff_fdct_mmx2, ff_ref_fdct, NO_PERM, FF_MM_MMX2},
- {"SSE2", 0, ff_fdct_sse2, ff_ref_fdct, NO_PERM, FF_MM_SSE2},
+ {"MMX2", 0, ff_fdct_mmx2, ff_ref_fdct, NO_PERM, AV_CPU_FLAG_MMX2},
+ {"SSE2", 0, ff_fdct_sse2, ff_ref_fdct, NO_PERM, AV_CPU_FLAG_SSE2},
#endif
#if CONFIG_GPL
- {"LIBMPEG2-MMX", 1, ff_mmx_idct, ff_ref_idct, MMX_PERM, FF_MM_MMX},
- {"LIBMPEG2-MMX2", 1, ff_mmxext_idct, ff_ref_idct, MMX_PERM, FF_MM_MMX2},
+ {"LIBMPEG2-MMX", 1, ff_mmx_idct, ff_ref_idct, MMX_PERM, AV_CPU_FLAG_MMX},
+ {"LIBMPEG2-MMX2", 1, ff_mmxext_idct, ff_ref_idct, MMX_PERM, AV_CPU_FLAG_MMX2},
#endif
- {"SIMPLE-MMX", 1, ff_simple_idct_mmx, ff_ref_idct, MMX_SIMPLE_PERM, FF_MM_MMX},
- {"XVID-MMX", 1, ff_idct_xvid_mmx, ff_ref_idct, NO_PERM, FF_MM_MMX},
- {"XVID-MMX2", 1, ff_idct_xvid_mmx2, ff_ref_idct, NO_PERM, FF_MM_MMX2},
- {"XVID-SSE2", 1, ff_idct_xvid_sse2, ff_ref_idct, SSE2_PERM, FF_MM_SSE2},
+ {"SIMPLE-MMX", 1, ff_simple_idct_mmx, ff_ref_idct, MMX_SIMPLE_PERM, AV_CPU_FLAG_MMX},
+ {"XVID-MMX", 1, ff_idct_xvid_mmx, ff_ref_idct, NO_PERM, AV_CPU_FLAG_MMX},
+ {"XVID-MMX2", 1, ff_idct_xvid_mmx2, ff_ref_idct, NO_PERM, AV_CPU_FLAG_MMX2},
+ {"XVID-SSE2", 1, ff_idct_xvid_sse2, ff_ref_idct, SSE2_PERM, AV_CPU_FLAG_SSE2},
#endif
#if HAVE_ALTIVEC
- {"altivecfdct", 0, fdct_altivec, ff_ref_fdct, NO_PERM, FF_MM_ALTIVEC},
+ {"altivecfdct", 0, fdct_altivec, ff_ref_fdct, NO_PERM, AV_CPU_FLAG_ALTIVEC},
#endif
#if ARCH_BFIN
@@ -187,7 +187,7 @@ DECLARE_ALIGNED(8, static DCTELEM, block_org)[64];
static inline void mmx_emms(void)
{
#if HAVE_MMX
- if (cpu_flags & FF_MM_MMX)
+ if (cpu_flags & AV_CPU_FLAG_MMX)
__asm__ volatile ("emms\n\t");
#endif
}
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index 9091042abb..af5f074253 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -553,7 +553,7 @@ retry:
#endif
#if HAVE_MMX
- if(s->codec_id == CODEC_ID_MPEG4 && s->xvid_build>=0 && avctx->idct_algo == FF_IDCT_AUTO && (mm_support() & FF_MM_MMX)){
+ if(s->codec_id == CODEC_ID_MPEG4 && s->xvid_build>=0 && avctx->idct_algo == FF_IDCT_AUTO && (mm_support() & AV_CPU_FLAG_MMX)){
avctx->idct_algo= FF_IDCT_XVIDMMX;
avctx->coded_width= 0; // force reinit
// dsputil_init(&s->dsp, avctx);
diff --git a/libavcodec/motion-test.c b/libavcodec/motion-test.c
index 37f55a6f42..994b262bc0 100644
--- a/libavcodec/motion-test.c
+++ b/libavcodec/motion-test.c
@@ -128,7 +128,7 @@ int main(int argc, char **argv)
AVCodecContext *ctx;
int c;
DSPContext cctx, mmxctx;
- int flags[2] = { FF_MM_MMX, FF_MM_MMX2 };
+ int flags[2] = { AV_CPU_FLAG_MMX, AV_CPU_FLAG_MMX2 };
int flags_size = HAVE_MMX2 ? 2 : 1;
for(;;) {
@@ -145,11 +145,11 @@ int main(int argc, char **argv)
printf("ffmpeg motion test\n");
ctx = avcodec_alloc_context();
- ctx->dsp_mask = FF_MM_FORCE;
+ ctx->dsp_mask = AV_CPU_FLAG_FORCE;
dsputil_init(&cctx, ctx);
for (c = 0; c < flags_size; c++) {
int x;
- ctx->dsp_mask = FF_MM_FORCE | flags[c];
+ ctx->dsp_mask = AV_CPU_FLAG_FORCE | flags[c];
dsputil_init(&mmxctx, ctx);
for (x = 0; x < 2; x++) {
diff --git a/libavcodec/ppc/dsputil_ppc.c b/libavcodec/ppc/dsputil_ppc.c
index 4f1c501c77..6d004cf221 100644
--- a/libavcodec/ppc/dsputil_ppc.c
+++ b/libavcodec/ppc/dsputil_ppc.c
@@ -28,7 +28,7 @@ int mm_support(void)
int result = 0;
#if HAVE_ALTIVEC
if (has_altivec()) {
- result |= FF_MM_ALTIVEC;
+ result |= AV_CPU_FLAG_ALTIVEC;
}
#endif /* result */
return result;
diff --git a/libavcodec/x86/cavsdsp_mmx.c b/libavcodec/x86/cavsdsp_mmx.c
index 15193a57f3..2ec53395cc 100644
--- a/libavcodec/x86/cavsdsp_mmx.c
+++ b/libavcodec/x86/cavsdsp_mmx.c
@@ -474,6 +474,6 @@ void ff_cavsdsp_init_mmx(CAVSDSPContext *c, AVCodecContext *avctx)
{
int mm_flags = mm_support();
- if (mm_flags & FF_MM_MMX2) ff_cavsdsp_init_mmx2 (c, avctx);
- if (mm_flags & FF_MM_3DNOW) ff_cavsdsp_init_3dnow(c, avctx);
+ if (mm_flags & AV_CPU_FLAG_MMX2) ff_cavsdsp_init_mmx2 (c, avctx);
+ if (mm_flags & AV_CPU_FLAG_3DNOW) ff_cavsdsp_init_3dnow(c, avctx);
}
diff --git a/libavcodec/x86/cpuid.c b/libavcodec/x86/cpuid.c
index f9afd6e729..45fac5dd1e 100644
--- a/libavcodec/x86/cpuid.c
+++ b/libavcodec/x86/cpuid.c
@@ -79,21 +79,21 @@ int mm_support(void)
family = ((eax>>8)&0xf) + ((eax>>20)&0xff);
model = ((eax>>4)&0xf) + ((eax>>12)&0xf0);
if (std_caps & (1<<23))
- rval |= FF_MM_MMX;
+ rval |= AV_CPU_FLAG_MMX;
if (std_caps & (1<<25))
- rval |= FF_MM_MMX2
+ rval |= AV_CPU_FLAG_MMX2
#if HAVE_SSE
- | FF_MM_SSE;
+ | AV_CPU_FLAG_SSE;
if (std_caps & (1<<26))
- rval |= FF_MM_SSE2;
+ rval |= AV_CPU_FLAG_SSE2;
if (ecx & 1)
- rval |= FF_MM_SSE3;
+ rval |= AV_CPU_FLAG_SSE3;
if (ecx & 0x00000200 )
- rval |= FF_MM_SSSE3;
+ rval |= AV_CPU_FLAG_SSSE3;
if (ecx & 0x00080000 )
- rval |= FF_MM_SSE4;
+ rval |= AV_CPU_FLAG_SSE4;
if (ecx & 0x00100000 )
- rval |= FF_MM_SSE42;
+ rval |= AV_CPU_FLAG_SSE42;
#endif
;
}
@@ -103,13 +103,13 @@ int mm_support(void)
if(max_ext_level >= 0x80000001){
cpuid(0x80000001, eax, ebx, ecx, ext_caps);
if (ext_caps & (1<<31))
- rval |= FF_MM_3DNOW;
+ rval |= AV_CPU_FLAG_3DNOW;
if (ext_caps & (1<<30))
- rval |= FF_MM_3DNOWEXT;
+ rval |= AV_CPU_FLAG_3DNOWEXT;
if (ext_caps & (1<<23))
- rval |= FF_MM_MMX;
+ rval |= AV_CPU_FLAG_MMX;
if (ext_caps & (1<<22))
- rval |= FF_MM_MMX2;
+ rval |= AV_CPU_FLAG_MMX2;
}
if (!strncmp(vendor.c, "GenuineIntel", 12) &&
@@ -117,24 +117,24 @@ int mm_support(void)
/* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and 6/14 (core1 "yonah")
* theoretically support sse2, but it's usually slower than mmx,
* so let's just pretend they don't. */
- if (rval & FF_MM_SSE2) rval ^= FF_MM_SSE2SLOW|FF_MM_SSE2;
- if (rval & FF_MM_SSE3) rval ^= FF_MM_SSE3SLOW|FF_MM_SSE3;
+ if (rval & AV_CPU_FLAG_SSE2) rval ^= AV_CPU_FLAG_SSE2SLOW|AV_CPU_FLAG_SSE2;
+ if (rval & AV_CPU_FLAG_SSE3) rval ^= AV_CPU_FLAG_SSE3SLOW|AV_CPU_FLAG_SSE3;
}
#if 0
av_log(NULL, AV_LOG_DEBUG, "%s%s%s%s%s%s%s%s%s%s%s%s\n",
- (rval&FF_MM_MMX) ? "MMX ":"",
- (rval&FF_MM_MMX2) ? "MMX2 ":"",
- (rval&FF_MM_SSE) ? "SSE ":"",
- (rval&FF_MM_SSE2) ? "SSE2 ":"",
- (rval&FF_MM_SSE2SLOW) ? "SSE2(slow) ":"",
- (rval&FF_MM_SSE3) ? "SSE3 ":"",
- (rval&FF_MM_SSE3SLOW) ? "SSE3(slow) ":"",
- (rval&FF_MM_SSSE3) ? "SSSE3 ":"",
- (rval&FF_MM_SSE4) ? "SSE4.1 ":"",
- (rval&FF_MM_SSE42) ? "SSE4.2 ":"",
- (rval&FF_MM_3DNOW) ? "3DNow ":"",
- (rval&FF_MM_3DNOWEXT) ? "3DNowExt ":"");
+ (rval&AV_CPU_FLAG_MMX) ? "MMX ":"",
+ (rval&AV_CPU_FLAG_MMX2) ? "MMX2 ":"",
+ (rval&AV_CPU_FLAG_SSE) ? "SSE ":"",
+ (rval&AV_CPU_FLAG_SSE2) ? "SSE2 ":"",
+ (rval&AV_CPU_FLAG_SSE2SLOW) ? "SSE2(slow) ":"",
+ (rval&AV_CPU_FLAG_SSE3) ? "SSE3 ":"",
+ (rval&AV_CPU_FLAG_SSE3SLOW) ? "SSE3(slow) ":"",
+ (rval&AV_CPU_FLAG_SSSE3) ? "SSSE3 ":"",
+ (rval&AV_CPU_FLAG_SSE4) ? "SSE4.1 ":"",
+ (rval&AV_CPU_FLAG_SSE42) ? "SSE4.2 ":"",
+ (rval&AV_CPU_FLAG_3DNOW) ? "3DNow ":"",
+ (rval&AV_CPU_FLAG_3DNOWEXT) ? "3DNowExt ":"");
#endif
return rval;
}
diff --git a/libavcodec/x86/dnxhd_mmx.c b/libavcodec/x86/dnxhd_mmx.c
index 1e6299bcd1..644dcf1537 100644
--- a/libavcodec/x86/dnxhd_mmx.c
+++ b/libavcodec/x86/dnxhd_mmx.c
@@ -52,7 +52,7 @@ static void get_pixels_8x4_sym_sse2(DCTELEM *block, const uint8_t *pixels, int l
void ff_dnxhd_init_mmx(DNXHDEncContext *ctx)
{
- if (mm_support() & FF_MM_SSE2) {
+ if (mm_support() & AV_CPU_FLAG_SSE2) {
ctx->get_pixels_8x4_sym = get_pixels_8x4_sym_sse2;
}
}
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 28c616651c..b5d32b67ff 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -22,6 +22,7 @@
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
*/
+#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
@@ -2525,7 +2526,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
int mm_flags = mm_support();
if (avctx->dsp_mask) {
- if (avctx->dsp_mask & FF_MM_FORCE)
+ if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
mm_flags |= (avctx->dsp_mask & 0xffff);
else
mm_flags &= ~(avctx->dsp_mask & 0xffff);
@@ -2533,20 +2534,20 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
#if 0
av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
- if (mm_flags & FF_MM_MMX)
+ if (mm_flags & AV_CPU_FLAG_MMX)
av_log(avctx, AV_LOG_INFO, " mmx");
- if (mm_flags & FF_MM_MMX2)
+ if (mm_flags & AV_CPU_FLAG_MMX2)
av_log(avctx, AV_LOG_INFO, " mmx2");
- if (mm_flags & FF_MM_3DNOW)
+ if (mm_flags & AV_CPU_FLAG_3DNOW)
av_log(avctx, AV_LOG_INFO, " 3dnow");
- if (mm_flags & FF_MM_SSE)
+ if (mm_flags & AV_CPU_FLAG_SSE)
av_log(avctx, AV_LOG_INFO, " sse");
- if (mm_flags & FF_MM_SSE2)
+ if (mm_flags & AV_CPU_FLAG_SSE2)
av_log(avctx, AV_LOG_INFO, " sse2");
av_log(avctx, AV_LOG_INFO, "\n");
#endif
- if (mm_flags & FF_MM_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
const int idct_algo= avctx->idct_algo;
if(avctx->lowres==0){
@@ -2557,7 +2558,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
#if CONFIG_GPL
}else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
- if(mm_flags & FF_MM_MMX2){
+ if(mm_flags & AV_CPU_FLAG_MMX2){
c->idct_put= ff_libmpeg2mmx2_idct_put;
c->idct_add= ff_libmpeg2mmx2_idct_add;
c->idct = ff_mmxext_idct;
@@ -2570,7 +2571,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
#endif
}else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
idct_algo==FF_IDCT_VP3 && HAVE_YASM){
- if(mm_flags & FF_MM_SSE2){
+ if(mm_flags & AV_CPU_FLAG_SSE2){
c->idct_put= ff_vp3_idct_put_sse2;
c->idct_add= ff_vp3_idct_add_sse2;
c->idct = ff_vp3_idct_sse2;
@@ -2584,12 +2585,12 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
}else if(idct_algo==FF_IDCT_CAVS){
c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
}else if(idct_algo==FF_IDCT_XVIDMMX){
- if(mm_flags & FF_MM_SSE2){
+ if(mm_flags & AV_CPU_FLAG_SSE2){
c->idct_put= ff_idct_xvid_sse2_put;
c->idct_add= ff_idct_xvid_sse2_add;
c->idct = ff_idct_xvid_sse2;
c->idct_permutation_type= FF_SSE2_IDCT_PERM;
- }else if(mm_flags & FF_MM_MMX2){
+ }else if(mm_flags & AV_CPU_FLAG_MMX2){
c->idct_put= ff_idct_xvid_mmx2_put;
c->idct_add= ff_idct_xvid_mmx2_add;
c->idct = ff_idct_xvid_mmx2;
@@ -2606,7 +2607,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
c->clear_block = clear_block_mmx;
c->clear_blocks = clear_blocks_mmx;
- if ((mm_flags & FF_MM_SSE) &&
+ if ((mm_flags & AV_CPU_FLAG_SSE) &&
!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
/* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
c->clear_block = clear_block_sse;
@@ -2649,7 +2650,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx;
#endif
- if (mm_flags & FF_MM_MMX2) {
+ if (mm_flags & AV_CPU_FLAG_MMX2) {
c->prefetch = prefetch_mmx2;
c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
@@ -2740,7 +2741,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
#if HAVE_7REGS && HAVE_TEN_OPERANDS
- if( mm_flags&FF_MM_3DNOW )
+ if( mm_flags&AV_CPU_FLAG_3DNOW )
c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif
@@ -2748,7 +2749,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
ff_vc1dsp_init_mmx(c, avctx);
c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
- } else if (mm_flags & FF_MM_3DNOW) {
+ } else if (mm_flags & AV_CPU_FLAG_3DNOW) {
c->prefetch = prefetch_3dnow;
c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
@@ -2816,13 +2817,13 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
- if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
+ if((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW)){
// these functions are slower than mmx on AMD, but faster on Intel
c->put_pixels_tab[0][0] = put_pixels16_sse2;
c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
H264_QPEL_FUNCS(0, 0, sse2);
}
- if(mm_flags & FF_MM_SSE2){
+ if(mm_flags & AV_CPU_FLAG_SSE2){
H264_QPEL_FUNCS(0, 1, sse2);
H264_QPEL_FUNCS(0, 2, sse2);
H264_QPEL_FUNCS(0, 3, sse2);
@@ -2837,7 +2838,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
H264_QPEL_FUNCS(3, 3, sse2);
}
#if HAVE_SSSE3
- if(mm_flags & FF_MM_SSSE3){
+ if(mm_flags & AV_CPU_FLAG_SSSE3){
H264_QPEL_FUNCS(1, 0, ssse3);
H264_QPEL_FUNCS(1, 1, ssse3);
H264_QPEL_FUNCS(1, 2, ssse3);
@@ -2859,13 +2860,13 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
- if (mm_flags & FF_MM_SSE4) // not really sse4, just slow on Conroe
+ if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
#endif
}
#endif
- if(mm_flags & FF_MM_3DNOW){
+ if(mm_flags & AV_CPU_FLAG_3DNOW){
c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
c->vector_fmul = vector_fmul_3dnow;
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
@@ -2873,20 +2874,20 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
}
}
- if(mm_flags & FF_MM_3DNOWEXT){
+ if(mm_flags & AV_CPU_FLAG_3DNOWEXT){
c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
c->vector_fmul_window = vector_fmul_window_3dnow2;
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
}
}
- if(mm_flags & FF_MM_MMX2){
+ if(mm_flags & AV_CPU_FLAG_MMX2){
#if HAVE_YASM
c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
#endif
}
- if(mm_flags & FF_MM_SSE){
+ if(mm_flags & AV_CPU_FLAG_SSE){
c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
c->ac3_downmix = ac3_downmix_sse;
c->vector_fmul = vector_fmul_sse;
@@ -2901,9 +2902,9 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->scalarproduct_float = ff_scalarproduct_float_sse;
#endif
}
- if(mm_flags & FF_MM_3DNOW)
+ if(mm_flags & AV_CPU_FLAG_3DNOW)
c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
- if(mm_flags & FF_MM_SSE2){
+ if(mm_flags & AV_CPU_FLAG_SSE2){
c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
c->float_to_int16 = float_to_int16_sse2;
c->float_to_int16_interleave = float_to_int16_interleave_sse2;
@@ -2912,7 +2913,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
#endif
}
- if((mm_flags & FF_MM_SSSE3) && !(mm_flags & (FF_MM_SSE42|FF_MM_3DNOW)) && HAVE_YASM) // cachesplit
+ if((mm_flags & AV_CPU_FLAG_SSSE3) && !(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW)) && HAVE_YASM) // cachesplit
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
}
diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c
index bc24abadc7..f78fc319c3 100644
--- a/libavcodec/x86/dsputilenc_mmx.c
+++ b/libavcodec/x86/dsputilenc_mmx.c
@@ -1352,12 +1352,12 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
int mm_flags = mm_support();
- if (mm_flags & FF_MM_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
const int dct_algo = avctx->dct_algo;
if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
- if(mm_flags & FF_MM_SSE2){
+ if(mm_flags & AV_CPU_FLAG_SSE2){
c->fdct = ff_fdct_sse2;
- }else if(mm_flags & FF_MM_MMX2){
+ }else if(mm_flags & AV_CPU_FLAG_MMX2){
c->fdct = ff_fdct_mmx2;
}else{
c->fdct = ff_fdct_mmx;
@@ -1375,7 +1375,7 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->hadamard8_diff[1]= hadamard8_diff_mmx;
c->pix_norm1 = pix_norm1_mmx;
- c->sse[0] = (mm_flags & FF_MM_SSE2) ? sse16_sse2 : sse16_mmx;
+ c->sse[0] = (mm_flags & AV_CPU_FLAG_SSE2) ? sse16_sse2 : sse16_mmx;
c->sse[1] = sse8_mmx;
c->vsad[4]= vsad_intra16_mmx;
@@ -1393,7 +1393,7 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;
- if (mm_flags & FF_MM_MMX2) {
+ if (mm_flags & AV_CPU_FLAG_MMX2) {
c->sum_abs_dctelem= sum_abs_dctelem_mmx2;
c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
c->hadamard8_diff[1]= hadamard8_diff_mmx2;
@@ -1406,19 +1406,19 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
}
- if(mm_flags & FF_MM_SSE2){
+ if(mm_flags & AV_CPU_FLAG_SSE2){
c->get_pixels = get_pixels_sse2;
c->sum_abs_dctelem= sum_abs_dctelem_sse2;
c->hadamard8_diff[0]= hadamard8_diff16_sse2;
c->hadamard8_diff[1]= hadamard8_diff_sse2;
}
- if (CONFIG_LPC && mm_flags & (FF_MM_SSE2|FF_MM_SSE2SLOW)) {
+ if (CONFIG_LPC && mm_flags & (AV_CPU_FLAG_SSE2|AV_CPU_FLAG_SSE2SLOW)) {
c->lpc_compute_autocorr = ff_lpc_compute_autocorr_sse2;
}
#if HAVE_SSSE3
- if(mm_flags & FF_MM_SSSE3){
+ if(mm_flags & AV_CPU_FLAG_SSSE3){
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->try_8x8basis= try_8x8basis_ssse3;
}
@@ -1429,7 +1429,7 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
#endif
- if(mm_flags & FF_MM_3DNOW){
+ if(mm_flags & AV_CPU_FLAG_3DNOW){
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->try_8x8basis= try_8x8basis_3dnow;
}
diff --git a/libavcodec/x86/fft.c b/libavcodec/x86/fft.c
index eb5c65ecbb..acb508f3ec 100644
--- a/libavcodec/x86/fft.c
+++ b/libavcodec/x86/fft.c
@@ -23,18 +23,18 @@ av_cold void ff_fft_init_mmx(FFTContext *s)
{
#if HAVE_YASM
int has_vectors = mm_support();
- if (has_vectors & FF_MM_SSE && HAVE_SSE) {
+ if (has_vectors & AV_CPU_FLAG_SSE && HAVE_SSE) {
/* SSE for P3/P4/K8 */
s->imdct_calc = ff_imdct_calc_sse;
s->imdct_half = ff_imdct_half_sse;
s->fft_permute = ff_fft_permute_sse;
s->fft_calc = ff_fft_calc_sse;
- } else if (has_vectors & FF_MM_3DNOWEXT && HAVE_AMD3DNOWEXT) {
+ } else if (has_vectors & AV_CPU_FLAG_3DNOWEXT && HAVE_AMD3DNOWEXT) {
/* 3DNowEx for K7 */
s->imdct_calc = ff_imdct_calc_3dn2;
s->imdct_half = ff_imdct_half_3dn2;
s->fft_calc = ff_fft_calc_3dn2;
- } else if (has_vectors & FF_MM_3DNOW && HAVE_AMD3DNOW) {
+ } else if (has_vectors & AV_CPU_FLAG_3DNOW && HAVE_AMD3DNOW) {
/* 3DNow! for K6-2/3 */
s->imdct_calc = ff_imdct_calc_3dn;
s->imdct_half = ff_imdct_half_3dn;
@@ -47,7 +47,7 @@ av_cold void ff_fft_init_mmx(FFTContext *s)
av_cold void ff_dct_init_mmx(DCTContext *s)
{
int has_vectors = mm_support();
- if (has_vectors & FF_MM_SSE && HAVE_SSE)
+ if (has_vectors & AV_CPU_FLAG_SSE && HAVE_SSE)
s->dct32 = ff_dct32_float_sse;
}
#endif
diff --git a/libavcodec/x86/h264_intrapred_init.c b/libavcodec/x86/h264_intrapred_init.c
index aceb2e6bbb..dd02de81e5 100644
--- a/libavcodec/x86/h264_intrapred_init.c
+++ b/libavcodec/x86/h264_intrapred_init.c
@@ -51,7 +51,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id)
int mm_flags = mm_support();
#if HAVE_YASM
- if (mm_flags & FF_MM_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_mmx;
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmx;
h->pred8x8 [VERT_PRED8x8] = ff_pred8x8_vertical_mmx;
@@ -63,7 +63,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id)
}
}
- if (mm_flags & FF_MM_MMX2) {
+ if (mm_flags & AV_CPU_FLAG_MMX2) {
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmxext;
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_mmxext;
h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmxext;
@@ -77,11 +77,11 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id)
}
}
- if (mm_flags & FF_MM_SSE) {
+ if (mm_flags & AV_CPU_FLAG_SSE) {
h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_sse;
}
- if (mm_flags & FF_MM_SSE2) {
+ if (mm_flags & AV_CPU_FLAG_SSE2) {
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_sse2;
if (codec_id == CODEC_ID_VP8) {
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_sse2;
@@ -89,7 +89,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id)
}
}
- if (mm_flags & FF_MM_SSSE3) {
+ if (mm_flags & AV_CPU_FLAG_SSSE3) {
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_ssse3;
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_ssse3;
h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_ssse3;
diff --git a/libavcodec/x86/h264dsp_mmx.c b/libavcodec/x86/h264dsp_mmx.c
index 9bdde84b15..1f3e06bfb9 100644
--- a/libavcodec/x86/h264dsp_mmx.c
+++ b/libavcodec/x86/h264dsp_mmx.c
@@ -744,7 +744,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c)
{
int mm_flags = mm_support();
- if (mm_flags & FF_MM_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
c->h264_idct_dc_add=
c->h264_idct_add= ff_h264_idct_add_mmx;
c->h264_idct8_dc_add=
@@ -755,7 +755,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c)
c->h264_idct_add8 = ff_h264_idct_add8_mmx;
c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;
- if (mm_flags & FF_MM_MMX2) {
+ if (mm_flags & AV_CPU_FLAG_MMX2) {
c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
c->h264_idct_add16 = ff_h264_idct_add16_mmx2;
@@ -765,13 +765,13 @@ void ff_h264dsp_init_x86(H264DSPContext *c)
c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
}
- if(mm_flags & FF_MM_SSE2){
+ if(mm_flags & AV_CPU_FLAG_SSE2){
c->h264_idct8_add = ff_h264_idct8_add_sse2;
c->h264_idct8_add4= ff_h264_idct8_add4_sse2;
}
#if HAVE_YASM
- if (mm_flags & FF_MM_MMX2){
+ if (mm_flags & AV_CPU_FLAG_MMX2){
c->h264_v_loop_filter_chroma= ff_x264_deblock_v_chroma_mmxext;
c->h264_h_loop_filter_chroma= ff_x264_deblock_h_chroma_mmxext;
c->h264_v_loop_filter_chroma_intra= ff_x264_deblock_v_chroma_intra_mmxext;
@@ -800,7 +800,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c)
c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
- if( mm_flags&FF_MM_SSE2 ){
+ if (mm_flags&AV_CPU_FLAG_SSE2) {
c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_sse2;
c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_sse2;
@@ -825,7 +825,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c)
c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
#endif
}
- if ( mm_flags&FF_MM_SSSE3 ){
+ if (mm_flags&AV_CPU_FLAG_SSSE3) {
c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_ssse3;
c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_ssse3;
diff --git a/libavcodec/x86/motion_est_mmx.c b/libavcodec/x86/motion_est_mmx.c
index 87e7a15a47..d7870c580e 100644
--- a/libavcodec/x86/motion_est_mmx.c
+++ b/libavcodec/x86/motion_est_mmx.c
@@ -429,7 +429,7 @@ void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{
int mm_flags = mm_support();
- if (mm_flags & FF_MM_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
c->pix_abs[0][0] = sad16_mmx;
c->pix_abs[0][1] = sad16_x2_mmx;
c->pix_abs[0][2] = sad16_y2_mmx;
@@ -442,7 +442,7 @@ void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
c->sad[0]= sad16_mmx;
c->sad[1]= sad8_mmx;
}
- if (mm_flags & FF_MM_MMX2) {
+ if (mm_flags & AV_CPU_FLAG_MMX2) {
c->pix_abs[0][0] = sad16_mmx2;
c->pix_abs[1][0] = sad8_mmx2;
@@ -458,7 +458,7 @@ void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
c->pix_abs[1][3] = sad8_xy2_mmx2;
}
}
- if ((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW) && avctx->codec_id != CODEC_ID_SNOW) {
+ if ((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW) && avctx->codec_id != CODEC_ID_SNOW) {
c->sad[0]= sad16_sse2;
}
}
diff --git a/libavcodec/x86/mpegaudiodec_mmx.c b/libavcodec/x86/mpegaudiodec_mmx.c
index f4c9b45986..6cc45bed09 100644
--- a/libavcodec/x86/mpegaudiodec_mmx.c
+++ b/libavcodec/x86/mpegaudiodec_mmx.c
@@ -151,7 +151,7 @@ void ff_mpegaudiodec_init_mmx(MPADecodeContext *s)
{
int mm_flags = mm_support();
- if (mm_flags & FF_MM_SSE2) {
+ if (mm_flags & AV_CPU_FLAG_SSE2) {
s->apply_window_mp3 = apply_window_mp3;
}
}
diff --git a/libavcodec/x86/mpegvideo_mmx.c b/libavcodec/x86/mpegvideo_mmx.c
index 3556e1caa7..a935667d97 100644
--- a/libavcodec/x86/mpegvideo_mmx.c
+++ b/libavcodec/x86/mpegvideo_mmx.c
@@ -627,7 +627,7 @@ void MPV_common_init_mmx(MpegEncContext *s)
{
int mm_flags = mm_support();
- if (mm_flags & FF_MM_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
const int dct_algo = s->avctx->dct_algo;
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_mmx;
@@ -638,7 +638,7 @@ void MPV_common_init_mmx(MpegEncContext *s)
s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx;
s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx;
- if (mm_flags & FF_MM_SSE2) {
+ if (mm_flags & AV_CPU_FLAG_SSE2) {
s->denoise_dct= denoise_dct_sse2;
} else {
s->denoise_dct= denoise_dct_mmx;
@@ -646,13 +646,13 @@ void MPV_common_init_mmx(MpegEncContext *s)
if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
#if HAVE_SSSE3
- if(mm_flags & FF_MM_SSSE3){
+ if(mm_flags & AV_CPU_FLAG_SSSE3){
s->dct_quantize= dct_quantize_SSSE3;
} else
#endif
- if(mm_flags & FF_MM_SSE2){
+ if(mm_flags & AV_CPU_FLAG_SSE2){
s->dct_quantize= dct_quantize_SSE2;
- } else if(mm_flags & FF_MM_MMX2){
+ } else if(mm_flags & AV_CPU_FLAG_MMX2){
s->dct_quantize= dct_quantize_MMX2;
} else {
s->dct_quantize= dct_quantize_MMX;
diff --git a/libavcodec/x86/snowdsp_mmx.c b/libavcodec/x86/snowdsp_mmx.c
index 96b3bf29ba..cc65b9a0c6 100644
--- a/libavcodec/x86/snowdsp_mmx.c
+++ b/libavcodec/x86/snowdsp_mmx.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/snow.h"
@@ -876,8 +877,8 @@ void ff_dwt_init_x86(DWTContext *c)
{
int mm_flags = mm_support();
- if (mm_flags & FF_MM_MMX) {
- if(mm_flags & FF_MM_SSE2 & 0){
+ if (mm_flags & AV_CPU_FLAG_MMX) {
+ if(mm_flags & AV_CPU_FLAG_SSE2 & 0){
c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
#if HAVE_7REGS
c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
@@ -885,7 +886,7 @@ void ff_dwt_init_x86(DWTContext *c)
c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
}
else{
- if(mm_flags & FF_MM_MMX2){
+ if(mm_flags & AV_CPU_FLAG_MMX2){
c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
#if HAVE_7REGS
c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
diff --git a/libavcodec/x86/vc1dsp_mmx.c b/libavcodec/x86/vc1dsp_mmx.c
index eb3ad2c32f..7a9a6bebbd 100644
--- a/libavcodec/x86/vc1dsp_mmx.c
+++ b/libavcodec/x86/vc1dsp_mmx.c
@@ -736,7 +736,7 @@ void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
- if (mm_flags & FF_MM_MMX2){
+ if (mm_flags & AV_CPU_FLAG_MMX2){
dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_mmx2;
dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmx2;
dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmx2;
@@ -772,23 +772,23 @@ void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT
#if HAVE_YASM
- if (mm_flags & FF_MM_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
ASSIGN_LF(mmx);
}
return;
- if (mm_flags & FF_MM_MMX2) {
+ if (mm_flags & AV_CPU_FLAG_MMX2) {
ASSIGN_LF(mmx2);
}
- if (mm_flags & FF_MM_SSE2) {
+ if (mm_flags & AV_CPU_FLAG_SSE2) {
dsp->vc1_v_loop_filter8 = ff_vc1_v_loop_filter8_sse2;
dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_sse2;
dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_sse2;
dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse2;
}
- if (mm_flags & FF_MM_SSSE3) {
+ if (mm_flags & AV_CPU_FLAG_SSSE3) {
ASSIGN_LF(ssse3);
}
- if (mm_flags & FF_MM_SSE4) {
+ if (mm_flags & AV_CPU_FLAG_SSE4) {
dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_sse4;
dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4;
}
diff --git a/libavcodec/x86/vp56dsp_init.c b/libavcodec/x86/vp56dsp_init.c
index 5120ed231d..719190b247 100644
--- a/libavcodec/x86/vp56dsp_init.c
+++ b/libavcodec/x86/vp56dsp_init.c
@@ -35,11 +35,11 @@ av_cold void ff_vp56dsp_init_x86(VP56DSPContext* c, enum CodecID codec)
int mm_flags = mm_support();
if (CONFIG_VP6_DECODER && codec == CODEC_ID_VP6) {
- if (mm_flags & FF_MM_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx;
}
- if (mm_flags & FF_MM_SSE2) {
+ if (mm_flags & AV_CPU_FLAG_SSE2) {
c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2;
}
}
diff --git a/libavcodec/x86/vp8dsp-init.c b/libavcodec/x86/vp8dsp-init.c
index ed5cf46024..f84bfed3a9 100644
--- a/libavcodec/x86/vp8dsp-init.c
+++ b/libavcodec/x86/vp8dsp-init.c
@@ -285,7 +285,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
int mm_flags = mm_support();
#if HAVE_YASM
- if (mm_flags & FF_MM_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx;
c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
@@ -312,7 +312,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
/* note that 4-tap width=16 functions are missing because w=16
* is only used for luma, and luma is always a copy or sixtap. */
- if (mm_flags & FF_MM_MMX2) {
+ if (mm_flags & AV_CPU_FLAG_MMX2) {
VP8_LUMA_MC_FUNC(0, 16, mmxext);
VP8_MC_FUNC(1, 8, mmxext);
VP8_MC_FUNC(2, 4, mmxext);
@@ -334,14 +334,14 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
}
- if (mm_flags & FF_MM_SSE) {
+ if (mm_flags & AV_CPU_FLAG_SSE) {
c->vp8_idct_add = ff_vp8_idct_add_sse;
c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse;
c->put_vp8_epel_pixels_tab[0][0][0] =
c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse;
}
- if (mm_flags & (FF_MM_SSE2|FF_MM_SSE2SLOW)) {
+ if (mm_flags & (AV_CPU_FLAG_SSE2|AV_CPU_FLAG_SSE2SLOW)) {
VP8_LUMA_MC_FUNC(0, 16, sse2);
VP8_MC_FUNC(1, 8, sse2);
VP8_BILINEAR_MC_FUNC(0, 16, sse2);
@@ -356,7 +356,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2;
}
- if (mm_flags & FF_MM_SSE2) {
+ if (mm_flags & AV_CPU_FLAG_SSE2) {
c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2;
@@ -368,7 +368,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2;
}
- if (mm_flags & FF_MM_SSSE3) {
+ if (mm_flags & AV_CPU_FLAG_SSSE3) {
VP8_LUMA_MC_FUNC(0, 16, ssse3);
VP8_MC_FUNC(1, 8, ssse3);
VP8_MC_FUNC(2, 4, ssse3);
@@ -390,7 +390,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3;
}
- if (mm_flags & FF_MM_SSE4) {
+ if (mm_flags & AV_CPU_FLAG_SSE4) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4;