Diffstat (limited to 'libavcodec/x86/idctdsp_mmx.c')
-rw-r--r-- | libavcodec/x86/idctdsp_mmx.c | 133
1 file changed, 133 insertions, 0 deletions
diff --git a/libavcodec/x86/idctdsp_mmx.c b/libavcodec/x86/idctdsp_mmx.c
new file mode 100644
index 0000000000..a72b9416b1
--- /dev/null
+++ b/libavcodec/x86/idctdsp_mmx.c
@@ -0,0 +1,133 @@
+/*
+ * SIMD-optimized IDCT-related routines
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/cpu.h"
+#include "libavutil/x86/asm.h"
+#include "idctdsp.h"
+#include "inline_asm.h"
+
+#if HAVE_INLINE_ASM
+
+void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
+                               int line_size)
+{
+    const int16_t *p;
+    uint8_t *pix;
+
+    /* read the pixels */
+    p   = block;
+    pix = pixels;
+    /* unrolled loop */
+    __asm__ volatile (
+        "movq        (%3), %%mm0       \n\t"
+        "movq       8(%3), %%mm1       \n\t"
+        "movq      16(%3), %%mm2       \n\t"
+        "movq      24(%3), %%mm3       \n\t"
+        "movq      32(%3), %%mm4       \n\t"
+        "movq      40(%3), %%mm5       \n\t"
+        "movq      48(%3), %%mm6       \n\t"
+        "movq      56(%3), %%mm7       \n\t"
+        "packuswb   %%mm1, %%mm0       \n\t"
+        "packuswb   %%mm3, %%mm2       \n\t"
+        "packuswb   %%mm5, %%mm4       \n\t"
+        "packuswb   %%mm7, %%mm6       \n\t"
+        "movq       %%mm0, (%0)        \n\t"
+        "movq       %%mm2, (%0, %1)    \n\t"
+        "movq       %%mm4, (%0, %1, 2) \n\t"
+        "movq       %%mm6, (%0, %2)    \n\t"
+        :: "r" (pix), "r" ((x86_reg) line_size), "r" ((x86_reg) line_size * 3),
+           "r" (p)
+        : "memory");
+    pix += line_size * 4;
+    p   += 32;
+
+    // if this were an exact copy of the code above, the compiler
+    // would generate some very strange code,
+    // thus using "r"
+    __asm__ volatile (
+        "movq        (%3), %%mm0       \n\t"
+        "movq       8(%3), %%mm1       \n\t"
+        "movq      16(%3), %%mm2       \n\t"
+        "movq      24(%3), %%mm3       \n\t"
+        "movq      32(%3), %%mm4       \n\t"
+        "movq      40(%3), %%mm5       \n\t"
+        "movq      48(%3), %%mm6       \n\t"
+        "movq      56(%3), %%mm7       \n\t"
+        "packuswb   %%mm1, %%mm0       \n\t"
+        "packuswb   %%mm3, %%mm2       \n\t"
+        "packuswb   %%mm5, %%mm4       \n\t"
+        "packuswb   %%mm7, %%mm6       \n\t"
+        "movq       %%mm0, (%0)        \n\t"
+        "movq       %%mm2, (%0, %1)    \n\t"
+        "movq       %%mm4, (%0, %1, 2) \n\t"
+        "movq       %%mm6, (%0, %2)    \n\t"
+        :: "r" (pix), "r" ((x86_reg) line_size), "r" ((x86_reg) line_size * 3),
+           "r" (p)
+        : "memory");
+}
+
+void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
+                               int line_size)
+{
+    const int16_t *p;
+    uint8_t *pix;
+    int i;
+
+    /* read the pixels */
+    p   = block;
+    pix = pixels;
+    MOVQ_ZERO(mm7);
+    i = 4;
+    do {
+        __asm__ volatile (
+            "movq        (%2), %%mm0 \n\t"
+            "movq       8(%2), %%mm1 \n\t"
+            "movq      16(%2), %%mm2 \n\t"
+            "movq      24(%2), %%mm3 \n\t"
+            "movq          %0, %%mm4 \n\t"
+            "movq          %1, %%mm6 \n\t"
+            "movq       %%mm4, %%mm5 \n\t"
+            "punpcklbw  %%mm7, %%mm4 \n\t"
+            "punpckhbw  %%mm7, %%mm5 \n\t"
+            "paddsw     %%mm4, %%mm0 \n\t"
+            "paddsw     %%mm5, %%mm1 \n\t"
+            "movq       %%mm6, %%mm5 \n\t"
+            "punpcklbw  %%mm7, %%mm6 \n\t"
+            "punpckhbw  %%mm7, %%mm5 \n\t"
+            "paddsw     %%mm6, %%mm2 \n\t"
+            "paddsw     %%mm5, %%mm3 \n\t"
+            "packuswb   %%mm1, %%mm0 \n\t"
+            "packuswb   %%mm3, %%mm2 \n\t"
+            "movq       %%mm0, %0    \n\t"
+            "movq       %%mm2, %1    \n\t"
+            : "+m" (*pix), "+m" (*(pix + line_size))
+            : "r" (p)
+            : "memory");
+        pix += line_size * 2;
+        p   += 16;
+    } while (--i);
+}
+
+#endif /* HAVE_INLINE_ASM */