author     Allan Sandfeld Jensen <allan.jensen@qt.io>    2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>    2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/third_party/openh264
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
BASELINE: Update Chromium to 85.0.4183.140
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/third_party/openh264')
31 files changed, 3810 insertions, 98 deletions
diff --git a/chromium/third_party/openh264/README.chromium b/chromium/third_party/openh264/README.chromium
index e7da1ff489e..599e7d4fd70 100644
--- a/chromium/third_party/openh264/README.chromium
+++ b/chromium/third_party/openh264/README.chromium
@@ -2,8 +2,8 @@ Name: OpenH264
 Short Name: openh264
 URL: http://www.openh264.org/
 Version: unknown
-CPEPrefix: cpe:/a:cisco:openh264:2.1.0
-(Cut at 7067d2d9075ab4988bef2b9135ae9542f9778772, which is 2.1.0)
+CPEPrefix: cpe:/a:cisco:openh264:2.1.1
+(Cut at 3dd5b80bc4f172dd82925bb259cb7c82348409c5, which is 2.1.1)
 License: 2-Clause BSD
 License File: src/LICENSE
 Security Critical: yes
diff --git a/chromium/third_party/openh264/src/Makefile b/chromium/third_party/openh264/src/Makefile
index bf398d79632..53d16ccb65e 100644
--- a/chromium/third_party/openh264/src/Makefile
+++ b/chromium/third_party/openh264/src/Makefile
@@ -34,8 +34,9 @@ GTEST_VER=release-1.8.1
 CCASFLAGS=$(CFLAGS)
 STATIC_LDFLAGS=-lstdc++
 STRIP ?= strip
+USE_STACK_PROTECTOR = Yes
 
-SHAREDLIB_MAJORVERSION=5
+SHAREDLIB_MAJORVERSION=6
 FULL_VERSION := 2.1.0
 
 ifeq (,$(wildcard $(SRC_PATH)gmp-api))
diff --git a/chromium/third_party/openh264/src/build/arch.mk b/chromium/third_party/openh264/src/build/arch.mk
index 1bf318ab9a4..c6570ed4e59 100644
--- a/chromium/third_party/openh264/src/build/arch.mk
+++ b/chromium/third_party/openh264/src/build/arch.mk
@@ -41,7 +41,7 @@ ASMFLAGS += -I$(SRC_PATH)codec/common/mips/
 ifeq ($(ENABLE_MMI), Yes)
 ENABLE_MMI = $(shell $(SRC_PATH)build/mips-simd-check.sh $(CC) mmi)
 ifeq ($(ENABLE_MMI), Yes)
-CFLAGS += -DHAVE_MMI -Wa,-mloongson-mmi,-mloongson-ext
+CFLAGS += -DHAVE_MMI -march=loongson3a
 endif
 endif
 #msa
diff --git a/chromium/third_party/openh264/src/build/mips-simd-check.sh b/chromium/third_party/openh264/src/build/mips-simd-check.sh
index 5efffbef277..d0d72f9edd6 100755
--- a/chromium/third_party/openh264/src/build/mips-simd-check.sh
+++ b/chromium/third_party/openh264/src/build/mips-simd-check.sh
@@ -15,7 +15,7 @@ TMPO=$(mktemp tmp.XXXXXX.o)
 if [ $2 == "mmi" ]
 then
     echo "void main(void){ __asm__ volatile(\"punpcklhw \$f0, \$f0, \$f0\"); }" > $TMPC
-    $1 -Wa,-mloongson-mmi $TMPC -o $TMPO &> /dev/null
+    $1 -march=loongson3a $TMPC -o $TMPO &> /dev/null
     if test -s $TMPO
     then
         echo "Yes"
diff --git a/chromium/third_party/openh264/src/build/platform-android.mk b/chromium/third_party/openh264/src/build/platform-android.mk
index 5cebcb05e7d..0c442dfac13 100644
--- a/chromium/third_party/openh264/src/build/platform-android.mk
+++ b/chromium/third_party/openh264/src/build/platform-android.mk
@@ -44,7 +44,10 @@ SYSROOT = $(NDKROOT)/platforms/android-$(NDKLEVEL)/arch-$(ARCH)
 CXX = $(TOOLCHAINPREFIX)g++
 CC = $(TOOLCHAINPREFIX)gcc
 AR = $(TOOLCHAINPREFIX)ar
-CFLAGS += -DANDROID_NDK -fpic --sysroot=$(SYSROOT) -MMD -MP -fstack-protector-all
+CFLAGS += -DANDROID_NDK -fpic --sysroot=$(SYSROOT) -MMD -MP
+ifeq ($(USE_STACK_PROTECTOR), Yes)
+CFLAGS += -fstack-protector-all
+endif
 CFLAGS += -isystem $(NDKROOT)/sysroot/usr/include -isystem $(NDKROOT)/sysroot/usr/include/$(TOOLCHAIN_NAME) -D__ANDROID_API__=$(NDKLEVEL)
 CXXFLAGS += -fno-rtti -fno-exceptions
 LDFLAGS += --sysroot=$(SYSROOT)
diff --git a/chromium/third_party/openh264/src/build/platform-bsd.mk b/chromium/third_party/openh264/src/build/platform-bsd.mk
index e60d2cc094b..2e0bf2ccd60 100644
--- a/chromium/third_party/openh264/src/build/platform-bsd.mk
+++ b/chromium/third_party/openh264/src/build/platform-bsd.mk
@@ -3,7 +3,10 @@ SHAREDLIBSUFFIX = so
 SHAREDLIBSUFFIXFULLVER=$(SHAREDLIBSUFFIX).$(FULL_VERSION)
 SHAREDLIBSUFFIXMAJORVER=$(SHAREDLIBSUFFIX).$(SHAREDLIB_MAJORVERSION)
 SHLDFLAGS = -Wl,-soname,$(LIBPREFIX)$(PROJECT_NAME).$(SHAREDLIBSUFFIXMAJORVER)
-CFLAGS += -fPIC -fstack-protector-all
+CFLAGS += -fPIC
+ifeq ($(USE_STACK_PROTECTOR), Yes)
+CFLAGS += -fstack-protector-all
+endif
 LDFLAGS += -lpthread
 STATIC_LDFLAGS += -lpthread -lm
 ifeq ($(ASM_ARCH), x86)
diff --git a/chromium/third_party/openh264/src/build/platform-darwin.mk b/chromium/third_party/openh264/src/build/platform-darwin.mk
index 6551cea5015..6f91dafb2af 100644
--- a/chromium/third_party/openh264/src/build/platform-darwin.mk
+++ b/chromium/third_party/openh264/src/build/platform-darwin.mk
@@ -10,7 +10,10 @@ SHLDFLAGS = -dynamiclib -twolevel_namespace -undefined dynamic_lookup \
 	$(SHAREDLIB_DIR)/$(LIBPREFIX)$(PROJECT_NAME).$(SHAREDLIBSUFFIXMAJORVER)
 SHARED = -dynamiclib
 SHARED += -current_version $(CURRENT_VERSION) -compatibility_version $(COMPATIBILITY_VERSION)
-CFLAGS += -Wall -fPIC -MMD -MP -fstack-protector-all
+CFLAGS += -Wall -fPIC -MMD -MP
+ifeq ($(USE_STACK_PROTECTOR), Yes)
+CFLAGS += -fstack-protector-all
+endif
 ifeq ($(ASM_ARCH), x86)
 ASMFLAGS += -DPREFIX
 ifeq ($(ARCH), x86_64)
diff --git a/chromium/third_party/openh264/src/build/platform-linux.mk b/chromium/third_party/openh264/src/build/platform-linux.mk
index 44fe242439f..b5c006b2325 100644
--- a/chromium/third_party/openh264/src/build/platform-linux.mk
+++ b/chromium/third_party/openh264/src/build/platform-linux.mk
@@ -3,7 +3,10 @@ SHAREDLIBSUFFIX = so
 SHAREDLIBSUFFIXFULLVER=$(SHAREDLIBSUFFIX).$(FULL_VERSION)
 SHAREDLIBSUFFIXMAJORVER=$(SHAREDLIBSUFFIX).$(SHAREDLIB_MAJORVERSION)
 SHLDFLAGS = -Wl,-soname,$(LIBPREFIX)$(PROJECT_NAME).$(SHAREDLIBSUFFIXMAJORVER)
-CFLAGS += -Wall -fno-strict-aliasing -fPIC -MMD -MP -fstack-protector-all
+CFLAGS += -Wall -fno-strict-aliasing -fPIC -MMD -MP
+ifeq ($(USE_STACK_PROTECTOR), Yes)
+CFLAGS += -fstack-protector-all
+endif
 LDFLAGS += -lpthread
 STATIC_LDFLAGS += -lpthread -lm
 AR_OPTS = crD $@
diff --git a/chromium/third_party/openh264/src/codec/build/win32/dec/WelsDecCore.vcproj b/chromium/third_party/openh264/src/codec/build/win32/dec/WelsDecCore.vcproj
index 63318f340d0..de7f119f332 100644
--- a/chromium/third_party/openh264/src/codec/build/win32/dec/WelsDecCore.vcproj
+++ b/chromium/third_party/openh264/src/codec/build/win32/dec/WelsDecCore.vcproj
@@ -982,6 +982,10 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\..\common\src\WelsThreadLib.cpp"
+				>
+			</File>
+			<File
 				RelativePath="..\..\..\decoder\core\src\wels_decoder_thread.cpp"
 				>
 			</File>
diff --git a/chromium/third_party/openh264/src/codec/common/inc/copy_mb.h b/chromium/third_party/openh264/src/codec/common/inc/copy_mb.h
index 56bef626faa..532702a9edc 100644
--- a/chromium/third_party/openh264/src/codec/common/inc/copy_mb.h
+++ b/chromium/third_party/openh264/src/codec/common/inc/copy_mb.h
@@ -82,6 +82,13 @@ void WelsCopy16x8NotAligned_mmi (uint8_t* Dst, int32_t iStrideD, uint8_t* Src, int32_t iStrideS);
 void WelsCopy16x16_mmi (uint8_t* Dst, int32_t iStrideD, uint8_t* Src, int32_t iStrideS);
 void WelsCopy16x16NotAligned_mmi (uint8_t* Dst, int32_t iStrideD, uint8_t* Src, int32_t iStrideS);
 #endif//HAVE_MMI
+
+#if defined (HAVE_MSA)
+void WelsCopy8x8_msa (uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc, int32_t iStrideS);
+void WelsCopy8x16_msa (uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc, int32_t iStrideS);
+void WelsCopy16x8_msa (uint8_t* Dst, int32_t iStrideD, uint8_t* Src, int32_t iStrideS);
+void WelsCopy16x16_msa (uint8_t* Dst, int32_t iStrideD, uint8_t* Src, int32_t iStrideS);
+#endif//HAVE_MSA
 #if defined(__cplusplus)
 }
 #endif//__cplusplus
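The copy_mb.h hunk above only declares the MSA block-copy entry points; their bodies are built from the MSA_LD_V*/MSA_ST_V* macros that msa_macros.h (added further down in this diff) provides. As a rough illustration of the pattern, here is a minimal sketch; it is not the code from this commit, the function name is invented, and it assumes a MIPS toolchain with MSA enabled and <msa.h> available:

/* Illustrative sketch only -- not the implementation from this commit.
 * Copies a 16x16 block using the MSA_LD_V8/MSA_ST_V8 macros added below. */
#if defined (HAVE_MSA)
#include <stdint.h>
#include "msa_macros.h"

void WelsCopy16x16_msa_sketch (uint8_t* pDst, int32_t iStrideD,
                               uint8_t* pSrc, int32_t iStrideS) {
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;

  /* Load eight 16-byte rows with the source stride, then store them
   * with the destination stride. */
  MSA_LD_V8 (v16u8, pSrc, iStrideS, src0, src1, src2, src3,
             src4, src5, src6, src7);
  MSA_ST_V8 (v16u8, src0, src1, src2, src3, src4, src5, src6, src7,
             pDst, iStrideD);

  /* Advance past the rows just copied and repeat for the lower half. */
  pSrc += 8 * iStrideS;
  pDst += 8 * iStrideD;
  MSA_LD_V8 (v16u8, pSrc, iStrideS, src0, src1, src2, src3,
             src4, src5, src6, src7);
  MSA_ST_V8 (v16u8, src0, src1, src2, src3, src4, src5, src6, src7,
             pDst, iStrideD);
}
#endif /* HAVE_MSA */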
diff --git a/chromium/third_party/openh264/src/codec/common/inc/deblocking_common.h b/chromium/third_party/openh264/src/codec/common/inc/deblocking_common.h
index a605a6a224f..3ec9b2e5d8c 100644
--- a/chromium/third_party/openh264/src/codec/common/inc/deblocking_common.h
+++ b/chromium/third_party/openh264/src/codec/common/inc/deblocking_common.h
@@ -91,6 +91,20 @@ void DeblockChromaLt4H_mmi (uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride, int32_t iAlpha, int32_t iBeta,
     int8_t* pTC);
 void WelsNonZeroCount_mmi (int8_t* pNonZeroCount);
 #endif//HAVE_MMI
+
+#if defined(HAVE_MSA)
+void DeblockLumaLt4V_msa (uint8_t* pPixY, int32_t iStride, int32_t iAlpha, int32_t iBeta, int8_t* pTc);
+void DeblockLumaEq4V_msa (uint8_t* pPixY, int32_t iStride, int32_t iAlpha, int32_t iBeta);
+void DeblockLumaLt4H_msa (uint8_t* pPixY, int32_t iStride, int32_t iAlpha, int32_t iBeta, int8_t* pTc);
+void DeblockLumaEq4H_msa (uint8_t* pPixY, int32_t iStride, int32_t iAlpha, int32_t iBeta);
+void DeblockChromaEq4V_msa (uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride, int32_t iAlpha, int32_t iBeta);
+void DeblockChromaLt4V_msa (uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride, int32_t iAlpha, int32_t iBeta,
+    int8_t* pTC);
+void DeblockChromaEq4H_msa (uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride, int32_t iAlpha, int32_t iBeta);
+void DeblockChromaLt4H_msa (uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride, int32_t iAlpha, int32_t iBeta,
+    int8_t* pTC);
+void WelsNonZeroCount_msa (int8_t* pNonZeroCount);
+#endif//HAVE_MSA
 #if defined(__cplusplus)
 }
 #endif//__cplusplus
diff --git a/chromium/third_party/openh264/src/codec/common/inc/msa_macros.h b/chromium/third_party/openh264/src/codec/common/inc/msa_macros.h
new file mode 100644
index 00000000000..2eef0e5b838
--- /dev/null
+++ b/chromium/third_party/openh264/src/codec/common/inc/msa_macros.h
@@ -0,0 +1,2393 @@
+/*
+ * Copyright © 2020 Loongson Technology Co. Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Yin Shiyou (yinshiyou-hf@loongson.cn)
+ *         Gu Xiwei (guxiwei-hf@loongson.cn)
+ */
+
+/*
+ * This header file is copied from the Loongson LSOM project.
+ * The MSA macros are implemented with the MSA intrinsics in msa.h,
+ * and are used for simplifying MSA optimization.
+ */ + +#ifndef _MSA_MACROS_H +#define _MSA_MACROS_H 1 +#define MSA_MACROS_VERSION 18 +#include <msa.h> + +#if (__mips_isa_rev >= 6) + #define LH(psrc) \ + ( { \ + uint16_t val_lh_m = *(uint16_t *)(psrc); \ + val_lh_m; \ + } ) + + #define LW(psrc) \ + ( { \ + uint32_t val_lw_m = *(uint32_t *)(psrc); \ + val_lw_m; \ + } ) + + #if (__mips == 64) + #define LD(psrc) \ + ( { \ + uint64_t val_ld_m = *(uint64_t *)(psrc); \ + val_ld_m; \ + } ) + #else // !(__mips == 64) + #define LD(psrc) \ + ( { \ + uint8_t *psrc_ld_m = (uint8_t *) (psrc); \ + uint32_t val0_ld_m, val1_ld_m; \ + uint64_t val_ld_m = 0; \ + \ + val0_ld_m = LW(psrc_ld_m); \ + val1_ld_m = LW(psrc_ld_m + 4); \ + \ + val_ld_m = (uint64_t) (val1_ld_m); \ + val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000); \ + val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m); \ + \ + val_ld_m; \ + } ) + #endif // (__mips == 64) + + #define SH(val, pdst) *(uint16_t *)(pdst) = (val); + #define SW(val, pdst) *(uint32_t *)(pdst) = (val); + #define SD(val, pdst) *(uint64_t *)(pdst) = (val); + +#else // !(__mips_isa_rev >= 6) + #define LH(psrc) \ + ( { \ + uint8_t *psrc_lh_m = (uint8_t *) (psrc); \ + uint16_t val_lh_m; \ + \ + __asm__ volatile ( \ + "ulh %[val_lh_m], %[psrc_lh_m] \n\t" \ + \ + : [val_lh_m] "=r" (val_lh_m) \ + : [psrc_lh_m] "m" (*psrc_lh_m) \ + ); \ + \ + val_lh_m; \ + } ) + + #define LW(psrc) \ + ( { \ + uint8_t *psrc_lw_m = (uint8_t *) (psrc); \ + uint32_t val_lw_m; \ + \ + __asm__ volatile ( \ + "ulw %[val_lw_m], %[psrc_lw_m] \n\t" \ + \ + : [val_lw_m] "=r" (val_lw_m) \ + : [psrc_lw_m] "m" (*psrc_lw_m) \ + ); \ + \ + val_lw_m; \ + } ) + + #if (__mips == 64) + #define LD(psrc) \ + ( { \ + uint8_t *psrc_ld_m = (uint8_t *) (psrc); \ + uint64_t val_ld_m = 0; \ + \ + __asm__ volatile ( \ + "uld %[val_ld_m], %[psrc_ld_m] \n\t" \ + \ + : [val_ld_m] "=r" (val_ld_m) \ + : [psrc_ld_m] "m" (*psrc_ld_m) \ + ); \ + \ + val_ld_m; \ + } ) + #else // !(__mips == 64) + #define LD(psrc) \ + ( { \ + uint8_t *psrc_ld_m = (uint8_t *) (psrc); \ + uint32_t val0_ld_m, val1_ld_m; \ + uint64_t val_ld_m = 0; \ + \ + val0_ld_m = LW(psrc_ld_m); \ + val1_ld_m = LW(psrc_ld_m + 4); \ + \ + val_ld_m = (uint64_t) (val1_ld_m); \ + val_ld_m = (uint64_t) ((val_ld_m << 32) & 0xFFFFFFFF00000000); \ + val_ld_m = (uint64_t) (val_ld_m | (uint64_t) val0_ld_m); \ + \ + val_ld_m; \ + } ) + #endif // (__mips == 64) + + #define SH(val, pdst) \ + { \ + uint8_t *pdst_sh_m = (uint8_t *) (pdst); \ + uint16_t val_sh_m = (val); \ + \ + __asm__ volatile ( \ + "ush %[val_sh_m], %[pdst_sh_m] \n\t" \ + \ + : [pdst_sh_m] "=m" (*pdst_sh_m) \ + : [val_sh_m] "r" (val_sh_m) \ + ); \ + } + + #define SW(val, pdst) \ + { \ + uint8_t *pdst_sw_m = (uint8_t *) (pdst); \ + uint32_t val_sw_m = (val); \ + \ + __asm__ volatile ( \ + "usw %[val_sw_m], %[pdst_sw_m] \n\t" \ + \ + : [pdst_sw_m] "=m" (*pdst_sw_m) \ + : [val_sw_m] "r" (val_sw_m) \ + ); \ + } + + #define SD(val, pdst) \ + { \ + uint8_t *pdst_sd_m = (uint8_t *) (pdst); \ + uint32_t val0_sd_m, val1_sd_m; \ + \ + val0_sd_m = (uint32_t) ((val) & 0x00000000FFFFFFFF); \ + val1_sd_m = (uint32_t) (((val) >> 32) & 0x00000000FFFFFFFF); \ + \ + SW(val0_sd_m, pdst_sd_m); \ + SW(val1_sd_m, pdst_sd_m + 4); \ + } +#endif // (__mips_isa_rev >= 6) + + + + + + +/* Description : Load vector elements with stride. + * Arguments : Inputs - psrc (source pointer to load from) + * - stride + * Outputs - out0, out1... + * Return Type - as per RTYPE + * Details : Loads elements in 'out0' from (psrc). + * Loads elements in 'out1' from (psrc + stride). 
+ */ +#define MSA_LD_V(RTYPE, psrc, out) (out) = *((RTYPE *)(psrc)); + +#define MSA_LD_V2(RTYPE, psrc, stride, out0, out1) \ +{ \ + MSA_LD_V(RTYPE, (psrc), out0); \ + MSA_LD_V(RTYPE, (psrc) + (stride), out1); \ +} + +#define MSA_LD_V4(RTYPE, psrc, stride, out0, out1, out2, out3) \ +{ \ + MSA_LD_V2(RTYPE, (psrc), stride, out0, out1); \ + MSA_LD_V2(RTYPE, (psrc) + 2 * (stride) , stride, out2, out3); \ +} + +#define MSA_LD_V8(RTYPE, psrc, stride, out0, out1, out2, out3, \ + out4, out5, out6, out7) \ +{ \ + MSA_LD_V4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ + MSA_LD_V4(RTYPE, (psrc) + 4 * (stride), stride, out4, out5, out6, out7); \ +} + +/* Description : Store vectors with stride. + * Arguments : Inputs - in0, in1... (source vector to be stored) + * - stride + * Outputs - pdst (destination pointer to store to) + * Details : Stores elements from 'in0' to (pdst). + * Stores elements from 'in1' to (pdst + stride). + */ +#define MSA_ST_V(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in); + +#define MSA_ST_V2(RTYPE, in0, in1, pdst, stride) \ +{ \ + MSA_ST_V(RTYPE, in0, (pdst)); \ + MSA_ST_V(RTYPE, in1, (pdst) + (stride)); \ +} + +#define MSA_ST_V4(RTYPE, in0, in1, in2, in3, pdst, stride) \ +{ \ + MSA_ST_V2(RTYPE, in0, in1, (pdst), stride); \ + MSA_ST_V2(RTYPE, in2, in3, (pdst) + 2 * (stride), stride); \ +} + +#define MSA_ST_V8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ +{ \ + MSA_ST_V4(RTYPE, in0, in1, in2, in3, (pdst), stride); \ + MSA_ST_V4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * (stride), stride); \ +} + +/* Description : Store half word elements of vector with stride. + * Arguments : Inputs - in (source vector) + * - pdst (destination pointer to store to) + * - stride + * Details : Stores half word 'idx0' from 'in' to (pdst). + * Stores half word 'idx1' from 'in' to (pdst + stride). + * Similar for other elements. + */ +#define MSA_ST_H(in, idx, pdst) \ +{ \ + uint16_t out0_m; \ + out0_m = __msa_copy_u_h((v8i16) in, idx); \ + SH(out0_m, (pdst)); \ +} +#define MSA_ST_H2(in, idx0, idx1, pdst, stride) \ +{ \ + uint16_t out0_m, out1_m; \ + out0_m = __msa_copy_u_h((v8i16) in, idx0); \ + out1_m = __msa_copy_u_h((v8i16) in, idx1); \ + SH(out0_m, (pdst)); \ + SH(out1_m, (pdst) + stride); \ +} +#define MSA_ST_H4(in, idx0, idx1, idx2, idx3, pdst, stride) \ +{ \ + uint16_t out0_m, out1_m, out2_m, out3_m; \ + out0_m = __msa_copy_u_h((v8i16) in, idx0); \ + out1_m = __msa_copy_u_h((v8i16) in, idx1); \ + out2_m = __msa_copy_u_h((v8i16) in, idx2); \ + out3_m = __msa_copy_u_h((v8i16) in, idx3); \ + SH(out0_m, (pdst)); \ + SH(out1_m, (pdst) + stride); \ + SH(out2_m, (pdst) + 2 * stride); \ + SH(out3_m, (pdst) + 3 * stride); \ +} +#define MSA_ST_H8(in, idx0, idx1, idx2, idx3, idx4, idx5, \ + idx6, idx7, pdst, stride) \ +{ \ + MSA_ST_H4(in, idx0, idx1, idx2, idx3, pdst, stride) \ + MSA_ST_H4(in, idx4, idx5, idx6, idx7, (pdst) + 4*stride, stride) \ +} + +/* Description : Store word elements of vector with stride. + * Arguments : Inputs - in (source vector) + * - pdst (destination pointer to store to) + * - stride + * Details : Stores word 'idx0' from 'in' to (pdst). + * Stores word 'idx1' from 'in' to (pdst + stride). + * Similar for other elements. 
+ */ +#define MSA_ST_W(in, idx, pdst) \ +{ \ + uint32_t out0_m; \ + out0_m = __msa_copy_u_w((v4i32) in, idx); \ + SW(out0_m, (pdst)); \ +} +#define MSA_ST_W2(in, idx0, idx1, pdst, stride) \ +{ \ + uint32_t out0_m, out1_m; \ + out0_m = __msa_copy_u_w((v4i32) in, idx0); \ + out1_m = __msa_copy_u_w((v4i32) in, idx1); \ + SW(out0_m, (pdst)); \ + SW(out1_m, (pdst) + stride); \ +} +#define MSA_ST_W4(in, idx0, idx1, idx2, idx3, pdst, stride) \ +{ \ + uint32_t out0_m, out1_m, out2_m, out3_m; \ + out0_m = __msa_copy_u_w((v4i32) in, idx0); \ + out1_m = __msa_copy_u_w((v4i32) in, idx1); \ + out2_m = __msa_copy_u_w((v4i32) in, idx2); \ + out3_m = __msa_copy_u_w((v4i32) in, idx3); \ + SW(out0_m, (pdst)); \ + SW(out1_m, (pdst) + stride); \ + SW(out2_m, (pdst) + 2*stride); \ + SW(out3_m, (pdst) + 3*stride); \ +} +#define MSA_ST_W8(in0, in1, idx0, idx1, idx2, idx3, \ + idx4, idx5, idx6, idx7, pdst, stride) \ +{ \ + MSA_ST_W4(in0, idx0, idx1, idx2, idx3, pdst, stride) \ + MSA_ST_W4(in1, idx4, idx5, idx6, idx7, pdst + 4*stride, stride) \ +} + +/* Description : Store double word elements of vector with stride. + * Arguments : Inputs - in (source vector) + * - pdst (destination pointer to store to) + * - stride + * Details : Stores double word 'idx0' from 'in' to (pdst). + * Stores double word 'idx1' from 'in' to (pdst + stride). + * Similar for other elements. + */ +#define MSA_ST_D(in, idx, pdst) \ +{ \ + uint64_t out0_m; \ + out0_m = __msa_copy_u_d((v2i64) in, idx); \ + SD(out0_m, (pdst)); \ +} +#define MSA_ST_D2(in, idx0, idx1, pdst, stride) \ +{ \ + uint64_t out0_m, out1_m; \ + out0_m = __msa_copy_u_d((v2i64) in, idx0); \ + out1_m = __msa_copy_u_d((v2i64) in, idx1); \ + SD(out0_m, (pdst)); \ + SD(out1_m, (pdst) + stride); \ +} +#define MSA_ST_D4(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \ +{ \ + uint64_t out0_m, out1_m, out2_m, out3_m; \ + out0_m = __msa_copy_u_d((v2i64) in0, idx0); \ + out1_m = __msa_copy_u_d((v2i64) in0, idx1); \ + out2_m = __msa_copy_u_d((v2i64) in1, idx2); \ + out3_m = __msa_copy_u_d((v2i64) in1, idx3); \ + SD(out0_m, (pdst)); \ + SD(out1_m, (pdst) + stride); \ + SD(out2_m, (pdst) + 2 * stride); \ + SD(out3_m, (pdst) + 3 * stride); \ +} +#define MSA_ST_D8(in0, in1, in2, in3, idx0, idx1, idx2, idx3, \ + idx4, idx5, idx6, idx7, pdst, stride) \ +{ \ + MSA_ST_D4(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \ + MSA_ST_D4(in2, in3, idx4, idx5, idx6, idx7, pdst + 4 * stride, stride) \ +} + +/* Description : Shuffle byte vector elements as per mask vector. + * Arguments : Inputs - in0, in1 (source vectors) + * - mask (mask vectors) + * Outputs - out (dstination vectors) + * Return Type - as per RTYPE + * Details : Selective byte elements from 'in0' & 'in1' are copied to 'out' as + * per control vector 'mask'. + */ +#define MSA_VSHF_B(RTYPE, in0, in1, mask, out) \ +{ \ + out = (RTYPE) __msa_vshf_b((v16i8) mask, (v16i8) in0, (v16i8) in1); \ +} + +#define MSA_VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ +{ \ + MSA_VSHF_B(RTYPE, in0, in1, mask0, out0) \ + MSA_VSHF_B(RTYPE, in2, in3, mask1, out1) \ +} + +#define MSA_VSHF_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + mask0, mask1, mask2, mask3, out0, out1, out2, out3) \ +{ \ + MSA_VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \ + MSA_VSHF_B2(RTYPE, in4, in5, in6, in7, mask2, mask3, out2, out3); \ +} + +/* Description : Shuffle halfword vector elements as per mask vector. 
+ * Arguments : Inputs - in0, in1 (source vectors) + * - mask (mask vectors) + * Outputs - out (dstination vectors) + * Return Type - as per RTYPE + * Details : Selective halfword elements from 'in0' & 'in1' are copied to 'out' as + * per control vector 'mask'. + */ +#define MSA_VSHF_H(RTYPE, in0, in1, mask, out) \ +{ \ + out = (RTYPE) __msa_vshf_h((v8i16) mask, (v8i16) in0, (v8i16) in1); \ +} + +#define MSA_VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ +{ \ + MSA_VSHF_H(RTYPE, in0, in1, mask0, out0) \ + MSA_VSHF_H(RTYPE, in2, in3, mask1, out1) \ +} + +#define MSA_VSHF_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + mask0, mask1, mask2, mask3, out0, out1, out2, out3) \ +{ \ + MSA_VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \ + MSA_VSHF_H2(RTYPE, in4, in5, in6, in7, mask2, mask3, out2, out3); \ +} + +/* Description : Shuffle word vector elements as per mask vector. + * Arguments : Inputs - in0, in1 (source vectors) + * - mask (mask vectors) + * Outputs - out (dstination vectors) + * Return Type - as per RTYPE + * Details : Selective word elements from 'in0' & 'in1' are copied to 'out' as + * per control vector 'mask'. + */ +#define MSA_VSHF_W(RTYPE, in0, in1, mask, out) \ +{ \ + out = (RTYPE) __msa_vshf_w((v4i32) mask, (v4i32) in0, (v4i32) in1); \ +} + +#define MSA_VSHF_W2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ +{ \ + MSA_VSHF_W(RTYPE, in0, in1, mask0, out0) \ + MSA_VSHF_W(RTYPE, in2, in3, mask1, out1) \ +} + +#define MSA_VSHF_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + mask0, mask1, mask2, mask3, out0, out1, out2, out3) \ +{ \ + MSA_VSHF_W2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \ + MSA_VSHF_W2(RTYPE, in4, in5, in6, in7, mask2, mask3, out2, out3); \ +} + +/* Description : Interleave even byte elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Even byte elements of 'in0' and even byte + * elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVEV_B(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvev_b((v16i8) in0, (v16i8) in1); \ +} + +#define MSA_ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVEV_B(RTYPE, in0, in1, out0); \ + MSA_ILVEV_B(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave even half word elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Even half word elements of 'in0' and even half word + * elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVEV_H(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvev_h((v8i16) in0, (v8i16) in1); \ +} + +#define MSA_ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVEV_H(RTYPE, in0, in1, out0); \ + MSA_ILVEV_H(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave even word elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Even word elements of 'in0' and even word + * elements of 'in1' are interleaved and copied to 'out'. 
+ */ +#define MSA_ILVEV_W(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvev_w((v2i64) in0, (v2i64) in1); \ +} + +#define MSA_ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVEV_W(RTYPE, in0, in1, out0); \ + MSA_ILVEV_W(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVEV_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVEV_W2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave even double word elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Even double word elements of 'in0' and even double word + * elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVEV_D(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvev_d((v2i64) in0, (v2i64) in1); \ +} + +#define MSA_ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVEV_D(RTYPE, in0, in1, out0); \ + MSA_ILVEV_D(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVEV_D2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave odd byte elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Odd byte elements of 'in0' and odd byte + * elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVOD_B(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvod_b((v16i8) in0, (v16i8) in1); \ +} + +#define MSA_ILVOD_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVOD_B(RTYPE, in0, in1, out0); \ + MSA_ILVOD_B(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVOD_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVOD_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVOD_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave odd half word elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Odd half word elements of 'in0' and odd half word + * elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVOD_H(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvod_h((v8i16) in0, (v8i16) in1); \ +} + +#define MSA_ILVOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVOD_H(RTYPE, in0, in1, out0); \ + MSA_ILVOD_H(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVOD_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVOD_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVOD_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave odd word elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Odd word elements of 'in0' and odd word + * elements of 'in1' are interleaved and copied to 'out'. 
+ */ +#define MSA_ILVOD_W(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvod_w((v4i32) in0, (v4i32) in1); \ +} + +#define MSA_ILVOD_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVOD_W(RTYPE, in0, in1, out0); \ + MSA_ILVOD_W(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVOD_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVOD_W2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVOD_W2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave odd double word elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Odd double word elements of 'in0' and odd double word + * elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVOD_D(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvod_d((v2i64) in0, (v2i64) in1); \ +} + +#define MSA_ILVOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVOD_D(RTYPE, in0, in1, out0); \ + MSA_ILVOD_D(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVOD_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVOD_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVOD_D2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave left half of byte elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Left half of byte elements of 'in0' and left half of byte + * elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVL_B(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvl_b((v16i8) in0, (v16i8) in1); \ +} + +#define MSA_ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVL_B(RTYPE, in0, in1, out0); \ + MSA_ILVL_B(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave left half of halfword elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Left half of halfword elements of 'in0' and left half of halfword + * elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVL_H(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvl_h((v8i16) in0, (v8i16) in1); \ +} + +#define MSA_ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVL_H(RTYPE, in0, in1, out0); \ + MSA_ILVL_H(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVL_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVL_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave left half of word elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Left half of word elements of 'in0' and left half of word + * elements of 'in1' are interleaved and copied to 'out'. 
+ */ +#define MSA_ILVL_W(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvl_w((v4i32) in0, (v4i32) in1); \ +} + +#define MSA_ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVL_W(RTYPE, in0, in1, out0); \ + MSA_ILVL_W(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVL_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVL_W2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave left half of double word elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Left half of double word elements of 'in0' and left half of + * double word elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVL_D(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvl_d((v2i64) in0, (v2i64) in1); \ +} + +#define MSA_ILVL_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVL_D(RTYPE, in0, in1, out0); \ + MSA_ILVL_D(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVL_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVL_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVL_D2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave right half of byte elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Right half of byte elements of 'in0' and right half of byte + * elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVR_B(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1); \ +} + +#define MSA_ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVR_B(RTYPE, in0, in1, out0); \ + MSA_ILVR_B(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave right half of halfword elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Right half of halfword elements of 'in0' and right half of halfword + * elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVR_H(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvr_h((v8i16) in0, (v8i16) in1); \ +} + +#define MSA_ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVR_H(RTYPE, in0, in1, out0); \ + MSA_ILVR_H(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave right half of word elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Right half of word elements of 'in0' and right half of word + * elements of 'in1' are interleaved and copied to 'out'. 
+ */ +#define MSA_ILVR_W(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvr_w((v4i32) in0, (v4i32) in1); \ +} + +#define MSA_ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVR_W(RTYPE, in0, in1, out0); \ + MSA_ILVR_W(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVR_W2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave right half of double word elements from vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Right half of double word elements of 'in0' and right half of + * double word elements of 'in1' are interleaved and copied to 'out'. + */ +#define MSA_ILVR_D(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_ilvr_d((v2i64) in0, (v2i64) in1); \ +} + +#define MSA_ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_ILVR_D(RTYPE, in0, in1, out0); \ + MSA_ILVR_D(RTYPE, in2, in3, out1); \ +} + +#define MSA_ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ + MSA_ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} + +/* Description : Interleave both left and right half of input vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Right half of byte elements from 'in0' and 'in1' are + * interleaved and stored to 'out0'. + * Left half of byte elements from 'in0' and 'in1' are + * interleaved and stored to 'out1'. + */ +#define MSA_ILVRL_B2(RTYPE, in0, in1, out0, out1) \ +{ \ + MSA_ILVR_B(RTYPE, in0, in1, out0); \ + MSA_ILVL_B(RTYPE, in0, in1, out1); \ +} + +#define MSA_ILVRL_B4(RTYPE, in0, in1, in2, in3, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVRL_B2(RTYPE, in0, in1, out0, out1); \ + MSA_ILVRL_B2(RTYPE, in2, in3, out2, out3); \ +} + +/* Description : Interleave both left and right half of input vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Right half of halfword elements from 'in0' and 'in1' are + * interleaved and stored to 'out0'. + * Left half of halfword elements from 'in0' and 'in1' are + * interleaved and stored to 'out1'. + */ +#define MSA_ILVRL_H2(RTYPE, in0, in1, out0, out1) \ +{ \ + MSA_ILVR_H(RTYPE, in0, in1, out0); \ + MSA_ILVL_H(RTYPE, in0, in1, out1); \ +} + +#define MSA_ILVRL_H4(RTYPE, in0, in1, in2, in3, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVRL_H2(RTYPE, in0, in1, out0, out1); \ + MSA_ILVRL_H2(RTYPE, in2, in3, out2, out3); \ +} + +/* Description : Interleave both left and right half of input vectors. + * Arguments : Inputs - in0, in1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Right half of word elements from 'in0' and 'in1' are + * interleaved and stored to 'out0'. + * Left half of word elements from 'in0' and 'in1' are + * interleaved and stored to 'out1'. + */ +#define MSA_ILVRL_W2(RTYPE, in0, in1, out0, out1) \ +{ \ + MSA_ILVR_W(RTYPE, in0, in1, out0); \ + MSA_ILVL_W(RTYPE, in0, in1, out1); \ +} + +#define MSA_ILVRL_W4(RTYPE, in0, in1, in2, in3, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVRL_W2(RTYPE, in0, in1, out0, out1); \ + MSA_ILVRL_W2(RTYPE, in2, in3, out2, out3); \ +} + +/* Description : Interleave both left and right half of input vectors. 
+ * Arguments : Inputs - in0, in1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Right half of double word elements from 'in0' and 'in1' are + * interleaved and stored to 'out0'. + * Left half of double word elements from 'in0' and 'in1' are + * interleaved and stored to 'out1'. + */ +#define MSA_ILVRL_D2(RTYPE, in0, in1, out0, out1) \ +{ \ + MSA_ILVR_D(RTYPE, in0, in1, out0); \ + MSA_ILVL_D(RTYPE, in0, in1, out1); \ +} + +#define MSA_ILVRL_D4(RTYPE, in0, in1, in2, in3, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVRL_D2(RTYPE, in0, in1, out0, out1); \ + MSA_ILVRL_D2(RTYPE, in2, in3, out2, out3); \ +} + +/* Description : Indexed byte elements are replicated to all elements in + * output vector. + * Arguments : Inputs - in, idx + * Outputs - out + * Return Type - as per RTYPE + * Details : 'idx' element value from 'in' vector is replicated to all + * elements in 'out' vector. + * Valid index range for halfword operation is 0-7. + */ +#define MSA_SPLATI_B(RTYPE, in, idx, out) \ +{ \ + out = (RTYPE) __msa_splati_b((v16i8) in, idx); \ +} + +#define MSA_SPLATI_B2(RTYPE, in, idx0, idx1, out0, out1) \ +{ \ + MSA_SPLATI_B(RTYPE, in, idx0, out0) \ + MSA_SPLATI_B(RTYPE, in, idx1, out1) \ +} + +#define MSA_SPLATI_B4(RTYPE, in, idx0, idx1, idx2, idx3, \ + out0, out1, out2, out3) \ +{ \ + MSA_SPLATI_B2(RTYPE, in, idx0, idx1, out0, out1) \ + MSA_SPLATI_B2(RTYPE, in, idx2, idx3, out2, out3) \ +} + +/* Description : Indexed halfword elements are replicated to all elements in + * output vector. + * Arguments : Inputs - in, idx + * Outputs - out + * Return Type - as per RTYPE + * Details : 'idx' element value from 'in' vector is replicated to all + * elements in 'out' vector. + * Valid index range for halfword operation is 0-7. + */ +#define MSA_SPLATI_H(RTYPE, in, idx, out) \ +{ \ + out = (RTYPE) __msa_splati_h((v8i16) in, idx); \ +} + +#define MSA_SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \ +{ \ + MSA_SPLATI_H(RTYPE, in, idx0, out0) \ + MSA_SPLATI_H(RTYPE, in, idx1, out1) \ +} + +#define MSA_SPLATI_H4(RTYPE, in, idx0, idx1, idx2, idx3, \ + out0, out1, out2, out3) \ +{ \ + MSA_SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \ + MSA_SPLATI_H2(RTYPE, in, idx2, idx3, out2, out3) \ +} + +/* Description : Indexed word elements are replicated to all elements in + * output vector. + * Arguments : Inputs - in, idx + * Outputs - out + * Return Type - as per RTYPE + * Details : 'idx' element value from 'in' vector is replicated to all + * elements in 'out' vector. + * Valid index range for halfword operation is 0-3. + */ +#define MSA_SPLATI_W(RTYPE, in, idx, out) \ +{ \ + out = (RTYPE) __msa_splati_w((v4i32) in, idx); \ +} + +#define MSA_SPLATI_W2(RTYPE, in, idx0, idx1, out0, out1) \ +{ \ + MSA_SPLATI_W(RTYPE, in, idx0, out0) \ + MSA_SPLATI_W(RTYPE, in, idx1, out1) \ +} + +#define MSA_SPLATI_W4(RTYPE, in, idx0, idx1, idx2, idx3, \ + out0, out1, out2, out3) \ +{ \ + MSA_SPLATI_W2(RTYPE, in, idx0, idx1, out0, out1) \ + MSA_SPLATI_W2(RTYPE, in, idx2, idx3, out2, out3) \ +} + +/* Description : Pack even byte elements of vector pairs. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Even byte elements of 'in0' are copied to the left half of + * 'out' & even byte elements of 'in1' are copied to the right + * half of 'out'. 
+ */ +#define MSA_PCKEV_B(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_pckev_b((v16i8) in0, (v16i8) in1); \ +} + +#define MSA_PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_PCKEV_B(RTYPE, in0, in1, out0) \ + MSA_PCKEV_B(RTYPE, in2, in3, out1) \ +} + +#define MSA_PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, \ + in6, in7, out0, out1, out2, out3) \ +{ \ + MSA_PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ + MSA_PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3) \ +} + +/* Description : Pack even halfword elements of vector pairs. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Even halfword elements of 'in0' are copied to the left half of + * 'out' & even halfword elements of 'in1' are copied to the right + * half of 'out'. + */ +#define MSA_PCKEV_H(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_pckev_h((v8i16) in0, (v8i16) in1); \ +} + +#define MSA_PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_PCKEV_H(RTYPE, in0, in1, out0) \ + MSA_PCKEV_H(RTYPE, in2, in3, out1) \ +} + +#define MSA_PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, \ + in6, in7, out0, out1, out2, out3) \ +{ \ + MSA_PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ + MSA_PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3) \ +} + +/* Description : Pack even word elements of vector pairs. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Even word elements of 'in0' are copied to the left half of + * 'out' & even word elements of 'in1' are copied to the right + * half of 'out'. + */ +#define MSA_PCKEV_W(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_pckev_w((v4i32) in0, (v4i32) in1); \ +} + +#define MSA_PCKEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_PCKEV_W(RTYPE, in0, in1, out0) \ + MSA_PCKEV_W(RTYPE, in2, in3, out1) \ +} + +#define MSA_PCKEV_W4(RTYPE, in0, in1, in2, in3, in4, in5, \ + in6, in7, out0, out1, out2, out3) \ +{ \ + MSA_PCKEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ + MSA_PCKEV_W2(RTYPE, in4, in5, in6, in7, out2, out3) \ +} + +/* Description : Pack even double word elements of vector pairs. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Even double word elements of 'in0' are copied to the left + * half of 'out' & even double word elements of 'in1' are + * copied to the right half of 'out'. + */ +#define MSA_PCKEV_D(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_pckev_d((v2i64) in0, (v2i64) in1); \ +} + +#define MSA_PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_PCKEV_D(RTYPE, in0, in1, out0) \ + MSA_PCKEV_D(RTYPE, in2, in3, out1) \ +} + +#define MSA_PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, \ + in6, in7, out0, out1, out2, out3) \ +{ \ + MSA_PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ + MSA_PCKEV_D2(RTYPE, in4, in5, in6, in7, out2, out3) \ +} + +/* Description : Pack odd byte elements of vector pairs. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Odd byte elements of 'in0' are copied to the left half of + * 'out' & odd byte elements of 'in1' are copied to the right + * half of 'out'. 
+ */ +#define MSA_PCKOD_B(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_pckod_b((v16i8) in0, (v16i8) in1); \ +} + +#define MSA_PCKOD_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_PCKOD_B(RTYPE, in0, in1, out0) \ + MSA_PCKOD_B(RTYPE, in2, in3, out1) \ +} + +#define MSA_PCKOD_B4(RTYPE, in0, in1, in2, in3, in4, in5, \ + in6, in7, out0, out1, out2, out3) \ +{ \ + MSA_PCKOD_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ + MSA_PCKOD_B2(RTYPE, in4, in5, in6, in7, out2, out3) \ +} + +/* Description : Pack odd halfword elements of vector pairs. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Odd halfword elements of 'in0' are copied to the left half of + * 'out' & odd halfword elements of 'in1' are copied to the right + * half of 'out'. + */ +#define MSA_PCKOD_H(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_pckod_h((v8i16) in0, (v8i16) in1); \ +} + +#define MSA_PCKOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_PCKOD_H(RTYPE, in0, in1, out0) \ + MSA_PCKOD_H(RTYPE, in2, in3, out1) \ +} + +#define MSA_PCKOD_H4(RTYPE, in0, in1, in2, in3, in4, in5, \ + in6, in7, out0, out1, out2, out3) \ +{ \ + MSA_PCKOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ + MSA_PCKOD_H2(RTYPE, in4, in5, in6, in7, out2, out3) \ +} + +/* Description : Pack odd word elements of vector pairs. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Odd word elements of 'in0' are copied to the left half of + * 'out' & odd word elements of 'in1' are copied to the right + * half of 'out'. + */ +#define MSA_PCKOD_W(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_pckod_w((v4i32) in0, (v4i32) in1); \ +} + +#define MSA_PCKOD_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_PCKOD_W(RTYPE, in0, in1, out0) \ + MSA_PCKOD_W(RTYPE, in2, in3, out1) \ +} + +#define MSA_PCKOD_W4(RTYPE, in0, in1, in2, in3, in4, in5, \ + in6, in7, out0, out1, out2, out3) \ +{ \ + MSA_PCKOD_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ + MSA_PCKOD_W2(RTYPE, in4, in5, in6, in7, out2, out3) \ +} + +/* Description : Pack odd double word elements of vector pairs. + * Arguments : Inputs - in0, in1 + * Outputs - out + * Return Type - as per RTYPE + * Details : Odd double word elements of 'in0' are copied to the left + * half of 'out' & odd double word elements of 'in1' are + * copied to the right half of 'out'. + */ +#define MSA_PCKOD_D(RTYPE, in0, in1, out) \ +{ \ + out = (RTYPE) __msa_pckod_d((v2i64) in0, (v2i64) in1); \ +} + +#define MSA_PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ +{ \ + MSA_PCKOD_D(RTYPE, in0, in1, out0) \ + MSA_PCKOD_D(RTYPE, in2, in3, out1) \ +} + +#define MSA_PCKOD_D4(RTYPE, in0, in1, in2, in3, in4, in5, \ + in6, in7, out0, out1, out2, out3) \ +{ \ + MSA_PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ + MSA_PCKOD_D2(RTYPE, in4, in5, in6, in7, out2, out3) \ +} + +/* Description : Dot product of unsigned byte vector elements. + * Arguments : Inputs - mult + * cnst + * Outputs - out + * Return Type - as per RTYPE + * Details : Unsigned byte elements from 'mult' are multiplied with + * unsigned byte elements from 'cnst' producing a result + * twice the size of input i.e. unsigned halfword. + * Then this multiplication results of adjacent odd-even elements + * are added together and stored to the out vector. 
+ */ +#define MSA_DOTP_UB(RTYPE, mult, cnst, out) \ +{ \ + out = (RTYPE) __msa_dotp_u_h((v16u8) mult, (v16u8) cnst); \ +} + +#define MSA_DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ +{ \ + MSA_DOTP_UB(RTYPE, mult0, cnst0, out0) \ + MSA_DOTP_UB(RTYPE, mult1, cnst1, out1) \ +} + +#define MSA_DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3, \ + cnst0, cnst1, cnst2, cnst3, \ + out0, out1, out2, out3) \ +{ \ + MSA_DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + MSA_DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ +} + +/* Description : Dot product of signed byte vector elements. + * Arguments : Inputs - mult + * cnst + * Outputs - out + * Return Type - as per RTYPE + * Details : Signed byte elements from 'mult' are multiplied with + * signed byte elements from 'cnst' producing a result + * twice the size of input i.e. signed halfword. + * Then this multiplication results of adjacent odd-even elements + * are added together and stored to the out vector. + */ +#define MSA_DOTP_SB(RTYPE, mult, cnst, out) \ +{ \ + out = (RTYPE) __msa_dotp_s_h((v16i8) mult, (v16i8) cnst); \ +} + +#define MSA_DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ +{ \ + MSA_DOTP_SB(RTYPE, mult0, cnst0, out0) \ + MSA_DOTP_SB(RTYPE, mult1, cnst1, out1) \ +} + +#define MSA_DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3, \ + cnst0, cnst1, cnst2, cnst3, \ + out0, out1, out2, out3) \ +{ \ + MSA_DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + MSA_DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ +} + +/* Description : Dot product of unsigned halfword vector elements. + * Arguments : Inputs - mult + * cnst + * Outputs - out + * Return Type - as per RTYPE + * Details : Unsigned halfword elements from 'mult' are multiplied with + * unsigned halfword elements from 'cnst' producing a result + * twice the size of input i.e. unsigned word. + * Then this multiplication results of adjacent odd-even elements + * are added together and stored to the out vector. + */ +#define MSA_DOTP_UH(RTYPE, mult, cnst, out) \ +{ \ + out = (RTYPE) __msa_dotp_u_w((v8u16) mult, (v8u16) cnst); \ +} + +#define MSA_DOTP_UH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ +{ \ + MSA_DOTP_UH(RTYPE, mult0, cnst0, out0) \ + MSA_DOTP_UH(RTYPE, mult1, cnst1, out1) \ +} + +#define MSA_DOTP_UH4(RTYPE, mult0, mult1, mult2, mult3, \ + cnst0, cnst1, cnst2, cnst3, \ + out0, out1, out2, out3) \ +{ \ + MSA_DOTP_UH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + MSA_DOTP_UH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ +} + +/* Description : Dot product of signed halfword vector elements. + * Arguments : Inputs - mult + * cnst + * Outputs - out + * Return Type - as per RTYPE + * Details : Signed halfword elements from 'mult' are multiplied with + * signed halfword elements from 'cnst' producing a result + * twice the size of input i.e. signed word. + * Then this multiplication results of adjacent odd-even elements + * are added together and stored to the out vector. 
+ */ +#define MSA_DOTP_SH(RTYPE, mult, cnst, out) \ +{ \ + out = (RTYPE) __msa_dotp_s_w((v8i16) mult, (v8i16) cnst); \ +} + +#define MSA_DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ +{ \ + MSA_DOTP_SH(RTYPE, mult0, cnst0, out0) \ + MSA_DOTP_SH(RTYPE, mult1, cnst1, out1) \ +} + +#define MSA_DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3, \ + cnst0, cnst1, cnst2, cnst3, \ + out0, out1, out2, out3) \ +{ \ + MSA_DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + MSA_DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ +} + +/* Description : Dot product & addition of unsigned byte vector elements. + * Arguments : Inputs - mult + * cnst + * Outputs - out + * Return Type - as per RTYPE + * Details : Unsigned byte elements from 'mult' are multiplied with + * unsigned byte elements from 'cnst' producing a result + * twice the size of input i.e. unsigned halfword. + * Then this multiplication results of adjacent odd-even elements + * are added to the out vector. + */ +#define MSA_DPADD_UB(RTYPE, mult, cnst, out) \ +{ \ + out = (RTYPE) __msa_dpadd_u_h((v8u16) out, \ + (v16u8) mult, (v16u8) cnst); \ +} + +#define MSA_DPADD_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ +{ \ + MSA_DPADD_UB(RTYPE, mult0, cnst0, out0) \ + MSA_DPADD_UB(RTYPE, mult1, cnst1, out1) \ +} + +#define MSA_DPADD_UB4(RTYPE, mult0, mult1, mult2, mult3, \ + cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \ +{ \ + MSA_DPADD_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + MSA_DPADD_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ +} + +/* Description : Dot product & addition of signed byte vector elements. + * Arguments : Inputs - mult + * cnst + * Outputs - out + * Return Type - as per RTYPE + * Details : Signed byte elements from 'mult' are multiplied with + * signed byte elements from 'cnst' producing a result + * twice the size of input i.e. signed halfword. + * Then this multiplication results of adjacent odd-even elements + * are added to the out vector. + */ +#define MSA_DPADD_SB(RTYPE, mult, cnst, out) \ +{ \ + out = (RTYPE) __msa_dpadd_s_h((v8i16) out, \ + (v16i8) mult, (v16i8) cnst); \ +} + +#define MSA_DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ +{ \ + MSA_DPADD_SB(RTYPE, mult0, cnst0, out0) \ + MSA_DPADD_SB(RTYPE, mult1, cnst1, out1) \ +} + +#define MSA_DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3, \ + cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \ +{ \ + MSA_DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + MSA_DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ +} + +/* Description : Dot product & addition of unsigned halfword vector elements. + * Arguments : Inputs - mult + * cnst + * Outputs - out + * Return Type - as per RTYPE + * Details : Unsigned halfword elements from 'mult' are multiplied with + * unsigned halfword elements from 'cnst' producing a result + * twice the size of input i.e. unsigned word. + * Then this multiplication results of adjacent odd-even elements + * are added to the out vector. 
+ */ +#define MSA_DPADD_UH(RTYPE, mult, cnst, out) \ +{ \ + out = (RTYPE) __msa_dpadd_u_w((v4u32) out, \ + (v8u16) mult, (v8u16) cnst); \ +} + +#define MSA_DPADD_UH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ +{ \ + MSA_DPADD_UH(RTYPE, mult0, cnst0, out0) \ + MSA_DPADD_UH(RTYPE, mult1, cnst1, out1) \ +} + +#define MSA_DPADD_UH4(RTYPE, mult0, mult1, mult2, mult3, \ + cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \ +{ \ + MSA_DPADD_UH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + MSA_DPADD_UH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ +} + +/* Description : Dot product & addition of signed halfword vector elements. + * Arguments : Inputs - mult + * cnst + * Outputs - out + * Return Type - as per RTYPE + * Details : Signed halfword elements from 'mult' are multiplied with + * signed halfword elements from 'cnst' producing a result + * twice the size of input i.e. signed word. + * Then this multiplication results of adjacent odd-even elements + * are added to the out vector. + */ +#define MSA_DPADD_SH(RTYPE, mult, cnst, out) \ +{ \ + out = (RTYPE) __msa_dpadd_s_w((v4i32) out, \ + (v8i16) mult, (v8i16) cnst); \ +} + +#define MSA_DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ +{ \ + MSA_DPADD_SH(RTYPE, mult0, cnst0, out0) \ + MSA_DPADD_SH(RTYPE, mult1, cnst1, out1) \ +} + +#define MSA_DPADD_SH4(RTYPE, mult0, mult1, mult2, mult3, \ + cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \ +{ \ + MSA_DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + MSA_DPADD_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ +} + +/* Description : Clip all signed halfword elements of input vector between min & max. + * out = ((in) < (min)) ? (min) : (((in) > (max)) ? (max) : (in)). + * Arguments : Inputs - in (input vector) + * - min (min threshold) + * - max (max threshold) + * Outputs - in (output vector with clipped elements) + * Note : type of 'in' must be v8i16. + */ +#define MSA_CLIP_SH(in, min, max) \ +{ \ + in = __msa_max_s_h((v8i16) min, (v8i16) in); \ + in = __msa_min_s_h((v8i16) max, (v8i16) in); \ +} + +/* Description : Clip all signed halfword elements of input vector between 0 & 255. + * Arguments : Inputs - in (input vector) + * Outputs - in (output vector with clipped elements) + * Note : type of 'in' must be v8i16. + */ +#define MSA_CLIP_SH_0_255(in) \ +{ \ + in = __msa_maxi_s_h((v8i16) in, 0); \ + in = (v8i16) __msa_sat_u_h((v8u16) in, 7); \ +} + +#define MSA_CLIP_SH2_0_255(in0, in1) \ +{ \ + MSA_CLIP_SH_0_255(in0); \ + MSA_CLIP_SH_0_255(in1); \ +} + +#define MSA_CLIP_SH4_0_255(in0, in1, in2, in3) \ +{ \ + MSA_CLIP_SH2_0_255(in0, in1); \ + MSA_CLIP_SH2_0_255(in2, in3); \ +} + +#define MSA_CLIP_SH8_0_255(in0, in1, in2, in3, \ + in4, in5, in6, in7) \ +{ \ + MSA_CLIP_SH4_0_255(in0, in1, in2, in3); \ + MSA_CLIP_SH4_0_255(in4, in5, in6, in7); \ +} + +/* Description : Clip all signed word elements of input vector between 0 & 255. + * Arguments : Inputs - in (input vector) + * Outputs - in (output vector with clipped elements) + * Note : type of 'in' must be v4i32. 
+ */ +#define MSA_CLIP_SW_0_255(in) \ +{ \ + in = __msa_maxi_s_w((v4i32) in, 0); \ + in = (v4i32) __msa_sat_u_w((v4u32) in, 7); \ +} + +#define MSA_CLIP_SW2_0_255(in0, in1) \ +{ \ + MSA_CLIP_SW_0_255(in0); \ + MSA_CLIP_SW_0_255(in1); \ +} + +#define MSA_CLIP_SW4_0_255(in0, in1, in2, in3) \ +{ \ + MSA_CLIP_SW2_0_255(in0, in1); \ + MSA_CLIP_SW2_0_255(in2, in3); \ +} + +#define MSA_CLIP_SW8_0_255(in0, in1, in2, in3, \ + in4, in5, in6, in7) \ +{ \ + MSA_CLIP_SW4_0_255(in0, in1, in2, in3); \ + MSA_CLIP_SW4_0_255(in4, in5, in6, in7); \ +} + +/* Description : Addition of 16 unsigned byte elements. + * 16 unsigned byte elements of input vector are added + * together and resulted integer sum is returned. + * Arguments : Inputs - in (unsigned byte vector) + * Outputs - sum_m (u32 sum) + * Return Type - unsigned word + */ +#define MSA_HADD_UB_U32(in, sum_m) \ +{ \ + v8u16 res_m; \ + v4u32 res0_m; \ + v2u64 res1_m, res2_m; \ + \ + res_m = __msa_hadd_u_h((v16u8) in, (v16u8) in); \ + res0_m = __msa_hadd_u_w(res_m, res_m); \ + res1_m = __msa_hadd_u_d(res0_m, res0_m); \ + res2_m = (v2u64) __msa_splati_d((v2i64) res1_m, 1); \ + res1_m += res2_m; \ + sum_m = __msa_copy_u_w((v4i32) res1_m, 0); \ +} + +/* Description : Addition of 8 unsigned halfword elements. + * 8 unsigned halfword elements of input vector are added + * together and resulted integer sum is returned. + * Arguments : Inputs - in (unsigned halfword vector) + * Outputs - sum_m (u32 sum) + * Return Type - unsigned word + */ +#define MSA_HADD_UH_U32(in, sum_m) \ +{ \ + v4u32 res_m; \ + v2u64 res0_m, res1_m; \ + \ + res_m = __msa_hadd_u_w((v8u16) in, (v8u16) in); \ + res0_m = __msa_hadd_u_d(res_m, res_m); \ + res1_m = (v2u64) __msa_splati_d((v2i64) res0_m, 1); \ + res0_m += res1_m; \ + sum_m = __msa_copy_u_w((v4i32) res0_m, 0); \ +} + +/* Description : Addition of 4 unsigned word elements. + * 4 unsigned word elements of input vector are added together and + * resulted integer sum is returned. + * Arguments : Inputs - in (unsigned word vector) + * Outputs - sum_m (u32 sum) + * Return Type - unsigned word + */ +#define MSA_HADD_UW_U32(in, sum_m) \ +{ \ + v2u64 res0_m, res1_m; \ + \ + res0_m = __msa_hadd_u_d((v4u32) in, (v4u32) in); \ + res1_m = (v2u64) __msa_splati_d((v2i64) res0_m, 1); \ + res0_m += res1_m; \ + sum_m = __msa_copy_u_w((v4i32) res0_m, 0); \ +} + +/* Description : Addition of 16 signed byte elements. + * 16 signed byte elements of input vector are added + * together and resulted integer sum is returned. + * Arguments : Inputs - in (signed byte vector) + * Outputs - sum_m (i32 sum) + * Return Type - signed word + */ +#define MSA_HADD_SB_S32(in, sum_m) \ +{ \ + v8i16 res_m; \ + v4i32 res0_m; \ + v2i64 res1_m, res2_m; \ + \ + res_m = __msa_hadd_s_h((v16i8) in, (v16i8) in); \ + res0_m = __msa_hadd_s_w(res_m, res_m); \ + res1_m = __msa_hadd_s_d(res0_m, res0_m); \ + res2_m = __msa_splati_d(res1_m, 1); \ + res1_m += res2_m; \ + sum_m = __msa_copy_s_w((v4i32) res1_m, 0); \ +} + +/* Description : Addition of 8 signed halfword elements. + * 8 signed halfword elements of input vector are added + * together and resulted integer sum is returned. 
+ * Arguments   : Inputs  - in (signed halfword vector)
+ *               Outputs - sum_m (i32 sum)
+ *               Return Type - signed word
+ */
+#define MSA_HADD_SH_S32(in, sum_m)                        \
+{                                                         \
+    v4i32 res_m;                                          \
+    v2i64 res0_m, res1_m;                                 \
+                                                          \
+    res_m = __msa_hadd_s_w((v8i16) in, (v8i16) in);       \
+    res0_m = __msa_hadd_s_d(res_m, res_m);                \
+    res1_m = __msa_splati_d(res0_m, 1);                   \
+    res0_m += res1_m;                                     \
+    sum_m = __msa_copy_s_w((v4i32) res0_m, 0);            \
+}
+
+/* Description : Addition of 4 signed word elements.
+ *               4 signed word elements of the input vector are added together
+ *               and the resulting integer sum is returned.
+ * Arguments   : Inputs  - in (signed word vector)
+ *               Outputs - sum_m (i32 sum)
+ *               Return Type - signed word
+ */
+#define MSA_HADD_SW_S32(in, sum_m)                        \
+{                                                         \
+    v2i64 res0_m, res1_m;                                 \
+                                                          \
+    res0_m = __msa_hadd_s_d((v4i32) in, (v4i32) in);      \
+    res1_m = __msa_splati_d(res0_m, 1);                   \
+    res0_m += res1_m;                                     \
+    sum_m = __msa_copy_s_w((v4i32) res0_m, 0);            \
+}
+
+/* Description : Saturate the unsigned halfword element values to the max
+ *               unsigned value of (sat_val+1 bits).
+ *               The element data width remains unchanged.
+ * Arguments   : Inputs  - in, sat_val
+ *               Outputs - in (in place)
+ *               Return Type - v8u16
+ * Details     : Each unsigned halfword element from 'in' is saturated to the
+ *               value generated with (sat_val+1) bit range.
+ *               Results are stored in place in the original vectors.
+ */
+#define MSA_SAT_UH(in, sat_val)       \
+{                                     \
+    in = __msa_sat_u_h(in, sat_val);  \
+}
+
+#define MSA_SAT_UH2(in0, in1, sat_val)  \
+{                                       \
+    MSA_SAT_UH(in0, sat_val)            \
+    MSA_SAT_UH(in1, sat_val)            \
+}
+
+#define MSA_SAT_UH4(in0, in1, in2, in3, sat_val)  \
+{                                                 \
+    MSA_SAT_UH2(in0, in1, sat_val)                \
+    MSA_SAT_UH2(in2, in3, sat_val)                \
+}
+
+/* Description : Saturate the signed halfword element values to the max
+ *               signed value of (sat_val+1 bits).
+ *               The element data width remains unchanged.
+ * Arguments   : Inputs  - in, sat_val
+ *               Outputs - in (in place)
+ *               Return Type - v8i16
+ * Details     : Each signed halfword element from 'in' is saturated to the
+ *               value generated with (sat_val+1) bit range.
+ *               Results are stored in place in the original vectors.
+ */
+#define MSA_SAT_SH(in, sat_val)       \
+{                                     \
+    in = __msa_sat_s_h(in, sat_val);  \
+}
+
+#define MSA_SAT_SH2(in0, in1, sat_val)  \
+{                                       \
+    MSA_SAT_SH(in0, sat_val)            \
+    MSA_SAT_SH(in1, sat_val)            \
+}
+
+#define MSA_SAT_SH4(in0, in1, in2, in3, sat_val)  \
+{                                                 \
+    MSA_SAT_SH2(in0, in1, sat_val)                \
+    MSA_SAT_SH2(in2, in3, sat_val)                \
+}
+
+/* Description : Saturate the unsigned word element values to the max
+ *               unsigned value of (sat_val+1 bits).
+ *               The element data width remains unchanged.
+ * Arguments   : Inputs  - in, sat_val
+ *               Outputs - in (in place)
+ *               Return Type - v4u32
+ * Details     : Each unsigned word element from 'in' is saturated to the
+ *               value generated with (sat_val+1) bit range.
+ *               Results are stored in place in the original vectors.
+ */
+#define MSA_SAT_UW(in, sat_val)       \
+{                                     \
+    in = __msa_sat_u_w(in, sat_val);  \
+}
+
+#define MSA_SAT_UW2(in0, in1, sat_val)  \
+{                                       \
+    MSA_SAT_UW(in0, sat_val)            \
+    MSA_SAT_UW(in1, sat_val)            \
+}
+
+#define MSA_SAT_UW4(in0, in1, in2, in3, sat_val)  \
+{                                                 \
+    MSA_SAT_UW2(in0, in1, sat_val)                \
+    MSA_SAT_UW2(in2, in3, sat_val)                \
+}
+
+/* Description : Saturate the signed word element values to the max
+ *               signed value of (sat_val+1 bits).
+ *               The element data width remains unchanged.
+ * Arguments   : Inputs  - in, sat_val
+ *               Outputs - in (in place)
+ *               Return Type - v4i32
+ * Details     : Each signed word element from 'in' is saturated to the
+ *               value generated with (sat_val+1) bit range.
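+ *               e.g. MSA_SAT_SW(in, 7) clamps each word element to the
+ *               signed 8-bit range [-128, 127].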
+ *               Results are stored in place in the original vectors.
+ */
+#define MSA_SAT_SW(in, sat_val)       \
+{                                     \
+    in = __msa_sat_s_w(in, sat_val);  \
+}
+
+#define MSA_SAT_SW2(in0, in1, sat_val)  \
+{                                       \
+    MSA_SAT_SW(in0, sat_val)            \
+    MSA_SAT_SW(in1, sat_val)            \
+}
+
+#define MSA_SAT_SW4(in0, in1, in2, in3, sat_val)  \
+{                                                 \
+    MSA_SAT_SW2(in0, in1, sat_val)                \
+    MSA_SAT_SW2(in2, in3, sat_val)                \
+}
+
+/* Description : Each byte element is logically xor'ed with immediate 128.
+ * Arguments   : Inputs  - in
+ *               Outputs - in (in-place)
+ *               Return Type - as per RTYPE
+ * Details     : Each unsigned byte element from input vector 'in' is
+ *               logically xor'ed with 128 and the result is stored in-place
+ *               in the 'in' vector.
+ */
+#define MSA_XORI_B_128(RTYPE, in)                \
+{                                                \
+    in = (RTYPE) __msa_xori_b((v16u8) in, 128);  \
+}
+
+#define MSA_XORI_B2_128(RTYPE, in0, in1)  \
+{                                         \
+    MSA_XORI_B_128(RTYPE, in0);           \
+    MSA_XORI_B_128(RTYPE, in1);           \
+}
+
+#define MSA_XORI_B4_128(RTYPE, in0, in1, in2, in3)  \
+{                                                   \
+    MSA_XORI_B2_128(RTYPE, in0, in1);               \
+    MSA_XORI_B2_128(RTYPE, in2, in3);               \
+}
+
+/* Description : Shift right logical all byte elements of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right logical by
+ *               number of bits respective element holds in vector 'shift' and
+ *               result is in place written to 'in'.
+ *               Here, 'shift' is a vector passed in.
+ */
+#define MSA_SRL_B(RTYPE, in, shift)                       \
+{                                                         \
+    in = (RTYPE) __msa_srl_b((v16i8) in, (v16i8) shift);  \
+}
+
+#define MSA_SRL_B2(RTYPE, in0, in1, shift)  \
+{                                           \
+    MSA_SRL_B(RTYPE, in0, shift);           \
+    MSA_SRL_B(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRL_B4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                     \
+    MSA_SRL_B2(RTYPE, in0, in1, shift);               \
+    MSA_SRL_B2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right logical all halfword elements of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right logical by
+ *               number of bits respective element holds in vector 'shift' and
+ *               result is in place written to 'in'.
+ *               Here, 'shift' is a vector passed in.
+ */
+#define MSA_SRL_H(RTYPE, in, shift)                       \
+{                                                         \
+    in = (RTYPE) __msa_srl_h((v8i16) in, (v8i16) shift);  \
+}
+
+#define MSA_SRL_H2(RTYPE, in0, in1, shift)  \
+{                                           \
+    MSA_SRL_H(RTYPE, in0, shift);           \
+    MSA_SRL_H(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRL_H4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                     \
+    MSA_SRL_H2(RTYPE, in0, in1, shift);               \
+    MSA_SRL_H2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right logical all word elements of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right logical by
+ *               number of bits respective element holds in vector 'shift' and
+ *               result is in place written to 'in'.
+ *               Here, 'shift' is a vector passed in.
+ */
+#define MSA_SRL_W(RTYPE, in, shift)                       \
+{                                                         \
+    in = (RTYPE) __msa_srl_w((v4i32) in, (v4i32) shift);  \
+}
+
+#define MSA_SRL_W2(RTYPE, in0, in1, shift)  \
+{                                           \
+    MSA_SRL_W(RTYPE, in0, shift);           \
+    MSA_SRL_W(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRL_W4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                     \
+    MSA_SRL_W2(RTYPE, in0, in1, shift);               \
+    MSA_SRL_W2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right logical all double word elements of vector.
+ * Arguments : Inputs - in, shift + * Outputs - in (in place) + * Return Type - as per RTYPE + * Details : Each element of vector 'in' is shifted right logical by + * number of bits respective element holds in vector 'shift' and + * result is in place written to 'in'. + * Here, 'shift' is a vector passed in. + */ +#define MSA_SRL_D(RTYPE, in, shift) \ +{ \ + in = (RTYPE) __msa_srl_d((v2i64) in, (v2i64) shift); \ +} + +#define MSA_SRL_D2(RTYPE, in0, in1, shift) \ +{ \ + MSA_SRL_D(RTYPE, in0, shift); \ + MSA_SRL_D(RTYPE, in1, shift); \ +} + +#define MSA_SRL_D4(RTYPE, in0, in1, in2, in3, shift) \ +{ \ + MSA_SRL_D2(RTYPE, in0, in1, shift); \ + MSA_SRL_D2(RTYPE, in2, in3, shift); \ +} + +/* Description : Shift right logical rounded all byte elements of vector. + * Arguments : Inputs - in, shift + * Outputs - in (in place) + * Return Type - as per RTYPE + * Details : Each element of vector 'in' is shifted right logical rounded + * by number of bits respective element holds in vector 'shift' + * and result is in place written to 'in'. + * Here, 'shift' is a vector passed in. + */ +#define MSA_SRLR_B(RTYPE, in, shift) \ +{ \ + in = (RTYPE) __msa_srlr_b((v16i8) in, (v16i8) shift); \ +} + +#define MSA_SRLR_B2(RTYPE, in0, in1, shift) \ +{ \ + MSA_SRLR_B(RTYPE, in0, shift); \ + MSA_SRLR_B(RTYPE, in1, shift); \ +} + +#define MSA_SRLR_B4(RTYPE, in0, in1, in2, in3, shift) \ +{ \ + MSA_SRLR_B2(RTYPE, in0, in1, shift); \ + MSA_SRLR_B2(RTYPE, in2, in3, shift); \ +} + +/* Description : Shift right logical rounded all halfword elements of vector. + * Arguments : Inputs - in, shift + * Outputs - in (in place) + * Return Type - as per RTYPE + * Details : Each element of vector 'in' is shifted right logical rounded + * by number of bits respective element holds in vector 'shift' + * and result is in place written to 'in'. + * Here, 'shift' is a vector passed in. + */ +#define MSA_SRLR_H(RTYPE, in, shift) \ +{ \ + in = (RTYPE) __msa_srlr_h((v8i16) in, (v8i16) shift); \ +} + +#define MSA_SRLR_H2(RTYPE, in0, in1, shift) \ +{ \ + MSA_SRLR_H(RTYPE, in0, shift); \ + MSA_SRLR_H(RTYPE, in1, shift); \ +} + +#define MSA_SRLR_H4(RTYPE, in0, in1, in2, in3, shift) \ +{ \ + MSA_SRLR_H2(RTYPE, in0, in1, shift); \ + MSA_SRLR_H2(RTYPE, in2, in3, shift); \ +} + +/* Description : Shift right logical rounded all word elements of vector. + * Arguments : Inputs - in, shift + * Outputs - in (in place) + * Return Type - as per RTYPE + * Details : Each element of vector 'in' is shifted right logical rounded + * by number of bits respective element holds in vector 'shift' + * and result is in place written to 'in'. + * Here, 'shift' is a vector passed in. + */ +#define MSA_SRLR_W(RTYPE, in, shift) \ +{ \ + in = (RTYPE) __msa_srlr_w((v4i32) in, (v4i32) shift); \ +} + +#define MSA_SRLR_W2(RTYPE, in0, in1, shift) \ +{ \ + MSA_SRLR_W(RTYPE, in0, shift); \ + MSA_SRLR_W(RTYPE, in1, shift); \ +} + +#define MSA_SRLR_W4(RTYPE, in0, in1, in2, in3, shift) \ +{ \ + MSA_SRLR_W2(RTYPE, in0, in1, shift); \ + MSA_SRLR_W2(RTYPE, in2, in3, shift); \ +} + +/* Description : Shift right logical rounded all double word elements of vector. + * Arguments : Inputs - in, shift + * Outputs - in (in place) + * Return Type - as per RTYPE + * Details : Each element of vector 'in' is shifted right logical rounded + * by number of bits respective element holds in vector 'shift' + * and result is in place written to 'in'. + * Here, 'shift' is a vector passed in. 
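+ *               e.g. with a shift element of 2, an element value of 7 gives
+ *               (7 + 2) >> 2 = 2, the added bias 2 being half of 1 << 2.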
+ */ +#define MSA_SRLR_D(RTYPE, in, shift) \ +{ \ + in = (RTYPE) __msa_srlr_d((v2i64) in, (v2i64) shift); \ +} + +#define MSA_SRLR_D2(RTYPE, in0, in1, shift) \ +{ \ + MSA_SRLR_D(RTYPE, in0, shift); \ + MSA_SRLR_D(RTYPE, in1, shift); \ +} + +#define MSA_SRLR_D4(RTYPE, in0, in1, in2, in3, shift) \ +{ \ + MSA_SRLR_D2(RTYPE, in0, in1, shift); \ + MSA_SRLR_D2(RTYPE, in2, in3, shift); \ +} + +/* Description : Shift right arithmetic rounded all byte elements of vector. + * Arguments : Inputs - in, shift + * Outputs - in (in place) + * Return Type - as per RTYPE + * Details : Each element of vector 'in' is shifted right arithmetic + * rounded by number of bits respective element holds in + * vector 'shift' and result is in place written to 'in'. + * Here, 'shift' is a vector passed in. + */ +#define MSA_SRAR_B(RTYPE, in, shift) \ +{ \ + in = (RTYPE) __msa_srar_b((v16i8) in, (v16i8) shift); \ +} + +#define MSA_SRAR_B2(RTYPE, in0, in1, shift) \ +{ \ + MSA_SRAR_B(RTYPE, in0, shift); \ + MSA_SRAR_B(RTYPE, in1, shift); \ +} + +#define MSA_SRAR_B4(RTYPE, in0, in1, in2, in3, shift) \ +{ \ + MSA_SRAR_B2(RTYPE, in0, in1, shift); \ + MSA_SRAR_B2(RTYPE, in2, in3, shift); \ +} + +/* Description : Shift right arithmetic rounded all halfword elements of vector. + * Arguments : Inputs - in, shift + * Outputs - in (in place) + * Return Type - as per RTYPE + * Details : Each element of vector 'in' is shifted right arithmetic + * rounded by number of bits respective element holds in + * vector 'shift' and result is in place written to 'in'. + * Here, 'shift' is a vector passed in. + */ +#define MSA_SRAR_H(RTYPE, in, shift) \ +{ \ + in = (RTYPE) __msa_srar_h((v8i16) in, (v8i16) shift); \ +} + +#define MSA_SRAR_H2(RTYPE, in0, in1, shift) \ +{ \ + MSA_SRAR_H(RTYPE, in0, shift); \ + MSA_SRAR_H(RTYPE, in1, shift); \ +} + +#define MSA_SRAR_H4(RTYPE, in0, in1, in2, in3, shift) \ +{ \ + MSA_SRAR_H2(RTYPE, in0, in1, shift); \ + MSA_SRAR_H2(RTYPE, in2, in3, shift); \ +} + +/* Description : Shift right arithmetic rounded all word elements of vector. + * Arguments : Inputs - in, shift + * Outputs - in (in place) + * Return Type - as per RTYPE + * Details : Each element of vector 'in' is shifted right arithmetic + * rounded by number of bits respective element holds in + * vector 'shift' and result is in place written to 'in'. + * Here, 'shift' is a vector passed in. + */ +#define MSA_SRAR_W(RTYPE, in, shift) \ +{ \ + in = (RTYPE) __msa_srar_w((v4i32) in, (v4i32) shift); \ +} + +#define MSA_SRAR_W2(RTYPE, in0, in1, shift) \ +{ \ + MSA_SRAR_W(RTYPE, in0, shift); \ + MSA_SRAR_W(RTYPE, in1, shift); \ +} + +#define MSA_SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \ +{ \ + MSA_SRAR_W2(RTYPE, in0, in1, shift); \ + MSA_SRAR_W2(RTYPE, in2, in3, shift); \ +} + +/* Description : Shift right arithmetic rounded all double word elements + * of vector. + * Arguments : Inputs - in, shift + * Outputs - in (in place) + * Return Type - as per RTYPE + * Details : Each element of vector 'in' is shifted right arithmetic + * rounded by number of bits respective element holds in + * vector 'shift' and result is in place written to 'in'. + * Here, 'shift' is a vector passed in. 
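+ *               e.g. with a shift element of 2, -6 gives (-6 + 2) >> 2 = -1
+ *               (rounded to nearest), where a plain arithmetic shift would
+ *               give -2.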
+ */
+#define MSA_SRAR_D(RTYPE, in, shift)                       \
+{                                                          \
+    in = (RTYPE) __msa_srar_d((v2i64) in, (v2i64) shift);  \
+}
+
+#define MSA_SRAR_D2(RTYPE, in0, in1, shift)  \
+{                                            \
+    MSA_SRAR_D(RTYPE, in0, shift);           \
+    MSA_SRAR_D(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRAR_D4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                      \
+    MSA_SRAR_D2(RTYPE, in0, in1, shift);               \
+    MSA_SRAR_D2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right arithmetic rounded all byte elements of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right arithmetic
+ *               rounded by the number of bits given by 'shift' and the
+ *               result is written in place to 'in'.
+ *               Here, 'shift' is an immediate number passed in.
+ */
+#define MSA_SRARI_B(RTYPE, in, shift)                        \
+{                                                            \
+    in = (RTYPE) __msa_srari_b((v16i8) in, (v16i8) shift);   \
+}
+
+#define MSA_SRARI_B2(RTYPE, in0, in1, shift)  \
+{                                             \
+    MSA_SRARI_B(RTYPE, in0, shift);           \
+    MSA_SRARI_B(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRARI_B4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                       \
+    MSA_SRARI_B2(RTYPE, in0, in1, shift);               \
+    MSA_SRARI_B2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right arithmetic rounded all halfword elements of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right arithmetic
+ *               rounded by the number of bits given by 'shift' and the
+ *               result is written in place to 'in'.
+ *               Here, 'shift' is an immediate number passed in.
+ */
+#define MSA_SRARI_H(RTYPE, in, shift)                        \
+{                                                            \
+    in = (RTYPE) __msa_srari_h((v8i16) in, (v8i16) shift);   \
+}
+
+#define MSA_SRARI_H2(RTYPE, in0, in1, shift)  \
+{                                             \
+    MSA_SRARI_H(RTYPE, in0, shift);           \
+    MSA_SRARI_H(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRARI_H4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                       \
+    MSA_SRARI_H2(RTYPE, in0, in1, shift);               \
+    MSA_SRARI_H2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right arithmetic rounded all word elements of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right arithmetic
+ *               rounded by the number of bits given by 'shift' and the
+ *               result is written in place to 'in'.
+ *               Here, 'shift' is an immediate number passed in.
+ */
+#define MSA_SRARI_W(RTYPE, in, shift)                        \
+{                                                            \
+    in = (RTYPE) __msa_srari_w((v4i32) in, (v4i32) shift);   \
+}
+
+#define MSA_SRARI_W2(RTYPE, in0, in1, shift)  \
+{                                             \
+    MSA_SRARI_W(RTYPE, in0, shift);           \
+    MSA_SRARI_W(RTYPE, in1, shift);           \
+}
+
+#define MSA_SRARI_W4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                       \
+    MSA_SRARI_W2(RTYPE, in0, in1, shift);               \
+    MSA_SRARI_W2(RTYPE, in2, in3, shift);               \
+}
+
+/* Description : Shift right arithmetic rounded all double word elements
+ *               of vector.
+ * Arguments   : Inputs  - in, shift
+ *               Outputs - in (in place)
+ *               Return Type - as per RTYPE
+ * Details     : Each element of vector 'in' is shifted right arithmetic
+ *               rounded by the number of bits given by 'shift' and the
+ *               result is written in place to 'in'.
+ *               Here, 'shift' is an immediate number passed in.
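+ *               e.g. MSA_SRARI_D(v2i64, in, 6) computes (in + 32) >> 6 per
+ *               element, the usual rounding step when descaling fixed-point
+ *               filter results.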
+ */ +#define MSA_SRARI_D(RTYPE, in, shift) \ +{ \ + in = (RTYPE) __msa_srari_d((v2i64) in, (v2i64) shift); \ +} + +#define MSA_SRARI_D2(RTYPE, in0, in1, shift) \ +{ \ + MSA_SRARI_D(RTYPE, in0, shift); \ + MSA_SRARI_D(RTYPE, in1, shift); \ +} + +#define MSA_SRARI_D4(RTYPE, in0, in1, in2, in3, shift) \ +{ \ + MSA_SRARI_D2(RTYPE, in0, in1, shift); \ + MSA_SRARI_D2(RTYPE, in2, in3, shift); \ +} + +/* Description : Transposes input 4x4 byte block. + * Arguments : Inputs - in0, in1, in2, in3 (input 4x4 byte block) + * Outputs - out0, out1, out2, out3 (output 4x4 byte block) + * Return Type - RTYPE + * Details : + */ +#define MSA_TRANSPOSE4x4_B(RTYPE, in0, in1, in2, in3, \ + out0, out1, out2, out3) \ +{ \ + v16i8 zero_m = { 0 }; \ + \ + MSA_ILVR_B2(RTYPE, in2, in0, in3, in1, out2, out3); \ + out0 = (RTYPE) __msa_ilvr_b((v16i8) out3, (v16i8) out2); \ + out1 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out0, 4); \ + out2 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out1, 4); \ + out3 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out2, 4); \ +} + +/* Description : Transposes input 8x4 byte block into 4x8. + * Arguments : Inputs - in0, in1, in2 ~ in7 (input 8x4 byte block) + * Outputs - out0, out1, out2, out3 (output 4x8 byte block) + * Return Type - RTYPE + * Details : + */ +#define MSA_TRANSPOSE8x4_B(RTYPE, in0, in1, in2, in3, in4, in5, \ + in6, in7, out0, out1, out2, out3) \ +{ \ + v16i8 zero_m = { 0 }; \ + \ + MSA_ILVR_B4(RTYPE, in2, in0, in3, in1, in6, in4, in7, in5, \ + out0, out1, out2, out3); \ + MSA_ILVR_H2(RTYPE, out2, out0, out3, out1, out2, out3); \ + out0 = (RTYPE) __msa_ilvr_b((v16i8) out3, (v16i8) out2); \ + out1 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out0, 8); \ + out2 = (RTYPE) __msa_ilvl_b((v16i8) out3, (v16i8) out2); \ + out3 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out2, 8); \ +} + +/* Description : Transposes 16x4 block into 4x16 with byte elements in vectors. + * Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, + * in8, in9, in10, in11, in12, in13, in14, in15 + * Outputs - out0, out1, out2, out3 + * Return Type - RTYPE + * Details : + */ +#define MSA_TRANSPOSE16x4_B(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + in8, in9, in10, in11, in12, in13, in14, in15, \ + out0, out1, out2, out3) \ +{ \ + v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + \ + MSA_ILVR_B4(RTYPE, in2, in0, in3, in1, in6, in4, in7, in5, \ + out0, out1, out2, out3); \ + MSA_ILVR_H2(RTYPE, out2, out0, out3, out1, out2, out3); \ + MSA_ILVRL_B2(v2i64, out3, out2, tmp0_m, tmp1_m); \ + \ + MSA_ILVR_B4(RTYPE, in10, in8, in11, in9, in14, in12, in15, in13, \ + out0, out1, out2, out3); \ + MSA_ILVR_H2(RTYPE, out2, out0, out3, out1, out2, out3); \ + MSA_ILVRL_B2(v2i64, out3, out2, tmp2_m, tmp3_m); \ + \ + MSA_ILVRL_D4(RTYPE, tmp2_m, tmp0_m, tmp3_m, tmp1_m, \ + out0, out1, out2, out3); \ +} + +/* Description : Transposes input 8x8 byte block. 
+ * Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7 + * (input 8x8 byte block) + * Outputs - out0, out1, out2, out3, out4, out5, out6, out7 + * (output 8x8 byte block) + * Return Type - RTYPE + * Details : + */ +#define MSA_TRANSPOSE8x8_B(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3, out4, out5, out6, out7) \ +{ \ + v16i8 zero_m = {0}; \ + \ + MSA_ILVR_B4(RTYPE, in2, in0, in3, in1, in6, in4, in7, in5, \ + out0, out1, out2, out3); \ + MSA_ILVRL_B4(RTYPE, out1, out0, out3, out2, out4, out5, out6, out7); \ + MSA_ILVRL_W4(RTYPE, out6, out4, out7, out5, out0, out2, out4, out6); \ + out1 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out0, 8); \ + out3 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out2, 8); \ + out5 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out4, 8); \ + out7 = (RTYPE) __msa_sldi_b(zero_m, (v16i8) out6, 8); \ +} + +/* Description : Transposes 16x8 block into 8x16 with byte elements in vectors. + * Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, + * in8, in9, in10, in11, in12, in13, in14, in15 + * Outputs - out0, out1, out2, out3, out4, out5, out6, out7 + * Return Type - RTYPE + * Details : + */ +#define MSA_TRANSPOSE16x8_B(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + in8, in9, in10, in11, in12, in13, in14, in15, \ + out0, out1, out2, out3, out4, out5, out6, out7) \ +{ \ + v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + \ + MSA_ILVEV_D4(RTYPE, in8, in0, in9, in1, in10, in2, in11, in3, \ + out7, out6, out5, out4); \ + MSA_ILVEV_D4(RTYPE, in12, in4, in13, in5, in14, in6, in15, in7, \ + out3, out2, out1, out0); \ + \ + tmp0_m = __msa_ilvev_b((v16i8) out6, (v16i8) out7); \ + tmp1_m = __msa_ilvod_b((v16i8) out6, (v16i8) out7); \ + out6 = (RTYPE) __msa_ilvev_b((v16i8) out4, (v16i8) out5); \ + out5 = (RTYPE) __msa_ilvod_b((v16i8) out4, (v16i8) out5); \ + tmp2_m = __msa_ilvev_b((v16i8) out2, (v16i8) out3); \ + tmp3_m = __msa_ilvod_b((v16i8) out2, (v16i8) out3); \ + out2 = (RTYPE) __msa_ilvev_b((v16i8) out0, (v16i8) out1); \ + out1 = (RTYPE) __msa_ilvod_b((v16i8) out0, (v16i8) out1); \ + \ + MSA_ILVEV_H2(RTYPE, out6, tmp0_m, out2, tmp2_m, out3, out7); \ + out0 = (RTYPE) __msa_ilvev_w((v4i32) out7, (v4i32) out3); \ + out4 = (RTYPE) __msa_ilvod_w((v4i32) out7, (v4i32) out3); \ + \ + MSA_ILVOD_H2(RTYPE, out6, tmp0_m, out2, tmp2_m, out3, out7); \ + out2 = (RTYPE) __msa_ilvev_w((v4i32) out7, (v4i32) out3); \ + out6 = (RTYPE) __msa_ilvod_w((v4i32) out7, (v4i32) out3); \ + \ + MSA_ILVOD_H2(v16i8, out5, tmp1_m, out1, tmp3_m, tmp0_m, tmp2_m); \ + out3 = (RTYPE) __msa_ilvev_w((v4i32) tmp2_m, (v4i32) tmp0_m); \ + out7 = (RTYPE) __msa_ilvod_w((v4i32) tmp2_m, (v4i32) tmp0_m); \ + \ + MSA_ILVEV_H2(v16i8, out5, tmp1_m, out1, tmp3_m, tmp0_m, tmp2_m); \ + out1 = (RTYPE) __msa_ilvev_w((v4i32) tmp2_m, (v4i32) tmp0_m); \ + out5 = (RTYPE) __msa_ilvod_w((v4i32) tmp2_m, (v4i32) tmp0_m); \ +} + +/* Description : Transposes 4x4 block with half word elements in vectors. + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1, out2, out3 + * Return Type - RTYPE + * Details : + */ +#define MSA_TRANSPOSE4x4_H(RTYPE, in0, in1, in2, in3, \ + out0, out1, out2, out3) \ +{ \ + MSA_ILVR_H2(RTYPE, in1, in0, in3, in2, out1, out3); \ + MSA_ILVRL_W2(RTYPE, out3, out1, out0, out2); \ + MSA_ILVL_D2(RTYPE, out0, out0, out2, out2, out1, out3); \ +} + +/* Description : Transposes 8x4 block with half word elements in vectors. 
+ * Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7 + * Outputs - out0, out1, out2, out3 + * Return Type - RTYPE + * Details : + */ +#define MSA_TRANSPOSE8x4_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) \ +{ \ + v8i16 s0_m, s1_m; \ + v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + \ + MSA_ILVR_H2(v8i16, in6, in4, in7, in5, s0_m, s1_m); \ + MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp0_m, tmp1_m); \ + MSA_ILVR_H2(v8i16, in2, in0, in3, in1, s0_m, s1_m); \ + MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp2_m, tmp3_m); \ + MSA_PCKEV_D2(RTYPE, tmp0_m, tmp2_m, tmp1_m, tmp3_m, out0, out2); \ + MSA_PCKOD_D2(RTYPE, tmp0_m, tmp2_m, tmp1_m, tmp3_m, out1, out3); \ +} + +/* Description : Transposes 8x8 block with half word elements in vectors. + * Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7 + * Outputs - out0, out1, out2, out3, out4, out5, out6, out7 + * Return Type - RTYPE + * Details : + */ +#define MSA_TRANSPOSE8x8_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3, out4, out5, out6, out7) \ +{ \ + v8i16 s0_m, s1_m; \ + v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ + \ + MSA_ILVR_H2(v8i16, in6, in4, in7, in5, s0_m, s1_m); \ + MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp0_m, tmp1_m); \ + MSA_ILVL_H2(v8i16, in6, in4, in7, in5, s0_m, s1_m); \ + MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp2_m, tmp3_m); \ + MSA_ILVR_H2(v8i16, in2, in0, in3, in1, s0_m, s1_m); \ + MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp4_m, tmp5_m); \ + MSA_ILVL_H2(v8i16, in2, in0, in3, in1, s0_m, s1_m); \ + MSA_ILVRL_H2(v8i16, s1_m, s0_m, tmp6_m, tmp7_m); \ + MSA_PCKEV_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m, \ + tmp3_m, tmp7_m, out0, out2, out4, out6); \ + MSA_PCKOD_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m, \ + tmp3_m, tmp7_m, out1, out3, out5, out7); \ +} + +#endif /* _MSA_MACROS_H */ diff --git a/chromium/third_party/openh264/src/codec/common/meson.build b/chromium/third_party/openh264/src/codec/common/meson.build index d7d15a61ee9..7f8acb685be 100644 --- a/chromium/third_party/openh264/src/codec/common/meson.build +++ b/chromium/third_party/openh264/src/codec/common/meson.build @@ -17,21 +17,41 @@ cpp_sources = [ 'src/WelsThreadPool.cpp', ] -asm_sources = [ - 'x86/cpuid.asm', - 'x86/dct.asm', - 'x86/deblock.asm', - 'x86/expand_picture.asm', - 'x86/intra_pred_com.asm', - 'x86/mb_copy.asm', - 'x86/mc_chroma.asm', - 'x86/mc_luma.asm', - 'x86/satd_sad.asm', - 'x86/vaa.asm', -] - -objs_asm = asm_gen.process(asm_sources) +objs_asm = [] +if ['x86', 'x86_64'].contains(cpu_family) + asm_sources = [ + 'x86/cpuid.asm', + 'x86/dct.asm', + 'x86/deblock.asm', + 'x86/expand_picture.asm', + 'x86/intra_pred_com.asm', + 'x86/mb_copy.asm', + 'x86/mc_chroma.asm', + 'x86/mc_luma.asm', + 'x86/satd_sad.asm', + 'x86/vaa.asm', + ] + objs_asm += asm_gen.process(asm_sources) +elif cpu_family == 'arm' + cpp_sources += [ + 'arm/copy_mb_neon.S', + 'arm/deblocking_neon.S', + 'arm/expand_picture_neon.S', + 'arm/intra_pred_common_neon.S', + 'arm/mc_neon.S', + ] +elif cpu_family == 'aarch64' + cpp_sources += [ + 'arm64/copy_mb_aarch64_neon.S', + 'arm64/deblocking_aarch64_neon.S', + 'arm64/expand_picture_aarch64_neon.S', + 'arm64/intra_pred_common_aarch64_neon.S', + 'arm64/mc_aarch64_neon.S', + ] +else + error('Unsupported cpu_family @0@'.format(cpu_family)) +endif libcommon = static_library('common', cpp_sources, objs_asm, - include_directories: inc, + include_directories: [inc, casm_inc], dependencies: deps) diff --git 
a/chromium/third_party/openh264/src/codec/common/mips/copy_mb_msa.c b/chromium/third_party/openh264/src/codec/common/mips/copy_mb_msa.c new file mode 100644 index 00000000000..4ba01edc3bd --- /dev/null +++ b/chromium/third_party/openh264/src/codec/common/mips/copy_mb_msa.c @@ -0,0 +1,80 @@ +/*! + * \copy + * Copyright (C) 2020 Loongson Technology Co. Ltd. + * Contributed by Gu Xiwei(guxiwei-hf@loongson.cn) + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * + * \file copy_mb_msa.c + * + * \brief MIPS MSA optimizations + * + * \date 14/05/2020 Created + * + ************************************************************************************* + */ + +#include <stdint.h> +#include "msa_macros.h" + +void WelsCopy8x8_msa(uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc, + int32_t iStrideS ) { + v16u8 src0, src1; + for (int i = 0; i < 4; i++) { + MSA_LD_V2(v16u8, pSrc, iStrideS, src0, src1); + MSA_ST_D(src0, 0, pDst); + MSA_ST_D(src1, 0, pDst + iStrideD); + pSrc += 2 * iStrideS; + pDst += 2 * iStrideD; + } +} + +void WelsCopy8x16_msa(uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc, + int32_t iStrideS) { + WelsCopy8x8_msa(pDst, iStrideD, pSrc, iStrideS); + WelsCopy8x8_msa(pDst + 8 * iStrideD, iStrideD, + pSrc + 8 * iStrideS, iStrideS); +} + +void WelsCopy16x8_msa(uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc, + int32_t iStrideS) { + v16u8 src0, src1; + for (int i = 0; i < 4; i++) { + MSA_LD_V2(v16u8, pSrc, iStrideS, src0, src1); + MSA_ST_V2(v16u8, src0, src1, pDst, iStrideD); + pSrc += 2 * iStrideS; + pDst += 2 * iStrideD; + } +} + +void WelsCopy16x16_msa(uint8_t* pDst, int32_t iStrideD, uint8_t* pSrc, + int32_t iStrideS) { + WelsCopy16x8_msa(pDst, iStrideD, pSrc, iStrideS); + WelsCopy16x8_msa(pDst + 8 * iStrideD, iStrideD, + pSrc + 8 * iStrideS, iStrideS); +}; diff --git a/chromium/third_party/openh264/src/codec/common/mips/deblock_msa.c b/chromium/third_party/openh264/src/codec/common/mips/deblock_msa.c new file mode 100644 index 00000000000..0d3dfcb798e --- /dev/null +++ b/chromium/third_party/openh264/src/codec/common/mips/deblock_msa.c @@ -0,0 +1,1024 @@ +/*! + * \copy + * Copyright (C) 2019 Loongson Technology Co. Ltd. + * Contributed by Gu Xiwei(guxiwei-hf@loongson.cn) + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * + * \file deblock_msa.c + * + * \brief MIPS MSA optimizations + * + * \date 15/05/2020 Created + * + ************************************************************************************* + */ + +#include <stdint.h> +#include "msa_macros.h" + +void DeblockLumaLt4V_msa(uint8_t *pPix, int32_t iStride, int32_t iAlpha, + int32_t iBeta, int8_t *pTc) { + v16u8 p0, p1, p2, q0, q1, q2; + v16i8 iTc, negiTc, negTc, flags, f; + v8i16 p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, q0_l, q0_r, q1_l, q1_r, q2_l, q2_r; + v8i16 tc_l, tc_r, negTc_l, negTc_r; + v8i16 iTc_l, iTc_r, negiTc_l, negiTc_r; + // Use for temporary variable + v8i16 t0, t1, t2, t3; + v16u8 alpha, beta; + v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0, bDetaP2P0, bDetaQ2Q0; + v16i8 const_1_b = __msa_ldi_b(1); + v8i16 const_1_h = __msa_ldi_h(1); + v8i16 const_4_h = __msa_ldi_h(4); + v8i16 const_not_255_h = __msa_ldi_h(~255); + v16i8 zero = { 0 }; + v16i8 tc = { pTc[0 >> 2], pTc[1 >> 2], pTc[2 >> 2], pTc[3 >> 2], + pTc[4 >> 2], pTc[5 >> 2], pTc[6 >> 2], pTc[7 >> 2], + pTc[8 >> 2], pTc[9 >> 2], pTc[10 >> 2], pTc[11 >> 2], + pTc[12 >> 2], pTc[13 >> 2], pTc[14 >> 2], pTc[15 >> 2] }; + negTc = zero - tc; + iTc = tc; + + // Load data from pPix + MSA_LD_V4(v16u8, pPix - 3 * iStride, iStride, p2, p1, p0, q0); + MSA_LD_V2(v16u8, pPix + iStride, iStride, q1, q2); + alpha = (v16u8)__msa_fill_b(iAlpha); + beta = (v16u8)__msa_fill_b(iBeta); + + bDetaP0Q0 = __msa_asub_u_b(p0, q0); + bDetaP1P0 = __msa_asub_u_b(p1, p0); + bDetaQ1Q0 = __msa_asub_u_b(q1, q0); + bDetaP2P0 = __msa_asub_u_b(p2, p0); + bDetaQ2Q0 = __msa_asub_u_b(q2, q0); + bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha); + bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta); + bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta); + bDetaP2P0 = (v16u8)__msa_clt_u_b(bDetaP2P0, beta); + bDetaQ2Q0 = (v16u8)__msa_clt_u_b(bDetaQ2Q0, beta); + + // Unsigned extend p0, p1, p2, q0, q1, q2 from 8 bits to 16 bits + MSA_ILVRL_B4(v8i16, zero, p0, zero, p1, + p0_r, p0_l, p1_r, p1_l); + MSA_ILVRL_B4(v8i16, zero, p2, zero, q0, + p2_r, p2_l, q0_r, q0_l); + MSA_ILVRL_B4(v8i16, zero, q1, zero, q2, + q1_r, q1_l, q2_r, q2_l); + // Signed extend tc, negTc from 8 bits to 16 bits + flags = __msa_clt_s_b(tc, zero); + 
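+    // Note: __msa_clt_s_b against zero yields an all-ones byte wherever tc
+    // is negative, so interleaving 'flags' in as the high byte sign-extends
+    // each tc entry to 16 bits.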
MSA_ILVRL_B2(v8i16, flags, tc, tc_r, tc_l); + flags = __msa_clt_s_b(negTc, zero); + MSA_ILVRL_B2(v8i16, flags, negTc, negTc_r, negTc_l); + + f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0; + flags = f & (v16i8)bDetaP2P0; + flags = __msa_ceq_b(flags, zero); + iTc += ((~flags) & const_1_b); + flags = f & (v16i8)bDetaQ2Q0; + flags = __msa_ceq_b(flags, zero); + iTc += ((~flags) & const_1_b); + negiTc = zero - iTc; + // Signed extend iTc, negiTc from 8 bits to 16 bits + flags = __msa_clt_s_b(iTc, zero); + MSA_ILVRL_B2(v8i16, flags, iTc, iTc_r, iTc_l); + flags = __msa_clt_s_b(negiTc, zero); + MSA_ILVRL_B2(v8i16, flags, negiTc, negiTc_r, negiTc_l); + + // Calculate the left part + // p1 + t0 = (p2_l + ((p0_l + q0_l + const_1_h) >> 1) - (p1_l << 1)) >> 1; + t0 = __msa_max_s_h(negTc_l, t0); + t0 = __msa_min_s_h(tc_l, t0); + t1 = p1_l + t0; + // q1 + t0 = (q2_l + ((p0_l + q0_l + const_1_h) >> 1) - (q1_l << 1)) >> 1; + t0 = __msa_max_s_h(negTc_l, t0); + t0 = __msa_min_s_h(tc_l, t0); + t2 = q1_l + t0; + // iDeta + t0 = (((q0_l - p0_l) << 2) + (p1_l - q1_l) + const_4_h) >> 3; + t0 = __msa_max_s_h(negiTc_l, t0); + t0 = __msa_min_s_h(iTc_l, t0); + p1_l = t1; + q1_l = t2; + // p0 + t1 = p0_l + t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + p0_l = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + // q0 + t1 = q0_l - t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + q0_l = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + + // Calculate the right part + // p1 + t0 = (p2_r + ((p0_r + q0_r + const_1_h) >> 1) - (p1_r << 1)) >> 1; + t0 = __msa_max_s_h(negTc_r, t0); + t0 = __msa_min_s_h(tc_r, t0); + t1 = p1_r + t0; + // q1 + t0 = (q2_r + ((p0_r + q0_r + const_1_h) >> 1) - (q1_r << 1)) >> 1; + t0 = __msa_max_s_h(negTc_r, t0); + t0 = __msa_min_s_h(tc_r, t0); + t2 = q1_r + t0; + // iDeta + t0 = (((q0_r - p0_r) << 2) + (p1_r - q1_r) + const_4_h) >> 3; + t0 = __msa_max_s_h(negiTc_r, t0); + t0 = __msa_min_s_h(iTc_r, t0); + p1_r = t1; + q1_r = t2; + // p0 + t1 = p0_r + t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + p0_r = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + // q0 + t1 = q0_r - t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + q0_r = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + + // Combined left and right + MSA_PCKEV_B4(v8i16, p1_l, p1_r, p0_l, p0_r, q0_l, q0_r, q1_l, q1_r, + t0, t1, t2, t3); + flags = (v16i8)__msa_cle_s_b(zero, tc); + flags &= f; + p0 = (v16u8)(((v16i8)t1 & flags) + (p0 & (~flags))); + q0 = (v16u8)(((v16i8)t2 & flags) + (q0 & (~flags))); + // Using t1, t2 as temporary flags + t1 = (v8i16)(flags & (~(__msa_ceq_b((v16i8)bDetaP2P0, zero)))); + p1 = (v16u8)(t0 & t1) + (p1 & (v16u8)(~t1)); + t2 = (v8i16)(flags & (~(__msa_ceq_b((v16i8)bDetaQ2Q0, zero)))); + q1 = (v16u8)(t3 & t2) + (q1 & (v16u8)(~t2)); + + // Store data to pPix + MSA_ST_V4(v16u8, p1, p0, q0, q1, pPix - 2 * iStride, iStride); +} + +void DeblockLumaEq4V_msa(uint8_t *pPix, int32_t iStride, int32_t iAlpha, + int32_t iBeta) { + v16u8 p0, p1, p2, p3, q0, q1, q2, q3; + v8i16 p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p3_l, p3_r, + q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q3_l, q3_r; + v8i16 t0, t1, t2, t0_con1; + v8i16 s0, s1, s2, s0_con1; + v16u8 alpha, beta; + v16u8 iDetaP0Q0, bDetaP1P0, bDetaQ1Q0, bDetaP2P0, bDetaQ2Q0; + // Condition mask + v16u8 
mask0, mask1; + v16i8 const_2_b = __msa_ldi_b(2); + v8i16 const_2_h = __msa_ldi_h(2); + v8i16 const_4_h = __msa_ldi_h(4); + v16i8 zero = { 0 }; + + // Load data from pPix + MSA_LD_V8(v16u8, pPix - 4 * iStride, iStride, p3, p2, p1, p0, + q0, q1, q2, q3); + // iAlpha and beta are uint8_t type + alpha = (v16u8)__msa_fill_b(iAlpha); + beta = (v16u8)__msa_fill_b(iBeta); + + // iDetaP0Q0 is not bool type + iDetaP0Q0 = __msa_asub_u_b(p0, q0); + + bDetaP1P0 = __msa_asub_u_b(p1, p0); + bDetaQ1Q0 = __msa_asub_u_b(q1, q0); + bDetaP2P0 = __msa_asub_u_b(p2, p0); + bDetaQ2Q0 = __msa_asub_u_b(q2, q0); + bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta); + bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta); + bDetaP2P0 = (v16u8)__msa_clt_u_b(bDetaP2P0, beta); + bDetaQ2Q0 = (v16u8)__msa_clt_u_b(bDetaQ2Q0, beta); + + // Unsigned extend p0, p1, p2, p3, q0, q1, q2, q3 from 8 bits to 16 bits + MSA_ILVRL_B4(v8i16, zero, p0, zero, p1, + p0_r, p0_l, p1_r, p1_l); + MSA_ILVRL_B4(v8i16, zero, p2, zero, p3, + p2_r, p2_l, p3_r, p3_l); + MSA_ILVRL_B4(v8i16, zero, q0, zero, q1, + q0_r, q0_l, q1_r, q1_l); + MSA_ILVRL_B4(v8i16, zero, q2, zero, q3, + q2_r, q2_l, q3_r, q3_l); + + // Calculate condition mask + // (iDetaP0Q0 < iAlpha) && bDetaP1P0 && bDetaQ1Q0 + mask0 = (v16u8)__msa_clt_u_b(iDetaP0Q0, alpha); + mask0 &= bDetaP1P0; + mask0 &= bDetaQ1Q0; + // iDetaP0Q0 < ((iAlpha >> 2) + 2) + mask1 = (v16u8)((alpha >> 2) + const_2_b); + mask1 = (v16u8)__msa_clt_u_b(iDetaP0Q0, mask1); + + // Calculate the left part + // p0 + t0 = (p2_l + (p1_l << 1) + (p0_l << 1) + (q0_l << 1) + q1_l + const_4_h) >> 3; + // p1 + t1 = (p2_l + p1_l + p0_l + q0_l + const_2_h) >> 2; + // p2 + t2 = ((p3_l << 1) + p2_l + (p2_l << 1) + p1_l + p0_l + q0_l + const_4_h) >> 3; + // p0 condition 1 + t0_con1 = ((p1_l << 1) + p0_l + q1_l + const_2_h) >> 2; + // q0 + s0 = (p1_l + (p0_l << 1) + (q0_l << 1) + (q1_l << 1) + q2_l + const_4_h) >> 3; + // q1 + s1 = (p0_l + q0_l + q1_l + q2_l + const_2_h) >> 2; + // q2 + s2 = ((q3_l << 1) + q2_l + (q2_l << 1) + q1_l + q0_l + p0_l + const_4_h) >> 3; + // q0 condition 1 + s0_con1 = ((q1_l << 1) + q0_l + p1_l + const_2_h) >> 2; + // Move back + p0_l = t0; + p1_l = t1; + p2_l = t2; + q0_l = s0; + q1_l = s1; + q2_l = s2; + // Use p3_l, q3_l as tmp + p3_l = t0_con1; + q3_l = s0_con1; + + // Calculate the right part + // p0 + t0 = (p2_r + (p1_r << 1) + (p0_r << 1) + (q0_r << 1) + q1_r + const_4_h) >> 3; + // p1 + t1 = (p2_r + p1_r + p0_r + q0_r + const_2_h) >> 2; + // p2 + t2 = ((p3_r << 1) + p2_r + (p2_r << 1) + p1_r + p0_r + q0_r + const_4_h) >> 3; + // p0 condition 1 + t0_con1 = ((p1_r << 1) + p0_r + q1_r + const_2_h) >> 2; + // q0 + s0 = (p1_r + (p0_r << 1) + (q0_r << 1) + (q1_r << 1) + q2_r + const_4_h) >> 3; + // q1 + s1 = (p0_r + q0_r + q1_r + q2_r + const_2_h) >> 2; + // q2 + s2 = ((q3_r << 1) + q2_r + (q2_r << 1) + q1_r + q0_r + p0_r + const_4_h) >> 3; + // q0 condition 1 + s0_con1 = ((q1_r << 1) + q0_r + p1_r + const_2_h) >> 2; + // Move back + p0_r = t0; + p1_r = t1; + p2_r = t2; + q0_r = s0; + q1_r = s1; + q2_r = s2; + // Use p3_r, q3_r as tmp + p3_r = t0_con1; + q3_r = s0_con1; + + // Combined left and right + MSA_PCKEV_B4(v8i16, p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, q0_l, q0_r, + t0, t1, t2, s0); + MSA_PCKEV_B4(v8i16, q1_l, q1_r, q2_l, q2_r, p3_l, p3_r, q3_l, q3_r, + s1, s2, t0_con1, s0_con1); + t0 = (v8i16)(((v16u8)t0 & mask0 & mask1 & bDetaP2P0) + ((v16u8)t0_con1 & + mask0 & mask1 & (~bDetaP2P0)) + ((v16u8)t0_con1 & mask0 & (~mask1))); + t1 = (v8i16)((v16u8)t1 & mask0 & mask1 & bDetaP2P0); + t2 = 
(v8i16)((v16u8)t2 & mask0 & mask1 & bDetaP2P0); + s0 = (v8i16)(((v16u8)s0 & mask0 & mask1 & bDetaQ2Q0) + ((v16u8)s0_con1 & + mask0 & mask1 & (~bDetaQ2Q0)) + ((v16u8)s0_con1 & mask0 & (~mask1))); + s1 = (v8i16)((v16u8)s1 & mask0 & mask1 & bDetaQ2Q0); + s2 = (v8i16)((v16u8)s2 & mask0 & mask1 & bDetaQ2Q0); + p0 = (v16u8)t0 + (p0 & (~mask0)); + p1 = (v16u8)t1 + (p1 & ~(mask0 & mask1 & bDetaP2P0)); + p2 = (v16u8)t2 + (p2 & ~(mask0 & mask1 & bDetaP2P0)); + q0 = (v16u8)s0 + (q0 & (~mask0)); + q1 = (v16u8)s1 + (q1 & ~(mask0 & mask1 & bDetaQ2Q0)); + q2 = (v16u8)s2 + (q2 & ~(mask0 & mask1 & bDetaQ2Q0)); + + // Store data to pPix + MSA_ST_V4(v16u8, p2, p1, p0, q0, pPix - 3 * iStride, iStride); + MSA_ST_V2(v16u8, q1, q2, pPix + iStride, iStride); +} + + +void DeblockLumaLt4H_msa(uint8_t* pPix, int32_t iStride, int32_t iAlpha, + int32_t iBeta, int8_t* pTc) { + v16u8 p0, p1, p2, q0, q1, q2; + v16i8 iTc, negiTc, negTc, flags, f; + v8i16 p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, q0_l, q0_r, q1_l, q1_r, q2_l, q2_r; + v8i16 tc_l, tc_r, negTc_l, negTc_r; + v8i16 iTc_l, iTc_r, negiTc_l, negiTc_r; + // Use for temporary variable + v8i16 t0, t1, t2, t3; + v16u8 alpha, beta; + v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0, bDetaP2P0, bDetaQ2Q0; + v16i8 const_1_b = __msa_ldi_b(1); + v8i16 const_1_h = __msa_ldi_h(1); + v8i16 const_4_h = __msa_ldi_h(4); + v8i16 const_not_255_h = __msa_ldi_h(~255); + v16i8 zero = { 0 }; + v16i8 tc = { pTc[0 >> 2], pTc[1 >> 2], pTc[2 >> 2], pTc[3 >> 2], + pTc[4 >> 2], pTc[5 >> 2], pTc[6 >> 2], pTc[7 >> 2], + pTc[8 >> 2], pTc[9 >> 2], pTc[10 >> 2], pTc[11 >> 2], + pTc[12 >> 2], pTc[13 >> 2], pTc[14 >> 2], pTc[15 >> 2] }; + negTc = zero - tc; + iTc = tc; + + // Load data from pPix + MSA_LD_V8(v8i16, pPix - 3, iStride, t0, t1, t2, t3, q1_l, q1_r, q2_l, q2_r); + MSA_LD_V8(v8i16, pPix + 8 * iStride - 3, iStride, p0_l, p0_r, p1_l, p1_r, + p2_l, p2_r, q0_l, q0_r); + // Transpose 16x8 to 8x16, we just need p0, p1, p2, q0, q1, q2 + MSA_TRANSPOSE16x8_B(v16u8, t0, t1, t2, t3, q1_l, q1_r, q2_l, q2_r, + p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, q0_l, q0_r, + p2, p1, p0, q0, q1, q2, alpha, beta); + + alpha = (v16u8)__msa_fill_b(iAlpha); + beta = (v16u8)__msa_fill_b(iBeta); + + bDetaP0Q0 = __msa_asub_u_b(p0, q0); + bDetaP1P0 = __msa_asub_u_b(p1, p0); + bDetaQ1Q0 = __msa_asub_u_b(q1, q0); + bDetaP2P0 = __msa_asub_u_b(p2, p0); + bDetaQ2Q0 = __msa_asub_u_b(q2, q0); + bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha); + bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta); + bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta); + bDetaP2P0 = (v16u8)__msa_clt_u_b(bDetaP2P0, beta); + bDetaQ2Q0 = (v16u8)__msa_clt_u_b(bDetaQ2Q0, beta); + + // Unsigned extend p0, p1, p2, q0, q1, q2 from 8 bits to 16 bits + MSA_ILVRL_B4(v8i16, zero, p0, zero, p1, + p0_r, p0_l, p1_r, p1_l); + MSA_ILVRL_B4(v8i16, zero, p2, zero, q0, + p2_r, p2_l, q0_r, q0_l); + MSA_ILVRL_B4(v8i16, zero, q1, zero, q2, + q1_r, q1_l, q2_r, q2_l); + // Signed extend tc, negTc from 8 bits to 16 bits + flags = __msa_clt_s_b(tc, zero); + MSA_ILVRL_B2(v8i16, flags, tc, tc_r, tc_l); + flags = __msa_clt_s_b(negTc, zero); + MSA_ILVRL_B2(v8i16, flags, negTc, negTc_r, negTc_l); + + f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0; + flags = f & (v16i8)bDetaP2P0; + flags = __msa_ceq_b(flags, zero); + iTc += ((~flags) & const_1_b); + flags = f & (v16i8)bDetaQ2Q0; + flags = __msa_ceq_b(flags, zero); + iTc += ((~flags) & const_1_b); + negiTc = zero - iTc; + // Signed extend iTc, negiTc from 8 bits to 16 bits + flags = __msa_clt_s_b(iTc, zero); + MSA_ILVRL_B2(v8i16, flags, 
iTc, iTc_r, iTc_l); + flags = __msa_clt_s_b(negiTc, zero); + MSA_ILVRL_B2(v8i16, flags, negiTc, negiTc_r, negiTc_l); + + // Calculate the left part + // p1 + t0 = (p2_l + ((p0_l + q0_l + const_1_h) >> 1) - (p1_l << 1)) >> 1; + t0 = __msa_max_s_h(negTc_l, t0); + t0 = __msa_min_s_h(tc_l, t0); + t1 = p1_l + t0; + // q1 + t0 = (q2_l + ((p0_l + q0_l + const_1_h) >> 1) - (q1_l << 1)) >> 1; + t0 = __msa_max_s_h(negTc_l, t0); + t0 = __msa_min_s_h(tc_l, t0); + t2 = q1_l + t0; + // iDeta + t0 = (((q0_l - p0_l) << 2) + (p1_l - q1_l) + const_4_h) >> 3; + t0 = __msa_max_s_h(negiTc_l, t0); + t0 = __msa_min_s_h(iTc_l, t0); + p1_l = t1; + q1_l = t2; + // p0 + t1 = p0_l + t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + p0_l = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + // q0 + t1 = q0_l - t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + q0_l = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + + // Calculate the right part + // p1 + t0 = (p2_r + ((p0_r + q0_r + const_1_h) >> 1) - (p1_r << 1)) >> 1; + t0 = __msa_max_s_h(negTc_r, t0); + t0 = __msa_min_s_h(tc_r, t0); + t1 = p1_r + t0; + // q1 + t0 = (q2_r + ((p0_r + q0_r + const_1_h) >> 1) - (q1_r << 1)) >> 1; + t0 = __msa_max_s_h(negTc_r, t0); + t0 = __msa_min_s_h(tc_r, t0); + t2 = q1_r + t0; + // iDeta + t0 = (((q0_r - p0_r) << 2) + (p1_r - q1_r) + const_4_h) >> 3; + t0 = __msa_max_s_h(negiTc_r, t0); + t0 = __msa_min_s_h(iTc_r, t0); + p1_r = t1; + q1_r = t2; + // p0 + t1 = p0_r + t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + p0_r = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + // q0 + t1 = q0_r - t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + q0_r = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + + // Combined left and right + MSA_PCKEV_B4(v8i16, p1_l, p1_r, p0_l, p0_r, q0_l, q0_r, q1_l, q1_r, + t0, t1, t2, t3); + flags = (v16i8)__msa_cle_s_b(zero, tc); + flags &= f; + p0 = (v16u8)(((v16i8)t1 & flags) + (p0 & (~flags))); + q0 = (v16u8)(((v16i8)t2 & flags) + (q0 & (~flags))); + // Using t1, t2 as temporary flags + t1 = (v8i16)(flags & (~(__msa_ceq_b((v16i8)bDetaP2P0, zero)))); + p1 = (v16u8)(t0 & t1) + (p1 & (v16u8)(~t1)); + t2 = (v8i16)(flags & (~(__msa_ceq_b((v16i8)bDetaQ2Q0, zero)))); + q1 = (v16u8)(t3 & t2) + (q1 & (v16u8)(~t2)); + + MSA_ILVRL_B4(v8i16, p0, p1, q1, q0, t0, t1, t2, t3); + MSA_ILVRL_H4(v16u8, t2, t0, t3, t1, p1, p0, q0, q1); + // Store data to pPix + MSA_ST_W8(p1, p0, 0, 1, 2, 3, 0, 1, 2, 3, pPix - 2, iStride); + MSA_ST_W8(q0, q1, 0, 1, 2, 3, 0, 1, 2, 3, pPix + 8 * iStride - 2, iStride); +} + +void DeblockLumaEq4H_msa(uint8_t *pPix, int32_t iStride, int32_t iAlpha, + int32_t iBeta) { + v16u8 p0, p1, p2, p3, q0, q1, q2, q3; + v8i16 p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p3_l, p3_r, + q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q3_l, q3_r; + v8i16 t0, t1, t2, t0_con1; + v8i16 s0, s1, s2, s0_con1; + v16u8 alpha, beta; + v16u8 iDetaP0Q0, bDetaP1P0, bDetaQ1Q0, bDetaP2P0, bDetaQ2Q0; + // Condition mask + v16u8 mask0, mask1; + v16i8 const_2_b = __msa_ldi_b(2); + v8i16 const_2_h = __msa_ldi_h(2); + v8i16 const_4_h = __msa_ldi_h(4); + v16i8 zero = { 0 }; + + // Load data from pPix + MSA_LD_V8(v8i16, pPix - 4, iStride, p0_l, p0_r, p1_l, p1_r, + p2_l, p2_r, p3_l, p3_r); + MSA_LD_V8(v8i16, pPix + 8 * iStride - 4, iStride, + q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q3_l, 
q3_r); + // Transpose 16x8 to 8x16, we just need p0, p1, p2, p3, q0, q1, q2, q3 + MSA_TRANSPOSE16x8_B(v16u8, p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, p3_l, p3_r, + q0_l, q0_r, q1_l, q1_r, q2_l, q2_r, q3_l, q3_r, + p3, p2, p1, p0, q0, q1, q2, q3); + // iAlpha and beta are uint8_t type + alpha = (v16u8)__msa_fill_b(iAlpha); + beta = (v16u8)__msa_fill_b(iBeta); + + // iDetaP0Q0 is not bool type + iDetaP0Q0 = __msa_asub_u_b(p0, q0); + + bDetaP1P0 = __msa_asub_u_b(p1, p0); + bDetaQ1Q0 = __msa_asub_u_b(q1, q0); + bDetaP2P0 = __msa_asub_u_b(p2, p0); + bDetaQ2Q0 = __msa_asub_u_b(q2, q0); + bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta); + bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta); + bDetaP2P0 = (v16u8)__msa_clt_u_b(bDetaP2P0, beta); + bDetaQ2Q0 = (v16u8)__msa_clt_u_b(bDetaQ2Q0, beta); + + // Unsigned extend p0, p1, p2, p3, q0, q1, q2, q3 from 8 bits to 16 bits + MSA_ILVRL_B4(v8i16, zero, p0, zero, p1, + p0_r, p0_l, p1_r, p1_l); + MSA_ILVRL_B4(v8i16, zero, p2, zero, p3, + p2_r, p2_l, p3_r, p3_l); + MSA_ILVRL_B4(v8i16, zero, q0, zero, q1, + q0_r, q0_l, q1_r, q1_l); + MSA_ILVRL_B4(v8i16, zero, q2, zero, q3, + q2_r, q2_l, q3_r, q3_l); + + // Calculate condition mask + // (iDetaP0Q0 < iAlpha) && bDetaP1P0 && bDetaQ1Q0 + mask0 = (v16u8)__msa_clt_u_b(iDetaP0Q0, alpha); + mask0 &= bDetaP1P0; + mask0 &= bDetaQ1Q0; + // iDetaP0Q0 < ((iAlpha >> 2) + 2) + mask1 = (v16u8)((alpha >> 2) + const_2_b); + mask1 = (v16u8)__msa_clt_u_b(iDetaP0Q0, mask1); + + // Calculate the left part + // p0 + t0 = (p2_l + (p1_l << 1) + (p0_l << 1) + (q0_l << 1) + q1_l + const_4_h) >> 3; + // p1 + t1 = (p2_l + p1_l + p0_l + q0_l + const_2_h) >> 2; + // p2 + t2 = ((p3_l << 1) + p2_l + (p2_l << 1) + p1_l + p0_l + q0_l + const_4_h) >> 3; + // p0 condition 1 + t0_con1 = ((p1_l << 1) + p0_l + q1_l + const_2_h) >> 2; + // q0 + s0 = (p1_l + (p0_l << 1) + (q0_l << 1) + (q1_l << 1) + q2_l + const_4_h) >> 3; + // q1 + s1 = (p0_l + q0_l + q1_l + q2_l + const_2_h) >> 2; + // q2 + s2 = ((q3_l << 1) + q2_l + (q2_l << 1) + q1_l + q0_l + p0_l + const_4_h) >> 3; + // q0 condition 1 + s0_con1 = ((q1_l << 1) + q0_l + p1_l + const_2_h) >> 2; + // Move back + p0_l = t0; + p1_l = t1; + p2_l = t2; + q0_l = s0; + q1_l = s1; + q2_l = s2; + // Use p3_l, q3_l as tmp + p3_l = t0_con1; + q3_l = s0_con1; + + // Calculate the right part + // p0 + t0 = (p2_r + (p1_r << 1) + (p0_r << 1) + (q0_r << 1) + q1_r + const_4_h) >> 3; + // p1 + t1 = (p2_r + p1_r + p0_r + q0_r + const_2_h) >> 2; + // p2 + t2 = ((p3_r << 1) + p2_r + (p2_r << 1) + p1_r + p0_r + q0_r + const_4_h) >> 3; + // p0 condition 1 + t0_con1 = ((p1_r << 1) + p0_r + q1_r + const_2_h) >> 2; + // q0 + s0 = (p1_r + (p0_r << 1) + (q0_r << 1) + (q1_r << 1) + q2_r + const_4_h) >> 3; + // q1 + s1 = (p0_r + q0_r + q1_r + q2_r + const_2_h) >> 2; + // q2 + s2 = ((q3_r << 1) + q2_r + (q2_r << 1) + q1_r + q0_r + p0_r + const_4_h) >> 3; + // q0 condition 1 + s0_con1 = ((q1_r << 1) + q0_r + p1_r + const_2_h) >> 2; + // Move back + p0_r = t0; + p1_r = t1; + p2_r = t2; + q0_r = s0; + q1_r = s1; + q2_r = s2; + // Use p3_r, q3_r as tmp + p3_r = t0_con1; + q3_r = s0_con1; + + // Combined left and right + MSA_PCKEV_B4(v8i16, p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, q0_l, q0_r, + t0, t1, t2, s0); + MSA_PCKEV_B4(v8i16, q1_l, q1_r, q2_l, q2_r, p3_l, p3_r, q3_l, q3_r, + s1, s2, t0_con1, s0_con1); + t0 = (v8i16)(((v16u8)t0 & mask0 & mask1 & bDetaP2P0) + ((v16u8)t0_con1 & + mask0 & mask1 & (~bDetaP2P0)) + ((v16u8)t0_con1 & mask0 & (~mask1))); + t1 = (v8i16)((v16u8)t1 & mask0 & mask1 & bDetaP2P0); + t2 = (v8i16)((v16u8)t2 & mask0 
& mask1 & bDetaP2P0); + s0 = (v8i16)(((v16u8)s0 & mask0 & mask1 & bDetaQ2Q0) + ((v16u8)s0_con1 & + mask0 & mask1 & (~bDetaQ2Q0)) + ((v16u8)s0_con1 & mask0 & (~mask1))); + s1 = (v8i16)((v16u8)s1 & mask0 & mask1 & bDetaQ2Q0); + s2 = (v8i16)((v16u8)s2 & mask0 & mask1 & bDetaQ2Q0); + p0 = (v16u8)t0 + (p0 & (~mask0)); + p1 = (v16u8)t1 + (p1 & ~(mask0 & mask1 & bDetaP2P0)); + p2 = (v16u8)t2 + (p2 & ~(mask0 & mask1 & bDetaP2P0)); + q0 = (v16u8)s0 + (q0 & (~mask0)); + q1 = (v16u8)s1 + (q1 & ~(mask0 & mask1 & bDetaQ2Q0)); + q2 = (v16u8)s2 + (q2 & ~(mask0 & mask1 & bDetaQ2Q0)); + + MSA_ILVRL_B4(v8i16, p1, p2, q0, p0, t0, s0, t1, s1); + MSA_ILVRL_B2(v8i16, q2, q1, t2, s2); + MSA_ILVRL_H4(v16u8, t1, t0, s1, s0, p2, p1, p0, q0); + // Store data to pPix + MSA_ST_W8(p2, p1, 0, 1, 2, 3, 0, 1, 2, 3, pPix - 3, iStride); + MSA_ST_W8(p0, q0, 0, 1, 2, 3, 0, 1, 2, 3, pPix + 8 * iStride - 3, iStride); + MSA_ST_H8(t2, 0, 1, 2, 3, 4, 5, 6, 7, pPix + 1, iStride); + MSA_ST_H8(s2, 0, 1, 2, 3, 4, 5, 6, 7, pPix + 8 * iStride + 1, iStride); +} + +void DeblockChromaLt4V_msa(uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride, + int32_t iAlpha, int32_t iBeta, int8_t* pTc) { + v16u8 p0, p1, q0, q1; + v8i16 p0_e, p1_e, q0_e, q1_e; + v16i8 negTc, flags, f; + v8i16 tc_e, negTc_e; + // Use for temporary variable + v8i16 t0, t1, t2, t3; + v16u8 alpha, beta; + v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0; + v8i16 const_4_h = __msa_ldi_h(4); + v8i16 const_not_255_h = __msa_ldi_h(~255); + v16i8 zero = { 0 }; + v16i8 tc = { pTc[0 >> 1], pTc[1 >> 1], pTc[2 >> 1], pTc[3 >> 1], + pTc[4 >> 1], pTc[5 >> 1], pTc[6 >> 1], pTc[7 >> 1] }; + negTc = zero - tc; + + alpha = (v16u8)__msa_fill_b(iAlpha); + beta = (v16u8)__msa_fill_b(iBeta); + // Signed extend tc, negTc from 8 bits to 16 bits + flags = __msa_clt_s_b(tc, zero); + MSA_ILVR_B(v8i16, flags, tc, tc_e); + flags = __msa_clt_s_b(negTc, zero); + MSA_ILVR_B(v8i16, flags, negTc, negTc_e); + + // Cb + // Load data from pPixCb + MSA_LD_V4(v16u8, pPixCb - 2 * iStride, iStride, p1, p0, q0, q1); + + bDetaP0Q0 = __msa_asub_u_b(p0, q0); + bDetaP1P0 = __msa_asub_u_b(p1, p0); + bDetaQ1Q0 = __msa_asub_u_b(q1, q0); + bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha); + bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta); + bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta); + + // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits + MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1, + p0_e, p1_e, q0_e, q1_e); + + f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0; + + // iDeta + t0 = (((q0_e - p0_e) << 2) + (p1_e - q1_e) + const_4_h) >> 3; + t0 = __msa_max_s_h(negTc_e, t0); + t0 = __msa_min_s_h(tc_e, t0); + // p0 + t1 = p0_e + t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + p0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + // q0 + t1 = q0_e - t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + q0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + + MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1); + flags = (v16i8)__msa_cle_s_b(zero, tc); + flags &= f; + p0 = (v16u8)(((v16i8)t0 & flags) + (p0 & (~flags))); + q0 = (v16u8)(((v16i8)t1 & flags) + (q0 & (~flags))); + // Store data to pPixCb + MSA_ST_D(p0, 0, pPixCb - iStride); + MSA_ST_D(q0, 0, pPixCb); + + // Cr + // Load data from pPixCr + MSA_LD_V4(v16u8, pPixCr - 2 * iStride, iStride, p1, p0, q0, q1); + + bDetaP0Q0 = __msa_asub_u_b(p0, q0); + bDetaP1P0 = __msa_asub_u_b(p1, p0); + bDetaQ1Q0 = 
__msa_asub_u_b(q1, q0); + bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha); + bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta); + bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta); + + // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits + MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1, + p0_e, p1_e, q0_e, q1_e); + + f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0; + + // iDeta + t0 = (((q0_e - p0_e) << 2) + (p1_e - q1_e) + const_4_h) >> 3; + t0 = __msa_max_s_h(negTc_e, t0); + t0 = __msa_min_s_h(tc_e, t0); + // p0 + t1 = p0_e + t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + p0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + // q0 + t1 = q0_e - t0; + t2 = t1 & const_not_255_h; + t3 = __msa_cle_s_h((v8i16)zero, t1); + flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero); + q0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags)); + + MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1); + flags = (v16i8)__msa_cle_s_b(zero, tc); + flags &= f; + p0 = (v16u8)(((v16i8)t0 & flags) + (p0 & (~flags))); + q0 = (v16u8)(((v16i8)t1 & flags) + (q0 & (~flags))); + // Store data to pPixCr + MSA_ST_D(p0, 0, pPixCr - iStride); + MSA_ST_D(q0, 0, pPixCr); +} + +void DeblockChromaEq4V_msa(uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride, + int32_t iAlpha, int32_t iBeta) { + v16u8 p0, p1, q0, q1; + v8i16 p0_e, p1_e, q0_e, q1_e; + v16i8 f; + // Use for temporary variable + v8i16 t0, t1; + v16u8 alpha, beta; + v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0; + v8i16 const_2_h = __msa_ldi_h(2); + v16i8 zero = { 0 }; + + alpha = (v16u8)__msa_fill_b(iAlpha); + beta = (v16u8)__msa_fill_b(iBeta); + + // Cb + // Load data from pPixCb + MSA_LD_V4(v16u8, pPixCb - 2 * iStride, iStride, p1, p0, q0, q1); + + bDetaP0Q0 = __msa_asub_u_b(p0, q0); + bDetaP1P0 = __msa_asub_u_b(p1, p0); + bDetaQ1Q0 = __msa_asub_u_b(q1, q0); + bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha); + bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta); + bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta); + + // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits + MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1, + p0_e, p1_e, q0_e, q1_e); + + f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0; + + // p0 + p0_e = ((p1_e << 1) + p0_e + q1_e + const_2_h) >> 2; + // q0 + q0_e = ((q1_e << 1) + q0_e + p1_e + const_2_h) >> 2; + + MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1); + p0 = (v16u8)(((v16i8)t0 & f) + (p0 & (~f))); + q0 = (v16u8)(((v16i8)t1 & f) + (q0 & (~f))); + // Store data to pPixCb + MSA_ST_D(p0, 0, pPixCb - iStride); + MSA_ST_D(q0, 0, pPixCb); + + // Cr + // Load data from pPixCr + MSA_LD_V4(v16u8, pPixCr - 2 * iStride, iStride, p1, p0, q0, q1); + + bDetaP0Q0 = __msa_asub_u_b(p0, q0); + bDetaP1P0 = __msa_asub_u_b(p1, p0); + bDetaQ1Q0 = __msa_asub_u_b(q1, q0); + bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha); + bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta); + bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta); + + // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits + MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1, + p0_e, p1_e, q0_e, q1_e); + + f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0; + + // p0 + p0_e = ((p1_e << 1) + p0_e + q1_e + const_2_h) >> 2; + // q0 + q0_e = ((q1_e << 1) + q0_e + p1_e + const_2_h) >> 2; + + MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1); + p0 = (v16u8)(((v16i8)t0 & f) + (p0 & (~f))); + q0 = (v16u8)(((v16i8)t1 & f) + (q0 & (~f))); + // Store data to pPixCr + MSA_ST_D(p0, 
+
+void DeblockChromaLt4H_msa(uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride,
+                           int32_t iAlpha, int32_t iBeta, int8_t* pTc) {
+  v16u8 p0, p1, q0, q1;
+  v8i16 p0_e, p1_e, q0_e, q1_e;
+  v16i8 negTc, flags, f;
+  v8i16 tc_e, negTc_e;
+  // Use for temporary variable
+  v8i16 t0, t1, t2, t3;
+  v16u8 alpha, beta;
+  v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0;
+  v8i16 const_4_h = __msa_ldi_h(4);
+  v8i16 const_not_255_h = __msa_ldi_h(~255);
+  v16i8 zero = { 0 };
+  v16i8 tc = { pTc[0 >> 1], pTc[1 >> 1], pTc[2 >> 1], pTc[3 >> 1],
+               pTc[4 >> 1], pTc[5 >> 1], pTc[6 >> 1], pTc[7 >> 1] };
+  negTc = zero - tc;
+
+  alpha = (v16u8)__msa_fill_b(iAlpha);
+  beta = (v16u8)__msa_fill_b(iBeta);
+  // Signed extend tc, negTc from 8 bits to 16 bits
+  flags = __msa_clt_s_b(tc, zero);
+  MSA_ILVR_B(v8i16, flags, tc, tc_e);
+  flags = __msa_clt_s_b(negTc, zero);
+  MSA_ILVR_B(v8i16, flags, negTc, negTc_e);
+
+  // Cb
+  // Load data from pPixCb
+  MSA_LD_V8(v8i16, pPixCb - 2, iStride, p1_e, p0_e, q0_e, q1_e,
+            t0, t1, t2, t3);
+  // Transpose 8x4 to 4x8, we just need p0, p1, q0, q1
+  MSA_TRANSPOSE8x4_B(v16u8, p1_e, p0_e, q0_e, q1_e, t0, t1, t2, t3,
+                     p1, p0, q0, q1);
+
+  bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+  bDetaP1P0 = __msa_asub_u_b(p1, p0);
+  bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+  bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+  bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+  bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+  // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+  MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+              p0_e, p1_e, q0_e, q1_e);
+
+  f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+  // iDeta
+  t0 = (((q0_e - p0_e) << 2) + (p1_e - q1_e) + const_4_h) >> 3;
+  t0 = __msa_max_s_h(negTc_e, t0);
+  t0 = __msa_min_s_h(tc_e, t0);
+  // p0
+  t1 = p0_e + t0;
+  t2 = t1 & const_not_255_h;
+  t3 = __msa_cle_s_h((v8i16)zero, t1);
+  flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+  p0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+  // q0
+  t1 = q0_e - t0;
+  t2 = t1 & const_not_255_h;
+  t3 = __msa_cle_s_h((v8i16)zero, t1);
+  flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+  q0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+
+  MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+  flags = (v16i8)__msa_cle_s_b(zero, tc);
+  flags &= f;
+  p0 = (v16u8)(((v16i8)t0 & flags) + (p0 & (~flags)));
+  q0 = (v16u8)(((v16i8)t1 & flags) + (q0 & (~flags)));
+  // Store data to pPixCb
+  MSA_ILVR_B(v16u8, q0, p0, p0);
+  MSA_ST_H8(p0, 0, 1, 2, 3, 4, 5, 6, 7, pPixCb - 1, iStride);
+
+  // Cr
+  // Load data from pPixCr
+  MSA_LD_V8(v8i16, pPixCr - 2, iStride, p1_e, p0_e, q0_e, q1_e,
+            t0, t1, t2, t3);
+  // Transpose 8x4 to 4x8, we just need p0, p1, q0, q1
+  MSA_TRANSPOSE8x4_B(v16u8, p1_e, p0_e, q0_e, q1_e, t0, t1, t2, t3,
+                     p1, p0, q0, q1);
+
+  bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+  bDetaP1P0 = __msa_asub_u_b(p1, p0);
+  bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+  bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+  bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+  bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+  // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+  MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+              p0_e, p1_e, q0_e, q1_e);
+
+  f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+  // iDeta
+  t0 = (((q0_e - p0_e) << 2) + (p1_e - q1_e) + const_4_h) >> 3;
+  t0 = __msa_max_s_h(negTc_e, t0);
+  t0 = __msa_min_s_h(tc_e, t0);
+  // p0
+  t1 = p0_e + t0;
+  t2 = t1 & const_not_255_h;
+  t3 = __msa_cle_s_h((v8i16)zero, t1);
+  flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+  p0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+  // q0
+  t1 = q0_e - t0;
+  t2 = t1 & const_not_255_h;
+  t3 = __msa_cle_s_h((v8i16)zero, t1);
+  flags = (v16i8)__msa_ceq_h(t2, (v8i16)zero);
+  q0_e = (t1 & (v8i16)flags) + (t3 & (v8i16)(~flags));
+
+  MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+  flags = (v16i8)__msa_cle_s_b(zero, tc);
+  flags &= f;
+  p0 = (v16u8)(((v16i8)t0 & flags) + (p0 & (~flags)));
+  q0 = (v16u8)(((v16i8)t1 & flags) + (q0 & (~flags)));
+  // Store data to pPixCr
+  MSA_ILVR_B(v16u8, q0, p0, p0);
+  MSA_ST_H8(p0, 0, 1, 2, 3, 4, 5, 6, 7, pPixCr - 1, iStride);
+}
+
+void DeblockChromaEq4H_msa(uint8_t* pPixCb, uint8_t* pPixCr, int32_t iStride,
+                           int32_t iAlpha, int32_t iBeta) {
+  v16u8 p0, p1, q0, q1;
+  v8i16 p0_e, p1_e, q0_e, q1_e;
+  v16i8 f;
+  // Use for temporary variable
+  v8i16 t0, t1, t2, t3;
+  v16u8 alpha, beta;
+  v16u8 bDetaP0Q0, bDetaP1P0, bDetaQ1Q0;
+  v8i16 const_2_h = __msa_ldi_h(2);
+  v16i8 zero = { 0 };
+
+  alpha = (v16u8)__msa_fill_b(iAlpha);
+  beta = (v16u8)__msa_fill_b(iBeta);
+
+  // Cb
+  // Load data from pPixCb
+  MSA_LD_V8(v8i16, pPixCb - 2, iStride, p1_e, p0_e, q0_e, q1_e,
+            t0, t1, t2, t3);
+  // Transpose 8x4 to 4x8, we just need p0, p1, q0, q1
+  MSA_TRANSPOSE8x4_B(v16u8, p1_e, p0_e, q0_e, q1_e, t0, t1, t2, t3,
+                     p1, p0, q0, q1);
+
+  bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+  bDetaP1P0 = __msa_asub_u_b(p1, p0);
+  bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+  bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+  bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+  bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+  // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+  MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+              p0_e, p1_e, q0_e, q1_e);
+
+  f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+  // p0
+  p0_e = ((p1_e << 1) + p0_e + q1_e + const_2_h) >> 2;
+  // q0
+  q0_e = ((q1_e << 1) + q0_e + p1_e + const_2_h) >> 2;
+
+  MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+  p0 = (v16u8)(((v16i8)t0 & f) + (p0 & (~f)));
+  q0 = (v16u8)(((v16i8)t1 & f) + (q0 & (~f)));
+  // Store data to pPixCb
+  MSA_ILVR_B(v16u8, q0, p0, p0);
+  MSA_ST_H8(p0, 0, 1, 2, 3, 4, 5, 6, 7, pPixCb - 1, iStride);
+
+  // Cr
+  // Load data from pPixCr
+  MSA_LD_V8(v8i16, pPixCr - 2, iStride, p1_e, p0_e, q0_e, q1_e,
+            t0, t1, t2, t3);
+  // Transpose 8x4 to 4x8, we just need p0, p1, q0, q1
+  MSA_TRANSPOSE8x4_B(v16u8, p1_e, p0_e, q0_e, q1_e, t0, t1, t2, t3,
+                     p1, p0, q0, q1);
+
+  bDetaP0Q0 = __msa_asub_u_b(p0, q0);
+  bDetaP1P0 = __msa_asub_u_b(p1, p0);
+  bDetaQ1Q0 = __msa_asub_u_b(q1, q0);
+  bDetaP0Q0 = (v16u8)__msa_clt_u_b(bDetaP0Q0, alpha);
+  bDetaP1P0 = (v16u8)__msa_clt_u_b(bDetaP1P0, beta);
+  bDetaQ1Q0 = (v16u8)__msa_clt_u_b(bDetaQ1Q0, beta);
+
+  // Unsigned extend p0, p1, q0, q1 from 8 bits to 16 bits
+  MSA_ILVR_B4(v8i16, zero, p0, zero, p1, zero, q0, zero, q1,
+              p0_e, p1_e, q0_e, q1_e);
+
+  f = (v16i8)bDetaP0Q0 & (v16i8)bDetaP1P0 & (v16i8)bDetaQ1Q0;
+
+  // p0
+  p0_e = ((p1_e << 1) + p0_e + q1_e + const_2_h) >> 2;
+  // q0
+  q0_e = ((q1_e << 1) + q0_e + p1_e + const_2_h) >> 2;
+
+  MSA_PCKEV_B2(v8i16, p0_e, p0_e, q0_e, q0_e, t0, t1);
+  p0 = (v16u8)(((v16i8)t0 & f) + (p0 & (~f)));
+  q0 = (v16u8)(((v16i8)t1 & f) + (q0 & (~f)));
+  // Store data to pPixCr
+  MSA_ILVR_B(v16u8, q0, p0, p0);
+  MSA_ST_H8(p0, 0, 1, 2, 3, 4, 5, 6, 7, pPixCr - 1, iStride);
+}
+
+void WelsNonZeroCount_msa(int8_t* pNonZeroCount) {
+  v16u8 src0, src1;
+  v16u8 zero = { 0 };
+  v16u8 const_1 = (v16u8)__msa_fill_b(0x01);
+
+  MSA_LD_V2(v16u8, pNonZeroCount, 16, src0, src1);
+  src0 = (v16u8)__msa_ceq_b((v16i8)zero, (v16i8)src0);
+  src1 = (v16u8)__msa_ceq_b((v16i8)zero, (v16i8)src1);
+  src0 += const_1;
+  src1 += const_1;
+  MSA_ST_V(v16u8, src0, pNonZeroCount);
+  MSA_ST_D(src1, 0, pNonZeroCount + 16);
+}
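WelsNonZeroCount_msa collapses each non-zero-count byte to 0 or 1 with a compare-and-increment trick: __msa_ceq_b yields 0xFF where a byte is zero and 0x00 elsewhere, and adding 1 wraps 0xFF back to 0. A plain-C equivalent, assuming the 24-entry layout implied by the 16-byte plus 8-byte stores:

#include <stdint.h>

void WelsNonZeroCountSketch (int8_t* pNonZeroCount) {
  // Any nonzero count becomes exactly 1; zero stays 0.
  for (int i = 0; i < 24; ++i)
    pNonZeroCount[i] = (pNonZeroCount[i] != 0) ? 1 : 0;
}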
diff --git a/chromium/third_party/openh264/src/codec/common/targets.mk b/chromium/third_party/openh264/src/codec/common/targets.mk
index f2cd192fdd9..43de4ce4cf4 100644
--- a/chromium/third_party/openh264/src/codec/common/targets.mk
+++ b/chromium/third_party/openh264/src/codec/common/targets.mk
@@ -76,6 +76,8 @@ COMMON_ASM_MIPS_MMI_SRCS=\
 COMMON_OBJSMIPS_MMI += $(COMMON_ASM_MIPS_MMI_SRCS:.c=.$(OBJ))
 
 COMMON_ASM_MIPS_MSA_SRCS=\
+	$(COMMON_SRCDIR)/mips/copy_mb_msa.c\
+	$(COMMON_SRCDIR)/mips/deblock_msa.c\
 
 COMMON_OBJSMIPS_MSA += $(COMMON_ASM_MIPS_MSA_SRCS:.c=.$(OBJ))
 
 ifeq ($(ASM_ARCH), mips)
diff --git a/chromium/third_party/openh264/src/codec/decoder/core/inc/wels_common_basis.h b/chromium/third_party/openh264/src/codec/decoder/core/inc/wels_common_basis.h
index 833bc8bca3e..19aae110e33 100644
--- a/chromium/third_party/openh264/src/codec/decoder/core/inc/wels_common_basis.h
+++ b/chromium/third_party/openh264/src/codec/decoder/core/inc/wels_common_basis.h
@@ -275,7 +275,7 @@ static const SPartMbInfo g_ksInterBMbTypeInfo[] = {
   { MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 | MB_TYPE_P1L0 | MB_TYPE_P1L1, 4, 4 } //B_8x8
 };
 
-//Table 7.17 – Sub-macroblock types in B macroblocks.
+//Table 7.17 Sub-macroblock types in B macroblocks.
 static const SPartMbInfo g_ksInterPSubMbTypeInfo[4] = {
   {SUB_MB_TYPE_8x8, 1, 2},
   {SUB_MB_TYPE_8x4, 2, 2},
@@ -283,7 +283,7 @@ static const SPartMbInfo g_ksInterPSubMbTypeInfo[4] = {
   {SUB_MB_TYPE_4x4, 4, 1},
 };
 
-//Table 7.18 – Sub-macroblock types in B macroblocks.
+//Table 7.18 Sub-macroblock types in B macroblocks.
 static const SPartMbInfo g_ksInterBSubMbTypeInfo[] = {
   { MB_TYPE_DIRECT, 1, 2 }, //B_Direct_8x8
   { SUB_MB_TYPE_8x8 | MB_TYPE_P0L0, 1, 2 }, //B_L0_8x8
diff --git a/chromium/third_party/openh264/src/codec/decoder/core/src/deblocking.cpp b/chromium/third_party/openh264/src/codec/decoder/core/src/deblocking.cpp
index a3a6fded334..1817ab5b44b 100644
--- a/chromium/third_party/openh264/src/codec/decoder/core/src/deblocking.cpp
+++ b/chromium/third_party/openh264/src/codec/decoder/core/src/deblocking.cpp
@@ -1503,6 +1503,19 @@ void DeblockingInit (SDeblockingFunc* pFunc, int32_t iCpu) {
     pFunc->pfChromaDeblockingEQ4Hor = DeblockChromaEq4H_mmi;
   }
 #endif//HAVE_MMI
+
+#if defined(HAVE_MSA)
+  if (iCpu & WELS_CPU_MSA) {
+    pFunc->pfLumaDeblockingLT4Ver = DeblockLumaLt4V_msa;
+    pFunc->pfLumaDeblockingEQ4Ver = DeblockLumaEq4V_msa;
+    pFunc->pfLumaDeblockingLT4Hor = DeblockLumaLt4H_msa;
+    pFunc->pfLumaDeblockingEQ4Hor = DeblockLumaEq4H_msa;
+    pFunc->pfChromaDeblockingLT4Ver = DeblockChromaLt4V_msa;
+    pFunc->pfChromaDeblockingEQ4Ver = DeblockChromaEq4V_msa;
+    pFunc->pfChromaDeblockingLT4Hor = DeblockChromaLt4H_msa;
+    pFunc->pfChromaDeblockingEQ4Hor = DeblockChromaEq4H_msa;
+  }
+#endif//HAVE_MSA
 }
 
 } // namespace WelsDec
diff --git a/chromium/third_party/openh264/src/codec/decoder/core/src/decoder.cpp b/chromium/third_party/openh264/src/codec/decoder/core/src/decoder.cpp
index 3b38032acf5..a6f2da4374f 100644
--- a/chromium/third_party/openh264/src/codec/decoder/core/src/decoder.cpp
+++ b/chromium/third_party/openh264/src/codec/decoder/core/src/decoder.cpp
@@ -813,7 +813,7 @@ int32_t WelsDecodeBs (PWelsDecoderContext pCtx, const uint8_t* kpBsBuf, const in
     }
     CheckAndFinishLastPic (pCtx, ppDst, pDstBufInfo);
     if (pCtx->bAuReadyFlag && pCtx->pAccessUnitList->uiAvailUnitsNum != 0) {
-      if (pCtx->pThreadCtx == NULL) {
+      if (GetThreadCount (pCtx) <= 1) {
        ConstructAccessUnit (pCtx, ppDst, pDstBufInfo);
       } else {
         pCtx->pAccessUnitList->uiAvailUnitsNum = 1;
@@ -873,11 +873,11 @@ int32_t WelsDecodeBs (PWelsDecoderContext pCtx, const uint8_t* kpBsBuf, const in
     if (IS_PARAM_SETS_NALS (pCtx->sCurNalHead.eNalUnitType)) {
       iRet = ParseNonVclNal (pCtx, pNalPayload, iDstIdx - iConsumedBytes, pSrcNal - 3, iSrcIdx + 3);
     }
-    if (pCtx->pThreadCtx == NULL) {
+    if (GetThreadCount (pCtx) <= 1) {
       CheckAndFinishLastPic (pCtx, ppDst, pDstBufInfo);
     }
     if (pCtx->bAuReadyFlag && pCtx->pAccessUnitList->uiAvailUnitsNum != 0) {
-      if (pCtx->pThreadCtx == NULL) {
+      if (GetThreadCount (pCtx) <= 1) {
         ConstructAccessUnit (pCtx, ppDst, pDstBufInfo);
       } else {
         pCtx->pAccessUnitList->uiAvailUnitsNum = 1;
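The decoder hunks above (and the ConstructAccessUnit hunk below) replace a structural test with a count-based one; presumably this also routes a thread pool configured with exactly one thread through the serial path, not just the case where no thread context exists. A sketch of the guard (the helper wrapper is illustrative; only GetThreadCount appears in the patch):

// Assumed shape of the predicate: one decoding thread means "behave serially".
static bool IsSingleThreadDecode (PWelsDecoderContext pCtx) {
  return GetThreadCount (pCtx) <= 1;  // old test: pCtx->pThreadCtx == NULL
}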
diff --git a/chromium/third_party/openh264/src/codec/decoder/core/src/decoder_core.cpp b/chromium/third_party/openh264/src/codec/decoder/core/src/decoder_core.cpp
index a317000123c..32da38e382c 100644
--- a/chromium/third_party/openh264/src/codec/decoder/core/src/decoder_core.cpp
+++ b/chromium/third_party/openh264/src/codec/decoder/core/src/decoder_core.cpp
@@ -2338,7 +2338,7 @@ int32_t InitConstructAccessUnit (PWelsDecoderContext pCtx, SBufferInfo* pDstInfo
  */
 int32_t ConstructAccessUnit (PWelsDecoderContext pCtx, uint8_t** ppDst, SBufferInfo* pDstInfo) {
   int32_t iErr = ERR_NONE;
-  if (pCtx->pThreadCtx == NULL) {
+  if (GetThreadCount (pCtx) <= 1) {
     iErr = InitConstructAccessUnit (pCtx, pDstInfo);
     if (ERR_NONE != iErr) {
       return iErr;
@@ -2550,7 +2550,7 @@ int32_t DecodeCurrentAccessUnit (PWelsDecoderContext pCtx, uint8_t** ppDst, SBuf
     isNewFrame = pCtx->pDec == NULL;
   }
   if (pCtx->pDec == NULL) {
-    if (pLastThreadCtx != NULL) {
+    if (pLastThreadCtx != NULL && iIdx == 0) {
       pLastThreadCtx->pDec->bUsedAsRef = pLastThreadCtx->pCtx->uiNalRefIdc > 0;
       if (pLastThreadCtx->pDec->bUsedAsRef) {
         for (int32_t listIdx = LIST_0; listIdx < LIST_A; ++listIdx) {
@@ -2686,7 +2686,19 @@ int32_t DecodeCurrentAccessUnit (PWelsDecoderContext pCtx, uint8_t** ppDst, SBuf
       if (pCtx->bNewSeqBegin) {
         iPrevFrameNum = 0;
       } else if (pLastThreadCtx->pDec != NULL) {
-        iPrevFrameNum = pLastThreadCtx->pDec->iFrameNum;
+        if (pLastThreadCtx->pDec->uiTimeStamp == pCtx->uiTimeStamp - 1) {
+          iPrevFrameNum = pLastThreadCtx->pDec->iFrameNum;
+          if (iPrevFrameNum == -1) iPrevFrameNum = pLastThreadCtx->pCtx->iFrameNum;
+        } else {
+          int32_t id = pThreadCtx->sThreadInfo.uiThrNum;
+          for (int32_t i = 0; i < iThreadCount; ++i) {
+            if (pThreadCtx[i - id].pCtx->uiTimeStamp == pCtx->uiTimeStamp - 1) {
+              if (pThreadCtx[i - id].pDec != NULL) iPrevFrameNum = pThreadCtx[i - id].pDec->iFrameNum;
+              if (iPrevFrameNum == -1) iPrevFrameNum = pThreadCtx[i - id].pCtx->iFrameNum;
+              break;
+            }
+          }
+        }
       } else {
         iPrevFrameNum = pCtx->bNewSeqBegin ? 0 : pLastThreadCtx->pCtx->iFrameNum;
       }
@@ -2734,8 +2746,10 @@ int32_t DecodeCurrentAccessUnit (PWelsDecoderContext pCtx, uint8_t** ppDst, SBuf
     ComputeColocatedTemporalScaling (pCtx);
 
     if (iThreadCount > 1) {
-      memset (&pCtx->lastReadyHeightOffset[0][0], -1, LIST_A * MAX_REF_PIC_COUNT * sizeof (int16_t));
-      SET_EVENT (&pThreadCtx->sSliceDecodeStart);
+      if (iIdx == 0) {
+        memset (&pCtx->lastReadyHeightOffset[0][0], -1, LIST_A * MAX_REF_PIC_COUNT * sizeof (int16_t));
+        SET_EVENT (&pThreadCtx->sSliceDecodeStart);
+      }
       iRet = WelsDecodeAndConstructSlice (pCtx);
     } else {
       iRet = WelsDecodeSlice (pCtx, bFreshSliceAvailable, pNalCur);
diff --git a/chromium/third_party/openh264/src/codec/decoder/core/src/manage_dec_ref.cpp b/chromium/third_party/openh264/src/codec/decoder/core/src/manage_dec_ref.cpp
index 05aa40e1342..571ce41d7d9 100644
--- a/chromium/third_party/openh264/src/codec/decoder/core/src/manage_dec_ref.cpp
+++ b/chromium/third_party/openh264/src/codec/decoder/core/src/manage_dec_ref.cpp
@@ -394,7 +394,10 @@ int32_t WelsReorderRefList (PWelsDecoderContext pCtx) {
   for (int32_t listIdx = 0; listIdx < ListCount; ++listIdx) {
     PPicture pPic = NULL;
     PPicture* ppRefList = pCtx->sRefPic.pRefList[listIdx];
-    int32_t iMaxRefIdx = pCtx->iPicQueueNumber;
+    int32_t iMaxRefIdx = pCtx->iPicQueueNumber;
+    if (iMaxRefIdx >= MAX_REF_PIC_COUNT) {
+      iMaxRefIdx = MAX_REF_PIC_COUNT - 1;
+    }
     int32_t iRefCount = pSliceHeader->uiRefCount[listIdx];
     int32_t iPredFrameNum = pSliceHeader->iFrameNum;
    int32_t iMaxPicNum = 1 << pSliceHeader->pSps->uiLog2MaxFrameNum;
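The manage_dec_ref.cpp hunk bounds the reorder scan: pRefList is a fixed-size array of MAX_REF_PIC_COUNT entries, so an oversized picture-queue number could otherwise drive indexing past it. Condensed (the guarding of later accesses is an assumption about the surrounding code, which the hunk does not show):

int32_t iMaxRefIdx = pCtx->iPicQueueNumber;  // may exceed the array bound
if (iMaxRefIdx >= MAX_REF_PIC_COUNT)
  iMaxRefIdx = MAX_REF_PIC_COUNT - 1;        // keep ppRefList[] indexing in range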
diff --git a/chromium/third_party/openh264/src/codec/decoder/meson.build b/chromium/third_party/openh264/src/codec/decoder/meson.build
index e4d123cca06..1131022ffe4 100644
--- a/chromium/third_party/openh264/src/codec/decoder/meson.build
+++ b/chromium/third_party/openh264/src/codec/decoder/meson.build
@@ -22,13 +22,27 @@ cpp_sources = [
   'core/src/wels_decoder_thread.cpp',
 ]
 
-asm_sources = [
-  'core/x86/dct.asm',
-  'core/x86/intra_pred.asm',
-]
-
-objs_asm = asm_gen.process(asm_sources)
+objs_asm = []
+if ['x86', 'x86_64'].contains(cpu_family)
+  asm_sources = [
+    'core/x86/dct.asm',
+    'core/x86/intra_pred.asm',
+  ]
+  objs_asm = asm_gen.process(asm_sources)
+elif cpu_family == 'arm'
+  cpp_sources += [
+    'core/arm/block_add_neon.S',
+    'core/arm/intra_pred_neon.S',
+  ]
+elif cpu_family == 'aarch64'
+  cpp_sources += [
+    'core/arm64/block_add_aarch64_neon.S',
+    'core/arm64/intra_pred_aarch64_neon.S',
+  ]
+else
+  error('Unsupported cpu family @0@'.format(cpu_family))
+endif
 
 libdecoder = static_library('decoder',
   cpp_sources, objs_asm,
-  include_directories: [inc, decoder_inc],
+  include_directories: [inc, decoder_inc, casm_inc],
   dependencies: deps)
diff --git a/chromium/third_party/openh264/src/codec/decoder/plus/src/welsDecoderExt.cpp b/chromium/third_party/openh264/src/codec/decoder/plus/src/welsDecoderExt.cpp
index c402d7987ac..85a10600886 100644
--- a/chromium/third_party/openh264/src/codec/decoder/plus/src/welsDecoderExt.cpp
+++ b/chromium/third_party/openh264/src/codec/decoder/plus/src/welsDecoderExt.cpp
@@ -1374,6 +1374,13 @@ DECODING_STATE CWelsDecoder::ParseAccessUnit (SWelsDecoderThreadCTX& sThreadCtx)
       sThreadCtx.pCtx->iImgHeightInPixel = m_pLastDecThrCtx->pCtx->iImgHeightInPixel;
     }
   }
+
+  //if threadCount > 1, then each thread must contain exact one complete frame.
+  if (GetThreadCount (sThreadCtx.pCtx) > 1) {
+    sThreadCtx.pCtx->pAccessUnitList->uiAvailUnitsNum = 0;
+    sThreadCtx.pCtx->pAccessUnitList->uiActualUnitsNum = 0;
+  }
+
   int32_t iRet = DecodeFrame2WithCtx (sThreadCtx.pCtx, sThreadCtx.kpSrc, sThreadCtx.kiSrcLen, sThreadCtx.ppDst,
                                       &sThreadCtx.sDstInfo);
diff --git a/chromium/third_party/openh264/src/codec/encoder/core/src/deblocking.cpp b/chromium/third_party/openh264/src/codec/encoder/core/src/deblocking.cpp
index aec6b111788..8fd00ea6119 100644
--- a/chromium/third_party/openh264/src/codec/encoder/core/src/deblocking.cpp
+++ b/chromium/third_party/openh264/src/codec/encoder/core/src/deblocking.cpp
@@ -783,6 +783,11 @@ void WelsBlockFuncInit (PSetNoneZeroCountZeroFunc* pfSetNZCZero, int32_t iCpu)
     *pfSetNZCZero = WelsNonZeroCount_mmi;
   }
 #endif
+#if defined(HAVE_MSA)
+  if (iCpu & WELS_CPU_MSA) {
+    *pfSetNZCZero = WelsNonZeroCount_msa;
+  }
+#endif
 }
 
 void DeblockingInit (DeblockingFunc* pFunc, int32_t iCpu) {
@@ -860,6 +865,19 @@ void DeblockingInit (DeblockingFunc* pFunc, int32_t iCpu) {
     pFunc->pfChromaDeblockingEQ4Hor = DeblockChromaEq4H_mmi;
   }
 #endif//HAVE_MMI
+
+#if defined(HAVE_MSA)
+  if (iCpu & WELS_CPU_MSA) {
+    pFunc->pfLumaDeblockingLT4Ver = DeblockLumaLt4V_msa;
+    pFunc->pfLumaDeblockingEQ4Ver = DeblockLumaEq4V_msa;
+    pFunc->pfLumaDeblockingLT4Hor = DeblockLumaLt4H_msa;
+    pFunc->pfLumaDeblockingEQ4Hor = DeblockLumaEq4H_msa;
+    pFunc->pfChromaDeblockingLT4Ver = DeblockChromaLt4V_msa;
+    pFunc->pfChromaDeblockingEQ4Ver = DeblockChromaEq4V_msa;
+    pFunc->pfChromaDeblockingLT4Hor = DeblockChromaLt4H_msa;
+    pFunc->pfChromaDeblockingEQ4Hor = DeblockChromaEq4H_msa;
+  }
+#endif//HAVE_MSA
 }
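Both the decoder and encoder DeblockingInit hunks follow the same runtime-dispatch pattern: portable C implementations are installed first and overwritten when the corresponding CPU-feature bit is set. A condensed sketch (the _c fallback assignment follows the codec's naming convention and is assumed here, since the hunks only show the MSA branch):

void DeblockingInitSketch (DeblockingFunc* pFunc, int32_t iCpu) {
  // Portable default first...
  pFunc->pfChromaDeblockingLT4Ver = DeblockChromaLt4V_c;
  // ...then override per detected ISA extension.
#if defined(HAVE_MSA)
  if (iCpu & WELS_CPU_MSA)
    pFunc->pfChromaDeblockingLT4Ver = DeblockChromaLt4V_msa;
#endif
}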
diff --git a/chromium/third_party/openh264/src/codec/encoder/core/src/encode_mb_aux.cpp b/chromium/third_party/openh264/src/codec/encoder/core/src/encode_mb_aux.cpp
index 6f11f36ebf6..f9bc6c4768f 100644
--- a/chromium/third_party/openh264/src/codec/encoder/core/src/encode_mb_aux.cpp
+++ b/chromium/third_party/openh264/src/codec/encoder/core/src/encode_mb_aux.cpp
@@ -464,7 +464,7 @@ int32_t WelsHadamardQuant2x2Skip_AArch64_neon (int16_t* pRes, int16_t iFF, int1
 void WelsInitEncodingFuncs (SWelsFuncPtrList* pFuncList, uint32_t uiCpuFlag) {
   pFuncList->pfCopy8x8Aligned = WelsCopy8x8_c;
   pFuncList->pfCopy16x16Aligned =
-      pFuncList->pfCopy16x16NotAligned = WelsCopy16x16_c;
+    pFuncList->pfCopy16x16NotAligned = WelsCopy16x16_c;
   pFuncList->pfCopy16x8NotAligned = WelsCopy16x8_c;
   pFuncList->pfCopy8x16Aligned = WelsCopy8x16_c;
   pFuncList->pfCopy4x4 = WelsCopy4x4_c;
@@ -612,5 +612,16 @@ void WelsInitEncodingFuncs (SWelsFuncPtrList* pFuncList, uint32_t uiCpuFlag) {
     pFuncList->pfDctFourT4 = WelsDctFourT4_mmi;
   }
 #endif//HAVE_MMI
+
+#if defined(HAVE_MSA)
+  if (uiCpuFlag & WELS_CPU_MSA) {
+    pFuncList->pfCopy8x8Aligned = WelsCopy8x8_msa;
+    pFuncList->pfCopy8x16Aligned = WelsCopy8x16_msa;
+
+    pFuncList->pfCopy16x16Aligned =
+      pFuncList->pfCopy16x16NotAligned = WelsCopy16x16_msa;
+    pFuncList->pfCopy16x8NotAligned = WelsCopy16x8_msa;
+  }
+#endif
 }
 }
diff --git a/chromium/third_party/openh264/src/codec/encoder/core/src/encoder_ext.cpp b/chromium/third_party/openh264/src/codec/encoder/core/src/encoder_ext.cpp
index 9f79da89365..9bc6e103b73 100644
--- a/chromium/third_party/openh264/src/codec/encoder/core/src/encoder_ext.cpp
+++ b/chromium/third_party/openh264/src/codec/encoder/core/src/encoder_ext.cpp
@@ -374,12 +374,12 @@ int32_t ParamValidation (SLogContext* pLogCtx, SWelsSvcCodingParam* pCfg) {
              pCfg->bEnableFrameSkip);
   if ((pCfg->iMaxQp <= 0) || (pCfg->iMinQp <= 0)) {
     if (pCfg->iUsageType == SCREEN_CONTENT_REAL_TIME) {
-      WelsLog (pLogCtx, WELS_LOG_WARNING, "Change QP Range from(%d,%d) to (%d,%d)", pCfg->iMinQp, pCfg->iMaxQp, MIN_SCREEN_QP,
+      WelsLog (pLogCtx, WELS_LOG_INFO, "Change QP Range from(%d,%d) to (%d,%d)", pCfg->iMinQp, pCfg->iMaxQp, MIN_SCREEN_QP,
                MAX_SCREEN_QP);
       pCfg->iMinQp = MIN_SCREEN_QP;
       pCfg->iMaxQp = MAX_SCREEN_QP;
     } else {
-      WelsLog (pLogCtx, WELS_LOG_WARNING, "Change QP Range from(%d,%d) to (%d,%d)", pCfg->iMinQp, pCfg->iMaxQp,
+      WelsLog (pLogCtx, WELS_LOG_INFO, "Change QP Range from(%d,%d) to (%d,%d)", pCfg->iMinQp, pCfg->iMaxQp,
               GOM_MIN_QP_MODE, MAX_LOW_BR_QP);
       pCfg->iMinQp = GOM_MIN_QP_MODE;
       pCfg->iMaxQp = MAX_LOW_BR_QP;
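Context for the encoder_ext.cpp log-level change: when the configured QP range is unset or invalid, the validator substitutes usage-type defaults, a routine fallback rather than an error condition, hence WARNING becomes INFO. The surrounding logic, condensed into one sketch (the ternaries compress the two branches shown in the hunk):

if (pCfg->iMaxQp <= 0 || pCfg->iMinQp <= 0) {
  bool bScreen = (pCfg->iUsageType == SCREEN_CONTENT_REAL_TIME);
  pCfg->iMinQp = bScreen ? MIN_SCREEN_QP : GOM_MIN_QP_MODE;  // camera path uses low-bitrate bounds
  pCfg->iMaxQp = bScreen ? MAX_SCREEN_QP : MAX_LOW_BR_QP;
}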
diff --git a/chromium/third_party/openh264/src/codec/encoder/meson.build b/chromium/third_party/openh264/src/codec/encoder/meson.build
index 4e5c3314b6c..5f46854510b 100644
--- a/chromium/third_party/openh264/src/codec/encoder/meson.build
+++ b/chromium/third_party/openh264/src/codec/encoder/meson.build
@@ -33,19 +33,41 @@ cpp_sources = [
   'plus/src/welsEncoderExt.cpp',
 ]
 
-asm_sources = [
-  'core/x86/coeff.asm',
-  'core/x86/dct.asm',
-  'core/x86/intra_pred.asm',
-  'core/x86/matrix_transpose.asm',
-  'core/x86/memzero.asm',
-  'core/x86/quant.asm',
-  'core/x86/sample_sc.asm',
-  'core/x86/score.asm',
-]
-
-objs_asm = asm_gen.process(asm_sources)
+objs_asm = []
+if ['x86', 'x86_64'].contains(cpu_family)
+  asm_sources = [
+    'core/x86/coeff.asm',
+    'core/x86/dct.asm',
+    'core/x86/intra_pred.asm',
+    'core/x86/matrix_transpose.asm',
+    'core/x86/memzero.asm',
+    'core/x86/quant.asm',
+    'core/x86/sample_sc.asm',
+    'core/x86/score.asm',
+  ]
+  objs_asm = asm_gen.process(asm_sources)
+elif cpu_family == 'arm'
+  cpp_sources += [
+    'core/arm/intra_pred_neon.S',
+    'core/arm/intra_pred_sad_3_opt_neon.S',
+    'core/arm/memory_neon.S',
+    'core/arm/pixel_neon.S',
+    'core/arm/reconstruct_neon.S',
+    'core/arm/svc_motion_estimation.S',
+  ]
+elif cpu_family == 'aarch64'
+  cpp_sources += [
+    'core/arm64/intra_pred_aarch64_neon.S',
+    'core/arm64/intra_pred_sad_3_opt_aarch64_neon.S',
+    'core/arm64/memory_aarch64_neon.S',
+    'core/arm64/pixel_aarch64_neon.S',
+    'core/arm64/reconstruct_aarch64_neon.S',
+    'core/arm64/svc_motion_estimation_aarch64_neon.S',
+  ]
+else
+  error('Unsupported cpu family @0@'.format(cpu_family))
+endif
 
 libencoder = static_library('encoder',
   cpp_sources, objs_asm,
-  include_directories: [inc, processing_inc, encoder_inc],
+  include_directories: [inc, processing_inc, encoder_inc, casm_inc],
   dependencies: deps)
diff --git a/chromium/third_party/openh264/src/codec/meson.build b/chromium/third_party/openh264/src/codec/meson.build
index 7b610d24c4d..7a427f498df 100644
--- a/chromium/third_party/openh264/src/codec/meson.build
+++ b/chromium/third_party/openh264/src/codec/meson.build
@@ -2,5 +2,8 @@ subdir('common')
 subdir('decoder')
 subdir('encoder')
 subdir('processing')
-subdir('console')
+if not ['android', 'ios'].contains(system)
+  # also disabled in the Makefile for these platforms
+  subdir('console')
+endif
 subdir('api')
diff --git a/chromium/third_party/openh264/src/codec/processing/meson.build b/chromium/third_party/openh264/src/codec/processing/meson.build
index b7560e3d69a..d38dfb1f2ef 100644
--- a/chromium/third_party/openh264/src/codec/processing/meson.build
+++ b/chromium/third_party/openh264/src/codec/processing/meson.build
@@ -18,14 +18,32 @@ cpp_sources = [
   'src/vaacalc/vaacalculation.cpp',
 ]
 
-asm_sources = [
-  'src/x86/denoisefilter.asm',
-  'src/x86/downsample_bilinear.asm',
-  'src/x86/vaa.asm',
-]
-
-objs_asm = asm_gen.process(asm_sources)
+objs_asm = []
+if ['x86', 'x86_64'].contains(cpu_family)
+  asm_sources = [
+    'src/x86/denoisefilter.asm',
+    'src/x86/downsample_bilinear.asm',
+    'src/x86/vaa.asm',
+  ]
+  objs_asm = asm_gen.process(asm_sources)
+elif cpu_family == 'arm'
+  cpp_sources += [
+    'src/arm/adaptive_quantization.S',
+    'src/arm/down_sample_neon.S',
+    'src/arm/pixel_sad_neon.S',
+    'src/arm/vaa_calc_neon.S',
+  ]
+elif cpu_family == 'aarch64'
+  cpp_sources += [
+    'src/arm64/adaptive_quantization_aarch64_neon.S',
+    'src/arm64/down_sample_aarch64_neon.S',
+    'src/arm64/pixel_sad_aarch64_neon.S',
+    'src/arm64/vaa_calc_aarch64_neon.S',
+  ]
+else
+  error('Unsupported cpu family @0@'.format(cpu_family))
+endif
 
 libprocessing = static_library('processing',
   cpp_sources, objs_asm,
-  include_directories: [inc, processing_inc],
+  include_directories: [inc, processing_inc, casm_inc],
   dependencies: deps)
diff --git a/chromium/third_party/openh264/src/include/wels/meson.build b/chromium/third_party/openh264/src/include/wels/meson.build
index 1b0049222a5..73fcef3acfe 100644
--- a/chromium/third_party/openh264/src/include/wels/meson.build
+++ b/chromium/third_party/openh264/src/include/wels/meson.build
@@ -4,5 +4,5 @@ foreach header : api_headers
   api_header_deps += configure_file(
     input : header[1],
     output : header[0],
-    configuration : configuration_data())
+    copy : true)
 endforeach
diff --git a/chromium/third_party/openh264/src/meson.build b/chromium/third_party/openh264/src/meson.build
index dc677ef2918..a8692285743 100644
--- a/chromium/third_party/openh264/src/meson.build
+++ b/chromium/third_party/openh264/src/meson.build
@@ -1,10 +1,10 @@
 project('openh264', ['c', 'cpp'],
-  version : '2.0.0',
+  version : '2.1.0',
   meson_version : '>= 0.47',
   default_options : [ 'warning_level=1',
                       'buildtype=debugoptimized' ])
 
-major_version = '5'
+major_version = '6'
 
 cpp = meson.get_compiler('cpp')
 
@@ -36,8 +36,6 @@ encoder_inc = include_directories([
   join_paths('codec', 'encoder', 'plus', 'inc'),
 ])
 
-asm_inc = join_paths(meson.current_source_dir(), 'codec', 'common', 'x86', '')
-
 nasm = find_program('nasm', 'nasm.exe')
 
 system = host_machine.system()
 
@@ -54,24 +52,52 @@ deps = [dependency('threads')]
 c_args = []
 cpp_args = []
 asm_args = []
-
-if system == 'linux'
+asm_inc = []
+casm_inc = []
+cpp_lib = '-lstdc++'
+
+# TODO: should rely on dependency('threads') instead and change the pkg-config
+# generator below
+pthread_dep = cpp.find_library('pthread', required : false)
+libm_dep = cpp.find_library('libm', required : false)
+deps += [libm_dep]
+
+if ['linux', 'android', 'ios', 'darwin'].contains(system)
+  asm_format32 = 'elf'
+  asm_format64 = 'elf64'
+  if ['ios', 'darwin'].contains(system)
+    asm_format32 = 'macho32'
+    asm_format64 = 'macho64'
+  endif
   if cpu_family == 'x86'
-    asm_format = 'elf'
-    asm_args += ['-DX86_32']
-    add_project_arguments('-DX86_32_ASM', language: 'c')
+    asm_format = asm_format32
+    asm_args += ['-DX86_32', '-DHAVE_AVX2']
+    add_project_arguments('-DHAVE_AVX2', language: 'cpp')
+    add_project_arguments('-DHAVE_AVX2', '-DX86_ASM', '-DX86_32_ASM', language: 'c')
+    asm_inc = join_paths(meson.current_source_dir(), 'codec', 'common', 'x86', '')
   elif cpu_family == 'x86_64'
-    asm_format = 'elf64'
-    asm_args += ['-DUNIX64']
+    asm_format = asm_format64
+    asm_args += ['-DUNIX64', '-DHAVE_AVX2']
+    add_project_arguments('-DHAVE_AVX2', language: 'cpp')
+    add_project_arguments('-DHAVE_AVX2', '-DX86_ASM', language: 'c')
+    asm_inc = join_paths(meson.current_source_dir(), 'codec', 'common', 'x86', '')
+  elif cpu_family == 'arm'
+    asm_format = asm_format32
+    add_project_arguments('-DHAVE_NEON', language: 'c')
+    add_project_arguments('-DHAVE_NEON', language: 'cpp')
+    casm_inc = include_directories(join_paths('codec', 'common', 'arm'))
+  elif cpu_family == 'aarch64'
+    asm_format = asm_format64
+    add_project_arguments('-DHAVE_NEON_ARM64', language: 'c')
+    add_project_arguments('-DHAVE_NEON_ARM64', language: 'cpp')
+    casm_inc = include_directories(join_paths('codec', 'common', 'arm64'))
   else
-    error ('FIXME: unhandled CPU family @0@ for Linux'.format(cpu_family))
+    error ('FIXME: unhandled CPU family @0@ for @1@'.format(cpu_family, system))
   endif
-  deps += [cpp.find_library('libm')]
-
-  asm_args += ['-DHAVE_AVX2']
-  add_project_arguments('-DHAVE_AVX2', language: 'cpp')
-  add_project_arguments('-DHAVE_AVX2', '-DX86_ASM', language: 'c')
+  if ['ios', 'darwin', 'android'].contains(system)
+    cpp_lib = '-lc++'
+  endif
 elif system == 'windows'
   if cpu_family == 'x86'
     asm_format = 'win32'
@@ -82,17 +108,20 @@ elif system == 'windows'
   else
     error ('FIXME: unhandled CPU family @0@ for Windows'.format(cpu_family))
   endif
+  asm_inc = join_paths(meson.current_source_dir(), 'codec', 'common', 'x86', '')
 else
   error ('FIXME: Unhandled system @0@'.format(system))
 endif
 
-asm_gen = generator(nasm,
-  output : '@BASENAME@.o',
-  arguments : [
-    '-f', asm_format,
-    '-i', asm_inc,
-    '@INPUT@',
-    '-o', '@OUTPUT@'] + asm_args)
+if ['x86', 'x86_64'].contains(cpu_family)
+  asm_gen = generator(nasm,
+    output : '@BASENAME@.o',
+    arguments : [
+      '-f', asm_format,
+      '-i', asm_inc,
+      '@INPUT@',
+      '-o', '@OUTPUT@'] + asm_args)
+endif
 
 api_headers = []
 api_header_deps = []
 
@@ -127,18 +156,21 @@ foreach t : ['', '-static']
   pkgconf.set('prefix', join_paths(get_option('prefix')))
   pkgconf.set('libdir', '${prefix}/@0@'.format(get_option('libdir')))
   pkgconf.set('VERSION', meson.project_version())
+  pkglibs = cpp_lib
+  if libm_dep.found()
+    pkglibs += ' -lm'
+  endif
+  if pthread_dep.found()
+    pkglibs += ' -lpthread'
+  endif
   if t == '-static'
-    do_install = false
-    pkgconf.set('LIBS', '-lstdc++ -lpthread -lm')
+    pkgconf.set('LIBS', pkglibs)
     pkgconf.set('LIBS_PRIVATE', '')
   else
-    do_install = true
     pkgconf.set('LIBS', '')
-    pkgconf.set('LIBS_PRIVATE', '-lstdc++ -lpthread -lm')
+    pkgconf.set('LIBS_PRIVATE', pkglibs)
   endif
 
-  message('do_install: @0@'.format(do_install))
-
   configure_file(
     input: 'openh264.pc.in',
     output: 'openh264@0@.pc'.format(t),