| author | Lu Wang <wanglu@loongson.cn> | 2021-11-10 15:21:17 +0800 |
|---|---|---|
| committer | yuanhecai <yuanhecai@loongson.cn> | 2022-02-08 14:55:09 +0800 |
| commit | b3cc4b625d1d2c9a0913dcfbda97dd3bf845f998 (patch) | |
| tree | 54b8f15029a41afdd86050df995b2cb59fddb45a /vp8 | |
| parent | 85a9bdc6cc0ab6be4a2fb2c93f9e1551688489f6 (diff) | |
| download | libvpx-b3cc4b625d1d2c9a0913dcfbda97dd3bf845f998.tar.gz | |
vp8[loongarch]: Optimize vp8_loop/sixtap, vpx_dc with LSX.
1. vp8_loop_filter_mbh, vp8_loop_filter_mbv
2. vp8_sixtap_predict16x16, vp8_sixtap_predict8x8
3. vpx_dc_predictor_16x16, vpx_dc_predictor_8x8
./vpxdec --progress -o YUV_1920X1080.yuv original_1200f/VP8_1920X1080.webm
before: 37.77 fps
after:  220.90 fps
Bug: webm:1755
Change-Id: I1a3ce16f0c872261d813b6531cfdf25bd59bb774
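
For readers skimming the intrinsics in sixtap_filter_lsx.c below: VP8 sub-pixel prediction applies a 6-tap filter (one row of vp8_subpel_filters_lsx) to pixels src[-2]..src[+3], rounds by VP8_FILTER_SHIFT and clamps to 8 bits, and the LSX code computes this 8 or 16 pixels at a time with __lsx_vdp2_h_b / __lsx_vdp2add_h_b dot products. A minimal scalar sketch of the per-pixel arithmetic, assuming VP8_FILTER_SHIFT == 7 (the helper name is illustrative, not part of this patch):

```c
#include <stdint.h>

/* Scalar reference for one output pixel of the VP8 6-tap sub-pel filter.
 * "filter" is one row of vp8_subpel_filters_lsx; only the first 6 taps are
 * used.  Rounding constant assumes VP8_FILTER_SHIFT == 7. */
static uint8_t sixtap_pixel_scalar(const uint8_t *src, const int8_t *filter) {
  int32_t sum = 0;
  int k;
  for (k = 0; k < 6; ++k) {
    sum += (int32_t)src[k - 2] * filter[k]; /* taps cover src[-2] .. src[+3] */
  }
  sum = (sum + 64) >> 7; /* round to nearest, shift by VP8_FILTER_SHIFT */
  if (sum < 0) sum = 0;
  if (sum > 255) sum = 255;
  return (uint8_t)sum;
}
```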
Diffstat (limited to 'vp8')
| -rw-r--r-- | vp8/common/loongarch/loopfilter_filters_lsx.c | 393 |
| -rw-r--r-- | vp8/common/loongarch/sixtap_filter_lsx.c | 1164 |
| -rw-r--r-- | vp8/common/rtcd_defs.pl | 8 |
| -rw-r--r-- | vp8/vp8_common.mk | 4 |
4 files changed, 1565 insertions, 4 deletions
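
Note that this view is limited to vp8/, so the vpx_dc_predictor_16x16/8x8 changes mentioned in the commit message do not appear in the diff below. The loop-filter half of the patch centers on the LPF_MASK_HEV macro in loopfilter_filters_lsx.c, which derives a per-pixel apply/skip mask from the limit and blimit thresholds plus a high-edge-variance (hev) flag from thresh, across 16 lanes at once. A minimal scalar sketch of that decision, assuming the standard VP8 loop-filter semantics (function names are illustrative, not from this patch):

```c
#include <stdint.h>
#include <stdlib.h>

/* Per-edge filter mask: non-zero when the edge is smooth enough to filter.
 * This is the scalar meaning of what LPF_MASK_HEV computes with
 * vabsd/vmax/vslt on vector lanes. */
static int should_filter(uint8_t p3, uint8_t p2, uint8_t p1, uint8_t p0,
                         uint8_t q0, uint8_t q1, uint8_t q2, uint8_t q3,
                         uint8_t limit, uint8_t blimit) {
  int ok = 1;
  ok &= abs(p3 - p2) <= limit;
  ok &= abs(p2 - p1) <= limit;
  ok &= abs(p1 - p0) <= limit;
  ok &= abs(q1 - q0) <= limit;
  ok &= abs(q2 - q1) <= limit;
  ok &= abs(q3 - q2) <= limit;
  ok &= abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit;
  return ok;
}

/* High edge variance: when set, VP8_MBFILTER only nudges p0/q0; when clear,
 * the wider 27/18/9 smoothing is applied to p2..q2. */
static int is_high_edge_variance(uint8_t p1, uint8_t p0, uint8_t q0,
                                 uint8_t q1, uint8_t thresh) {
  return abs(p1 - p0) > thresh || abs(q1 - q0) > thresh;
}
```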
diff --git a/vp8/common/loongarch/loopfilter_filters_lsx.c b/vp8/common/loongarch/loopfilter_filters_lsx.c new file mode 100644 index 000000000..484b3d6ad --- /dev/null +++ b/vp8/common/loongarch/loopfilter_filters_lsx.c @@ -0,0 +1,393 @@ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + * Contributed by Lu Wang <wanglu@loongson.cn> + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "./vp8_rtcd.h" +#include "vp8/common/loopfilter.h" +#include "vpx_util/loongson_intrinsics.h" + +#define VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) \ + { \ + __m128i p2_m, p1_m, p0_m, q2_m, q1_m, q0_m; \ + __m128i u, filt, t1, t2, filt_sign, q0_sub_p0; \ + __m128i filt_r, filt_l; \ + __m128i temp0, temp1, temp2, temp3; \ + const __m128i cnst4b = __lsx_vldi(4); \ + const __m128i cnst3b = __lsx_vldi(3); \ + const __m128i cnst9h = __lsx_vldi(1033); \ + const __m128i cnst63h = __lsx_vldi(1087); \ + \ + p2_m = __lsx_vxori_b(p2, 0x80); \ + p1_m = __lsx_vxori_b(p1, 0x80); \ + p0_m = __lsx_vxori_b(p0, 0x80); \ + q0_m = __lsx_vxori_b(q0, 0x80); \ + q1_m = __lsx_vxori_b(q1, 0x80); \ + q2_m = __lsx_vxori_b(q2, 0x80); \ + \ + filt = __lsx_vssub_b(p1_m, q1_m); \ + q0_sub_p0 = __lsx_vssub_b(q0_m, p0_m); \ + filt = __lsx_vsadd_b(filt, q0_sub_p0); \ + filt = __lsx_vsadd_b(filt, q0_sub_p0); \ + filt = __lsx_vsadd_b(filt, q0_sub_p0); \ + filt = __lsx_vand_v(filt, mask); \ + \ + t2 = __lsx_vand_v(filt, hev); \ + hev = __lsx_vxori_b(hev, 0xff); \ + filt = __lsx_vand_v(hev, filt); \ + t1 = __lsx_vsadd_b(t2, cnst4b); \ + t1 = __lsx_vsra_b(t1, cnst3b); \ + t2 = __lsx_vsadd_b(t2, cnst3b); \ + t2 = __lsx_vsra_b(t2, cnst3b); \ + q0_m = __lsx_vssub_b(q0_m, t1); \ + p0_m = __lsx_vsadd_b(p0_m, t2); \ + filt_sign = __lsx_vslti_b(filt, 0); \ + filt_r = __lsx_vilvl_b(filt_sign, filt); \ + filt_l = __lsx_vilvh_b(filt_sign, filt); \ + temp0 = __lsx_vmul_h(filt_r, cnst9h); \ + temp1 = __lsx_vadd_h(temp0, cnst63h); \ + temp2 = __lsx_vmul_h(filt_l, cnst9h); \ + temp3 = __lsx_vadd_h(temp2, cnst63h); \ + \ + u = __lsx_vssrani_b_h(temp3, temp1, 7); \ + q2_m = __lsx_vssub_b(q2_m, u); \ + p2_m = __lsx_vsadd_b(p2_m, u); \ + q2 = __lsx_vxori_b(q2_m, 0x80); \ + p2 = __lsx_vxori_b(p2_m, 0x80); \ + \ + temp1 = __lsx_vadd_h(temp1, temp0); \ + temp3 = __lsx_vadd_h(temp3, temp2); \ + \ + u = __lsx_vssrani_b_h(temp3, temp1, 7); \ + q1_m = __lsx_vssub_b(q1_m, u); \ + p1_m = __lsx_vsadd_b(p1_m, u); \ + q1 = __lsx_vxori_b(q1_m, 0x80); \ + p1 = __lsx_vxori_b(p1_m, 0x80); \ + \ + temp1 = __lsx_vadd_h(temp1, temp0); \ + temp3 = __lsx_vadd_h(temp3, temp2); \ + \ + u = __lsx_vssrani_b_h(temp3, temp1, 7); \ + q0_m = __lsx_vssub_b(q0_m, u); \ + p0_m = __lsx_vsadd_b(p0_m, u); \ + q0 = __lsx_vxori_b(q0_m, 0x80); \ + p0 = __lsx_vxori_b(p0_m, 0x80); \ + } + +#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \ + limit_in, b_limit_in, thresh_in, hev_out, mask_out, \ + flat_out) \ + { \ + __m128i p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \ + __m128i p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \ + \ + p3_asub_p2_m = __lsx_vabsd_bu(p3_in, p2_in); \ + p2_asub_p1_m = __lsx_vabsd_bu(p2_in, p1_in); \ + p1_asub_p0_m = __lsx_vabsd_bu(p1_in, p0_in); \ + q1_asub_q0_m = __lsx_vabsd_bu(q1_in, q0_in); \ + q2_asub_q1_m = 
__lsx_vabsd_bu(q2_in, q1_in); \ + q3_asub_q2_m = __lsx_vabsd_bu(q3_in, q2_in); \ + p0_asub_q0_m = __lsx_vabsd_bu(p0_in, q0_in); \ + p1_asub_q1_m = __lsx_vabsd_bu(p1_in, q1_in); \ + flat_out = __lsx_vmax_bu(p1_asub_p0_m, q1_asub_q0_m); \ + hev_out = __lsx_vslt_bu(thresh_in, flat_out); \ + p0_asub_q0_m = __lsx_vsadd_bu(p0_asub_q0_m, p0_asub_q0_m); \ + p1_asub_q1_m = __lsx_vsrli_b(p1_asub_q1_m, 1); \ + p0_asub_q0_m = __lsx_vsadd_bu(p0_asub_q0_m, p1_asub_q1_m); \ + mask_out = __lsx_vslt_bu(b_limit_in, p0_asub_q0_m); \ + mask_out = __lsx_vmax_bu(flat_out, mask_out); \ + p3_asub_p2_m = __lsx_vmax_bu(p3_asub_p2_m, p2_asub_p1_m); \ + mask_out = __lsx_vmax_bu(p3_asub_p2_m, mask_out); \ + q2_asub_q1_m = __lsx_vmax_bu(q2_asub_q1_m, q3_asub_q2_m); \ + mask_out = __lsx_vmax_bu(q2_asub_q1_m, mask_out); \ + mask_out = __lsx_vslt_bu(limit_in, mask_out); \ + mask_out = __lsx_vxori_b(mask_out, 0xff); \ + } + +#define VP8_ST6x1_B(in0, in0_idx, in1, in1_idx, pdst, stride) \ + { \ + __lsx_vstelm_w(in0, pdst, 0, in0_idx); \ + __lsx_vstelm_h(in1, pdst + stride, 0, in1_idx); \ + } + +static inline void mbloop_filter_horizontal_edge_y_lsx( + uint8_t *src, int32_t pitch, const uint8_t b_limit_in, + const uint8_t limit_in, const uint8_t thresh_in) { + uint8_t *temp_src; + int32_t pitch_x2 = pitch << 1; + int32_t pitch_x3 = pitch_x2 + pitch; + int32_t pitch_x4 = pitch << 2; + + __m128i p3, p2, p1, p0, q3, q2, q1, q0; + __m128i mask, hev, flat, thresh, limit, b_limit; + + DUP2_ARG2(__lsx_vldrepl_b, &b_limit_in, 0, &limit_in, 0, b_limit, limit); + thresh = __lsx_vldrepl_b(&thresh_in, 0); + + temp_src = src - pitch_x4; + DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2, + temp_src, pitch_x3, p3, p2, p1, p0); + temp_src += pitch_x4; + DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2, + temp_src, pitch_x3, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); + VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + + temp_src = src - pitch_x3; + __lsx_vstx(p2, temp_src, 0); + __lsx_vstx(p1, temp_src, pitch); + __lsx_vstx(p0, temp_src, pitch_x2); + __lsx_vstx(q0, temp_src, pitch_x3); + temp_src += pitch_x4; + __lsx_vstx(q1, temp_src, 0); + __lsx_vstx(q2, temp_src, pitch); +} + +static inline void mbloop_filter_horizontal_edge_uv_lsx( + uint8_t *src_u, uint8_t *src_v, int32_t pitch, const uint8_t b_limit_in, + const uint8_t limit_in, const uint8_t thresh_in) { + uint8_t *temp_src; + int32_t pitch_x2 = pitch << 1; + int32_t pitch_x3 = pitch_x2 + pitch; + int32_t pitch_x4 = pitch << 2; + __m128i p3, p2, p1, p0, q3, q2, q1, q0; + __m128i mask, hev, flat, thresh, limit, b_limit; + __m128i p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u; + __m128i p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v; + + DUP2_ARG2(__lsx_vldrepl_b, &b_limit_in, 0, &limit_in, 0, b_limit, limit); + thresh = __lsx_vldrepl_b(&thresh_in, 0); + + temp_src = src_u - pitch_x4; + DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2, + temp_src, pitch_x3, p3_u, p2_u, p1_u, p0_u); + temp_src += pitch_x4; + DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2, + temp_src, pitch_x3, q0_u, q1_u, q2_u, q3_u); + temp_src = src_v - pitch_x4; + DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2, + temp_src, pitch_x3, p3_v, p2_v, p1_v, p0_v); + temp_src += pitch_x4; + DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2, + temp_src, pitch_x3, q0_v, q1_v, q2_v, q3_v); + + DUP4_ARG2(__lsx_vilvl_d, p3_v, p3_u, 
p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, + p2, p1, p0); + DUP4_ARG2(__lsx_vilvl_d, q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, + q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); + VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + + src_u -= pitch_x3; + __lsx_vstelm_d(p2, src_u, 0, 0); + __lsx_vstelm_d(p1, src_u + pitch, 0, 0); + __lsx_vstelm_d(p0, src_u + pitch_x2, 0, 0); + __lsx_vstelm_d(q0, src_u + pitch_x3, 0, 0); + src_u += pitch_x4; + __lsx_vstelm_d(q1, src_u, 0, 0); + src_u += pitch; + __lsx_vstelm_d(q2, src_u, 0, 0); + + src_v -= pitch_x3; + __lsx_vstelm_d(p2, src_v, 0, 1); + __lsx_vstelm_d(p1, src_v + pitch, 0, 1); + __lsx_vstelm_d(p0, src_v + pitch_x2, 0, 1); + __lsx_vstelm_d(q0, src_v + pitch_x3, 0, 1); + src_v += pitch_x4; + __lsx_vstelm_d(q1, src_v, 0, 1); + src_v += pitch; + __lsx_vstelm_d(q2, src_v, 0, 1); +} + +static inline void mbloop_filter_vertical_edge_y_lsx(uint8_t *src, + int32_t pitch, + const uint8_t b_limit_in, + const uint8_t limit_in, + const uint8_t thresh_in) { + uint8_t *temp_src; + int32_t pitch_x2 = pitch << 1; + int32_t pitch_x3 = pitch_x2 + pitch; + int32_t pitch_x4 = pitch << 2; + + __m128i p3, p2, p1, p0, q3, q2, q1, q0; + __m128i mask, hev, flat, thresh, limit, b_limit; + __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8; + __m128i row9, row10, row11, row12, row13, row14, row15; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + + DUP2_ARG2(__lsx_vldrepl_b, &b_limit_in, 0, &limit_in, 0, b_limit, limit); + thresh = __lsx_vldrepl_b(&thresh_in, 0); + temp_src = src - 4; + DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2, + temp_src, pitch_x3, row0, row1, row2, row3); + temp_src += pitch_x4; + DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2, + temp_src, pitch_x3, row4, row5, row6, row7); + temp_src += pitch_x4; + DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2, + temp_src, pitch_x3, row8, row9, row10, row11); + temp_src += pitch_x4; + DUP4_ARG2(__lsx_vldx, temp_src, 0, temp_src, pitch, temp_src, pitch_x2, + temp_src, pitch_x3, row12, row13, row14, row15); + temp_src -= pitch_x4; + LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7, row8, + row9, row10, row11, row12, row13, row14, row15, p3, p2, + p1, p0, q0, q1, q2, q3); + + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); + VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + DUP2_ARG2(__lsx_vilvl_b, p1, p2, q0, p0, tmp0, tmp1); + tmp3 = __lsx_vilvl_h(tmp1, tmp0); + tmp4 = __lsx_vilvh_h(tmp1, tmp0); + DUP2_ARG2(__lsx_vilvh_b, p1, p2, q0, p0, tmp0, tmp1); + tmp6 = __lsx_vilvl_h(tmp1, tmp0); + tmp7 = __lsx_vilvh_h(tmp1, tmp0); + tmp2 = __lsx_vilvl_b(q2, q1); + tmp5 = __lsx_vilvh_b(q2, q1); + + temp_src = src - 3; + VP8_ST6x1_B(tmp3, 0, tmp2, 0, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp3, 1, tmp2, 1, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp3, 2, tmp2, 2, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp3, 3, tmp2, 3, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp4, 0, tmp2, 4, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp4, 1, tmp2, 5, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp4, 2, tmp2, 6, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp4, 3, tmp2, 7, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp6, 0, tmp5, 0, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp6, 1, tmp5, 1, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp6, 2, tmp5, 2, 
temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp6, 3, tmp5, 3, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp7, 0, tmp5, 4, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp7, 1, tmp5, 5, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp7, 2, tmp5, 6, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_B(tmp7, 3, tmp5, 7, temp_src, 4); +} + +static inline void mbloop_filter_vertical_edge_uv_lsx( + uint8_t *src_u, uint8_t *src_v, int32_t pitch, const uint8_t b_limit_in, + const uint8_t limit_in, const uint8_t thresh_in) { + int32_t pitch_x2 = pitch << 1; + int32_t pitch_x3 = pitch_x2 + pitch; + int32_t pitch_x4 = pitch << 2; + __m128i p3, p2, p1, p0, q3, q2, q1, q0; + __m128i mask, hev, flat, thresh, limit, b_limit; + __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8; + __m128i row9, row10, row11, row12, row13, row14, row15; + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + + DUP2_ARG2(__lsx_vldrepl_b, &b_limit_in, 0, &limit_in, 0, b_limit, limit); + thresh = __lsx_vldrepl_b(&thresh_in, 0); + + src_u -= 4; + DUP4_ARG2(__lsx_vldx, src_u, 0, src_u, pitch, src_u, pitch_x2, src_u, + pitch_x3, row0, row1, row2, row3); + src_u += pitch_x4; + DUP4_ARG2(__lsx_vldx, src_u, 0, src_u, pitch, src_u, pitch_x2, src_u, + pitch_x3, row4, row5, row6, row7); + src_v -= 4; + DUP4_ARG2(__lsx_vldx, src_v, 0, src_v, pitch, src_v, pitch_x2, src_v, + pitch_x3, row8, row9, row10, row11); + src_v += pitch_x4; + DUP4_ARG2(__lsx_vldx, src_v, 0, src_v, pitch, src_v, pitch_x2, src_v, + pitch_x3, row12, row13, row14, row15); + LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7, row8, + row9, row10, row11, row12, row13, row14, row15, p3, p2, + p1, p0, q0, q1, q2, q3); + + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); + VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + + DUP2_ARG2(__lsx_vilvl_b, p1, p2, q0, p0, tmp0, tmp1); + tmp3 = __lsx_vilvl_h(tmp1, tmp0); + tmp4 = __lsx_vilvh_h(tmp1, tmp0); + DUP2_ARG2(__lsx_vilvh_b, p1, p2, q0, p0, tmp0, tmp1); + tmp6 = __lsx_vilvl_h(tmp1, tmp0); + tmp7 = __lsx_vilvh_h(tmp1, tmp0); + tmp2 = __lsx_vilvl_b(q2, q1); + tmp5 = __lsx_vilvh_b(q2, q1); + + src_u += 1 - pitch_x4; + VP8_ST6x1_B(tmp3, 0, tmp2, 0, src_u, 4); + src_u += pitch; + VP8_ST6x1_B(tmp3, 1, tmp2, 1, src_u, 4); + src_u += pitch; + VP8_ST6x1_B(tmp3, 2, tmp2, 2, src_u, 4); + src_u += pitch; + VP8_ST6x1_B(tmp3, 3, tmp2, 3, src_u, 4); + src_u += pitch; + VP8_ST6x1_B(tmp4, 0, tmp2, 4, src_u, 4); + src_u += pitch; + VP8_ST6x1_B(tmp4, 1, tmp2, 5, src_u, 4); + src_u += pitch; + VP8_ST6x1_B(tmp4, 2, tmp2, 6, src_u, 4); + src_u += pitch; + VP8_ST6x1_B(tmp4, 3, tmp2, 7, src_u, 4); + + src_v += 1 - pitch_x4; + VP8_ST6x1_B(tmp6, 0, tmp5, 0, src_v, 4); + src_v += pitch; + VP8_ST6x1_B(tmp6, 1, tmp5, 1, src_v, 4); + src_v += pitch; + VP8_ST6x1_B(tmp6, 2, tmp5, 2, src_v, 4); + src_v += pitch; + VP8_ST6x1_B(tmp6, 3, tmp5, 3, src_v, 4); + src_v += pitch; + VP8_ST6x1_B(tmp7, 0, tmp5, 4, src_v, 4); + src_v += pitch; + VP8_ST6x1_B(tmp7, 1, tmp5, 5, src_v, 4); + src_v += pitch; + VP8_ST6x1_B(tmp7, 2, tmp5, 6, src_v, 4); + src_v += pitch; + VP8_ST6x1_B(tmp7, 3, tmp5, 7, src_v, 4); +} + +void vp8_loop_filter_mbh_lsx(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v, + int32_t pitch_y, int32_t pitch_u_v, + loop_filter_info *lpf_info_ptr) { + mbloop_filter_horizontal_edge_y_lsx(src_y, pitch_y, *lpf_info_ptr->mblim, + *lpf_info_ptr->lim, + *lpf_info_ptr->hev_thr); + if (src_u) { + mbloop_filter_horizontal_edge_uv_lsx( + src_u, src_v, pitch_u_v, 
*lpf_info_ptr->mblim, *lpf_info_ptr->lim, + *lpf_info_ptr->hev_thr); + } +} + +void vp8_loop_filter_mbv_lsx(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v, + int32_t pitch_y, int32_t pitch_u_v, + loop_filter_info *lpf_info_ptr) { + mbloop_filter_vertical_edge_y_lsx(src_y, pitch_y, *lpf_info_ptr->mblim, + *lpf_info_ptr->lim, *lpf_info_ptr->hev_thr); + if (src_u) { + mbloop_filter_vertical_edge_uv_lsx(src_u, src_v, pitch_u_v, + *lpf_info_ptr->mblim, *lpf_info_ptr->lim, + *lpf_info_ptr->hev_thr); + } +} diff --git a/vp8/common/loongarch/sixtap_filter_lsx.c b/vp8/common/loongarch/sixtap_filter_lsx.c new file mode 100644 index 000000000..75fe533d9 --- /dev/null +++ b/vp8/common/loongarch/sixtap_filter_lsx.c @@ -0,0 +1,1164 @@ +/* + * Copyright (c) 2021 Loongson Technology Corporation Limited + * Contributed by Lu Wang <wanglu@loongson.cn> + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "./vp8_rtcd.h" +#include "vp8/common/filter.h" +#include "vpx_ports/mem.h" +#include "vpx_util/loongson_intrinsics.h" + +DECLARE_ALIGNED(16, static const int8_t, vp8_subpel_filters_lsx[7][8]) = { + { 0, -6, 123, 12, -1, 0, 0, 0 }, + { 2, -11, 108, 36, -8, 1, 0, 0 }, /* New 1/4 pel 6 tap filter */ + { 0, -9, 93, 50, -6, 0, 0, 0 }, + { 3, -16, 77, 77, -16, 3, 0, 0 }, /* New 1/2 pel 6 tap filter */ + { 0, -6, 50, 93, -9, 0, 0, 0 }, + { 1, -8, 36, 108, -11, 2, 0, 0 }, /* New 1/4 pel 6 tap filter */ + { 0, -1, 12, 123, -6, 0, 0, 0 }, +}; + +static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = { + /* 8 width cases */ + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, + /* 4 width cases */ + 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20, + /* 4 width cases */ + 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28 +}; + +#define DPADD_H3(in0, in1, in2, coeff0, coeff1, coeff2) \ + ({ \ + __m128i out0_m; \ + \ + out0_m = __lsx_vdp2_h_b(in0, coeff0); \ + out0_m = __lsx_vdp2add_h_b(out0_m, in1, coeff1); \ + out0_m = __lsx_vdp2add_h_b(out0_m, in2, coeff2); \ + \ + out0_m; \ + }) + +#define HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_h0, filt_h1, \ + filt_h2) \ + ({ \ + __m128i vec0_m, vec1_m, vec2_m; \ + __m128i hz_out_m; \ + \ + DUP2_ARG3(__lsx_vshuf_b, src0, src1, mask0, src0, src1, mask1, vec0_m, \ + vec1_m); \ + vec2_m = __lsx_vshuf_b(src0, src1, mask2); \ + hz_out_m = DPADD_H3(vec0_m, vec1_m, vec2_m, filt_h0, filt_h1, filt_h2); \ + \ + hz_out_m = __lsx_vsrari_h(hz_out_m, VP8_FILTER_SHIFT); \ + hz_out_m = __lsx_vsat_h(hz_out_m, 7); \ + \ + hz_out_m; \ + }) + +#define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ + mask2, filt0, filt1, filt2, out0, out1, \ + out2, out3) \ + ({ \ + __m128i vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ + \ + DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, vec0_m, \ + vec1_m); \ + DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src3, src3, mask0, vec2_m, \ + vec3_m); \ + DUP4_ARG2(__lsx_vdp2_h_b, vec0_m, filt0, vec1_m, filt0, vec2_m, filt0, \ + vec3_m, filt0, out0, out1, out2, out3); \ + DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, vec0_m, \ + vec1_m); \ + DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask1, src3, src3, mask1, vec2_m, \ + vec3_m); \ + DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, 
mask2, vec4_m, \ + vec5_m); \ + DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask2, src3, src3, mask2, vec6_m, \ + vec7_m); \ + DUP4_ARG3(__lsx_vdp2add_h_b, out0, vec0_m, filt1, out1, vec1_m, filt1, \ + out2, vec2_m, filt1, out3, vec3_m, filt1, out0, out1, out2, \ + out3); \ + DUP4_ARG3(__lsx_vdp2add_h_b, out0, vec4_m, filt2, out1, vec5_m, filt2, \ + out2, vec6_m, filt2, out3, vec7_m, filt2, out0, out1, out2, \ + out3); \ + }) + +#define FILT_4TAP_DPADD_H(vec0, vec1, filt0, filt1) \ + ({ \ + __m128i tmp0; \ + \ + tmp0 = __lsx_vdp2_h_b(vec0, filt0); \ + tmp0 = __lsx_vdp2add_h_b(tmp0, vec1, filt1); \ + \ + tmp0; \ + }) + +#define HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_h0, filt_h1) \ + ({ \ + __m128i vec0_m, vec1_m; \ + __m128i hz_out_m; \ + \ + DUP2_ARG3(__lsx_vshuf_b, src0, src1, mask0, src0, src1, mask1, vec0_m, \ + vec1_m); \ + hz_out_m = FILT_4TAP_DPADD_H(vec0_m, vec1_m, filt_h0, filt_h1); \ + hz_out_m = __lsx_vsrari_h(hz_out_m, VP8_FILTER_SHIFT); \ + hz_out_m = __lsx_vsat_h(hz_out_m, 7); \ + \ + hz_out_m; \ + }) + +#define HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ + filt0, filt1, out0, out1, out2, out3) \ + ({ \ + __m128i vec0_m, vec1_m, vec2_m, vec3_m; \ + \ + DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, vec0_m, \ + vec1_m); \ + DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src3, src3, mask0, vec2_m, \ + vec3_m); \ + DUP4_ARG2(__lsx_vdp2_h_b, vec0_m, filt0, vec1_m, filt0, vec2_m, filt0, \ + vec3_m, filt0, out0, out1, out2, out3); \ + DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, vec0_m, \ + vec1_m); \ + DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask1, src3, src3, mask1, vec2_m, \ + vec3_m); \ + DUP4_ARG3(__lsx_vdp2add_h_b, out0, vec0_m, filt1, out1, vec1_m, filt1, \ + out2, vec2_m, filt1, out3, vec3_m, filt1, out0, out1, out2, \ + out3); \ + }) + +static void common_hz_6t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + __m128i src0, src1, src2, src3, filt0, filt1, filt2; + __m128i mask0, mask1, mask2, tmp0, tmp1; + __m128i filt, out0, out1, out2, out3; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0); + src -= 2; + + filt = __lsx_vld(filter, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1); + filt2 = __lsx_vreplvei_h(filt, 2); + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2); + + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src0, src1, src2, src3); + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + src += src_stride_x4; + HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0, + filt1, filt2, out0, out1, out2, out3); + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2, + VP8_FILTER_SHIFT, tmp0, tmp1); + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1); + __lsx_vstelm_d(tmp0, dst, 0, 0); + __lsx_vstelm_d(tmp0, dst + dst_stride, 0, 1); + __lsx_vstelm_d(tmp1, dst + dst_stride_x2, 0, 0); + __lsx_vstelm_d(tmp1, dst + dst_stride_x3, 0, 1); + dst += dst_stride_x4; + + for (loop_cnt = (height >> 2) - 1; loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, 
src, + src_stride_x3, src0, src1, src2, src3); + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + src += src_stride_x4; + HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, + filt0, filt1, filt2, out0, out1, out2, out3); + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2, + VP8_FILTER_SHIFT, tmp0, tmp1); + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1); + __lsx_vstelm_d(tmp0, dst, 0, 0); + __lsx_vstelm_d(tmp0, dst + dst_stride, 0, 1); + __lsx_vstelm_d(tmp1, dst + dst_stride_x2, 0, 0); + __lsx_vstelm_d(tmp1, dst + dst_stride_x3, 0, 1); + dst += dst_stride_x4; + } +} + +static void common_hz_6t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + __m128i src0, src1, src2, src3, src4, src5, src6, src7, filt0, filt1, filt2; + __m128i mask0, mask1, mask2, out; + __m128i filt, out0, out1, out2, out3, out4, out5, out6, out7; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0); + src -= 2; + + filt = __lsx_vld(filter, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1); + filt2 = __lsx_vreplvei_h(filt, 2); + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src0, src2, src4, src6); + src += 8; + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src1, src3, src5, src7); + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + DUP4_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src6, 128, src7, 128, src4, + src5, src6, src7); + src += src_stride_x4 - 8; + + HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, + filt0, filt1, filt2, out0, out1, out2, out3); + HORIZ_6TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, mask2, + filt0, filt1, filt2, out4, out5, out6, out7); + DUP4_ARG2(__lsx_vsrari_h, out0, VP8_FILTER_SHIFT, out1, VP8_FILTER_SHIFT, + out2, VP8_FILTER_SHIFT, out3, VP8_FILTER_SHIFT, out0, out1, out2, + out3); + DUP4_ARG2(__lsx_vsrari_h, out4, VP8_FILTER_SHIFT, out5, VP8_FILTER_SHIFT, + out6, VP8_FILTER_SHIFT, out7, VP8_FILTER_SHIFT, out4, out5, out6, + out7); + DUP4_ARG2(__lsx_vsat_h, out0, 7, out1, 7, out2, 7, out3, 7, out0, out1, + out2, out3); + DUP4_ARG2(__lsx_vsat_h, out4, 7, out5, 7, out6, 7, out7, 7, out4, out5, + out6, out7); + out = __lsx_vpickev_b(out1, out0); + out = __lsx_vxori_b(out, 128); + __lsx_vst(out, dst, 0); + out = __lsx_vpickev_b(out3, out2); + out = __lsx_vxori_b(out, 128); + __lsx_vstx(out, dst, dst_stride); + out = __lsx_vpickev_b(out5, out4); + out = __lsx_vxori_b(out, 128); + __lsx_vstx(out, dst, dst_stride_x2); + out = __lsx_vpickev_b(out7, out6); + out = __lsx_vxori_b(out, 128); + __lsx_vstx(out, dst, dst_stride_x3); + dst += dst_stride_x4; + } +} + +static void common_vt_6t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + 
src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + __m128i src0, src1, src2, src3, src4, src7, src8, src9, src10; + __m128i src10_r, src32_r, src76_r, src98_r, src21_r, src43_r, src87_r; + __m128i src109_r, filt0, filt1, filt2; + __m128i tmp0, tmp1; + __m128i filt, out0_r, out1_r, out2_r, out3_r; + + src -= src_stride_x2; + filt = __lsx_vld(filter, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1); + filt2 = __lsx_vreplvei_h(filt, 2); + + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src0, src1, src2, src3); + src += src_stride_x4; + src4 = __lsx_vld(src, 0); + src += src_stride; + + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + src4 = __lsx_vxori_b(src4, 128); + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src2, src1, src4, src3, + src10_r, src32_r, src21_r, src43_r); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src7, src8, src9, src10); + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128, src7, + src8, src9, src10); + src += src_stride_x4; + + DUP4_ARG2(__lsx_vilvl_b, src7, src4, src8, src7, src9, src8, src10, src9, + src76_r, src87_r, src98_r, src109_r); + out0_r = DPADD_H3(src10_r, src32_r, src76_r, filt0, filt1, filt2); + out1_r = DPADD_H3(src21_r, src43_r, src87_r, filt0, filt1, filt2); + out2_r = DPADD_H3(src32_r, src76_r, src98_r, filt0, filt1, filt2); + out3_r = DPADD_H3(src43_r, src87_r, src109_r, filt0, filt1, filt2); + DUP2_ARG3(__lsx_vssrarni_b_h, out1_r, out0_r, VP8_FILTER_SHIFT, out3_r, + out2_r, VP8_FILTER_SHIFT, tmp0, tmp1); + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1); + __lsx_vstelm_d(tmp0, dst, 0, 0); + __lsx_vstelm_d(tmp0, dst + dst_stride, 0, 1); + __lsx_vstelm_d(tmp1, dst + dst_stride_x2, 0, 0); + __lsx_vstelm_d(tmp1, dst + dst_stride_x3, 0, 1); + dst += dst_stride_x4; + + src10_r = src76_r; + src32_r = src98_r; + src21_r = src87_r; + src43_r = src109_r; + src4 = src10; + } +} + +static void common_vt_6t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8; + __m128i src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r; + __m128i src87_r, src10_l, src32_l, src54_l, src76_l, src21_l, src43_l; + __m128i src65_l, src87_l, filt0, filt1, filt2; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l; + + src -= src_stride_x2; + filt = __lsx_vld(filter, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1); + filt2 = __lsx_vreplvei_h(filt, 2); + + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src0, src1, src2, src3); + src += src_stride_x4; + src4 = __lsx_vldx(src, 0); + src += src_stride; + + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + src4 = __lsx_vxori_b(src4, 128); + 
DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src4, src3, src2, src1, + src10_r, src32_r, src43_r, src21_r); + DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src4, src3, src2, src1, + src10_l, src32_l, src43_l, src21_l); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src5, src6, src7, src8); + src += src_stride_x4; + + DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128, src5, + src6, src7, src8); + DUP4_ARG2(__lsx_vilvl_b, src5, src4, src6, src5, src7, src6, src8, src7, + src54_r, src65_r, src76_r, src87_r); + DUP4_ARG2(__lsx_vilvh_b, src5, src4, src6, src5, src7, src6, src8, src7, + src54_l, src65_l, src76_l, src87_l); + out0_r = DPADD_H3(src10_r, src32_r, src54_r, filt0, filt1, filt2); + out1_r = DPADD_H3(src21_r, src43_r, src65_r, filt0, filt1, filt2); + out2_r = DPADD_H3(src32_r, src54_r, src76_r, filt0, filt1, filt2); + out3_r = DPADD_H3(src43_r, src65_r, src87_r, filt0, filt1, filt2); + out0_l = DPADD_H3(src10_l, src32_l, src54_l, filt0, filt1, filt2); + out1_l = DPADD_H3(src21_l, src43_l, src65_l, filt0, filt1, filt2); + out2_l = DPADD_H3(src32_l, src54_l, src76_l, filt0, filt1, filt2); + out3_l = DPADD_H3(src43_l, src65_l, src87_l, filt0, filt1, filt2); + DUP4_ARG3(__lsx_vssrarni_b_h, out0_l, out0_r, VP8_FILTER_SHIFT, out1_l, + out1_r, VP8_FILTER_SHIFT, out2_l, out2_r, VP8_FILTER_SHIFT, + out3_l, out3_r, VP8_FILTER_SHIFT, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp2, 128, tmp3, 128, tmp0, + tmp1, tmp2, tmp3); + __lsx_vstx(tmp0, dst, 0); + __lsx_vstx(tmp1, dst, dst_stride); + __lsx_vstx(tmp2, dst, dst_stride_x2); + __lsx_vstx(tmp3, dst, dst_stride_x3); + dst += dst_stride_x4; + + src10_r = src54_r; + src32_r = src76_r; + src21_r = src65_r; + src43_r = src87_r; + src10_l = src54_l; + src32_l = src76_l; + src21_l = src65_l; + src43_l = src87_l; + src4 = src8; + } +} + +static void common_hv_6ht_6vt_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8; + __m128i filt_hz0, filt_hz1, filt_hz2; + __m128i mask0, mask1, mask2, vec0, vec1; + __m128i filt, filt_vt0, filt_vt1, filt_vt2; + __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + __m128i hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7; + __m128i tmp0, tmp1, tmp2, tmp3; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0); + src -= (2 + src_stride_x2); + + filt = __lsx_vld(filter_horiz, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_hz0, filt_hz1); + filt_hz2 = __lsx_vreplvei_h(filt, 2); + + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2); + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src0, src1, src2, src3); + src += src_stride_x4; + src4 = __lsx_vldx(src, 0); + src += src_stride; + + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + src4 = __lsx_vxori_b(src4, 128); + + hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + 
hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out4 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + filt = __lsx_vld(filter_vert, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_vt0, filt_vt1); + filt_vt2 = __lsx_vreplvei_h(filt, 2); + + DUP4_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out3, hz_out2, hz_out2, + hz_out1, hz_out4, hz_out3, out0, out1, out3, out4); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src5, src6, src7, src8); + src += src_stride_x4; + + DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128, src5, + src6, src7, src8); + hz_out5 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + out2 = __lsx_vpackev_b(hz_out5, hz_out4); + tmp0 = DPADD_H3(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); + + hz_out6 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + out5 = __lsx_vpackev_b(hz_out6, hz_out5); + tmp1 = DPADD_H3(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2); + + hz_out7 = HORIZ_6TAP_FILT(src7, src7, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + out7 = __lsx_vpackev_b(hz_out7, hz_out6); + tmp2 = DPADD_H3(out1, out2, out7, filt_vt0, filt_vt1, filt_vt2); + + hz_out8 = HORIZ_6TAP_FILT(src8, src8, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + out6 = __lsx_vpackev_b(hz_out8, hz_out7); + tmp3 = DPADD_H3(out4, out5, out6, filt_vt0, filt_vt1, filt_vt2); + + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, VP8_FILTER_SHIFT, tmp3, tmp2, + VP8_FILTER_SHIFT, vec0, vec1); + DUP2_ARG2(__lsx_vxori_b, vec0, 128, vec1, 128, vec0, vec1); + + __lsx_vstelm_d(vec0, dst, 0, 0); + __lsx_vstelm_d(vec0, dst + dst_stride, 0, 1); + __lsx_vstelm_d(vec1, dst + dst_stride_x2, 0, 0); + __lsx_vstelm_d(vec1, dst + dst_stride_x3, 0, 1); + dst += dst_stride_x4; + + hz_out4 = hz_out8; + out0 = out2; + out1 = out7; + out3 = out5; + out4 = out6; + } +} + +static void common_hv_6ht_6vt_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) { + common_hv_6ht_6vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + common_hv_6ht_6vt_8w_lsx(src + 8, src_stride, dst + 8, dst_stride, + filter_horiz, filter_vert, height); +} + +static void common_hz_4t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + __m128i src0, src1, src2, src3, filt0, filt1, mask0, mask1; + __m128i tmp0, tmp1; + __m128i filt, out0, out1, out2, out3; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0); + src -= 1; + + filt = __lsx_vld(filter, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1); + mask1 = __lsx_vaddi_bu(mask0, 2); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, 
src, + src_stride_x3, src0, src1, src2, src3); + src += src_stride_x4; + + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, + filt1, out0, out1, out2, out3); + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2, + VP8_FILTER_SHIFT, tmp0, tmp1); + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1); + __lsx_vstelm_d(tmp0, dst, 0, 0); + __lsx_vstelm_d(tmp0, dst + dst_stride, 0, 1); + __lsx_vstelm_d(tmp1, dst + dst_stride_x2, 0, 0); + __lsx_vstelm_d(tmp1, dst + dst_stride_x3, 0, 1); + dst += dst_stride_x4; + } +} + +static void common_hz_4t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + __m128i src0, src1, src2, src3, src4, src5, src6, src7; + __m128i filt0, filt1, mask0, mask1; + __m128i filt, out0, out1, out2, out3, out4, out5, out6, out7; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0); + src -= 1; + + filt = __lsx_vld(filter, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1); + mask1 = __lsx_vaddi_bu(mask0, 2); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src0, src2, src4, src6); + src += 8; + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src1, src3, src5, src7); + src += src_stride_x4 - 8; + + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + DUP4_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src6, 128, src7, 128, src4, + src5, src6, src7); + HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, + filt1, out0, out1, out2, out3); + HORIZ_4TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, filt0, + filt1, out4, out5, out6, out7); + DUP4_ARG3(__lsx_vssrarni_b_h, out1, out0, VP8_FILTER_SHIFT, out3, out2, + VP8_FILTER_SHIFT, out5, out4, VP8_FILTER_SHIFT, out7, out6, + VP8_FILTER_SHIFT, out0, out1, out2, out3); + DUP4_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out2, 128, out3, 128, out0, + out1, out2, out3); + __lsx_vstx(out0, dst, 0); + __lsx_vstx(out1, dst, dst_stride); + __lsx_vstx(out2, dst, dst_stride_x2); + __lsx_vstx(out3, dst, dst_stride_x3); + dst += dst_stride_x4; + } +} + +static void common_vt_4t_8w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + __m128i src0, src1, src2, src7, src8, src9, src10; + __m128i src10_r, src72_r, src98_r, src21_r, src87_r, src109_r, filt0, filt1; + __m128i tmp0, tmp1; + __m128i filt, out0_r, out1_r, out2_r, out3_r; + + src -= src_stride; + filt = __lsx_vld(filter, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1); + + DUP2_ARG2(__lsx_vldx, src, 0, src, src_stride, src0, src1); + src2 = __lsx_vldx(src, 
src_stride_x2); + src += src_stride_x3; + + DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1); + src2 = __lsx_vxori_b(src2, 128); + DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src7, src8, src9, src10); + src += src_stride_x4; + + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128, src7, + src8, src9, src10); + DUP4_ARG2(__lsx_vilvl_b, src7, src2, src8, src7, src9, src8, src10, src9, + src72_r, src87_r, src98_r, src109_r); + out0_r = FILT_4TAP_DPADD_H(src10_r, src72_r, filt0, filt1); + out1_r = FILT_4TAP_DPADD_H(src21_r, src87_r, filt0, filt1); + out2_r = FILT_4TAP_DPADD_H(src72_r, src98_r, filt0, filt1); + out3_r = FILT_4TAP_DPADD_H(src87_r, src109_r, filt0, filt1); + DUP2_ARG3(__lsx_vssrarni_b_h, out1_r, out0_r, VP8_FILTER_SHIFT, out3_r, + out2_r, VP8_FILTER_SHIFT, tmp0, tmp1); + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1); + __lsx_vstelm_d(tmp0, dst, 0, 0); + __lsx_vstelm_d(tmp0, dst + dst_stride, 0, 1); + __lsx_vstelm_d(tmp1, dst + dst_stride_x2, 0, 0); + __lsx_vstelm_d(tmp1, dst + dst_stride_x3, 0, 1); + dst += dst_stride_x4; + + src10_r = src98_r; + src21_r = src109_r; + src2 = src10; + } +} + +static void common_vt_4t_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + __m128i src0, src1, src2, src3, src4, src5, src6; + __m128i src10_r, src32_r, src54_r, src21_r, src43_r, src65_r, src10_l; + __m128i src32_l, src54_l, src21_l, src43_l, src65_l, filt0, filt1; + __m128i tmp0, tmp1, tmp2, tmp3; + __m128i filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l; + + src -= src_stride; + filt = __lsx_vld(filter, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt0, filt1); + + DUP2_ARG2(__lsx_vldx, src, 0, src, src_stride, src0, src1); + src2 = __lsx_vldx(src, src_stride_x2); + src += src_stride_x3; + + DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1); + src2 = __lsx_vxori_b(src2, 128); + DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r); + DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, src10_l, src21_l); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src3, src4, src5, src6); + src += src_stride_x4; + + DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128, src3, + src4, src5, src6); + DUP4_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, src5, src4, src6, src5, + src32_r, src43_r, src54_r, src65_r); + DUP4_ARG2(__lsx_vilvh_b, src3, src2, src4, src3, src5, src4, src6, src5, + src32_l, src43_l, src54_l, src65_l); + out0_r = FILT_4TAP_DPADD_H(src10_r, src32_r, filt0, filt1); + out1_r = FILT_4TAP_DPADD_H(src21_r, src43_r, filt0, filt1); + out2_r = FILT_4TAP_DPADD_H(src32_r, src54_r, filt0, filt1); + out3_r = FILT_4TAP_DPADD_H(src43_r, src65_r, filt0, filt1); + out0_l = FILT_4TAP_DPADD_H(src10_l, src32_l, filt0, filt1); + out1_l = FILT_4TAP_DPADD_H(src21_l, src43_l, filt0, filt1); + out2_l = FILT_4TAP_DPADD_H(src32_l, src54_l, filt0, filt1); + out3_l = 
FILT_4TAP_DPADD_H(src43_l, src65_l, filt0, filt1); + DUP4_ARG3(__lsx_vssrarni_b_h, out0_l, out0_r, VP8_FILTER_SHIFT, out1_l, + out1_r, VP8_FILTER_SHIFT, out2_l, out2_r, VP8_FILTER_SHIFT, + out3_l, out3_r, VP8_FILTER_SHIFT, tmp0, tmp1, tmp2, tmp3); + DUP4_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp2, 128, tmp3, 128, tmp0, + tmp1, tmp2, tmp3); + __lsx_vstx(tmp0, dst, 0); + __lsx_vstx(tmp1, dst, dst_stride); + __lsx_vstx(tmp2, dst, dst_stride_x2); + __lsx_vstx(tmp3, dst, dst_stride_x3); + dst += dst_stride_x4; + + src10_r = src54_r; + src21_r = src65_r; + src10_l = src54_l; + src21_l = src65_l; + src2 = src6; + } +} + +static inline void common_hv_4ht_4vt_8w_lsx( + uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, + int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, + int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + __m128i src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1; + __m128i mask0, mask1, out0, out1; + __m128i filt, filt_vt0, filt_vt1, tmp0, tmp1, tmp2, tmp3; + __m128i hz_out0, hz_out1, hz_out2, hz_out3; + __m128i vec0, vec1, vec2, vec3, vec4; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0); + src -= 1 + src_stride; + + filt = __lsx_vld(filter_horiz, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_hz0, filt_hz1); + mask1 = __lsx_vaddi_bu(mask0, 2); + + DUP2_ARG2(__lsx_vldx, src, 0, src, src_stride, src0, src1); + src2 = __lsx_vldx(src, src_stride_x2); + src += src_stride_x3; + + DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1); + src2 = __lsx_vxori_b(src2, 128); + hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1); + DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out2, hz_out1, vec0, vec2); + + filt = __lsx_vld(filter_vert, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_vt0, filt_vt1); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src3, src4, src5, src6); + src += src_stride_x4; + + DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128, src3, + src4, src5, src6); + hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1); + vec1 = __lsx_vpackev_b(hz_out3, hz_out2); + tmp0 = FILT_4TAP_DPADD_H(vec0, vec1, filt_vt0, filt_vt1); + + hz_out0 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1); + vec3 = __lsx_vpackev_b(hz_out0, hz_out3); + tmp1 = FILT_4TAP_DPADD_H(vec2, vec3, filt_vt0, filt_vt1); + + hz_out1 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1); + vec4 = __lsx_vpackev_b(hz_out1, hz_out0); + tmp2 = FILT_4TAP_DPADD_H(vec1, vec4, filt_vt0, filt_vt1); + + hz_out2 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1); + DUP2_ARG2(__lsx_vpackev_b, hz_out0, hz_out3, hz_out2, hz_out1, vec0, vec1); + tmp3 = FILT_4TAP_DPADD_H(vec0, vec1, filt_vt0, filt_vt1); + + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, out0, out1); + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1); + __lsx_vstelm_d(out0, dst, 0, 0); + __lsx_vstelm_d(out0, dst + dst_stride, 0, 1); + __lsx_vstelm_d(out1, dst + 
dst_stride_x2, 0, 0); + __lsx_vstelm_d(out1, dst + dst_stride_x3, 0, 1); + dst += dst_stride_x4; + + vec0 = vec4; + vec2 = vec1; + } +} + +static void common_hv_4ht_4vt_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) { + common_hv_4ht_4vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + common_hv_4ht_4vt_8w_lsx(src + 8, src_stride, dst + 8, dst_stride, + filter_horiz, filter_vert, height); +} + +static inline void common_hv_6ht_4vt_8w_lsx( + uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, + int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, + int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + + __m128i src0, src1, src2, src3, src4, src5, src6; + __m128i filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2; + __m128i filt, filt_vt0, filt_vt1, hz_out0, hz_out1, hz_out2, hz_out3; + __m128i tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3; + __m128i out0, out1; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0); + src -= (2 + src_stride); + + filt = __lsx_vld(filter_horiz, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_hz0, filt_hz1); + filt_hz2 = __lsx_vreplvei_h(filt, 2); + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2); + + DUP2_ARG2(__lsx_vldx, src, 0, src, src_stride, src0, src1); + src2 = __lsx_vldx(src, src_stride_x2); + src += src_stride_x3; + + DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1); + src2 = __lsx_vxori_b(src2, 128); + hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out2, hz_out1, vec0, vec2); + + filt = __lsx_vld(filter_vert, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_vt0, filt_vt1); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src3, src4, src5, src6); + src += src_stride_x4; + DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128, src3, + src4, src5, src6); + + hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + vec1 = __lsx_vpackev_b(hz_out3, hz_out2); + tmp0 = FILT_4TAP_DPADD_H(vec0, vec1, filt_vt0, filt_vt1); + + hz_out0 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + vec3 = __lsx_vpackev_b(hz_out0, hz_out3); + tmp1 = FILT_4TAP_DPADD_H(vec2, vec3, filt_vt0, filt_vt1); + + hz_out1 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + vec0 = __lsx_vpackev_b(hz_out1, hz_out0); + tmp2 = FILT_4TAP_DPADD_H(vec1, vec0, filt_vt0, filt_vt1); + + hz_out2 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + DUP2_ARG2(__lsx_vpackev_b, hz_out0, hz_out3, hz_out2, hz_out1, vec1, vec2); + tmp3 = FILT_4TAP_DPADD_H(vec1, vec2, filt_vt0, filt_vt1); + + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, out0, out1); + DUP2_ARG2(__lsx_vxori_b, out0, 128, 
out1, 128, out0, out1); + __lsx_vstelm_d(out0, dst, 0, 0); + __lsx_vstelm_d(out0, dst + dst_stride, 0, 1); + __lsx_vstelm_d(out1, dst + dst_stride_x2, 0, 0); + __lsx_vstelm_d(out1, dst + dst_stride_x3, 0, 1); + dst += dst_stride_x4; + } +} + +static void common_hv_6ht_4vt_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) { + common_hv_6ht_4vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + common_hv_6ht_4vt_8w_lsx(src + 8, src_stride, dst + 8, dst_stride, + filter_horiz, filter_vert, height); +} + +static inline void common_hv_4ht_6vt_8w_lsx( + uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, + int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, + int32_t height) { + uint32_t loop_cnt; + int32_t src_stride_x2 = src_stride << 1; + int32_t src_stride_x3 = src_stride_x2 + src_stride; + int32_t src_stride_x4 = src_stride << 2; + int32_t dst_stride_x2 = dst_stride << 1; + int32_t dst_stride_x3 = dst_stride_x2 + dst_stride; + int32_t dst_stride_x4 = dst_stride << 2; + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8; + __m128i filt_hz0, filt_hz1, mask0, mask1; + __m128i filt, filt_vt0, filt_vt1, filt_vt2, tmp0, tmp1, tmp2, tmp3; + __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + __m128i hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7; + __m128i vec0, vec1; + + mask0 = __lsx_vld(vp8_mc_filt_mask_arr, 0); + src -= 1 + src_stride_x2; + + filt = __lsx_vld(filter_horiz, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_hz0, filt_hz1); + mask1 = __lsx_vaddi_bu(mask0, 2); + + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src0, src1, src2, src3); + src += src_stride_x4; + src4 = __lsx_vld(src, 0); + src += src_stride; + + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0, + src1, src2, src3); + src4 = __lsx_vxori_b(src4, 128); + hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1); + hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1); + hz_out4 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1); + DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out3, hz_out2, out0, out1); + DUP2_ARG2(__lsx_vpackev_b, hz_out2, hz_out1, hz_out4, hz_out3, out3, out4); + + filt = __lsx_vld(filter_vert, 0); + DUP2_ARG2(__lsx_vreplvei_h, filt, 0, filt, 1, filt_vt0, filt_vt1); + filt_vt2 = __lsx_vreplvei_h(filt, 2); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + DUP4_ARG2(__lsx_vldx, src, 0, src, src_stride, src, src_stride_x2, src, + src_stride_x3, src5, src6, src7, src8); + src += src_stride_x4; + + DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128, src5, + src6, src7, src8); + hz_out5 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1); + out2 = __lsx_vpackev_b(hz_out5, hz_out4); + tmp0 = DPADD_H3(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); + + hz_out6 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1); + out5 = __lsx_vpackev_b(hz_out6, hz_out5); + tmp1 = DPADD_H3(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2); + + hz_out7 = HORIZ_4TAP_FILT(src7, src7, mask0, mask1, filt_hz0, filt_hz1); + out6 = __lsx_vpackev_b(hz_out7, hz_out6); + tmp2 = DPADD_H3(out1, 
out2, out6, filt_vt0, filt_vt1, filt_vt2); + + hz_out8 = HORIZ_4TAP_FILT(src8, src8, mask0, mask1, filt_hz0, filt_hz1); + out7 = __lsx_vpackev_b(hz_out8, hz_out7); + tmp3 = DPADD_H3(out4, out5, out7, filt_vt0, filt_vt1, filt_vt2); + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, vec0, vec1); + DUP2_ARG2(__lsx_vxori_b, vec0, 128, vec1, 128, vec0, vec1); + __lsx_vstelm_d(vec0, dst, 0, 0); + __lsx_vstelm_d(vec0, dst + dst_stride, 0, 1); + __lsx_vstelm_d(vec1, dst + dst_stride_x2, 0, 0); + __lsx_vstelm_d(vec1, dst + dst_stride_x3, 0, 1); + dst += dst_stride_x4; + hz_out4 = hz_out8; + out0 = out2; + out1 = out6; + out3 = out5; + out4 = out7; + } +} + +static void common_hv_4ht_6vt_16w_lsx(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) { + common_hv_4ht_6vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + common_hv_4ht_6vt_8w_lsx(src + 8, src_stride, dst + 8, dst_stride, + filter_horiz, filter_vert, height); +} + +typedef void (*PVp8SixtapPredictFunc1)( + uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, + int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, + int32_t height); + +typedef void (*PVp8SixtapPredictFunc2)(uint8_t *RESTRICT src, + int32_t src_stride, + uint8_t *RESTRICT dst, + int32_t dst_stride, const int8_t *filter, + int32_t height); + +void vp8_sixtap_predict8x8_lsx(uint8_t *RESTRICT src, int32_t src_stride, + int32_t xoffset, int32_t yoffset, + uint8_t *RESTRICT dst, int32_t dst_stride) { + const int8_t *h_filter = vp8_subpel_filters_lsx[xoffset - 1]; + const int8_t *v_filter = vp8_subpel_filters_lsx[yoffset - 1]; + + static PVp8SixtapPredictFunc1 Predict8x8Funcs1[4] = { + common_hv_6ht_6vt_8w_lsx, + common_hv_6ht_4vt_8w_lsx, + common_hv_4ht_6vt_8w_lsx, + common_hv_4ht_4vt_8w_lsx, + }; + + static PVp8SixtapPredictFunc2 Predict8x8Funcs2[4] = { common_vt_6t_8w_lsx, + common_vt_4t_8w_lsx, + common_hz_6t_8w_lsx, + common_hz_4t_8w_lsx }; + + if (yoffset < 8 && xoffset < 8) { + if (yoffset) { + if (xoffset) { + switch (xoffset & 1) { + case 0: + switch (yoffset & 1) { + case 0: + Predict8x8Funcs1[0](src, src_stride, dst, dst_stride, h_filter, + v_filter, 8); + break; + + case 1: + Predict8x8Funcs1[1](src, src_stride, dst, dst_stride, h_filter, + v_filter + 1, 8); + break; + } + break; + + case 1: + switch (yoffset & 1) { + case 0: + Predict8x8Funcs1[2](src, src_stride, dst, dst_stride, + h_filter + 1, v_filter, 8); + break; + + case 1: + Predict8x8Funcs1[3](src, src_stride, dst, dst_stride, + h_filter + 1, v_filter + 1, 8); + break; + } + break; + } + } else { + switch (yoffset & 1) { + case 0: + Predict8x8Funcs2[0](src, src_stride, dst, dst_stride, v_filter, 8); + break; + + case 1: + Predict8x8Funcs2[1](src, src_stride, dst, dst_stride, v_filter + 1, + 8); + break; + } + } + } else { + switch (xoffset & 1) { + case 1: + Predict8x8Funcs2[3](src, src_stride, dst, dst_stride, h_filter + 1, + 8); + break; + } + switch (xoffset) { + case 0: vp8_copy_mem8x8(src, src_stride, dst, dst_stride); break; + case 2: + case 4: + case 6: + Predict8x8Funcs2[2](src, src_stride, dst, dst_stride, h_filter, 8); + break; + } + } + } +} + +void vp8_sixtap_predict16x16_lsx(uint8_t *RESTRICT src, int32_t src_stride, + int32_t xoffset, int32_t yoffset, + uint8_t *RESTRICT dst, int32_t dst_stride) { + const int8_t *h_filter = vp8_subpel_filters_lsx[xoffset - 1]; + const int8_t *v_filter = 
vp8_subpel_filters_lsx[yoffset - 1]; + + static PVp8SixtapPredictFunc1 Predict16x16Funcs1[4] = { + common_hv_6ht_6vt_16w_lsx, + common_hv_6ht_4vt_16w_lsx, + common_hv_4ht_6vt_16w_lsx, + common_hv_4ht_4vt_16w_lsx, + }; + + static PVp8SixtapPredictFunc2 Predict16x16Funcs2[4] = { + common_vt_6t_16w_lsx, common_vt_4t_16w_lsx, common_hz_6t_16w_lsx, + common_hz_4t_16w_lsx + }; + + if (yoffset < 8 && xoffset < 8) { + if (yoffset) { + if (xoffset) { + switch (xoffset & 1) { + case 0: + switch (yoffset & 1) { + case 0: + Predict16x16Funcs1[0](src, src_stride, dst, dst_stride, + h_filter, v_filter, 16); + break; + + case 1: + Predict16x16Funcs1[1](src, src_stride, dst, dst_stride, + h_filter, v_filter + 1, 16); + break; + } + break; + + case 1: + switch (yoffset & 1) { + case 0: + Predict16x16Funcs1[2](src, src_stride, dst, dst_stride, + h_filter + 1, v_filter, 16); + break; + + case 1: + Predict16x16Funcs1[3](src, src_stride, dst, dst_stride, + h_filter, v_filter + 1, 16); + break; + } + break; + } + } else { + switch (yoffset & 1) { + case 0: + Predict16x16Funcs2[0](src, src_stride, dst, dst_stride, v_filter, + 16); + break; + + case 1: + Predict16x16Funcs2[1](src, src_stride, dst, dst_stride, + v_filter + 1, 16); + break; + } + } + } else { + switch (xoffset & 1) { + case 1: + Predict16x16Funcs2[3](src, src_stride, dst, dst_stride, h_filter + 1, + 16); + break; + } + switch (xoffset) { + case 0: vp8_copy_mem16x16(src, src_stride, dst, dst_stride); break; + case 2: + case 4: + case 6: + Predict16x16Funcs2[2](src, src_stride, dst, dst_stride, h_filter, 16); + break; + } + } + } +} diff --git a/vp8/common/rtcd_defs.pl b/vp8/common/rtcd_defs.pl index 8452b5e85..40117e367 100644 --- a/vp8/common/rtcd_defs.pl +++ b/vp8/common/rtcd_defs.pl @@ -47,13 +47,13 @@ specialize qw/vp8_dequant_idct_add_uv_block sse2 neon dspr2 msa mmi/; # Loopfilter # add_proto qw/void vp8_loop_filter_mbv/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi"; -specialize qw/vp8_loop_filter_mbv sse2 neon dspr2 msa mmi/; +specialize qw/vp8_loop_filter_mbv sse2 neon dspr2 msa mmi lsx/; add_proto qw/void vp8_loop_filter_bv/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi"; specialize qw/vp8_loop_filter_bv sse2 neon dspr2 msa mmi/; add_proto qw/void vp8_loop_filter_mbh/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi"; -specialize qw/vp8_loop_filter_mbh sse2 neon dspr2 msa mmi/; +specialize qw/vp8_loop_filter_mbh sse2 neon dspr2 msa mmi lsx/; add_proto qw/void vp8_loop_filter_bh/, "unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, struct loop_filter_info *lfi"; specialize qw/vp8_loop_filter_bh sse2 neon dspr2 msa mmi/; @@ -146,10 +146,10 @@ if (vpx_config("CONFIG_POSTPROC") eq "yes") { # Subpixel # add_proto qw/void vp8_sixtap_predict16x16/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch"; -specialize qw/vp8_sixtap_predict16x16 sse2 ssse3 neon dspr2 msa mmi/; +specialize qw/vp8_sixtap_predict16x16 sse2 ssse3 neon dspr2 msa mmi lsx/; add_proto qw/void vp8_sixtap_predict8x8/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch"; -specialize qw/vp8_sixtap_predict8x8 sse2 ssse3 neon dspr2 msa mmi/; +specialize qw/vp8_sixtap_predict8x8 sse2 
ssse3 neon dspr2 msa mmi lsx/; add_proto qw/void vp8_sixtap_predict8x4/, "unsigned char *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, unsigned char *dst_ptr, int dst_pitch"; specialize qw/vp8_sixtap_predict8x4 sse2 ssse3 neon dspr2 msa mmi/; diff --git a/vp8/vp8_common.mk b/vp8/vp8_common.mk index 286a93a05..909924ce8 100644 --- a/vp8/vp8_common.mk +++ b/vp8/vp8_common.mk @@ -124,6 +124,10 @@ ifeq ($(CONFIG_POSTPROC),yes) VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/mfqe_msa.c endif +# common (loongarch LSX intrinsics) +VP8_COMMON_SRCS-$(HAVE_LSX) += common/loongarch/loopfilter_filters_lsx.c +VP8_COMMON_SRCS-$(HAVE_LSX) += common/loongarch/sixtap_filter_lsx.c + # common (neon intrinsics) VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/loopfilter_arm.c VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/loopfilter_arm.h |
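
Finally, a hypothetical call site for one of the new entry points, to show how the arguments are used. This is a sketch under the usual VP8 conventions (xoffset/yoffset are 1/8-pel motion-vector fractions in 0..7, and the source block needs 2 pixels of valid border before and 3 after it in each filtered dimension); the buffer sizes and the example function are made up for illustration:

```c
#include <stdint.h>
#include <string.h>

/* Normally declared in ./vp8_rtcd.h; repeated here (without the RESTRICT
 * qualifiers) so the sketch is self-contained. */
void vp8_sixtap_predict16x16_lsx(uint8_t *src, int32_t src_stride,
                                 int32_t xoffset, int32_t yoffset,
                                 uint8_t *dst, int32_t dst_stride);

#define STRIDE 32

void predict_block_example(void) {
  uint8_t ref[STRIDE * 32];  /* reference pixels, including border */
  uint8_t pred[STRIDE * 16]; /* 16x16 prediction output */
  memset(ref, 128, sizeof(ref));

  /* Point at a 16x16 block with at least 2 rows/columns of valid pixels
   * above and to the left and 3 below and to the right (6-tap reach). */
  uint8_t *block = ref + 8 * STRIDE + 8;

  /* xoffset = 3/8 pel horizontally, yoffset = 5/8 pel vertically. */
  vp8_sixtap_predict16x16_lsx(block, STRIDE, 3, 5, pred, STRIDE);
}
```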