/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vp8/common/mips/msa/vp8_macros_msa.h"

static const int32_t cospi8sqrt2minus1 = 20091;
static const int32_t sinpi8sqrt2 = 35468;

#define TRANSPOSE_TWO_4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \
  {                                                                     \
    v8i16 s4_m, s5_m, s6_m, s7_m;                                       \
                                                                        \
    TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, s4_m, s5_m, s6_m, s7_m);     \
    ILVR_D2_SH(s6_m, s4_m, s7_m, s5_m, out0, out2);                     \
    out1 = (v8i16)__msa_ilvl_d((v2i64)s6_m, (v2i64)s4_m);               \
    out3 = (v8i16)__msa_ilvl_d((v2i64)s7_m, (v2i64)s5_m);               \
  }

#define EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in)    \
  ({                                                     \
    v8i16 out_m;                                         \
    v8i16 zero_m = { 0 };                                \
    v4i32 tmp1_m, tmp2_m;                                \
    v4i32 sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2);    \
                                                         \
    ILVRL_H2_SW(in, zero_m, tmp1_m, tmp2_m);             \
    tmp1_m >>= 16;                                       \
    tmp2_m >>= 16;                                       \
    tmp1_m = (tmp1_m * sinpi8_sqrt2_m) >> 16;            \
    tmp2_m = (tmp2_m * sinpi8_sqrt2_m) >> 16;            \
    out_m = __msa_pckev_h((v8i16)tmp2_m, (v8i16)tmp1_m); \
                                                         \
    out_m;                                               \
  })

#define VP8_IDCT_1D_H(in0, in1, in2, in3, out0, out1, out2, out3) \
  {                                                               \
    v8i16 a1_m, b1_m, c1_m, d1_m;                                 \
    v8i16 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m;                 \
    v8i16 const_cospi8sqrt2minus1_m;                              \
                                                                  \
    const_cospi8sqrt2minus1_m = __msa_fill_h(cospi8sqrt2minus1);  \
    a1_m = in0 + in2;                                             \
    b1_m = in0 - in2;                                             \
    c_tmp1_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in1);    \
    c_tmp2_m = __msa_mul_q_h(in3, const_cospi8sqrt2minus1_m);     \
    c_tmp2_m = c_tmp2_m >> 1;                                     \
    c_tmp2_m = in3 + c_tmp2_m;                                    \
    c1_m = c_tmp1_m - c_tmp2_m;                                   \
    d_tmp1_m = __msa_mul_q_h(in1, const_cospi8sqrt2minus1_m);     \
    d_tmp1_m = d_tmp1_m >> 1;                                     \
    d_tmp1_m = in1 + d_tmp1_m;                                    \
    d_tmp2_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in3);    \
    d1_m = d_tmp1_m + d_tmp2_m;                                   \
    BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3);  \
  }

#define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) \
  {                                                               \
    v4i32 a1_m, b1_m, c1_m, d1_m;                                 \
    v4i32 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m;                 \
    v4i32 const_cospi8sqrt2minus1_m, sinpi8_sqrt2_m;              \
                                                                  \
    const_cospi8sqrt2minus1_m = __msa_fill_w(cospi8sqrt2minus1);  \
    sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2);                   \
    a1_m = in0 + in2;                                             \
    b1_m = in0 - in2;                                             \
    c_tmp1_m = (in1 * sinpi8_sqrt2_m) >> 16;                      \
    c_tmp2_m = in3 + ((in3 * const_cospi8sqrt2minus1_m) >> 16);   \
    c1_m = c_tmp1_m - c_tmp2_m;                                   \
    d_tmp1_m = in1 + ((in1 * const_cospi8sqrt2minus1_m) >> 16);   \
    d_tmp2_m = (in3 * sinpi8_sqrt2_m) >> 16;                      \
    d1_m = d_tmp1_m + d_tmp2_m;                                   \
    BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3);  \
  }
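
/*
 * Editor's note on the 1-D transform (a scalar sketch for reference only,
 * not part of the build): cospi8sqrt2minus1 and sinpi8sqrt2 are Q16
 * fixed-point constants, 20091 ~= (sqrt(2) * cos(pi / 8) - 1) * 65536 and
 * 35468 ~= sqrt(2) * sin(pi / 8) * 65536.  For one row or column with
 * inputs x0..x3, the VP8_IDCT_1D_H / VP8_IDCT_1D_W macros above compute
 * roughly:
 *
 *   a1 = x0 + x2;
 *   b1 = x0 - x2;
 *   c1 = ((x1 * sinpi8sqrt2) >> 16) -
 *        (x3 + ((x3 * cospi8sqrt2minus1) >> 16));
 *   d1 = (x1 + ((x1 * cospi8sqrt2minus1) >> 16)) +
 *        ((x3 * sinpi8sqrt2) >> 16);
 *   out0 = a1 + d1;  out1 = b1 + c1;
 *   out2 = b1 - c1;  out3 = a1 - d1;
 *
 * applied to whole vectors of rows/columns at once.
 */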

static void idct4x4_addblk_msa(int16_t *input, uint8_t *pred,
                               int32_t pred_stride, uint8_t *dest,
                               int32_t dest_stride) {
  v8i16 input0, input1;
  v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;
  v4i32 res0, res1, res2, res3;
  v16i8 zero = { 0 };
  v16i8 pred0, pred1, pred2, pred3;

  LD_SH2(input, 8, input0, input1);
  UNPCK_SH_SW(input0, in0, in1);
  UNPCK_SH_SW(input1, in2, in3);
  VP8_IDCT_1D_W(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
  TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
  VP8_IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3);
  SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);
  TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
  LD_SB4(pred, pred_stride, pred0, pred1, pred2, pred3);
  ILVR_B4_SW(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1,
             res2, res3);
  ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, res0, res1, res2,
             res3);
  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
  res0 = CLIP_SW_0_255(res0);
  res1 = CLIP_SW_0_255(res1);
  res2 = CLIP_SW_0_255(res2);
  res3 = CLIP_SW_0_255(res3);
  PCKEV_B2_SW(res0, res1, res2, res3, vt0, vt1);
  res0 = (v4i32)__msa_pckev_b((v16i8)vt0, (v16i8)vt1);
  ST4x4_UB(res0, res0, 3, 2, 1, 0, dest, dest_stride);
}

static void idct4x4_addconst_msa(int16_t in_dc, uint8_t *pred,
                                 int32_t pred_stride, uint8_t *dest,
                                 int32_t dest_stride) {
  v8i16 vec, res0, res1, res2, res3, dst0, dst1;
  v16i8 zero = { 0 };
  v16i8 pred0, pred1, pred2, pred3;

  vec = __msa_fill_h(in_dc);
  vec = __msa_srari_h(vec, 3);
  LD_SB4(pred, pred_stride, pred0, pred1, pred2, pred3);
  ILVR_B4_SH(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1,
             res2, res3);
  ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
  CLIP_SH4_0_255(res0, res1, res2, res3);
  PCKEV_B2_SH(res1, res0, res3, res2, dst0, dst1);
  dst0 = (v8i16)__msa_pckev_w((v4i32)dst1, (v4i32)dst0);
  ST4x4_UB(dst0, dst0, 0, 1, 2, 3, dest, dest_stride);
}

void vp8_short_inv_walsh4x4_msa(int16_t *input, int16_t *mb_dq_coeff) {
  v8i16 input0, input1, tmp0, tmp1, tmp2, tmp3, out0, out1;
  const v8i16 mask0 = { 0, 1, 2, 3, 8, 9, 10, 11 };
  const v8i16 mask1 = { 4, 5, 6, 7, 12, 13, 14, 15 };
  const v8i16 mask2 = { 0, 4, 8, 12, 1, 5, 9, 13 };
  const v8i16 mask3 = { 3, 7, 11, 15, 2, 6, 10, 14 };

  LD_SH2(input, 8, input0, input1);
  input1 = (v8i16)__msa_sldi_b((v16i8)input1, (v16i8)input1, 8);
  tmp0 = input0 + input1;
  tmp1 = input0 - input1;
  VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3);
  out0 = tmp2 + tmp3;
  out1 = tmp2 - tmp3;
  VSHF_H2_SH(out0, out1, out0, out1, mask2, mask3, input0, input1);
  tmp0 = input0 + input1;
  tmp1 = input0 - input1;
  VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3);
  tmp0 = tmp2 + tmp3;
  tmp1 = tmp2 - tmp3;
  ADD2(tmp0, 3, tmp1, 3, out0, out1);
  out0 >>= 3;
  out1 >>= 3;
  mb_dq_coeff[0] = __msa_copy_s_h(out0, 0);
  mb_dq_coeff[16] = __msa_copy_s_h(out0, 4);
  mb_dq_coeff[32] = __msa_copy_s_h(out1, 0);
  mb_dq_coeff[48] = __msa_copy_s_h(out1, 4);
  mb_dq_coeff[64] = __msa_copy_s_h(out0, 1);
  mb_dq_coeff[80] = __msa_copy_s_h(out0, 5);
  mb_dq_coeff[96] = __msa_copy_s_h(out1, 1);
  mb_dq_coeff[112] = __msa_copy_s_h(out1, 5);
  mb_dq_coeff[128] = __msa_copy_s_h(out0, 2);
  mb_dq_coeff[144] = __msa_copy_s_h(out0, 6);
  mb_dq_coeff[160] = __msa_copy_s_h(out1, 2);
  mb_dq_coeff[176] = __msa_copy_s_h(out1, 6);
  mb_dq_coeff[192] = __msa_copy_s_h(out0, 3);
  mb_dq_coeff[208] = __msa_copy_s_h(out0, 7);
  mb_dq_coeff[224] = __msa_copy_s_h(out1, 3);
  mb_dq_coeff[240] = __msa_copy_s_h(out1, 7);
}
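
/*
 * The dequant_* helpers below multiply the quantized coefficients by the
 * per-position dequantization factors, run the row/column IDCT passes, and
 * add the reconstructed residual to the destination block(s).
 */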

static void dequant_idct4x4_addblk_msa(int16_t *input, int16_t *dequant_input,
                                       uint8_t *dest, int32_t dest_stride) {
  v8i16 input0, input1, dequant_in0, dequant_in1, mul0, mul1;
  v8i16 in0, in1, in2, in3, hz0_h, hz1_h, hz2_h, hz3_h;
  v16u8 dest0, dest1, dest2, dest3;
  v4i32 hz0_w, hz1_w, hz2_w, hz3_w, vt0, vt1, vt2, vt3, res0, res1, res2, res3;
  v2i64 zero = { 0 };

  LD_SH2(input, 8, input0, input1);
  LD_SH2(dequant_input, 8, dequant_in0, dequant_in1);
  MUL2(input0, dequant_in0, input1, dequant_in1, mul0, mul1);
  PCKEV_D2_SH(zero, mul0, zero, mul1, in0, in2);
  PCKOD_D2_SH(zero, mul0, zero, mul1, in1, in3);
  VP8_IDCT_1D_H(in0, in1, in2, in3, hz0_h, hz1_h, hz2_h, hz3_h);
  PCKEV_D2_SH(hz1_h, hz0_h, hz3_h, hz2_h, mul0, mul1);
  UNPCK_SH_SW(mul0, hz0_w, hz1_w);
  UNPCK_SH_SW(mul1, hz2_w, hz3_w);
  TRANSPOSE4x4_SW_SW(hz0_w, hz1_w, hz2_w, hz3_w, hz0_w, hz1_w, hz2_w, hz3_w);
  VP8_IDCT_1D_W(hz0_w, hz1_w, hz2_w, hz3_w, vt0, vt1, vt2, vt3);
  SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);
  TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
  LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3);
  ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1,
             res2, res3);
  ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, res0, res1, res2,
             res3);
  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
  res0 = CLIP_SW_0_255(res0);
  res1 = CLIP_SW_0_255(res1);
  res2 = CLIP_SW_0_255(res2);
  res3 = CLIP_SW_0_255(res3);
  PCKEV_B2_SW(res0, res1, res2, res3, vt0, vt1);
  res0 = (v4i32)__msa_pckev_b((v16i8)vt0, (v16i8)vt1);
  ST4x4_UB(res0, res0, 3, 2, 1, 0, dest, dest_stride);
}

static void dequant_idct4x4_addblk_2x_msa(int16_t *input,
                                          int16_t *dequant_input, uint8_t *dest,
                                          int32_t dest_stride) {
  v16u8 dest0, dest1, dest2, dest3;
  v8i16 in0, in1, in2, in3, mul0, mul1, mul2, mul3, dequant_in0, dequant_in1;
  v8i16 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3, res0, res1, res2, res3;
  v4i32 hz0l, hz1l, hz2l, hz3l, hz0r, hz1r, hz2r, hz3r;
  v4i32 vt0l, vt1l, vt2l, vt3l, vt0r, vt1r, vt2r, vt3r;
  v16i8 zero = { 0 };

  LD_SH4(input, 8, in0, in1, in2, in3);
  LD_SH2(dequant_input, 8, dequant_in0, dequant_in1);
  MUL4(in0, dequant_in0, in1, dequant_in1, in2, dequant_in0, in3, dequant_in1,
       mul0, mul1, mul2, mul3);
  PCKEV_D2_SH(mul2, mul0, mul3, mul1, in0, in2);
  PCKOD_D2_SH(mul2, mul0, mul3, mul1, in1, in3);
  VP8_IDCT_1D_H(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
  TRANSPOSE_TWO_4x4_H(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
  UNPCK_SH_SW(hz0, hz0r, hz0l);
  UNPCK_SH_SW(hz1, hz1r, hz1l);
  UNPCK_SH_SW(hz2, hz2r, hz2l);
  UNPCK_SH_SW(hz3, hz3r, hz3l);
  VP8_IDCT_1D_W(hz0l, hz1l, hz2l, hz3l, vt0l, vt1l, vt2l, vt3l);
  SRARI_W4_SW(vt0l, vt1l, vt2l, vt3l, 3);
  VP8_IDCT_1D_W(hz0r, hz1r, hz2r, hz3r, vt0r, vt1r, vt2r, vt3r);
  SRARI_W4_SW(vt0r, vt1r, vt2r, vt3r, 3);
  PCKEV_H4_SH(vt0l, vt0r, vt1l, vt1r, vt2l, vt2r, vt3l, vt3r, vt0, vt1, vt2,
              vt3);
  TRANSPOSE_TWO_4x4_H(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
  LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3);
  ILVR_B4_SH(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1,
             res2, res3);
  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
  CLIP_SH4_0_255(res0, res1, res2, res3);
  PCKEV_B2_SW(res1, res0, res3, res2, vt0l, vt1l);
  ST8x4_UB(vt0l, vt1l, dest, dest_stride);
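
  /* The stores below clear the two 4x4 blocks just consumed
   * (32 int16_t coefficients = 64 bytes, 4 bytes per store). */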

  __asm__ __volatile__(
      "sw   $zero,  0(%[input])  \n\t"
      "sw   $zero,  4(%[input])  \n\t"
      "sw   $zero,  8(%[input])  \n\t"
      "sw   $zero, 12(%[input])  \n\t"
      "sw   $zero, 16(%[input])  \n\t"
      "sw   $zero, 20(%[input])  \n\t"
      "sw   $zero, 24(%[input])  \n\t"
      "sw   $zero, 28(%[input])  \n\t"
      "sw   $zero, 32(%[input])  \n\t"
      "sw   $zero, 36(%[input])  \n\t"
      "sw   $zero, 40(%[input])  \n\t"
      "sw   $zero, 44(%[input])  \n\t"
      "sw   $zero, 48(%[input])  \n\t"
      "sw   $zero, 52(%[input])  \n\t"
      "sw   $zero, 56(%[input])  \n\t"
      "sw   $zero, 60(%[input])  \n\t"

      :
      : [input] "r"(input));
}

static void dequant_idct_addconst_2x_msa(int16_t *input,
                                         int16_t *dequant_input, uint8_t *dest,
                                         int32_t dest_stride) {
  v8i16 input_dc0, input_dc1, vec, res0, res1, res2, res3;
  v16u8 dest0, dest1, dest2, dest3;
  v16i8 zero = { 0 };

  input_dc0 = __msa_fill_h(input[0] * dequant_input[0]);
  input_dc1 = __msa_fill_h(input[16] * dequant_input[0]);
  SRARI_H2_SH(input_dc0, input_dc1, 3);
  vec = (v8i16)__msa_pckev_d((v2i64)input_dc1, (v2i64)input_dc0);
  input[0] = 0;
  input[16] = 0;
  LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3);
  ILVR_B4_SH(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1,
             res2, res3);
  ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
  CLIP_SH4_0_255(res0, res1, res2, res3);
  PCKEV_B2_SH(res1, res0, res3, res2, res0, res1);
  ST8x4_UB(res0, res1, dest, dest_stride);
}

void vp8_short_idct4x4llm_msa(int16_t *input, uint8_t *pred_ptr,
                              int32_t pred_stride, uint8_t *dst_ptr,
                              int32_t dst_stride) {
  idct4x4_addblk_msa(input, pred_ptr, pred_stride, dst_ptr, dst_stride);
}

void vp8_dc_only_idct_add_msa(int16_t input_dc, uint8_t *pred_ptr,
                              int32_t pred_stride, uint8_t *dst_ptr,
                              int32_t dst_stride) {
  idct4x4_addconst_msa(input_dc, pred_ptr, pred_stride, dst_ptr, dst_stride);
}

void vp8_dequantize_b_msa(BLOCKD *d, int16_t *DQC) {
  v8i16 dqc0, dqc1, q0, q1, dq0, dq1;

  LD_SH2(DQC, 8, dqc0, dqc1);
  LD_SH2(d->qcoeff, 8, q0, q1);
  MUL2(dqc0, q0, dqc1, q1, dq0, dq1);
  ST_SH2(dq0, dq1, d->dqcoeff, 8);
}

void vp8_dequant_idct_add_msa(int16_t *input, int16_t *dq, uint8_t *dest,
                              int32_t stride) {
  dequant_idct4x4_addblk_msa(input, dq, dest, stride);

  __asm__ __volatile__(
      "sw   $zero,  0(%[input])  \n\t"
      "sw   $zero,  4(%[input])  \n\t"
      "sw   $zero,  8(%[input])  \n\t"
      "sw   $zero, 12(%[input])  \n\t"
      "sw   $zero, 16(%[input])  \n\t"
      "sw   $zero, 20(%[input])  \n\t"
      "sw   $zero, 24(%[input])  \n\t"
      "sw   $zero, 28(%[input])  \n\t"

      :
      : [input] "r"(input));
}

void vp8_dequant_idct_add_y_block_msa(int16_t *q, int16_t *dq, uint8_t *dst,
                                      int32_t stride, char *eobs) {
  int16_t *eobs_h = (int16_t *)eobs;
  uint8_t i;

  for (i = 4; i--;) {
    if (eobs_h[0]) {
      if (eobs_h[0] & 0xfefe) {
        dequant_idct4x4_addblk_2x_msa(q, dq, dst, stride);
      } else {
        dequant_idct_addconst_2x_msa(q, dq, dst, stride);
      }
    }

    q += 32;

    if (eobs_h[1]) {
      if (eobs_h[1] & 0xfefe) {
        dequant_idct4x4_addblk_2x_msa(q, dq, dst + 8, stride);
      } else {
        dequant_idct_addconst_2x_msa(q, dq, dst + 8, stride);
      }
    }

    q += 32;
    dst += (4 * stride);
    eobs_h += 2;
  }
}
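
/*
 * As in the Y-block function above, eobs is read two bytes at a time; each
 * byte holds the end-of-block index of one 4x4 block.  When both bytes are
 * 0 or 1 (mask 0xfefe clear), at most the DC coefficients are nonzero, so
 * the cheaper DC-only path handles the pair of blocks.
 */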

void vp8_dequant_idct_add_uv_block_msa(int16_t *q, int16_t *dq, uint8_t *dstu,
                                       uint8_t *dstv, int32_t stride,
                                       char *eobs) {
  int16_t *eobs_h = (int16_t *)eobs;

  if (eobs_h[0]) {
    if (eobs_h[0] & 0xfefe) {
      dequant_idct4x4_addblk_2x_msa(q, dq, dstu, stride);
    } else {
      dequant_idct_addconst_2x_msa(q, dq, dstu, stride);
    }
  }

  q += 32;
  dstu += (stride * 4);

  if (eobs_h[1]) {
    if (eobs_h[1] & 0xfefe) {
      dequant_idct4x4_addblk_2x_msa(q, dq, dstu, stride);
    } else {
      dequant_idct_addconst_2x_msa(q, dq, dstu, stride);
    }
  }

  q += 32;

  if (eobs_h[2]) {
    if (eobs_h[2] & 0xfefe) {
      dequant_idct4x4_addblk_2x_msa(q, dq, dstv, stride);
    } else {
      dequant_idct_addconst_2x_msa(q, dq, dstv, stride);
    }
  }

  q += 32;
  dstv += (stride * 4);

  if (eobs_h[3]) {
    if (eobs_h[3] & 0xfefe) {
      dequant_idct4x4_addblk_2x_msa(q, dq, dstv, stride);
    } else {
      dequant_idct_addconst_2x_msa(q, dq, dstv, stride);
    }
  }
}