/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <stdlib.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/macros_msa.h"

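/* vpx_avg_8x8_msa: rounded mean of an 8x8 block of pixels. The vector code
 * below loads eight rows, reduces them with horizontal adds, and divides by
 * 64 with a rounding shift (srari adds half the divisor before shifting).
 * A scalar sketch of the same computation, for reference only (not
 * compiled):
 *
 *   uint32_t avg_8x8_sketch(const uint8_t *src, int stride) {
 *     uint32_t sum = 0;
 *     int r, c;
 *     for (r = 0; r < 8; ++r)
 *       for (c = 0; c < 8; ++c) sum += src[r * stride + c];
 *     return (sum + 32) >> 6;  // rounded average of 64 pixels
 *   }
 */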
uint32_t vpx_avg_8x8_msa(const uint8_t *src, int32_t src_stride) {
  uint32_t sum_out;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
  v8u16 sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;
  v4u32 sum = { 0 };

  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
  HADD_UB4_UH(src0, src1, src2, src3, sum0, sum1, sum2, sum3);
  HADD_UB4_UH(src4, src5, src6, src7, sum4, sum5, sum6, sum7);
  ADD4(sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum0, sum2, sum4, sum6);
  ADD2(sum0, sum2, sum4, sum6, sum0, sum4);
  sum0 += sum4;

  sum = __msa_hadd_u_w(sum0, sum0);
  sum0 = (v8u16)__msa_pckev_h((v8i16)sum, (v8i16)sum);
  sum = __msa_hadd_u_w(sum0, sum0);
  sum = (v4u32)__msa_srari_w((v4i32)sum, 6);
  sum_out = __msa_copy_u_w((v4i32)sum, 0);

  return sum_out;
}

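/* vpx_avg_4x4_msa: rounded mean of a 4x4 block, using the same reduction
 * strategy on a single 16-byte vector. Scalar sketch (reference only, not
 * compiled):
 *
 *   uint32_t avg_4x4_sketch(const uint8_t *src, int stride) {
 *     uint32_t sum = 0;
 *     int r, c;
 *     for (r = 0; r < 4; ++r)
 *       for (c = 0; c < 4; ++c) sum += src[r * stride + c];
 *     return (sum + 8) >> 4;  // rounded average of 16 pixels
 *   }
 */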
uint32_t vpx_avg_4x4_msa(const uint8_t *src, int32_t src_stride) {
  uint32_t sum_out;
  uint32_t src0, src1, src2, src3;
  v16u8 vec = { 0 };
  v8u16 sum0;
  v4u32 sum1;
  v2u64 sum2;

  LW4(src, src_stride, src0, src1, src2, src3);
  INSERT_W4_UB(src0, src1, src2, src3, vec);

  sum0 = __msa_hadd_u_h(vec, vec);
  sum1 = __msa_hadd_u_w(sum0, sum0);
  sum0 = (v8u16)__msa_pckev_h((v8i16)sum1, (v8i16)sum1);
  sum1 = __msa_hadd_u_w(sum0, sum0);
  sum2 = __msa_hadd_u_d(sum1, sum1);
  sum1 = (v4u32)__msa_srari_w((v4i32)sum2, 4);
  sum_out = __msa_copy_u_w((v4i32)sum1, 0);

  return sum_out;
}

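/* vpx_hadamard_8x8_msa: 8x8 (unnormalized) Hadamard transform of an int16
 * residual block. Three BUTTERFLY_8 stages apply the 8-point transform to
 * all rows at once, a transpose swaps rows and columns, and the stages plus
 * transpose run again, so both dimensions are transformed. The 64
 * coefficients are stored contiguously (stride 8). For reference, one common
 * scalar formulation of the 1-D 8-point stage (a sketch, not compiled; the
 * butterfly network below is an equivalent with a different output
 * ordering):
 *
 *   void hadamard8_1d_sketch(int16_t v[8]) {
 *     int span, base, k;
 *     for (span = 1; span < 8; span <<= 1) {  // three radix-2 stages
 *       for (base = 0; base < 8; base += span << 1) {
 *         for (k = base; k < base + span; ++k) {
 *           const int16_t a = v[k], b = v[k + span];
 *           v[k] = a + b;
 *           v[k + span] = a - b;
 *         }
 *       }
 *     }
 *   }
 */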
void vpx_hadamard_8x8_msa(const int16_t *src, int src_stride, int16_t *dst) {
  v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;

  LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,
              tmp6, tmp7, tmp5, tmp3, tmp1);
  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
              src5, src7, src6, src3, src2);
  BUTTERFLY_8(src0, src1, src2, src3, src7, src6, src5, src4, tmp0, tmp7, tmp3,
              tmp4, tmp5, tmp1, tmp6, tmp2);
  TRANSPOSE8x8_SH_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, src0, src1,
                     src2, src3, src4, src5, src6, src7);
  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,
              tmp6, tmp7, tmp5, tmp3, tmp1);
  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
              src5, src7, src6, src3, src2);
  BUTTERFLY_8(src0, src1, src2, src3, src7, src6, src5, src4, tmp0, tmp7, tmp3,
              tmp4, tmp5, tmp1, tmp6, tmp2);
  TRANSPOSE8x8_SH_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, src0, src1,
                     src2, src3, src4, src5, src6, src7);
  ST_SH8(src0, src1, src2, src3, src4, src5, src6, src7, dst, 8);
}

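/* vpx_hadamard_16x16_msa: builds the 16x16 Hadamard transform from four 8x8
 * transforms. Each 8x8 quadrant is transformed as above and written to
 * dst + 0, dst + 64, dst + 2 * 64 and dst + 3 * 64; a final butterfly pass
 * then combines the four co-located coefficients, halving the intermediate
 * sums (SRA_4V by 1) to keep the results in int16 range. Scalar sketch of
 * that final pass (reference only, not compiled):
 *
 *   for (i = 0; i < 64; ++i) {
 *     const int16_t a0 = dst[i], a1 = dst[i + 64];
 *     const int16_t a2 = dst[i + 2 * 64], a3 = dst[i + 3 * 64];
 *     const int16_t b0 = (a0 + a1) >> 1, b1 = (a0 - a1) >> 1;
 *     const int16_t b2 = (a2 + a3) >> 1, b3 = (a2 - a3) >> 1;
 *     dst[i] = b0 + b2;
 *     dst[i + 64] = b1 + b3;
 *     dst[i + 2 * 64] = b0 - b2;
 *     dst[i + 3 * 64] = b1 - b3;
 *   }
 */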
void vpx_hadamard_16x16_msa(const int16_t *src, int src_stride, int16_t *dst) {
  v8i16 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
  v8i16 src11, src12, src13, src14, src15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  v8i16 tmp6, tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  v8i16 res0, res1, res2, res3, res4, res5, res6, res7;

  LD_SH2(src, 8, src0, src8);
  src += src_stride;
  LD_SH2(src, 8, src1, src9);
  src += src_stride;
  LD_SH2(src, 8, src2, src10);
  src += src_stride;
  LD_SH2(src, 8, src3, src11);
  src += src_stride;
  LD_SH2(src, 8, src4, src12);
  src += src_stride;
  LD_SH2(src, 8, src5, src13);
  src += src_stride;
  LD_SH2(src, 8, src6, src14);
  src += src_stride;
  LD_SH2(src, 8, src7, src15);
  src += src_stride;

  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,
              tmp6, tmp7, tmp5, tmp3, tmp1);
  BUTTERFLY_8(src8, src10, src12, src14, src15, src13, src11, src9, tmp8, tmp10,
              tmp12, tmp14, tmp15, tmp13, tmp11, tmp9);

  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
              src5, src7, src6, src3, src2);
  BUTTERFLY_8(src0, src1, src2, src3, src7, src6, src5, src4, tmp0, tmp7, tmp3,
              tmp4, tmp5, tmp1, tmp6, tmp2);
  TRANSPOSE8x8_SH_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, src0, src1,
                     src2, src3, src4, src5, src6, src7);
  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,
              tmp6, tmp7, tmp5, tmp3, tmp1);
  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
              src5, src7, src6, src3, src2);
  BUTTERFLY_8(src0, src1, src2, src3, src7, src6, src5, src4, tmp0, tmp7, tmp3,
              tmp4, tmp5, tmp1, tmp6, tmp2);
  TRANSPOSE8x8_SH_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, src0, src1,
                     src2, src3, src4, src5, src6, src7);
  ST_SH8(src0, src1, src2, src3, src4, src5, src6, src7, dst, 8);

  BUTTERFLY_8(tmp8, tmp9, tmp12, tmp13, tmp15, tmp14, tmp11, tmp10, src8, src9,
              src12, src13, src15, src14, src11, src10);
  BUTTERFLY_8(src8, src9, src10, src11, src15, src14, src13, src12, tmp8, tmp15,
              tmp11, tmp12, tmp13, tmp9, tmp14, tmp10);
  TRANSPOSE8x8_SH_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, src8,
                     src9, src10, src11, src12, src13, src14, src15);
  BUTTERFLY_8(src8, src10, src12, src14, src15, src13, src11, src9, tmp8, tmp10,
              tmp12, tmp14, tmp15, tmp13, tmp11, tmp9);
  BUTTERFLY_8(tmp8, tmp9, tmp12, tmp13, tmp15, tmp14, tmp11, tmp10, src8, src9,
              src12, src13, src15, src14, src11, src10);
  BUTTERFLY_8(src8, src9, src10, src11, src15, src14, src13, src12, tmp8, tmp15,
              tmp11, tmp12, tmp13, tmp9, tmp14, tmp10);
  TRANSPOSE8x8_SH_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, res0,
                     res1, res2, res3, res4, res5, res6, res7);

  LD_SH2(src, 8, src0, src8);
  src += src_stride;
  LD_SH2(src, 8, src1, src9);
  src += src_stride;
  LD_SH2(src, 8, src2, src10);
  src += src_stride;
  LD_SH2(src, 8, src3, src11);
  src += src_stride;

  ST_SH8(res0, res1, res2, res3, res4, res5, res6, res7, dst + 64, 8);

  LD_SH2(src, 8, src4, src12);
  src += src_stride;
  LD_SH2(src, 8, src5, src13);
  src += src_stride;
  LD_SH2(src, 8, src6, src14);
  src += src_stride;
  LD_SH2(src, 8, src7, src15);
  src += src_stride;

  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,
              tmp6, tmp7, tmp5, tmp3, tmp1);
  BUTTERFLY_8(src8, src10, src12, src14, src15, src13, src11, src9, tmp8, tmp10,
              tmp12, tmp14, tmp15, tmp13, tmp11, tmp9);

  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
              src5, src7, src6, src3, src2);
  BUTTERFLY_8(src0, src1, src2, src3, src7, src6, src5, src4, tmp0, tmp7, tmp3,
              tmp4, tmp5, tmp1, tmp6, tmp2);
  TRANSPOSE8x8_SH_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, src0, src1,
                     src2, src3, src4, src5, src6, src7);
  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,
              tmp6, tmp7, tmp5, tmp3, tmp1);
  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
              src5, src7, src6, src3, src2);
  BUTTERFLY_8(src0, src1, src2, src3, src7, src6, src5, src4, tmp0, tmp7, tmp3,
              tmp4, tmp5, tmp1, tmp6, tmp2);
  TRANSPOSE8x8_SH_SH(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, src0, src1,
                     src2, src3, src4, src5, src6, src7);
  ST_SH8(src0, src1, src2, src3, src4, src5, src6, src7, dst + 2 * 64, 8);

  BUTTERFLY_8(tmp8, tmp9, tmp12, tmp13, tmp15, tmp14, tmp11, tmp10, src8, src9,
              src12, src13, src15, src14, src11, src10);
  BUTTERFLY_8(src8, src9, src10, src11, src15, src14, src13, src12, tmp8, tmp15,
              tmp11, tmp12, tmp13, tmp9, tmp14, tmp10);
  TRANSPOSE8x8_SH_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, src8,
                     src9, src10, src11, src12, src13, src14, src15);
  BUTTERFLY_8(src8, src10, src12, src14, src15, src13, src11, src9, tmp8, tmp10,
              tmp12, tmp14, tmp15, tmp13, tmp11, tmp9);
  BUTTERFLY_8(tmp8, tmp9, tmp12, tmp13, tmp15, tmp14, tmp11, tmp10, src8, src9,
              src12, src13, src15, src14, src11, src10);
  BUTTERFLY_8(src8, src9, src10, src11, src15, src14, src13, src12, tmp8, tmp15,
              tmp11, tmp12, tmp13, tmp9, tmp14, tmp10);
  TRANSPOSE8x8_SH_SH(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, res0,
                     res1, res2, res3, res4, res5, res6, res7);
  ST_SH8(res0, res1, res2, res3, res4, res5, res6, res7, dst + 3 * 64, 8);

  LD_SH4(dst, 64, src0, src1, src2, src3);
  LD_SH4(dst + 8, 64, src4, src5, src6, src7);

  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,
              tmp6, tmp7, tmp5, tmp3, tmp1);
  SRA_4V(tmp0, tmp1, tmp2, tmp3, 1);
  SRA_4V(tmp4, tmp5, tmp6, tmp7, 1);
  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
              src5, src7, src6, src3, src2);

  ST_SH4(src0, src1, src2, src3, dst, 64);
  ST_SH4(src4, src5, src6, src7, dst + 8, 64);
  dst += 16;

  LD_SH4(dst, 64, src0, src1, src2, src3);
  LD_SH4(dst + 8, 64, src4, src5, src6, src7);

  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,
              tmp6, tmp7, tmp5, tmp3, tmp1);
  SRA_4V(tmp0, tmp1, tmp2, tmp3, 1);
  SRA_4V(tmp4, tmp5, tmp6, tmp7, 1);
  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
              src5, src7, src6, src3, src2);

  ST_SH4(src0, src1, src2, src3, dst, 64);
  ST_SH4(src4, src5, src6, src7, dst + 8, 64);
  dst += 16;

  LD_SH4(dst, 64, src0, src1, src2, src3);
  LD_SH4(dst + 8, 64, src4, src5, src6, src7);

  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,
              tmp6, tmp7, tmp5, tmp3, tmp1);
  SRA_4V(tmp0, tmp1, tmp2, tmp3, 1);
  SRA_4V(tmp4, tmp5, tmp6, tmp7, 1);
  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
              src5, src7, src6, src3, src2);

  ST_SH4(src0, src1, src2, src3, dst, 64);
  ST_SH4(src4, src5, src6, src7, dst + 8, 64);
  dst += 16;

  LD_SH4(dst, 64, src0, src1, src2, src3);
  LD_SH4(dst + 8, 64, src4, src5, src6, src7);

  BUTTERFLY_8(src0, src2, src4, src6, src7, src5, src3, src1, tmp0, tmp2, tmp4,
              tmp6, tmp7, tmp5, tmp3, tmp1);
  SRA_4V(tmp0, tmp1, tmp2, tmp3, 1);
  SRA_4V(tmp4, tmp5, tmp6, tmp7, 1);
  BUTTERFLY_8(tmp0, tmp1, tmp4, tmp5, tmp7, tmp6, tmp3, tmp2, src0, src1, src4,
              src5, src7, src6, src3, src2);

  ST_SH4(src0, src1, src2, src3, dst, 64);
  ST_SH4(src4, src5, src6, src7, dst + 8, 64);
}

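/* vpx_satd_msa: sum of absolute values of `length` transform coefficients
 * (the caller is expected to pass Hadamard-transformed differences). The
 * vector paths cover the block sizes used by the encoder (16, 64, 256,
 * 1024); anything else falls through to the scalar loop, which is also the
 * reference semantics:
 *
 *   int satd = 0;
 *   for (i = 0; i < length; ++i) satd += abs(data[i]);
 */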
int vpx_satd_msa(const int16_t *data, int length) {
  int i, satd;
  v8i16 src0, src1, src2, src3, src4, src5, src6, src7;
  v8i16 src8, src9, src10, src11, src12, src13, src14, src15;
  v8i16 zero = { 0 };
  v8u16 tmp0_h, tmp1_h, tmp2_h, tmp3_h, tmp4_h, tmp5_h, tmp6_h, tmp7_h;
  v4u32 tmp0_w = { 0 };

  if (16 == length) {
    LD_SH2(data, 8, src0, src1);
    tmp0_h = (v8u16)__msa_asub_s_h(src0, zero);
    tmp1_h = (v8u16)__msa_asub_s_h(src1, zero);
    tmp0_w = __msa_hadd_u_w(tmp0_h, tmp0_h);
    tmp0_w += __msa_hadd_u_w(tmp1_h, tmp1_h);
    satd = HADD_UW_U32(tmp0_w);
  } else if (64 == length) {
    LD_SH8(data, 8, src0, src1, src2, src3, src4, src5, src6, src7);

    tmp0_h = (v8u16)__msa_asub_s_h(src0, zero);
    tmp1_h = (v8u16)__msa_asub_s_h(src1, zero);
    tmp2_h = (v8u16)__msa_asub_s_h(src2, zero);
    tmp3_h = (v8u16)__msa_asub_s_h(src3, zero);
    tmp4_h = (v8u16)__msa_asub_s_h(src4, zero);
    tmp5_h = (v8u16)__msa_asub_s_h(src5, zero);
    tmp6_h = (v8u16)__msa_asub_s_h(src6, zero);
    tmp7_h = (v8u16)__msa_asub_s_h(src7, zero);

    tmp0_w = __msa_hadd_u_w(tmp0_h, tmp0_h);
    tmp0_w += __msa_hadd_u_w(tmp1_h, tmp1_h);
    tmp0_w += __msa_hadd_u_w(tmp2_h, tmp2_h);
    tmp0_w += __msa_hadd_u_w(tmp3_h, tmp3_h);
    tmp0_w += __msa_hadd_u_w(tmp4_h, tmp4_h);
    tmp0_w += __msa_hadd_u_w(tmp5_h, tmp5_h);
    tmp0_w += __msa_hadd_u_w(tmp6_h, tmp6_h);
    tmp0_w += __msa_hadd_u_w(tmp7_h, tmp7_h);

    satd = HADD_UW_U32(tmp0_w);
  } else if (256 == length) {
    for (i = 0; i < 2; ++i) {
      LD_SH8(data, 8, src0, src1, src2, src3, src4, src5, src6, src7);
      data += 8 * 8;
      LD_SH8(data, 8, src8, src9, src10, src11, src12, src13, src14, src15);
      data += 8 * 8;

      tmp0_h = (v8u16)__msa_asub_s_h(src0, zero);
      tmp1_h = (v8u16)__msa_asub_s_h(src1, zero);
      tmp2_h = (v8u16)__msa_asub_s_h(src2, zero);
      tmp3_h = (v8u16)__msa_asub_s_h(src3, zero);
      tmp4_h = (v8u16)__msa_asub_s_h(src4, zero);
      tmp5_h = (v8u16)__msa_asub_s_h(src5, zero);
      tmp6_h = (v8u16)__msa_asub_s_h(src6, zero);
      tmp7_h = (v8u16)__msa_asub_s_h(src7, zero);

      tmp0_w += __msa_hadd_u_w(tmp0_h, tmp0_h);
      tmp0_w += __msa_hadd_u_w(tmp1_h, tmp1_h);
      tmp0_w += __msa_hadd_u_w(tmp2_h, tmp2_h);
      tmp0_w += __msa_hadd_u_w(tmp3_h, tmp3_h);
      tmp0_w += __msa_hadd_u_w(tmp4_h, tmp4_h);
      tmp0_w += __msa_hadd_u_w(tmp5_h, tmp5_h);
      tmp0_w += __msa_hadd_u_w(tmp6_h, tmp6_h);
      tmp0_w += __msa_hadd_u_w(tmp7_h, tmp7_h);

      tmp0_h = (v8u16)__msa_asub_s_h(src8, zero);
      tmp1_h = (v8u16)__msa_asub_s_h(src9, zero);
      tmp2_h = (v8u16)__msa_asub_s_h(src10, zero);
      tmp3_h = (v8u16)__msa_asub_s_h(src11, zero);
      tmp4_h = (v8u16)__msa_asub_s_h(src12, zero);
      tmp5_h = (v8u16)__msa_asub_s_h(src13, zero);
      tmp6_h = (v8u16)__msa_asub_s_h(src14, zero);
      tmp7_h = (v8u16)__msa_asub_s_h(src15, zero);

      tmp0_w += __msa_hadd_u_w(tmp0_h, tmp0_h);
      tmp0_w += __msa_hadd_u_w(tmp1_h, tmp1_h);
      tmp0_w += __msa_hadd_u_w(tmp2_h, tmp2_h);
      tmp0_w += __msa_hadd_u_w(tmp3_h, tmp3_h);
      tmp0_w += __msa_hadd_u_w(tmp4_h, tmp4_h);
      tmp0_w += __msa_hadd_u_w(tmp5_h, tmp5_h);
      tmp0_w += __msa_hadd_u_w(tmp6_h, tmp6_h);
      tmp0_w += __msa_hadd_u_w(tmp7_h, tmp7_h);
    }

    satd = HADD_UW_U32(tmp0_w);
  } else if (1024 == length) {
    for (i = 0; i < 8; ++i) {
      LD_SH8(data, 8, src0, src1, src2, src3, src4, src5, src6, src7);
      data += 8 * 8;
      LD_SH8(data, 8, src8, src9, src10, src11, src12, src13, src14, src15);
      data += 8 * 8;

      tmp0_h = (v8u16)__msa_asub_s_h(src0, zero);
      tmp1_h = (v8u16)__msa_asub_s_h(src1, zero);
      tmp2_h = (v8u16)__msa_asub_s_h(src2, zero);
      tmp3_h = (v8u16)__msa_asub_s_h(src3, zero);
      tmp4_h = (v8u16)__msa_asub_s_h(src4, zero);
      tmp5_h = (v8u16)__msa_asub_s_h(src5, zero);
      tmp6_h = (v8u16)__msa_asub_s_h(src6, zero);
      tmp7_h = (v8u16)__msa_asub_s_h(src7, zero);

      tmp0_w += __msa_hadd_u_w(tmp0_h, tmp0_h);
      tmp0_w += __msa_hadd_u_w(tmp1_h, tmp1_h);
      tmp0_w += __msa_hadd_u_w(tmp2_h, tmp2_h);
      tmp0_w += __msa_hadd_u_w(tmp3_h, tmp3_h);
      tmp0_w += __msa_hadd_u_w(tmp4_h, tmp4_h);
      tmp0_w += __msa_hadd_u_w(tmp5_h, tmp5_h);
      tmp0_w += __msa_hadd_u_w(tmp6_h, tmp6_h);
      tmp0_w += __msa_hadd_u_w(tmp7_h, tmp7_h);

      tmp0_h = (v8u16)__msa_asub_s_h(src8, zero);
      tmp1_h = (v8u16)__msa_asub_s_h(src9, zero);
      tmp2_h = (v8u16)__msa_asub_s_h(src10, zero);
      tmp3_h = (v8u16)__msa_asub_s_h(src11, zero);
      tmp4_h = (v8u16)__msa_asub_s_h(src12, zero);
      tmp5_h = (v8u16)__msa_asub_s_h(src13, zero);
      tmp6_h = (v8u16)__msa_asub_s_h(src14, zero);
      tmp7_h = (v8u16)__msa_asub_s_h(src15, zero);

      tmp0_w += __msa_hadd_u_w(tmp0_h, tmp0_h);
      tmp0_w += __msa_hadd_u_w(tmp1_h, tmp1_h);
      tmp0_w += __msa_hadd_u_w(tmp2_h, tmp2_h);
      tmp0_w += __msa_hadd_u_w(tmp3_h, tmp3_h);
      tmp0_w += __msa_hadd_u_w(tmp4_h, tmp4_h);
      tmp0_w += __msa_hadd_u_w(tmp5_h, tmp5_h);
      tmp0_w += __msa_hadd_u_w(tmp6_h, tmp6_h);
      tmp0_w += __msa_hadd_u_w(tmp7_h, tmp7_h);
    }

    satd = HADD_UW_U32(tmp0_w);
  } else {
    satd = 0;

    for (i = 0; i < length; ++i) {
      satd += abs(data[i]);
    }
  }

  return satd;
}

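/* vpx_int_pro_row_msa: projects a 16-pixel-wide reference block onto a row
 * vector. hbuf[c] receives the sum of column c over `height` rows, divided
 * by height / 2; the vector paths use arithmetic shifts of 3, 4 and 5 for
 * heights 16, 32 and 64, which matches the division since the sums are
 * nonnegative. Scalar sketch, mirroring the fallback below:
 *
 *   for (c = 0; c < 16; ++c) {
 *     int sum = 0;
 *     for (r = 0; r < height; ++r) sum += ref[r * ref_stride + c];
 *     hbuf[c] = sum / (height >> 1);
 *   }
 */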
void vpx_int_pro_row_msa(int16_t hbuf[16], const uint8_t *ref,
                         const int ref_stride, const int height) {
  int i;
  v16u8 ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7;
  v8i16 hbuf_r = { 0 };
  v8i16 hbuf_l = { 0 };
  v8i16 ref0_r, ref0_l, ref1_r, ref1_l, ref2_r, ref2_l, ref3_r, ref3_l;
  v8i16 ref4_r, ref4_l, ref5_r, ref5_l, ref6_r, ref6_l, ref7_r, ref7_l;

  if (16 == height) {
    for (i = 2; i--;) {
      LD_UB8(ref, ref_stride, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7);
      ref += 8 * ref_stride;
      UNPCK_UB_SH(ref0, ref0_r, ref0_l);
      UNPCK_UB_SH(ref1, ref1_r, ref1_l);
      UNPCK_UB_SH(ref2, ref2_r, ref2_l);
      UNPCK_UB_SH(ref3, ref3_r, ref3_l);
      UNPCK_UB_SH(ref4, ref4_r, ref4_l);
      UNPCK_UB_SH(ref5, ref5_r, ref5_l);
      UNPCK_UB_SH(ref6, ref6_r, ref6_l);
      UNPCK_UB_SH(ref7, ref7_r, ref7_l);
      ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref2_r, hbuf_l, ref2_l, hbuf_r, ref3_r, hbuf_l, ref3_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref4_r, hbuf_l, ref4_l, hbuf_r, ref5_r, hbuf_l, ref5_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref6_r, hbuf_l, ref6_l, hbuf_r, ref7_r, hbuf_l, ref7_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
    }

    SRA_2V(hbuf_r, hbuf_l, 3);
    ST_SH2(hbuf_r, hbuf_l, hbuf, 8);
  } else if (32 == height) {
    for (i = 2; i--;) {
      LD_UB8(ref, ref_stride, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7);
      ref += 8 * ref_stride;
      UNPCK_UB_SH(ref0, ref0_r, ref0_l);
      UNPCK_UB_SH(ref1, ref1_r, ref1_l);
      UNPCK_UB_SH(ref2, ref2_r, ref2_l);
      UNPCK_UB_SH(ref3, ref3_r, ref3_l);
      UNPCK_UB_SH(ref4, ref4_r, ref4_l);
      UNPCK_UB_SH(ref5, ref5_r, ref5_l);
      UNPCK_UB_SH(ref6, ref6_r, ref6_l);
      UNPCK_UB_SH(ref7, ref7_r, ref7_l);
      ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref2_r, hbuf_l, ref2_l, hbuf_r, ref3_r, hbuf_l, ref3_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref4_r, hbuf_l, ref4_l, hbuf_r, ref5_r, hbuf_l, ref5_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref6_r, hbuf_l, ref6_l, hbuf_r, ref7_r, hbuf_l, ref7_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      LD_UB8(ref, ref_stride, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7);
      ref += 8 * ref_stride;
      UNPCK_UB_SH(ref0, ref0_r, ref0_l);
      UNPCK_UB_SH(ref1, ref1_r, ref1_l);
      UNPCK_UB_SH(ref2, ref2_r, ref2_l);
      UNPCK_UB_SH(ref3, ref3_r, ref3_l);
      UNPCK_UB_SH(ref4, ref4_r, ref4_l);
      UNPCK_UB_SH(ref5, ref5_r, ref5_l);
      UNPCK_UB_SH(ref6, ref6_r, ref6_l);
      UNPCK_UB_SH(ref7, ref7_r, ref7_l);
      ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref2_r, hbuf_l, ref2_l, hbuf_r, ref3_r, hbuf_l, ref3_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref4_r, hbuf_l, ref4_l, hbuf_r, ref5_r, hbuf_l, ref5_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref6_r, hbuf_l, ref6_l, hbuf_r, ref7_r, hbuf_l, ref7_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
    }

    SRA_2V(hbuf_r, hbuf_l, 4);
    ST_SH2(hbuf_r, hbuf_l, hbuf, 8);
  } else if (64 == height) {
    for (i = 4; i--;) {
      LD_UB8(ref, ref_stride, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7);
      ref += 8 * ref_stride;
      UNPCK_UB_SH(ref0, ref0_r, ref0_l);
      UNPCK_UB_SH(ref1, ref1_r, ref1_l);
      UNPCK_UB_SH(ref2, ref2_r, ref2_l);
      UNPCK_UB_SH(ref3, ref3_r, ref3_l);
      UNPCK_UB_SH(ref4, ref4_r, ref4_l);
      UNPCK_UB_SH(ref5, ref5_r, ref5_l);
      UNPCK_UB_SH(ref6, ref6_r, ref6_l);
      UNPCK_UB_SH(ref7, ref7_r, ref7_l);
      ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref2_r, hbuf_l, ref2_l, hbuf_r, ref3_r, hbuf_l, ref3_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref4_r, hbuf_l, ref4_l, hbuf_r, ref5_r, hbuf_l, ref5_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref6_r, hbuf_l, ref6_l, hbuf_r, ref7_r, hbuf_l, ref7_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      LD_UB8(ref, ref_stride, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7);
      ref += 8 * ref_stride;
      UNPCK_UB_SH(ref0, ref0_r, ref0_l);
      UNPCK_UB_SH(ref1, ref1_r, ref1_l);
      UNPCK_UB_SH(ref2, ref2_r, ref2_l);
      UNPCK_UB_SH(ref3, ref3_r, ref3_l);
      UNPCK_UB_SH(ref4, ref4_r, ref4_l);
      UNPCK_UB_SH(ref5, ref5_r, ref5_l);
      UNPCK_UB_SH(ref6, ref6_r, ref6_l);
      UNPCK_UB_SH(ref7, ref7_r, ref7_l);
      ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref2_r, hbuf_l, ref2_l, hbuf_r, ref3_r, hbuf_l, ref3_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref4_r, hbuf_l, ref4_l, hbuf_r, ref5_r, hbuf_l, ref5_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
      ADD4(hbuf_r, ref6_r, hbuf_l, ref6_l, hbuf_r, ref7_r, hbuf_l, ref7_l,
           hbuf_r, hbuf_l, hbuf_r, hbuf_l);
    }

    SRA_2V(hbuf_r, hbuf_l, 5);
    ST_SH2(hbuf_r, hbuf_l, hbuf, 8);
  } else {
    const int norm_factor = height >> 1;
    int cnt;

    for (cnt = 0; cnt < 16; cnt++) {
      hbuf[cnt] = 0;
    }

    for (i = 0; i < height; ++i) {
      for (cnt = 0; cnt < 16; cnt++) {
        hbuf[cnt] += ref[cnt];
      }

      ref += ref_stride;
    }

    for (cnt = 0; cnt < 16; cnt++) {
      hbuf[cnt] /= norm_factor;
    }
  }
}

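/* vpx_int_pro_col_msa: sums the first `width` pixels of a single row of the
 * reference, i.e. the column projection of one row. The vector paths reduce
 * 16, 32 or 64 bytes with horizontal adds; the scalar fallback is also the
 * reference semantics:
 *
 *   int16_t sum = 0;
 *   for (idx = 0; idx < width; ++idx) sum += ref[idx];
 */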
int16_t vpx_int_pro_col_msa(const uint8_t *ref, const int width) {
  int16_t sum;
  v16u8 ref0, ref1, ref2, ref3;
  v8u16 ref0_h;

  if (16 == width) {
    ref0 = LD_UB(ref);
    ref0_h = __msa_hadd_u_h(ref0, ref0);
    sum = HADD_UH_U32(ref0_h);
  } else if (32 == width) {
    LD_UB2(ref, 16, ref0, ref1);
    ref0_h = __msa_hadd_u_h(ref0, ref0);
    ref0_h += __msa_hadd_u_h(ref1, ref1);
    sum = HADD_UH_U32(ref0_h);
  } else if (64 == width) {
    LD_UB4(ref, 16, ref0, ref1, ref2, ref3);
    ref0_h = __msa_hadd_u_h(ref0, ref0);
    ref0_h += __msa_hadd_u_h(ref1, ref1);
    ref0_h += __msa_hadd_u_h(ref2, ref2);
    ref0_h += __msa_hadd_u_h(ref3, ref3);
    sum = HADD_UH_U32(ref0_h);
  } else {
    int idx;

    sum = 0;
    for (idx = 0; idx < width; ++idx) {
      sum += ref[idx];
    }
  }

  return sum;
}

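/* vpx_vector_var_msa: variance of the difference between two int16 vectors
 * of length 4 << bwl. Since 4 << bwl == 1 << (bwl + 2), the final line
 * computes sse - mean^2 / length with a shift. Scalar sketch, mirroring the
 * fallback below:
 *
 *   int sse = 0, mean = 0;
 *   for (i = 0; i < (4 << bwl); ++i) {
 *     const int diff = ref[i] - src[i];
 *     mean += diff;
 *     sse += diff * diff;
 *   }
 *   return sse - ((mean * mean) >> (bwl + 2));
 */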
int vpx_vector_var_msa(const int16_t *ref, const int16_t *src, const int bwl) {
  int sse, mean, var;
  v8i16 src0, src1, src2, src3, src4, src5, src6, src7, ref0, ref1, ref2;
  v8i16 ref3, ref4, ref5, ref6, ref7, src_l0_m, src_l1_m, src_l2_m, src_l3_m;
  v8i16 src_l4_m, src_l5_m, src_l6_m, src_l7_m;
  v4i32 res_l0_m, res_l1_m, res_l2_m, res_l3_m, res_l4_m, res_l5_m, res_l6_m;
  v4i32 res_l7_m, mean_v;
  v2i64 sse_v;

  if (2 == bwl) {
    LD_SH2(src, 8, src0, src1);
    LD_SH2(ref, 8, ref0, ref1);

    ILVRL_H2_SH(src0, ref0, src_l0_m, src_l1_m);
    ILVRL_H2_SH(src1, ref1, src_l2_m, src_l3_m);
    HSUB_UH2_SW(src_l0_m, src_l1_m, res_l0_m, res_l1_m);
    HSUB_UH2_SW(src_l2_m, src_l3_m, res_l2_m, res_l3_m);
    sse_v = __msa_dotp_s_d(res_l0_m, res_l0_m);
    sse_v = __msa_dpadd_s_d(sse_v, res_l1_m, res_l1_m);
    DPADD_SD2_SD(res_l2_m, res_l3_m, sse_v, sse_v);
    mean_v = res_l0_m + res_l1_m;
    mean_v += res_l2_m + res_l3_m;

    sse_v += __msa_splati_d(sse_v, 1);
    sse = __msa_copy_s_w((v4i32)sse_v, 0);

    mean = HADD_SW_S32(mean_v);
  } else if (3 == bwl) {
    LD_SH4(src, 8, src0, src1, src2, src3);
    LD_SH4(ref, 8, ref0, ref1, ref2, ref3);

    ILVRL_H2_SH(src0, ref0, src_l0_m, src_l1_m);
    ILVRL_H2_SH(src1, ref1, src_l2_m, src_l3_m);
    ILVRL_H2_SH(src2, ref2, src_l4_m, src_l5_m);
    ILVRL_H2_SH(src3, ref3, src_l6_m, src_l7_m);
    HSUB_UH2_SW(src_l0_m, src_l1_m, res_l0_m, res_l1_m);
    HSUB_UH2_SW(src_l2_m, src_l3_m, res_l2_m, res_l3_m);
    HSUB_UH2_SW(src_l4_m, src_l5_m, res_l4_m, res_l5_m);
    HSUB_UH2_SW(src_l6_m, src_l7_m, res_l6_m, res_l7_m);
    sse_v = __msa_dotp_s_d(res_l0_m, res_l0_m);
    sse_v = __msa_dpadd_s_d(sse_v, res_l1_m, res_l1_m);
    DPADD_SD2_SD(res_l2_m, res_l3_m, sse_v, sse_v);
    DPADD_SD2_SD(res_l4_m, res_l5_m, sse_v, sse_v);
    DPADD_SD2_SD(res_l6_m, res_l7_m, sse_v, sse_v);
    mean_v = res_l0_m + res_l1_m;
    mean_v += res_l2_m + res_l3_m;
    mean_v += res_l4_m + res_l5_m;
    mean_v += res_l6_m + res_l7_m;

    sse_v += __msa_splati_d(sse_v, 1);
    sse = __msa_copy_s_w((v4i32)sse_v, 0);

    mean = HADD_SW_S32(mean_v);
  } else if (4 == bwl) {
    LD_SH8(src, 8, src0, src1, src2, src3, src4, src5, src6, src7);
    LD_SH8(ref, 8, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7);

    ILVRL_H2_SH(src0, ref0, src_l0_m, src_l1_m);
    ILVRL_H2_SH(src1, ref1, src_l2_m, src_l3_m);
    ILVRL_H2_SH(src2, ref2, src_l4_m, src_l5_m);
    ILVRL_H2_SH(src3, ref3, src_l6_m, src_l7_m);
    HSUB_UH2_SW(src_l0_m, src_l1_m, res_l0_m, res_l1_m);
    HSUB_UH2_SW(src_l2_m, src_l3_m, res_l2_m, res_l3_m);
    HSUB_UH2_SW(src_l4_m, src_l5_m, res_l4_m, res_l5_m);
    HSUB_UH2_SW(src_l6_m, src_l7_m, res_l6_m, res_l7_m);
    sse_v = __msa_dotp_s_d(res_l0_m, res_l0_m);
    sse_v = __msa_dpadd_s_d(sse_v, res_l1_m, res_l1_m);
    DPADD_SD2_SD(res_l2_m, res_l3_m, sse_v, sse_v);
    DPADD_SD2_SD(res_l4_m, res_l5_m, sse_v, sse_v);
    DPADD_SD2_SD(res_l6_m, res_l7_m, sse_v, sse_v);
    mean_v = res_l0_m + res_l1_m;
    mean_v += res_l2_m + res_l3_m;
    mean_v += res_l4_m + res_l5_m;
    mean_v += res_l6_m + res_l7_m;

    ILVRL_H2_SH(src4, ref4, src_l0_m, src_l1_m);
    ILVRL_H2_SH(src5, ref5, src_l2_m, src_l3_m);
    ILVRL_H2_SH(src6, ref6, src_l4_m, src_l5_m);
    ILVRL_H2_SH(src7, ref7, src_l6_m, src_l7_m);
    HSUB_UH2_SW(src_l0_m, src_l1_m, res_l0_m, res_l1_m);
    HSUB_UH2_SW(src_l2_m, src_l3_m, res_l2_m, res_l3_m);
    HSUB_UH2_SW(src_l4_m, src_l5_m, res_l4_m, res_l5_m);
    HSUB_UH2_SW(src_l6_m, src_l7_m, res_l6_m, res_l7_m);
    DPADD_SD2_SD(res_l0_m, res_l1_m, sse_v, sse_v);
    DPADD_SD2_SD(res_l2_m, res_l3_m, sse_v, sse_v);
    DPADD_SD2_SD(res_l4_m, res_l5_m, sse_v, sse_v);
    DPADD_SD2_SD(res_l6_m, res_l7_m, sse_v, sse_v);
    mean_v += res_l0_m + res_l1_m;
    mean_v += res_l2_m + res_l3_m;
    mean_v += res_l4_m + res_l5_m;
    mean_v += res_l6_m + res_l7_m;

    sse_v += __msa_splati_d(sse_v, 1);
    sse = __msa_copy_s_w((v4i32)sse_v, 0);

    mean = HADD_SW_S32(mean_v);
  } else {
    int i;
    const int width = 4 << bwl;

    sse = 0;
    mean = 0;

    for (i = 0; i < width; ++i) {
      const int diff = ref[i] - src[i];

      mean += diff;
      sse += diff * diff;
    }
  }

  var = sse - ((mean * mean) >> (bwl + 2));

  return var;
}

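/* vpx_minmax_8x8_msa: minimum and maximum absolute difference between two
 * 8x8 blocks. The per-byte |s - d| values are reduced with a log2 tree of
 * min/max ops plus element shifts (sldi_b by 8, 4, 2 and 1 bytes). Scalar
 * sketch (reference only, not compiled):
 *
 *   *min = 255;
 *   *max = 0;
 *   for (r = 0; r < 8; ++r)
 *     for (c = 0; c < 8; ++c) {
 *       const int diff = abs(s[r * p + c] - d[r * dp + c]);
 *       if (diff < *min) *min = diff;
 *       if (diff > *max) *max = diff;
 *     }
 */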
void vpx_minmax_8x8_msa(const uint8_t *s, int p, const uint8_t *d, int dp,
                        int *min, int *max) {
  v16u8 s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7;
  v16u8 diff0, diff1, diff2, diff3, min0, min1, max0, max1;

  LD_UB8(s, p, s0, s1, s2, s3, s4, s5, s6, s7);
  LD_UB8(d, dp, d0, d1, d2, d3, d4, d5, d6, d7);
  PCKEV_D4_UB(s1, s0, s3, s2, s5, s4, s7, s6, s0, s1, s2, s3);
  PCKEV_D4_UB(d1, d0, d3, d2, d5, d4, d7, d6, d0, d1, d2, d3);

  diff0 = __msa_asub_u_b(s0, d0);
  diff1 = __msa_asub_u_b(s1, d1);
  diff2 = __msa_asub_u_b(s2, d2);
  diff3 = __msa_asub_u_b(s3, d3);

  min0 = __msa_min_u_b(diff0, diff1);
  min1 = __msa_min_u_b(diff2, diff3);
  min0 = __msa_min_u_b(min0, min1);

  max0 = __msa_max_u_b(diff0, diff1);
  max1 = __msa_max_u_b(diff2, diff3);
  max0 = __msa_max_u_b(max0, max1);

  min1 = (v16u8)__msa_sldi_b((v16i8)min1, (v16i8)min0, 8);
  min0 = __msa_min_u_b(min0, min1);
  max1 = (v16u8)__msa_sldi_b((v16i8)max1, (v16i8)max0, 8);
  max0 = __msa_max_u_b(max0, max1);

  min1 = (v16u8)__msa_sldi_b((v16i8)min1, (v16i8)min0, 4);
  min0 = __msa_min_u_b(min0, min1);
  max1 = (v16u8)__msa_sldi_b((v16i8)max1, (v16i8)max0, 4);
  max0 = __msa_max_u_b(max0, max1);

  min1 = (v16u8)__msa_sldi_b((v16i8)min1, (v16i8)min0, 2);
  min0 = __msa_min_u_b(min0, min1);
  max1 = (v16u8)__msa_sldi_b((v16i8)max1, (v16i8)max0, 2);
  max0 = __msa_max_u_b(max0, max1);

  min1 = (v16u8)__msa_sldi_b((v16i8)min1, (v16i8)min0, 1);
  min0 = __msa_min_u_b(min0, min1);
  max1 = (v16u8)__msa_sldi_b((v16i8)max1, (v16i8)max0, 1);
  max0 = __msa_max_u_b(max0, max1);

  *min = min0[0];
  *max = max0[0];
}