idct32x32_msa.c revision 7ce0a1d1337c01056ba24006efab21f00e179e04
/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_dsp/mips/inv_txfm_msa.h"

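/* Overview: the 2-D 32x32 inverse DCT is computed in two 1-D passes.  The
 * row pass (idct32x8_1d_rows_msa) transforms four 32x8 blocks of
 * coefficients into a 32x32 intermediate buffer, and the column pass
 * (idct8x32_1d_columns_addblk_msa) transforms four 8x32 slices of that
 * buffer, rounds by 6 and adds the result to the destination pixels.  Each
 * pass is split into an "even" and an "odd" half whose partial results are
 * kept in small scratch buffers and merged by a final butterfly stage.  All
 * vectors are v8i16, i.e. eight 16-bit coefficients per MSA register. */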
static void idct32x8_row_transpose_store(const int16_t *input,
                                         int16_t *tmp_buf) {
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* 1st & 2nd 8x8 */
  LD_SH8(input, 32, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
                     m0, n0, m1, n1, m2, n2, m3, n3);
  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
                     m4, n4, m5, n5, m6, n6, m7, n7);
  ST_SH8(m0, n0, m1, n1, m2, n2, m3, n3, (tmp_buf), 8);
  ST_SH4(m4, n4, m5, n5, (tmp_buf + 8 * 8), 8);
  ST_SH4(m6, n6, m7, n7, (tmp_buf + 12 * 8), 8);

  /* 3rd & 4th 8x8 */
  LD_SH8((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
                     m0, n0, m1, n1, m2, n2, m3, n3);
  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
                     m4, n4, m5, n5, m6, n6, m7, n7);
  ST_SH4(m0, n0, m1, n1, (tmp_buf + 16 * 8), 8);
  ST_SH4(m2, n2, m3, n3, (tmp_buf + 20 * 8), 8);
  ST_SH4(m4, n4, m5, n5, (tmp_buf + 24 * 8), 8);
  ST_SH4(m6, n6, m7, n7, (tmp_buf + 28 * 8), 8);
}

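/* Even half of the 32-point row IDCT: consumes the 16 even-indexed
 * coefficient vectors of the transposed 32x8 block (vectors 0, 4, 8, ... in
 * stage 1 and 2, 6, 10, ... in stage 2, loaded with a stride of 32 int16_t)
 * and leaves 16 partial results in tmp_eve_buf.  As defined in the MSA macro
 * headers (inv_txfm_msa.h / macros_msa.h),
 * DOTP_CONST_PAIR(in0, in1, c0, c1, out0, out1) is expected to compute the
 * rotation pair out0 = round_shift(in0 * c0 - in1 * c1) and
 * out1 = round_shift(in0 * c1 + in1 * c0) with DCT_CONST_BITS rounding, and
 * BUTTERFLY_4(a, b, c, d, o0, o1, o2, o3) the add/sub butterfly
 * o0 = a + d, o1 = b + c, o2 = b - c, o3 = a - d. */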
static void idct32x8_row_even_process_store(int16_t *tmp_buf,
                                            int16_t *tmp_eve_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;

  /* Even stage 1 */
  LD_SH8(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);

  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);

  loc1 = vec3;
  loc0 = vec1;

  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
  BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
  BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);

  /* Even stage 2 */
  LD_SH8((tmp_buf + 16), 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
  DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
  DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);

  vec0 = reg0 + reg4;
  reg0 = reg0 - reg4;
  reg4 = reg6 + reg2;
  reg6 = reg6 - reg2;
  reg2 = reg1 + reg5;
  reg1 = reg1 - reg5;
  reg5 = reg7 + reg3;
  reg7 = reg7 - reg3;
  reg3 = vec0;

  vec1 = reg2;
  reg2 = reg3 + reg4;
  reg3 = reg3 - reg4;
  reg4 = reg5 - vec1;
  reg5 = reg5 + vec1;

  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);

  vec0 = reg0 - reg6;
  reg0 = reg0 + reg6;
  vec1 = reg7 - reg1;
  reg7 = reg7 + reg1;

  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
  DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);

  /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
  BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 15 * 8));
  ST_SH(loc1, (tmp_eve_buf));
  ST_SH(loc2, (tmp_eve_buf + 14 * 8));
  ST_SH(loc3, (tmp_eve_buf + 8));

  BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 13 * 8));
  ST_SH(loc1, (tmp_eve_buf + 2 * 8));
  ST_SH(loc2, (tmp_eve_buf + 12 * 8));
  ST_SH(loc3, (tmp_eve_buf + 3 * 8));

  /* Store 8 */
  BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 11 * 8));
  ST_SH(loc1, (tmp_eve_buf + 4 * 8));
  ST_SH(loc2, (tmp_eve_buf + 10 * 8));
  ST_SH(loc3, (tmp_eve_buf + 5 * 8));

  BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
  ST_SH(loc0, (tmp_eve_buf + 9 * 8));
  ST_SH(loc1, (tmp_eve_buf + 6 * 8));
  ST_SH(loc2, (tmp_eve_buf + 8 * 8));
  ST_SH(loc3, (tmp_eve_buf + 7 * 8));
}

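/* Odd half of the 32-point row IDCT: consumes the 16 odd-indexed coefficient
 * vectors of the transposed block (tmp_buf + 1 * 8, 3 * 8, ..., 31 * 8) and
 * produces 16 partial results in tmp_odd_buf.  The three odd stages follow
 * the same butterfly network as the scalar 32-point reference in
 * vpx_dsp/inv_txfm.c; stage 3 reloads and recombines the stage 1/2 results
 * that were just stored. */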
static void idct32x8_row_odd_process_store(int16_t *tmp_buf,
                                           int16_t *tmp_odd_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;

  /* Odd stage 1 */
  reg0 = LD_SH(tmp_buf + 8);
  reg1 = LD_SH(tmp_buf + 7 * 8);
  reg2 = LD_SH(tmp_buf + 9 * 8);
  reg3 = LD_SH(tmp_buf + 15 * 8);
  reg4 = LD_SH(tmp_buf + 17 * 8);
  reg5 = LD_SH(tmp_buf + 23 * 8);
  reg6 = LD_SH(tmp_buf + 25 * 8);
  reg7 = LD_SH(tmp_buf + 31 * 8);

  DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
  DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);

  vec0 = reg0 + reg3;
  reg0 = reg0 - reg3;
  reg3 = reg7 + reg4;
  reg7 = reg7 - reg4;
  reg4 = reg1 + reg2;
  reg1 = reg1 - reg2;
  reg2 = reg6 + reg5;
  reg6 = reg6 - reg5;
  reg5 = vec0;

  /* 4 Stores */
  ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);

  SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf), 8);

  /* 4 Stores */
  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
  BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);

  DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
  ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);

  /* Odd stage 2 */
  /* 8 loads */
  reg0 = LD_SH(tmp_buf + 3 * 8);
  reg1 = LD_SH(tmp_buf + 5 * 8);
  reg2 = LD_SH(tmp_buf + 11 * 8);
  reg3 = LD_SH(tmp_buf + 13 * 8);
  reg4 = LD_SH(tmp_buf + 19 * 8);
  reg5 = LD_SH(tmp_buf + 21 * 8);
  reg6 = LD_SH(tmp_buf + 27 * 8);
  reg7 = LD_SH(tmp_buf + 29 * 8);

  DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
  DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
  DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
  DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);

  /* 4 Stores */
  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
       vec0, vec1, vec2, vec3);
  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);

  BUTTERFLY_4(loc3, loc2, loc0, loc1, vec1, vec0, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);

  DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);

  /* 4 Stores */
  ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
       vec1, vec2, vec0, vec3);
  BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
  ST_SH(reg0, (tmp_odd_buf + 13 * 8));
  ST_SH(reg1, (tmp_odd_buf + 14 * 8));

  DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);

  /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */

  /* Load 8 & Store 8 */
  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
       loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);

  SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);

  /* Load 8 & Store 8 */
  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
       loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);

  SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
}

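/* Final butterfly of the row pass: each even-part vector is combined with a
 * mirrored odd-part vector, giving one output row from the top half (kept in
 * the m0..m7 / n0..n7 registers) and one from the bottom half (the
 * subtraction results, written back into tmp_buf at vectors 16..31).  Both
 * halves are then transposed 8x8 at a time into the 32-wide output buffer
 * consumed by the column pass. */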
static void idct_butterfly_transpose_store(int16_t *tmp_buf,
                                           int16_t *tmp_eve_buf,
                                           int16_t *tmp_odd_buf,
                                           int16_t *dst) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* FINAL BUTTERFLY : Dependency on Even & Odd */
  vec0 = LD_SH(tmp_odd_buf);
  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
  loc0 = LD_SH(tmp_eve_buf);
  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
  loc3 = LD_SH(tmp_eve_buf + 12 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);

  ST_SH((loc0 - vec3), (tmp_buf + 31 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 23 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 27 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 19 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
  loc3 = LD_SH(tmp_eve_buf + 14 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);

  ST_SH((loc0 - vec3), (tmp_buf + 29 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 21 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 25 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 17 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
  loc3 = LD_SH(tmp_eve_buf + 13 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);

  ST_SH((loc0 - vec3), (tmp_buf + 30 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 22 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 26 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 18 * 8));

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
  loc3 = LD_SH(tmp_eve_buf + 15 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);

  ST_SH((loc0 - vec3), (tmp_buf + 28 * 8));
  ST_SH((loc1 - vec2), (tmp_buf + 20 * 8));
  ST_SH((loc2 - vec1), (tmp_buf + 24 * 8));
  ST_SH((loc3 - vec0), (tmp_buf + 16 * 8));

  /* Transpose : 16 vectors */
  /* 1st & 2nd 8x8 */
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
                     m0, n0, m1, n1, m2, n2, m3, n3);
  ST_SH4(m0, n0, m1, n1, (dst + 0), 32);
  ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32);

  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
                     m4, n4, m5, n5, m6, n6, m7, n7);
  ST_SH4(m4, n4, m5, n5, (dst + 8), 32);
  ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32);

  /* 3rd & 4th 8x8 */
  LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);
  LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);
  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
                     m0, n0, m1, n1, m2, n2, m3, n3);
  ST_SH4(m0, n0, m1, n1, (dst + 16), 32);
  ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32);

  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
                     m4, n4, m5, n5, m6, n6, m7, n7);
  ST_SH4(m4, n4, m5, n5, (dst + 24), 32);
  ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32);
}

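/* One row-pass call: transposes a 32x8 block of input coefficients into
 * tmp_buf, runs the even and odd halves, and writes the 8 transformed rows
 * of 32 coefficients each to the intermediate output buffer. */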
static void idct32x8_1d_rows_msa(const int16_t *input, int16_t *output) {
  DECLARE_ALIGNED(32, int16_t, tmp_buf[8 * 32]);
  DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]);
  DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);

  idct32x8_row_transpose_store(input, &tmp_buf[0]);
  idct32x8_row_even_process_store(&tmp_buf[0], &tmp_eve_buf[0]);
  idct32x8_row_odd_process_store(&tmp_buf[0], &tmp_odd_buf[0]);
  idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0],
                                 &tmp_odd_buf[0], output);
}

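/* Column pass, even half: the same butterfly network as the row pass, but
 * the inputs are read directly from the 32-wide intermediate buffer, so no
 * transpose is needed.  LD_SH8 with a stride of 4 * 32 int16_t picks up rows
 * 0, 4, 8, ..., and the tmp_buf += 2 * 32 advance moves the second set of
 * loads to rows 2, 6, 10, ... */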
static void idct8x32_column_even_process_store(int16_t *tmp_buf,
                                               int16_t *tmp_eve_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
  v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;

  /* Even stage 1 */
  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
  tmp_buf += (2 * 32);

  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);

  loc1 = vec3;
  loc0 = vec1;

  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
  DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
  BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
  BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);

  /* Even stage 2 */
  /* Load 8 */
  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);

  DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
  DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);

  vec0 = reg0 + reg4;
  reg0 = reg0 - reg4;
  reg4 = reg6 + reg2;
  reg6 = reg6 - reg2;
  reg2 = reg1 + reg5;
  reg1 = reg1 - reg5;
  reg5 = reg7 + reg3;
  reg7 = reg7 - reg3;
  reg3 = vec0;

  vec1 = reg2;
  reg2 = reg3 + reg4;
  reg3 = reg3 - reg4;
  reg4 = reg5 - vec1;
  reg5 = reg5 + vec1;

  DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
  DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);

  vec0 = reg0 - reg6;
  reg0 = reg0 + reg6;
  vec1 = reg7 - reg1;
  reg7 = reg7 + reg1;

  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
  DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);

  /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
  /* Store 8 */
  BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, tmp_eve_buf, 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 14 * 8), 8);

  BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 2 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 12 * 8), 8);

  /* Store 8 */
  BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 4 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 10 * 8), 8);

  BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
  ST_SH2(loc1, loc3, (tmp_eve_buf + 6 * 8), 8);
  ST_SH2(loc2, loc0, (tmp_eve_buf + 8 * 8), 8);
}

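/* Column pass, odd half: identical structure to
 * idct32x8_row_odd_process_store, except that the odd-indexed inputs are
 * loaded with a row stride of 32 int16_t from the intermediate buffer
 * instead of 8 from the transposed scratch buffer. */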
static void idct8x32_column_odd_process_store(int16_t *tmp_buf,
                                              int16_t *tmp_odd_buf) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;

  /* Odd stage 1 */
  reg0 = LD_SH(tmp_buf + 32);
  reg1 = LD_SH(tmp_buf + 7 * 32);
  reg2 = LD_SH(tmp_buf + 9 * 32);
  reg3 = LD_SH(tmp_buf + 15 * 32);
  reg4 = LD_SH(tmp_buf + 17 * 32);
  reg5 = LD_SH(tmp_buf + 23 * 32);
  reg6 = LD_SH(tmp_buf + 25 * 32);
  reg7 = LD_SH(tmp_buf + 31 * 32);

  DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
  DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
  DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
  DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);

  vec0 = reg0 + reg3;
  reg0 = reg0 - reg3;
  reg3 = reg7 + reg4;
  reg7 = reg7 - reg4;
  reg4 = reg1 + reg2;
  reg1 = reg1 - reg2;
  reg2 = reg6 + reg5;
  reg6 = reg6 - reg5;
  reg5 = vec0;

  /* 4 Stores */
  ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);
  SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
  ST_SH2(vec0, vec1, tmp_odd_buf, 8);

  /* 4 Stores */
  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
  BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);
  DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
  ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);

  /* Odd stage 2 */
  /* 8 loads */
  reg0 = LD_SH(tmp_buf + 3 * 32);
  reg1 = LD_SH(tmp_buf + 5 * 32);
  reg2 = LD_SH(tmp_buf + 11 * 32);
  reg3 = LD_SH(tmp_buf + 13 * 32);
  reg4 = LD_SH(tmp_buf + 19 * 32);
  reg5 = LD_SH(tmp_buf + 21 * 32);
  reg6 = LD_SH(tmp_buf + 27 * 32);
  reg7 = LD_SH(tmp_buf + 29 * 32);

  DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
  DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
  DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
  DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);

  /* 4 Stores */
  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec0, vec1, vec2, vec3);
  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
  BUTTERFLY_4(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);
  DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
  ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);

  /* 4 Stores */
  ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7, vec0, vec1, vec2, vec3);
  BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 13 * 8), 8);
  DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
  ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);

  /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
  /* Load 8 & Store 8 */
  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);

  SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);

  /* Load 8 & Store 8 */
  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);

  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);

  SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);

  SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
}

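/* Final butterfly of the column pass: combines the even and odd partial
 * results and rounds each output by 6, the second-pass rounding of the 32x32
 * inverse transform (SRARI_H4_SH is expected to be a rounding arithmetic
 * shift right by the given amount).  VP9_ADDBLK_ST8x4_UB is expected to add
 * four 8-pixel rows of residue to the destination, clip to [0, 255] and
 * store them back.  Offsets such as dst + 19 * dst_stride come from the
 * mirrored ordering of the lower 16 output rows combined with the
 * 4 * dst_stride row step. */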
static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
                                             int16_t *tmp_odd_buf,
                                             uint8_t *dst,
                                             int32_t dst_stride) {
  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;

  /* FINAL BUTTERFLY : Dependency on Even & Odd */
  vec0 = LD_SH(tmp_odd_buf);
  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
  loc0 = LD_SH(tmp_eve_buf);
  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
  loc3 = LD_SH(tmp_eve_buf + 12 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
  SRARI_H4_SH(m0, m2, m4, m6, 6);
  VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
  SRARI_H4_SH(m0, m2, m4, m6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride),
                      m0, m2, m4, m6);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
  loc3 = LD_SH(tmp_eve_buf + 14 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
  SRARI_H4_SH(m1, m3, m5, m7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride),
                      m1, m3, m5, m7);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
  SRARI_H4_SH(m1, m3, m5, m7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride),
                      m1, m3, m5, m7);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
  loc3 = LD_SH(tmp_eve_buf + 13 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
  SRARI_H4_SH(n0, n2, n4, n6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride),
                      n0, n2, n4, n6);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
  SRARI_H4_SH(n0, n2, n4, n6, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride),
                      n0, n2, n4, n6);

  /* Load 8 & Store 8 */
  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
  loc3 = LD_SH(tmp_eve_buf + 15 * 8);

  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
  SRARI_H4_SH(n1, n3, n5, n7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride),
                      n1, n3, n5, n7);

  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
  SRARI_H4_SH(n1, n3, n5, n7, 6);
  VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride),
                      n1, n3, n5, n7);
}

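/* One column-pass call: transforms an 8-column (8x32) slice of the
 * intermediate buffer and accumulates the reconstructed residue into the
 * corresponding 8-pixel-wide strip of the destination. */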
static void idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride) {
  DECLARE_ALIGNED(32, int16_t, tmp_odd_buf[16 * 8]);
  DECLARE_ALIGNED(32, int16_t, tmp_eve_buf[16 * 8]);

  idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
  idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
  idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0],
                                   dst, dst_stride);
}

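/* Full inverse transform (up to all 1024 coefficients non-zero): four 32x8
 * row passes fill the 32x32 intermediate buffer, then four 8x32 column
 * passes add the result to the destination.  This entry point is normally
 * reached through the vpx_dsp RTCD dispatch table rather than called
 * directly. */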
void vpx_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst,
                                int32_t dst_stride) {
  int32_t i;
  DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
  int16_t *out_ptr = out_arr;

  /* transform rows */
  for (i = 0; i < 4; ++i) {
    /* process 32 * 8 block */
    idct32x8_1d_rows_msa((input + (i << 8)), (out_ptr + (i << 8)));
  }

  /* transform columns */
  for (i = 0; i < 4; ++i) {
    /* process 8 * 32 block */
    idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                   dst_stride);
  }
}

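/* Partial inverse transform used when at most 34 coefficients are non-zero:
 * only the top-left 8x8 of the coefficient block can be populated, so a
 * single 32x8 row pass is enough.  The inline assembly loop first clears the
 * whole intermediate buffer (sixteen word stores, i.e. 64 bytes or one
 * 32-coefficient row, per iteration) so the untouched rows read as zero in
 * the column pass. */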
void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst,
                              int32_t dst_stride) {
  int32_t i;
  DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]);
  int16_t *out_ptr = out_arr;

  for (i = 32; i--;) {
    __asm__ __volatile__ (
        "sw     $zero,      0(%[out_ptr])     \n\t"
        "sw     $zero,      4(%[out_ptr])     \n\t"
        "sw     $zero,      8(%[out_ptr])     \n\t"
        "sw     $zero,     12(%[out_ptr])     \n\t"
        "sw     $zero,     16(%[out_ptr])     \n\t"
        "sw     $zero,     20(%[out_ptr])     \n\t"
        "sw     $zero,     24(%[out_ptr])     \n\t"
        "sw     $zero,     28(%[out_ptr])     \n\t"
        "sw     $zero,     32(%[out_ptr])     \n\t"
        "sw     $zero,     36(%[out_ptr])     \n\t"
        "sw     $zero,     40(%[out_ptr])     \n\t"
        "sw     $zero,     44(%[out_ptr])     \n\t"
        "sw     $zero,     48(%[out_ptr])     \n\t"
        "sw     $zero,     52(%[out_ptr])     \n\t"
        "sw     $zero,     56(%[out_ptr])     \n\t"
        "sw     $zero,     60(%[out_ptr])     \n\t"

        :
        : [out_ptr] "r" (out_ptr)
    );

    out_ptr += 32;
  }

  out_ptr = out_arr;

  /* rows: only upper-left 8x8 has non-zero coeff */
  idct32x8_1d_rows_msa(input, out_ptr);

  /* transform columns */
  for (i = 0; i < 4; ++i) {
    /* process 8 * 32 block */
    idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)),
                                   dst_stride);
  }
}

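/* DC-only inverse transform: with a single non-zero (DC) coefficient every
 * output sample is the same value.  It is computed once (two cospi_16_64
 * rotations rounded by DCT_CONST_BITS, then the final rounding by 6),
 * replicated across a vector with __msa_fill_h and added with clipping to
 * every pixel of the 32x32 destination block, two rows of 32 pixels per
 * loop iteration. */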
void vpx_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst,
                             int32_t dst_stride) {
  int32_t i;
  int16_t out;
  v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
  v8i16 res0, res1, res2, res3, res4, res5, res6, res7, vec;

  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO(out, 6);

  vec = __msa_fill_h(out);

  for (i = 16; i--;) {
    LD_UB2(dst, 16, dst0, dst1);
    LD_UB2(dst + dst_stride, 16, dst2, dst3);

    UNPCK_UB_SH(dst0, res0, res4);
    UNPCK_UB_SH(dst1, res1, res5);
    UNPCK_UB_SH(dst2, res2, res6);
    UNPCK_UB_SH(dst3, res3, res7);
    ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
    ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);
    CLIP_SH4_0_255(res0, res1, res2, res3);
    CLIP_SH4_0_255(res4, res5, res6, res7);
    PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
                tmp0, tmp1, tmp2, tmp3);

    ST_UB2(tmp0, tmp1, dst, 16);
    dst += dst_stride;
    ST_UB2(tmp2, tmp3, dst, 16);
    dst += dst_stride;
  }
}