1/*
2 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
3 *
4 *  Use of this source code is governed by a BSD-style license
5 *  that can be found in the LICENSE file in the root of the source
6 *  tree. An additional intellectual property rights grant can be found
7 *  in the file PATENTS.  All contributing project authors may
8 *  be found in the AUTHORS file in the root of the source tree.
9 */
10
11#include <arm_neon.h>
12
13#include "./vpx_config.h"
14#include "./vpx_dsp_rtcd.h"
15#include "vpx_dsp/txfm_common.h"
16#include "vpx_dsp/arm/mem_neon.h"
17#include "vpx_dsp/arm/transpose_neon.h"
18
19// Some builds of gcc 4.9.2 and .3 have trouble with some of the inline
20// functions.
21#if !defined(__clang__) && !defined(__ANDROID__) && defined(__GNUC__) && \
22    __GNUC__ == 4 && __GNUC_MINOR__ == 9 && __GNUC_PATCHLEVEL__ < 4
23
24void vpx_fdct16x16_neon(const int16_t *input, tran_low_t *output, int stride) {
25  vpx_fdct16x16_c(input, output, stride);
26}
27
28#else
29
30static INLINE void load(const int16_t *a, int stride, int16x8_t *b /*[16]*/) {
31  b[0] = vld1q_s16(a);
32  a += stride;
33  b[1] = vld1q_s16(a);
34  a += stride;
35  b[2] = vld1q_s16(a);
36  a += stride;
37  b[3] = vld1q_s16(a);
38  a += stride;
39  b[4] = vld1q_s16(a);
40  a += stride;
41  b[5] = vld1q_s16(a);
42  a += stride;
43  b[6] = vld1q_s16(a);
44  a += stride;
45  b[7] = vld1q_s16(a);
46  a += stride;
47  b[8] = vld1q_s16(a);
48  a += stride;
49  b[9] = vld1q_s16(a);
50  a += stride;
51  b[10] = vld1q_s16(a);
52  a += stride;
53  b[11] = vld1q_s16(a);
54  a += stride;
55  b[12] = vld1q_s16(a);
56  a += stride;
57  b[13] = vld1q_s16(a);
58  a += stride;
59  b[14] = vld1q_s16(a);
60  a += stride;
61  b[15] = vld1q_s16(a);
62}
63
64// Store 8 16x8 values, assuming stride == 16.
65static INLINE void store(tran_low_t *a, const int16x8_t *b /*[8]*/) {
66  store_s16q_to_tran_low(a, b[0]);
67  a += 16;
68  store_s16q_to_tran_low(a, b[1]);
69  a += 16;
70  store_s16q_to_tran_low(a, b[2]);
71  a += 16;
72  store_s16q_to_tran_low(a, b[3]);
73  a += 16;
74  store_s16q_to_tran_low(a, b[4]);
75  a += 16;
76  store_s16q_to_tran_low(a, b[5]);
77  a += 16;
78  store_s16q_to_tran_low(a, b[6]);
79  a += 16;
80  store_s16q_to_tran_low(a, b[7]);
81}
82
83// Load step of each pass. Add and subtract clear across the input, requiring
84// all 16 values to be loaded. For the first pass it also multiplies by 4.
85
86// To maybe reduce register usage this could be combined with the load() step to
87// get the first 4 and last 4 values, cross those, then load the middle 8 values
88// and cross them.
89static INLINE void cross_input(const int16x8_t *a /*[16]*/,
90                               int16x8_t *b /*[16]*/, const int pass) {
91  if (pass == 0) {
92    b[0] = vshlq_n_s16(vaddq_s16(a[0], a[15]), 2);
93    b[1] = vshlq_n_s16(vaddq_s16(a[1], a[14]), 2);
94    b[2] = vshlq_n_s16(vaddq_s16(a[2], a[13]), 2);
95    b[3] = vshlq_n_s16(vaddq_s16(a[3], a[12]), 2);
96    b[4] = vshlq_n_s16(vaddq_s16(a[4], a[11]), 2);
97    b[5] = vshlq_n_s16(vaddq_s16(a[5], a[10]), 2);
98    b[6] = vshlq_n_s16(vaddq_s16(a[6], a[9]), 2);
99    b[7] = vshlq_n_s16(vaddq_s16(a[7], a[8]), 2);
100
101    b[8] = vshlq_n_s16(vsubq_s16(a[7], a[8]), 2);
102    b[9] = vshlq_n_s16(vsubq_s16(a[6], a[9]), 2);
103    b[10] = vshlq_n_s16(vsubq_s16(a[5], a[10]), 2);
104    b[11] = vshlq_n_s16(vsubq_s16(a[4], a[11]), 2);
105    b[12] = vshlq_n_s16(vsubq_s16(a[3], a[12]), 2);
106    b[13] = vshlq_n_s16(vsubq_s16(a[2], a[13]), 2);
107    b[14] = vshlq_n_s16(vsubq_s16(a[1], a[14]), 2);
108    b[15] = vshlq_n_s16(vsubq_s16(a[0], a[15]), 2);
109  } else {
110    b[0] = vaddq_s16(a[0], a[15]);
111    b[1] = vaddq_s16(a[1], a[14]);
112    b[2] = vaddq_s16(a[2], a[13]);
113    b[3] = vaddq_s16(a[3], a[12]);
114    b[4] = vaddq_s16(a[4], a[11]);
115    b[5] = vaddq_s16(a[5], a[10]);
116    b[6] = vaddq_s16(a[6], a[9]);
117    b[7] = vaddq_s16(a[7], a[8]);
118
119    b[8] = vsubq_s16(a[7], a[8]);
120    b[9] = vsubq_s16(a[6], a[9]);
121    b[10] = vsubq_s16(a[5], a[10]);
122    b[11] = vsubq_s16(a[4], a[11]);
123    b[12] = vsubq_s16(a[3], a[12]);
124    b[13] = vsubq_s16(a[2], a[13]);
125    b[14] = vsubq_s16(a[1], a[14]);
126    b[15] = vsubq_s16(a[0], a[15]);
127  }
128}
129
130// Quarter round at the beginning of the second pass. Can't use vrshr (rounding)
131// because this only adds 1, not 1 << 2.
132static INLINE void partial_round_shift(int16x8_t *a /*[16]*/) {
133  const int16x8_t one = vdupq_n_s16(1);
134  a[0] = vshrq_n_s16(vaddq_s16(a[0], one), 2);
135  a[1] = vshrq_n_s16(vaddq_s16(a[1], one), 2);
136  a[2] = vshrq_n_s16(vaddq_s16(a[2], one), 2);
137  a[3] = vshrq_n_s16(vaddq_s16(a[3], one), 2);
138  a[4] = vshrq_n_s16(vaddq_s16(a[4], one), 2);
139  a[5] = vshrq_n_s16(vaddq_s16(a[5], one), 2);
140  a[6] = vshrq_n_s16(vaddq_s16(a[6], one), 2);
141  a[7] = vshrq_n_s16(vaddq_s16(a[7], one), 2);
142  a[8] = vshrq_n_s16(vaddq_s16(a[8], one), 2);
143  a[9] = vshrq_n_s16(vaddq_s16(a[9], one), 2);
144  a[10] = vshrq_n_s16(vaddq_s16(a[10], one), 2);
145  a[11] = vshrq_n_s16(vaddq_s16(a[11], one), 2);
146  a[12] = vshrq_n_s16(vaddq_s16(a[12], one), 2);
147  a[13] = vshrq_n_s16(vaddq_s16(a[13], one), 2);
148  a[14] = vshrq_n_s16(vaddq_s16(a[14], one), 2);
149  a[15] = vshrq_n_s16(vaddq_s16(a[15], one), 2);
150}
151
152// fdct_round_shift((a +/- b) * c)
153static INLINE void butterfly_one_coeff(const int16x8_t a, const int16x8_t b,
154                                       const tran_high_t c, int16x8_t *add,
155                                       int16x8_t *sub) {
156  const int32x4_t a0 = vmull_n_s16(vget_low_s16(a), c);
157  const int32x4_t a1 = vmull_n_s16(vget_high_s16(a), c);
158  const int32x4_t sum0 = vmlal_n_s16(a0, vget_low_s16(b), c);
159  const int32x4_t sum1 = vmlal_n_s16(a1, vget_high_s16(b), c);
160  const int32x4_t diff0 = vmlsl_n_s16(a0, vget_low_s16(b), c);
161  const int32x4_t diff1 = vmlsl_n_s16(a1, vget_high_s16(b), c);
162  const int16x4_t rounded0 = vqrshrn_n_s32(sum0, 14);
163  const int16x4_t rounded1 = vqrshrn_n_s32(sum1, 14);
164  const int16x4_t rounded2 = vqrshrn_n_s32(diff0, 14);
165  const int16x4_t rounded3 = vqrshrn_n_s32(diff1, 14);
166  *add = vcombine_s16(rounded0, rounded1);
167  *sub = vcombine_s16(rounded2, rounded3);
168}
169
170// fdct_round_shift(a * c0 +/- b * c1)
171static INLINE void butterfly_two_coeff(const int16x8_t a, const int16x8_t b,
172                                       const tran_coef_t c0,
173                                       const tran_coef_t c1, int16x8_t *add,
174                                       int16x8_t *sub) {
175  const int32x4_t a0 = vmull_n_s16(vget_low_s16(a), c0);
176  const int32x4_t a1 = vmull_n_s16(vget_high_s16(a), c0);
177  const int32x4_t a2 = vmull_n_s16(vget_low_s16(a), c1);
178  const int32x4_t a3 = vmull_n_s16(vget_high_s16(a), c1);
179  const int32x4_t sum0 = vmlal_n_s16(a2, vget_low_s16(b), c0);
180  const int32x4_t sum1 = vmlal_n_s16(a3, vget_high_s16(b), c0);
181  const int32x4_t diff0 = vmlsl_n_s16(a0, vget_low_s16(b), c1);
182  const int32x4_t diff1 = vmlsl_n_s16(a1, vget_high_s16(b), c1);
183  const int16x4_t rounded0 = vqrshrn_n_s32(sum0, 14);
184  const int16x4_t rounded1 = vqrshrn_n_s32(sum1, 14);
185  const int16x4_t rounded2 = vqrshrn_n_s32(diff0, 14);
186  const int16x4_t rounded3 = vqrshrn_n_s32(diff1, 14);
187  *add = vcombine_s16(rounded0, rounded1);
188  *sub = vcombine_s16(rounded2, rounded3);
189}
190
191// Transpose 8x8 to a new location. Don't use transpose_neon.h because those
192// are all in-place.
193static INLINE void transpose_8x8(const int16x8_t *a /*[8]*/,
194                                 int16x8_t *b /*[8]*/) {
195  // Swap 16 bit elements.
196  const int16x8x2_t c0 = vtrnq_s16(a[0], a[1]);
197  const int16x8x2_t c1 = vtrnq_s16(a[2], a[3]);
198  const int16x8x2_t c2 = vtrnq_s16(a[4], a[5]);
199  const int16x8x2_t c3 = vtrnq_s16(a[6], a[7]);
200
201  // Swap 32 bit elements.
202  const int32x4x2_t d0 = vtrnq_s32(vreinterpretq_s32_s16(c0.val[0]),
203                                   vreinterpretq_s32_s16(c1.val[0]));
204  const int32x4x2_t d1 = vtrnq_s32(vreinterpretq_s32_s16(c0.val[1]),
205                                   vreinterpretq_s32_s16(c1.val[1]));
206  const int32x4x2_t d2 = vtrnq_s32(vreinterpretq_s32_s16(c2.val[0]),
207                                   vreinterpretq_s32_s16(c3.val[0]));
208  const int32x4x2_t d3 = vtrnq_s32(vreinterpretq_s32_s16(c2.val[1]),
209                                   vreinterpretq_s32_s16(c3.val[1]));
210
211  // Swap 64 bit elements
212  const int16x8x2_t e0 = vpx_vtrnq_s64_to_s16(d0.val[0], d2.val[0]);
213  const int16x8x2_t e1 = vpx_vtrnq_s64_to_s16(d1.val[0], d3.val[0]);
214  const int16x8x2_t e2 = vpx_vtrnq_s64_to_s16(d0.val[1], d2.val[1]);
215  const int16x8x2_t e3 = vpx_vtrnq_s64_to_s16(d1.val[1], d3.val[1]);
216
217  b[0] = e0.val[0];
218  b[1] = e1.val[0];
219  b[2] = e2.val[0];
220  b[3] = e3.val[0];
221  b[4] = e0.val[1];
222  b[5] = e1.val[1];
223  b[6] = e2.val[1];
224  b[7] = e3.val[1];
225}
226
227// Main body of fdct16x16.
228static void dct_body(const int16x8_t *in /*[16]*/, int16x8_t *out /*[16]*/) {
229  int16x8_t s[8];
230  int16x8_t x[4];
231  int16x8_t step[8];
232
233  // stage 1
234  // From fwd_txfm.c: Work on the first eight values; fdct8(input,
235  // even_results);"
236  s[0] = vaddq_s16(in[0], in[7]);
237  s[1] = vaddq_s16(in[1], in[6]);
238  s[2] = vaddq_s16(in[2], in[5]);
239  s[3] = vaddq_s16(in[3], in[4]);
240  s[4] = vsubq_s16(in[3], in[4]);
241  s[5] = vsubq_s16(in[2], in[5]);
242  s[6] = vsubq_s16(in[1], in[6]);
243  s[7] = vsubq_s16(in[0], in[7]);
244
245  // fdct4(step, step);
246  x[0] = vaddq_s16(s[0], s[3]);
247  x[1] = vaddq_s16(s[1], s[2]);
248  x[2] = vsubq_s16(s[1], s[2]);
249  x[3] = vsubq_s16(s[0], s[3]);
250
251  // out[0] = fdct_round_shift((x0 + x1) * cospi_16_64)
252  // out[8] = fdct_round_shift((x0 - x1) * cospi_16_64)
253  butterfly_one_coeff(x[0], x[1], cospi_16_64, &out[0], &out[8]);
254  // out[4] = fdct_round_shift(x3 * cospi_8_64 + x2 * cospi_24_64);
255  // out[12] = fdct_round_shift(x3 * cospi_24_64 - x2 * cospi_8_64);
256  butterfly_two_coeff(x[3], x[2], cospi_24_64, cospi_8_64, &out[4], &out[12]);
257
258  //  Stage 2
259  // Re-using source s5/s6
260  // s5 = fdct_round_shift((s6 - s5) * cospi_16_64)
261  // s6 = fdct_round_shift((s6 + s5) * cospi_16_64)
262  butterfly_one_coeff(s[6], s[5], cospi_16_64, &s[6], &s[5]);
263
264  //  Stage 3
265  x[0] = vaddq_s16(s[4], s[5]);
266  x[1] = vsubq_s16(s[4], s[5]);
267  x[2] = vsubq_s16(s[7], s[6]);
268  x[3] = vaddq_s16(s[7], s[6]);
269
270  // Stage 4
271  // out[2] = fdct_round_shift(x0 * cospi_28_64 + x3 * cospi_4_64)
272  // out[14] = fdct_round_shift(x3 * cospi_28_64 + x0 * -cospi_4_64)
273  butterfly_two_coeff(x[3], x[0], cospi_28_64, cospi_4_64, &out[2], &out[14]);
274  // out[6] = fdct_round_shift(x1 * cospi_12_64 + x2 *  cospi_20_64)
275  // out[10] = fdct_round_shift(x2 * cospi_12_64 + x1 * -cospi_20_64)
276  butterfly_two_coeff(x[2], x[1], cospi_12_64, cospi_20_64, &out[10], &out[6]);
277
278  // step 2
279  // From fwd_txfm.c: Work on the next eight values; step1 -> odd_results"
280  // That file distinguished between "in_high" and "step1" but the only
281  // difference is that "in_high" is the first 8 values and "step 1" is the
282  // second. Here, since they are all in one array, "step1" values are += 8.
283
284  // step2[2] = fdct_round_shift((step1[5] - step1[2]) * cospi_16_64)
285  // step2[3] = fdct_round_shift((step1[4] - step1[3]) * cospi_16_64)
286  // step2[4] = fdct_round_shift((step1[4] + step1[3]) * cospi_16_64)
287  // step2[5] = fdct_round_shift((step1[5] + step1[2]) * cospi_16_64)
288  butterfly_one_coeff(in[13], in[10], cospi_16_64, &s[5], &s[2]);
289  butterfly_one_coeff(in[12], in[11], cospi_16_64, &s[4], &s[3]);
290
291  // step 3
292  s[0] = vaddq_s16(in[8], s[3]);
293  s[1] = vaddq_s16(in[9], s[2]);
294  x[0] = vsubq_s16(in[9], s[2]);
295  x[1] = vsubq_s16(in[8], s[3]);
296  x[2] = vsubq_s16(in[15], s[4]);
297  x[3] = vsubq_s16(in[14], s[5]);
298  s[6] = vaddq_s16(in[14], s[5]);
299  s[7] = vaddq_s16(in[15], s[4]);
300
301  // step 4
302  // step2[1] = fdct_round_shift(step3[1] *-cospi_8_64 + step3[6] * cospi_24_64)
303  // step2[6] = fdct_round_shift(step3[1] * cospi_24_64 + step3[6] * cospi_8_64)
304  butterfly_two_coeff(s[6], s[1], cospi_24_64, cospi_8_64, &s[6], &s[1]);
305
306  // step2[2] = fdct_round_shift(step3[2] * cospi_24_64 + step3[5] * cospi_8_64)
307  // step2[5] = fdct_round_shift(step3[2] * cospi_8_64 - step3[5] * cospi_24_64)
308  butterfly_two_coeff(x[0], x[3], cospi_8_64, cospi_24_64, &s[2], &s[5]);
309
310  // step 5
311  step[0] = vaddq_s16(s[0], s[1]);
312  step[1] = vsubq_s16(s[0], s[1]);
313  step[2] = vaddq_s16(x[1], s[2]);
314  step[3] = vsubq_s16(x[1], s[2]);
315  step[4] = vsubq_s16(x[2], s[5]);
316  step[5] = vaddq_s16(x[2], s[5]);
317  step[6] = vsubq_s16(s[7], s[6]);
318  step[7] = vaddq_s16(s[7], s[6]);
319
320  // step 6
321  // out[1] = fdct_round_shift(step1[0] * cospi_30_64 + step1[7] * cospi_2_64)
322  // out[9] = fdct_round_shift(step1[1] * cospi_14_64 + step1[6] * cospi_18_64)
323  // out[5] = fdct_round_shift(step1[2] * cospi_22_64 + step1[5] * cospi_10_64)
324  // out[13] = fdct_round_shift(step1[3] * cospi_6_64 + step1[4] * cospi_26_64)
325  // out[3] = fdct_round_shift(step1[3] * -cospi_26_64 + step1[4] * cospi_6_64)
326  // out[11] = fdct_round_shift(step1[2] * -cospi_10_64 + step1[5] *
327  // cospi_22_64)
328  // out[7] = fdct_round_shift(step1[1] * -cospi_18_64 + step1[6] * cospi_14_64)
329  // out[15] = fdct_round_shift(step1[0] * -cospi_2_64 + step1[7] * cospi_30_64)
330  butterfly_two_coeff(step[6], step[1], cospi_14_64, cospi_18_64, &out[9],
331                      &out[7]);
332  butterfly_two_coeff(step[7], step[0], cospi_30_64, cospi_2_64, &out[1],
333                      &out[15]);
334  butterfly_two_coeff(step[4], step[3], cospi_6_64, cospi_26_64, &out[13],
335                      &out[3]);
336  butterfly_two_coeff(step[5], step[2], cospi_22_64, cospi_10_64, &out[5],
337                      &out[11]);
338}
339
340void vpx_fdct16x16_neon(const int16_t *input, tran_low_t *output, int stride) {
341  int16x8_t temp0[16];
342  int16x8_t temp1[16];
343  int16x8_t temp2[16];
344  int16x8_t temp3[16];
345
346  // Left half.
347  load(input, stride, temp0);
348  cross_input(temp0, temp1, 0);
349  dct_body(temp1, temp0);
350
351  // Right half.
352  load(input + 8, stride, temp1);
353  cross_input(temp1, temp2, 0);
354  dct_body(temp2, temp1);
355
356  // Transpose top left and top right quarters into one contiguous location to
357  // process to the top half.
358  transpose_8x8(&temp0[0], &temp2[0]);
359  transpose_8x8(&temp1[0], &temp2[8]);
360  partial_round_shift(temp2);
361  cross_input(temp2, temp3, 1);
362  dct_body(temp3, temp2);
363  transpose_s16_8x8(&temp2[0], &temp2[1], &temp2[2], &temp2[3], &temp2[4],
364                    &temp2[5], &temp2[6], &temp2[7]);
365  transpose_s16_8x8(&temp2[8], &temp2[9], &temp2[10], &temp2[11], &temp2[12],
366                    &temp2[13], &temp2[14], &temp2[15]);
367  store(output, temp2);
368  store(output + 8, temp2 + 8);
369  output += 8 * 16;
370
371  // Transpose bottom left and bottom right quarters into one contiguous
372  // location to process to the bottom half.
373  transpose_8x8(&temp0[8], &temp1[0]);
374  transpose_s16_8x8(&temp1[8], &temp1[9], &temp1[10], &temp1[11], &temp1[12],
375                    &temp1[13], &temp1[14], &temp1[15]);
376  partial_round_shift(temp1);
377  cross_input(temp1, temp0, 1);
378  dct_body(temp0, temp1);
379  transpose_s16_8x8(&temp1[0], &temp1[1], &temp1[2], &temp1[3], &temp1[4],
380                    &temp1[5], &temp1[6], &temp1[7]);
381  transpose_s16_8x8(&temp1[8], &temp1[9], &temp1[10], &temp1[11], &temp1[12],
382                    &temp1[13], &temp1[14], &temp1[15]);
383  store(output, temp1);
384  store(output + 8, temp1 + 8);
385}
386#endif  // !defined(__clang__) && !defined(__ANDROID__) && defined(__GNUC__) &&
387        // __GNUC__ == 4 && __GNUC_MINOR__ == 9 && __GNUC_PATCHLEVEL__ < 4
388