/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vp8_rtcd.h"

static const int16_t cospi8sqrt2minus1 = 20091;
// 35468 exceeds INT16_MAX, so storing it directly in an int16_t would wrap to
// a negative value. Because vqdmulh doubles its result, the constant can be
// halved beforehand instead. This avoids compensating for the negative value
// as well as an extra shift of the result.
static const int16_t sinpi8sqrt2 = 35468 >> 1;
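// A rough sketch of the arithmetic behind the halved constant: vqdmulh
// returns the high half of the doubled product, (2 * x * c) >> 16. With
// c = 35468 >> 1 = 17734 this is (x * 35468) >> 16, so the intended
// sinpi8sqrt2 scaling is obtained without ever storing 35468 in an int16_t.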

void vp8_short_idct4x4llm_neon(int16_t *input, unsigned char *pred_ptr,
                               int pred_stride, unsigned char *dst_ptr,
                               int dst_stride) {
  int i;
  uint32x2_t d6u32 = vdup_n_u32(0);
  uint8x8_t d1u8;
  int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
  uint16x8_t q1u16;
  int16x8_t q1s16, q2s16, q3s16, q4s16;
  int32x2x2_t v2tmp0, v2tmp1;
  int16x4x2_t v2tmp2, v2tmp3;

  d2 = vld1_s16(input);
  d3 = vld1_s16(input + 4);
  d4 = vld1_s16(input + 8);
  d5 = vld1_s16(input + 12);

  // First pass (corresponds to the first loop of the scalar version).
  q1s16 = vcombine_s16(d2, d4);  // d3 and d4 are swapped here: q1s16 holds
  q2s16 = vcombine_s16(d3, d5);  // rows 0 and 2, q2s16 holds rows 1 and 3.

  q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
  q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);

  d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
  d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1

  q4s16 = vshrq_n_s16(q4s16, 1);

  q4s16 = vqaddq_s16(q4s16, q2s16);
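  // q4s16 now holds roughly x + ((x * 20091) >> 16), i.e. x scaled by
  // cos(pi/8) * sqrt(2); the shift above halves the doubled vqdmulh result.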

  d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
  d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1

  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

  v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
                    vreinterpret_s16_s32(v2tmp1.val[0]));
  v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
                    vreinterpret_s16_s32(v2tmp1.val[1]));
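  // The vtrn_s32/vtrn_s16 pairs above transpose the 4x4 block of int16_t
  // coefficients so the second pass can again operate element-wise.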

  // Second pass (corresponds to the second loop of the scalar version).
  q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp3.val[0]);
  q2s16 = vcombine_s16(v2tmp2.val[1], v2tmp3.val[1]);

  q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
  q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);

  d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
  d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1

  q4s16 = vshrq_n_s16(q4s16, 1);

  q4s16 = vqaddq_s16(q4s16, q2s16);

  d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
  d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1

  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

  d2 = vrshr_n_s16(d2, 3);
  d3 = vrshr_n_s16(d3, 3);
  d4 = vrshr_n_s16(d4, 3);
  d5 = vrshr_n_s16(d5, 3);
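  // The four rounding shifts above compute (x + 4) >> 3, the transform's
  // final divide-by-8 with rounding.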

  v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
                    vreinterpret_s16_s32(v2tmp1.val[0]));
  v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
                    vreinterpret_s16_s32(v2tmp1.val[1]));

  q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp2.val[1]);
  q2s16 = vcombine_s16(v2tmp3.val[0], v2tmp3.val[1]);

  // Add the reconstructed residual to the prediction and store the result,
  // two rows (eight pixels) per iteration.
  for (i = 0; i < 2; i++, q1s16 = q2s16) {
    d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 0);
    pred_ptr += pred_stride;
    d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 1);
    pred_ptr += pred_stride;

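    // Zero-extend the eight predictor bytes to 16 bits and add them to the
    // residual; the s16/u16 reinterprets are harmless because the addition
    // gives the same bit pattern either way. vqmovun then saturates the
    // signed sums to the 0..255 pixel range.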
    q1u16 = vaddw_u8(vreinterpretq_u16_s16(q1s16), vreinterpret_u8_u32(d6u32));
    d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));

    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 0);
    dst_ptr += dst_stride;
    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 1);
    dst_ptr += dst_stride;
  }
  return;
}