/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

static const int16_t cospi8sqrt2minus1 = 20091;
static const int16_t sinpi8sqrt2       = 17734;
// sinpi8sqrt2 is nominally 35468 (0x8a8c); because its lowest bit is 0 it can
// be pre-shifted right by one to fit in an int16_t, and the doubling done by
// the saturating doubling multiply (vqdmulh) restores the lost factor of two.

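/* Dequantizes and inverse-transforms two horizontally adjacent 4x4
 * coefficient blocks in one pass, adding the result to the predictor pixels
 * at dst. q holds the 32 quantized coefficients (cleared as they are
 * consumed), dq the 16 dequantization factors shared by both blocks, and
 * stride is the pitch of dst in bytes. */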
void idct_dequant_full_2x_neon(
        int16_t *q,
        int16_t *dq,
        unsigned char *dst,
        int stride) {
    unsigned char *dst0, *dst1;
    int32x2_t d28, d29, d30, d31;
    int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
    int16x8_t qEmpty = vdupq_n_s16(0);
    int32x4x2_t q2tmp0, q2tmp1;
    int16x8x2_t q2tmp2, q2tmp3;
    int16x4_t dLow0, dLow1, dHigh0, dHigh1;

    d28 = d29 = d30 = d31 = vdup_n_s32(0);

    // load the dequantization factors (shared by both blocks)
    q0 = vld1q_s16(dq);
    dq += 8;
    q1 = vld1q_s16(dq);

    // load the quantized coefficients of both blocks, zeroing each
    // 8-coefficient chunk as it is consumed
    q2 = vld1q_s16(q);
    vst1q_s16(q, qEmpty);
    q += 8;
    q3 = vld1q_s16(q);
    vst1q_s16(q, qEmpty);
    q += 8;
    q4 = vld1q_s16(q);
    vst1q_s16(q, qEmpty);
    q += 8;
    q5 = vld1q_s16(q);
    vst1q_s16(q, qEmpty);

    // load the 4x8 predictor block from dst, four bytes per lane: dst0 walks
    // the left 4x4 block, dst1 the right one
    dst0 = dst;
    dst1 = dst + 4;
    d28 = vld1_lane_s32((const int32_t *)dst0, d28, 0);
    dst0 += stride;
    d28 = vld1_lane_s32((const int32_t *)dst1, d28, 1);
    dst1 += stride;
    d29 = vld1_lane_s32((const int32_t *)dst0, d29, 0);
    dst0 += stride;
    d29 = vld1_lane_s32((const int32_t *)dst1, d29, 1);
    dst1 += stride;

    d30 = vld1_lane_s32((const int32_t *)dst0, d30, 0);
    dst0 += stride;
    d30 = vld1_lane_s32((const int32_t *)dst1, d30, 1);
    dst1 += stride;
    d31 = vld1_lane_s32((const int32_t *)dst0, d31, 0);
    d31 = vld1_lane_s32((const int32_t *)dst1, d31, 1);

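    // dequantize: scale each coefficient by its dequantization factor; both
    // blocks use the same 16 factors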
    q2 = vmulq_s16(q2, q0);
    q3 = vmulq_s16(q3, q1);
    q4 = vmulq_s16(q4, q0);
    q5 = vmulq_s16(q5, q1);

    // vswp: interleave the two blocks so each q register holds the same row
    // of both (q2 = row 0, q4 = row 1, q3 = row 2, q5 = row 3)
    dLow0 = vget_low_s16(q2);
    dHigh0 = vget_high_s16(q2);
    dLow1 = vget_low_s16(q4);
    dHigh1 = vget_high_s16(q4);
    q2 = vcombine_s16(dLow0, dLow1);
    q4 = vcombine_s16(dHigh0, dHigh1);

    dLow0 = vget_low_s16(q3);
    dHigh0 = vget_high_s16(q3);
    dLow1 = vget_low_s16(q5);
    dHigh1 = vget_high_s16(q5);
    q3 = vcombine_s16(dLow0, dLow1);
    q5 = vcombine_s16(dHigh0, dHigh1);

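    // first pass: 4-point inverse DCT down the columns of both blocks.
    // vqdmulhq_n_s16 computes (2 * x * c) >> 16; for the pre-halved
    // sinpi8sqrt2 constant this yields (x * 35468) >> 16 directly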
    q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2);
    q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
    q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1);
    q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1);

    q10 = vqaddq_s16(q2, q3);
    q11 = vqsubq_s16(q2, q3);

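    // the cospi8sqrt2minus1 products were doubled by vqdmulh; halve them to
    // recover (x * 20091) >> 16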
    q8 = vshrq_n_s16(q8, 1);
    q9 = vshrq_n_s16(q9, 1);

    q4 = vqaddq_s16(q4, q8);
    q5 = vqaddq_s16(q5, q9);

    q2 = vqsubq_s16(q6, q5);
    q3 = vqaddq_s16(q7, q4);

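    // butterfly: q10/q11 hold row0 +/- row2 (a1/b1) and q2/q3 hold c1/d1;
    // the transformed rows are a1 + d1, b1 + c1, b1 - c1, a1 - d1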
    q4 = vqaddq_s16(q10, q3);
    q5 = vqaddq_s16(q11, q2);
    q6 = vqsubq_s16(q11, q2);
    q7 = vqsubq_s16(q10, q3);

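    // transpose both 4x4 blocks in-register (a 32-bit trn followed by a
    // 16-bit trn) so the second pass can work along the other dimension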
    q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
    q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
    q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
                       vreinterpretq_s16_s32(q2tmp1.val[0]));
    q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
                       vreinterpretq_s16_s32(q2tmp1.val[1]));

    // second pass: the same 4-point inverse DCT, now applied across rows
    q8  = vqdmulhq_n_s16(q2tmp2.val[1], sinpi8sqrt2);
    q9  = vqdmulhq_n_s16(q2tmp3.val[1], sinpi8sqrt2);
    q10 = vqdmulhq_n_s16(q2tmp2.val[1], cospi8sqrt2minus1);
    q11 = vqdmulhq_n_s16(q2tmp3.val[1], cospi8sqrt2minus1);

    q2 = vqaddq_s16(q2tmp2.val[0], q2tmp3.val[0]);
    q3 = vqsubq_s16(q2tmp2.val[0], q2tmp3.val[0]);

    q10 = vshrq_n_s16(q10, 1);
    q11 = vshrq_n_s16(q11, 1);

    q10 = vqaddq_s16(q2tmp2.val[1], q10);
    q11 = vqaddq_s16(q2tmp3.val[1], q11);

    q8 = vqsubq_s16(q8, q11);
    q9 = vqaddq_s16(q9, q10);

    q4 = vqaddq_s16(q2, q9);
    q5 = vqaddq_s16(q3, q8);
    q6 = vqsubq_s16(q3, q8);
    q7 = vqsubq_s16(q2, q9);

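    // final scaling: the VP8 inverse transform rounds and divides by 8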
    q4 = vrshrq_n_s16(q4, 3);
    q5 = vrshrq_n_s16(q5, 3);
    q6 = vrshrq_n_s16(q6, 3);
    q7 = vrshrq_n_s16(q7, 3);

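    // transpose back so the results are in raster order for the stores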
    q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
    q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
    q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
                       vreinterpretq_s16_s32(q2tmp1.val[0]));
    q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
                       vreinterpretq_s16_s32(q2tmp1.val[1]));

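    // reconstruct: widen the predictor bytes to 16 bits and add the residual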
    q4 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[0]),
                                        vreinterpret_u8_s32(d28)));
    q5 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[1]),
                                        vreinterpret_u8_s32(d29)));
    q6 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[0]),
                                        vreinterpret_u8_s32(d30)));
    q7 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[1]),
                                        vreinterpret_u8_s32(d31)));

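    // saturate the sums to the 0..255 pixel range and narrow back to bytes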
    d28 = vreinterpret_s32_u8(vqmovun_s16(q4));
    d29 = vreinterpret_s32_u8(vqmovun_s16(q5));
    d30 = vreinterpret_s32_u8(vqmovun_s16(q6));
    d31 = vreinterpret_s32_u8(vqmovun_s16(q7));

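    // store the reconstructed 4x8 region, four bytes per lane, mirroring the
    // earlier loads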
    dst0 = dst;
    dst1 = dst + 4;
    vst1_lane_s32((int32_t *)dst0, d28, 0);
    dst0 += stride;
    vst1_lane_s32((int32_t *)dst1, d28, 1);
    dst1 += stride;
    vst1_lane_s32((int32_t *)dst0, d29, 0);
    dst0 += stride;
    vst1_lane_s32((int32_t *)dst1, d29, 1);
    dst1 += stride;

    vst1_lane_s32((int32_t *)dst0, d30, 0);
    dst0 += stride;
    vst1_lane_s32((int32_t *)dst1, d30, 1);
    dst1 += stride;
    vst1_lane_s32((int32_t *)dst0, d31, 0);
    vst1_lane_s32((int32_t *)dst1, d31, 1);
}

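/* Empty main(): presumably a stub so this file can be compiled and linked as
 * a standalone translation unit rather than as part of the full decoder. */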
int main()
{
}