/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

static const int16_t cospi8sqrt2minus1 = 20091;
// 35468 (0x8a8c) does not fit in int16_t, so store the equivalent
// two's-complement value explicitly instead of relying on an
// implementation-defined narrowing conversion. The shift-and-add sequence
// after each vqdmulhq_n_s16() below compensates for the sign change.
static const int16_t sinpi8sqrt2 = -30068;  // 35468 - 65536

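// Inverse 4x4 DCT of the 16 coefficients in |input|, added to the 4x4
// prediction block read from |pred_ptr| (row stride |pred_stride|); the
// reconstructed pixels are saturated to [0, 255] and written to |dst_ptr|
// (row stride |dst_stride|).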
void vp8_short_idct4x4llm_neon(
        int16_t *input,
        unsigned char *pred_ptr,
        int pred_stride,
        unsigned char *dst_ptr,
        int dst_stride) {
    int i;
    uint32x2_t d6u32 = vdup_n_u32(0);
    uint8x8_t d1u8;
    int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
    uint16x8_t q1u16;
    int16x8_t q1s16, q2s16, q3s16, q4s16;
    int32x2x2_t v2tmp0, v2tmp1;
    int16x4x2_t v2tmp2, v2tmp3;

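    // Load the 16 coefficients as four rows of four.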
    d2 = vld1_s16(input);
    d3 = vld1_s16(input + 4);
    d4 = vld1_s16(input + 8);
    d5 = vld1_s16(input + 12);

    // First pass: vertical 1-D transform (element-wise across the row
    // vectors, i.e. down each column).
    q1s16 = vcombine_s16(d2, d4);  // rows 0 and 2
    q2s16 = vcombine_s16(d3, d5);  // rows 1 and 3

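    // vqdmulhq_n_s16(x, c) returns (2 * x * c) >> 16, saturated. Together
    // with the >> 1 and the add of q2s16 that follow, this yields the two
    // fixed-point products of the VP8 transform:
    //   (x * 35468) >> 16        (sinpi8sqrt2, via the wrapped constant)
    //   x + ((x * 20091) >> 16)  (i.e. x * cos(pi/8) * sqrt(2))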
    q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
    q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);

    d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
    d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1

    q3s16 = vshrq_n_s16(q3s16, 1);
    q4s16 = vshrq_n_s16(q4s16, 1);

    q3s16 = vqaddq_s16(q3s16, q2s16);
    q4s16 = vqaddq_s16(q4s16, q2s16);

    d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
    d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1

    d2 = vqadd_s16(d12, d11);
    d3 = vqadd_s16(d13, d10);
    d4 = vqsub_s16(d13, d10);
    d5 = vqsub_s16(d12, d11);

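    // Transpose the 4x4 block so the second pass can transform the rows
    // with the same element-wise butterfly.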
    v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
    v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
    v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
                      vreinterpret_s16_s32(v2tmp1.val[0]));
    v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
                      vreinterpret_s16_s32(v2tmp1.val[1]));

    // Second pass: horizontal 1-D transform of the transposed block.
    q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp3.val[0]);  // elements 0 and 2
    q2s16 = vcombine_s16(v2tmp2.val[1], v2tmp3.val[1]);  // elements 1 and 3

    q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
    q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);

    d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
    d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1

    q3s16 = vshrq_n_s16(q3s16, 1);
    q4s16 = vshrq_n_s16(q4s16, 1);

    q3s16 = vqaddq_s16(q3s16, q2s16);
    q4s16 = vqaddq_s16(q4s16, q2s16);

    d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
    d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1

    d2 = vqadd_s16(d12, d11);
    d3 = vqadd_s16(d13, d10);
    d4 = vqsub_s16(d13, d10);
    d5 = vqsub_s16(d12, d11);

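    // Final rounding shift of the inverse transform: (x + 4) >> 3.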
    d2 = vrshr_n_s16(d2, 3);
    d3 = vrshr_n_s16(d3, 3);
    d4 = vrshr_n_s16(d4, 3);
    d5 = vrshr_n_s16(d5, 3);

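    // Transpose back to raster order for the reconstruction stores.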
    v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
    v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
    v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
                      vreinterpret_s16_s32(v2tmp1.val[0]));
    v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
                      vreinterpret_s16_s32(v2tmp1.val[1]));

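    // Re-assemble: q1s16 holds residual rows 0-1, q2s16 rows 2-3.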
    q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp2.val[1]);
    q2s16 = vcombine_s16(v2tmp3.val[0], v2tmp3.val[1]);

    // Reconstruct: add the residual to the prediction, saturate, store.
    // The second iteration handles rows 2-3 via the q1s16 = q2s16 update.
    for (i = 0; i < 2; i++, q1s16 = q2s16) {
        // Load two 4-pixel prediction rows into the two lanes of d6u32.
        d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 0);
        pred_ptr += pred_stride;
        d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 1);
        pred_ptr += pred_stride;

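        // Widen the prediction to 16 bits, add the residual, then saturate
        // back to 8 bits.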
        q1u16 = vaddw_u8(vreinterpretq_u16_s16(q1s16),
                         vreinterpret_u8_u32(d6u32));
        d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));

        // Store the two reconstructed 4-pixel rows.
        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 0);
        dst_ptr += dst_stride;
        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 1);
        dst_ptr += dst_stride;
    }
}