/******************************************************************************
 *
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *****************************************************************************
 * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
*/
/**
 *******************************************************************************
 * @file
 *  ih264_ihadamard_scaling_ssse3.c
 *
 * @brief
 *  Contains the definition of functions for the H.264 4x4 inverse Hadamard
 *  transform and scaling
 *
 * @author
 *  Mohit
 *
 *  @par List of Functions:
 *  - ih264_ihadamard_scaling_4x4_ssse3()
 *
 * @remarks
 *
 *******************************************************************************
 */
/*****************************************************************************/
/* File Includes                                                             */
/*****************************************************************************/

/* User include files */
#include "ih264_typedefs.h"
#include "ih264_defs.h"
#include "ih264_trans_macros.h"
#include "ih264_macros.h"
#include "ih264_trans_data.h"
#include "ih264_size_defs.h"
#include "ih264_structs.h"
#include "ih264_trans_quant_itrans_iquant.h"
#include <immintrin.h>
/*
 ********************************************************************************
 *
 * @brief This function performs a 4x4 inverse Hadamard transform on the 4x4 DC
 *  coefficients of a 16x16 intra prediction macroblock, and then scales the
 *  result.
 *
 * @par Description:
 *  The DC coefficients pass through a two-stage inverse Hadamard transform.
 *  The inverse transformed values are then scaled based on the QP value.
 *
 * @param[in] pi2_src
 *  input 4x4 block of DC coefficients
 *
 * @param[out] pi2_out
 *  output 4x4 block
 *
 * @param[in] pu2_iscal_mat
 *  pointer to scaling list
 *
 * @param[in] pu2_weigh_mat
 *  pointer to weight matrix
 *
 * @param[in] u4_qp_div_6
 *  floor(qp / 6)
 *
 * @param[in] pi4_tmp
 *  temporary buffer of size 1*16 (unused in this implementation)
 *
 * @returns none
 *
 * @remarks none
 *
 *******************************************************************************
 */
void ih264_ihadamard_scaling_4x4_ssse3(WORD16 *pi2_src,
                                       WORD16 *pi2_out,
                                       const UWORD16 *pu2_iscal_mat,
                                       const UWORD16 *pu2_weigh_mat,
                                       UWORD32 u4_qp_div_6,
                                       WORD32 *pi4_tmp)
{
    int val = 0xFFFF;
    __m128i src_r0_r1, src_r2_r3, sign_reg, zero_8x16b = _mm_setzero_si128();
    __m128i src_r0, src_r1, src_r2, src_r3;
    __m128i temp0, temp1, temp2, temp3;
    /* Rounding offset for the down-shift path; only needed when u4_qp_div_6 < 6,
       so guard the shift to avoid an out-of-range shift count otherwise */
    __m128i add_rshift = _mm_set1_epi32((u4_qp_div_6 < 6) ? (1 << (5 - u4_qp_div_6)) : 0);
    __m128i mult_val = _mm_set1_epi32(pu2_iscal_mat[0] * pu2_weigh_mat[0]);

    __m128i mask = _mm_set1_epi32(val);
    UNUSED(pi4_tmp);

    mult_val = _mm_and_si128(mult_val, mask);
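    /* mult_val now carries (pu2_iscal_mat[0] * pu2_weigh_mat[0]) in the low 16
     * bits of each 32-bit lane, ready for the _mm_madd_epi16 multiplies below
     * (this assumes the product fits in a signed 16-bit value). */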

    src_r0_r1 = _mm_loadu_si128((__m128i *) (pi2_src)); //a00 a01 a02 a03 a10 a11 a12 a13 -- the source matrix 0th,1st row
    src_r2_r3 = _mm_loadu_si128((__m128i *) (pi2_src + 8)); //a20 a21 a22 a23 a30 a31 a32 a33 -- the source matrix 2nd,3rd row
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, src_r0_r1);
    src_r0 = _mm_unpacklo_epi16(src_r0_r1, sign_reg);
    src_r1 = _mm_unpackhi_epi16(src_r0_r1, sign_reg);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, src_r2_r3);
    src_r2 = _mm_unpacklo_epi16(src_r2_r3, sign_reg);
    src_r3 = _mm_unpackhi_epi16(src_r2_r3, sign_reg);
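    /* src_r0..src_r3 now hold rows 0..3 of the 4x4 DC block, with each 16-bit
     * coefficient sign-extended to 32 bits */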

    /* Perform the inverse transform */
    /*-------------------------------------------------------------*/
    /* Inverse Hadamard [ Horizontal transformation ]              */
    /*-------------------------------------------------------------*/
    // Matrix transpose
    /*
     *  a0 a1 a2 a3
     *  b0 b1 b2 b3
     *  c0 c1 c2 c3
     *  d0 d1 d2 d3
     */
    temp0 = _mm_unpacklo_epi32(src_r0, src_r1);                  //a0 b0 a1 b1
    temp2 = _mm_unpacklo_epi32(src_r2, src_r3);                  //c0 d0 c1 d1
    temp1 = _mm_unpackhi_epi32(src_r0, src_r1);                  //a2 b2 a3 b3
    temp3 = _mm_unpackhi_epi32(src_r2, src_r3);                  //c2 d2 c3 d3
    src_r0 = _mm_unpacklo_epi64(temp0, temp2);                   //a0 b0 c0 d0
    src_r1 = _mm_unpackhi_epi64(temp0, temp2);                   //a1 b1 c1 d1
    src_r2 = _mm_unpacklo_epi64(temp1, temp3);                   //a2 b2 c2 d2
    src_r3 = _mm_unpackhi_epi64(temp1, temp3);                   //a3 b3 c3 d3

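    /* Inverse Hadamard butterfly along each row: after the transpose above,
     * lane k of src_r0..src_r3 holds x0..x3 of row k, and the butterfly yields
     * y0 = x0 + x1 + x2 + x3
     * y1 = x0 + x1 - x2 - x3
     * y2 = x0 - x1 - x2 + x3
     * y3 = x0 - x1 + x2 - x3
     */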
    temp0 = _mm_add_epi32(src_r0, src_r3);
    temp1 = _mm_add_epi32(src_r1, src_r2);
    temp2 = _mm_sub_epi32(src_r1, src_r2);
    temp3 = _mm_sub_epi32(src_r0, src_r3);

    src_r0 = _mm_add_epi32(temp0, temp1);
    src_r1 = _mm_add_epi32(temp2, temp3);
    src_r2 = _mm_sub_epi32(temp0, temp1);
    src_r3 = _mm_sub_epi32(temp3, temp2);

    /*-------------------------------------------------------------*/
    /* Inverse Hadamard [ Vertical transformation ]                */
    /*-------------------------------------------------------------*/
    // Matrix transpose
    /*
     *  a0 b0 c0 d0
     *  a1 b1 c1 d1
     *  a2 b2 c2 d2
     *  a3 b3 c3 d3
     */
    temp0 = _mm_unpacklo_epi32(src_r0, src_r1);                  //a0 a1 b0 b1
    temp2 = _mm_unpacklo_epi32(src_r2, src_r3);                  //a2 a3 b2 b3
    temp1 = _mm_unpackhi_epi32(src_r0, src_r1);                  //c0 c1 d0 d1
    temp3 = _mm_unpackhi_epi32(src_r2, src_r3);                  //c2 c3 d2 d3
    src_r0 = _mm_unpacklo_epi64(temp0, temp2);                   //a0 a1 a2 a3
    src_r1 = _mm_unpackhi_epi64(temp0, temp2);                   //b0 b1 b2 b3
    src_r2 = _mm_unpacklo_epi64(temp1, temp3);                   //c0 c1 c2 c3
    src_r3 = _mm_unpackhi_epi64(temp1, temp3);                   //d0 d1 d2 d3

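    /* Second inverse Hadamard butterfly, along each column: after this
     * transpose, lane j of src_r0..src_r3 holds the four values of column j,
     * and the butterfly equations are the same as in the row pass above */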
    temp0 = _mm_add_epi32(src_r0, src_r3);
    temp1 = _mm_add_epi32(src_r1, src_r2);
    temp2 = _mm_sub_epi32(src_r1, src_r2);
    temp3 = _mm_sub_epi32(src_r0, src_r3);

    src_r0 = _mm_add_epi32(temp0, temp1);
    src_r1 = _mm_add_epi32(temp2, temp3);
    src_r2 = _mm_sub_epi32(temp0, temp1);
    src_r3 = _mm_sub_epi32(temp3, temp2);

    src_r0 = _mm_and_si128(src_r0, mask);
    src_r1 = _mm_and_si128(src_r1, mask);
    src_r2 = _mm_and_si128(src_r2, mask);
    src_r3 = _mm_and_si128(src_r3, mask);

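    /* Because the high 16 bits of every 32-bit lane are zero in both operands,
     * _mm_madd_epi16 below leaves each lane holding
     * coefficient * (pu2_iscal_mat[0] * pu2_weigh_mat[0]),
     * assuming both values fit in a signed 16-bit range. */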
    src_r0 = _mm_madd_epi16(src_r0, mult_val);
    src_r1 = _mm_madd_epi16(src_r1, mult_val);
    src_r2 = _mm_madd_epi16(src_r2, mult_val);
    src_r3 = _mm_madd_epi16(src_r3, mult_val);

    //Scaling: scale up when u4_qp_div_6 >= 6, otherwise round and shift down
    if(u4_qp_div_6 >= 6)
    {
        src_r0 = _mm_slli_epi32(src_r0, u4_qp_div_6 - 6);
        src_r1 = _mm_slli_epi32(src_r1, u4_qp_div_6 - 6);
        src_r2 = _mm_slli_epi32(src_r2, u4_qp_div_6 - 6);
        src_r3 = _mm_slli_epi32(src_r3, u4_qp_div_6 - 6);
    }
    else
    {
        temp0 = _mm_add_epi32(src_r0, add_rshift);
        temp1 = _mm_add_epi32(src_r1, add_rshift);
        temp2 = _mm_add_epi32(src_r2, add_rshift);
        temp3 = _mm_add_epi32(src_r3, add_rshift);
        src_r0 = _mm_srai_epi32(temp0, 6 - u4_qp_div_6);
        src_r1 = _mm_srai_epi32(temp1, 6 - u4_qp_div_6);
        src_r2 = _mm_srai_epi32(temp2, 6 - u4_qp_div_6);
        src_r3 = _mm_srai_epi32(temp3, 6 - u4_qp_div_6);
    }
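    /* Saturate each 32-bit result to 16 bits and store the 4x4 output block */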
    src_r0_r1 = _mm_packs_epi32(src_r0, src_r1);
    src_r2_r3 = _mm_packs_epi32(src_r2, src_r3);

    _mm_storeu_si128((__m128i *) (&pi2_out[0]), src_r0_r1);
    _mm_storeu_si128((__m128i *) (&pi2_out[8]), src_r2_r3);
}
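
/*
 * For illustration only: a minimal scalar sketch of the computation performed
 * by ih264_ihadamard_scaling_4x4_ssse3() above. The helper below is
 * hypothetical (not part of the library API) and assumes the same argument
 * semantics: a row-major 4x4 block of DC coefficients and a DC dequant factor
 * taken from pu2_iscal_mat[0] and pu2_weigh_mat[0]. It omits the unused
 * pi4_tmp argument and, unlike the SIMD path, it casts rather than saturates
 * the final result. Such a sketch can be used to cross-check the SIMD output
 * in unit tests.
 */
static void ihadamard_scaling_4x4_scalar_sketch(const WORD16 *pi2_src,
                                                WORD16 *pi2_out,
                                                const UWORD16 *pu2_iscal_mat,
                                                const UWORD16 *pu2_weigh_mat,
                                                UWORD32 u4_qp_div_6)
{
    WORD32 tmp[16];
    WORD32 i;

    /* First pass: 1-D inverse Hadamard along each row */
    for(i = 0; i < 4; i++)
    {
        WORD32 x0 = pi2_src[4 * i + 0];
        WORD32 x1 = pi2_src[4 * i + 1];
        WORD32 x2 = pi2_src[4 * i + 2];
        WORD32 x3 = pi2_src[4 * i + 3];
        WORD32 t0 = x0 + x3;
        WORD32 t1 = x1 + x2;
        WORD32 t2 = x1 - x2;
        WORD32 t3 = x0 - x3;

        tmp[4 * i + 0] = t0 + t1;
        tmp[4 * i + 1] = t2 + t3;
        tmp[4 * i + 2] = t0 - t1;
        tmp[4 * i + 3] = t3 - t2;
    }

    /* Second pass: 1-D inverse Hadamard along each column, then scaling */
    for(i = 0; i < 4; i++)
    {
        WORD32 x0 = tmp[i + 0];
        WORD32 x1 = tmp[i + 4];
        WORD32 x2 = tmp[i + 8];
        WORD32 x3 = tmp[i + 12];
        WORD32 t0 = x0 + x3;
        WORD32 t1 = x1 + x2;
        WORD32 t2 = x1 - x2;
        WORD32 t3 = x0 - x3;
        WORD32 y[4];
        WORD32 j;

        y[0] = t0 + t1;
        y[1] = t2 + t3;
        y[2] = t0 - t1;
        y[3] = t3 - t2;

        /* Multiply by the DC dequant factor and apply the qP-dependent shift */
        for(j = 0; j < 4; j++)
        {
            WORD32 q = y[j] * (WORD32)(pu2_iscal_mat[0] * pu2_weigh_mat[0]);

            if(u4_qp_div_6 >= 6)
            {
                /* Multiply instead of '<<' to stay well defined for negative q */
                pi2_out[4 * j + i] = (WORD16)(q * (1 << (u4_qp_div_6 - 6)));
            }
            else
            {
                pi2_out[4 * j + i] = (WORD16)((q + (1 << (5 - u4_qp_div_6)))
                                              >> (6 - u4_qp_div_6));
            }
        }
    }
}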