/******************************************************************************
 *
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *****************************************************************************
 * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
*/
/**
 *******************************************************************************
 * @file
 *  ih264_iquant_itrans_recon_ssse3.c
 *
 * @brief
 *  Contains function definitions for inverse quantization, inverse
 * transform and reconstruction
 *
 * @author
 *  Mohit [100664]
 *
 * @par List of Functions:
 *  - ih264_iquant_itrans_recon_4x4_ssse3()
 *  - ih264_iquant_itrans_recon_8x8_ssse3()
 *
 * @remarks
 *  None
 *
 *******************************************************************************
 */
/* User include files */
#include "ih264_typedefs.h"
#include "ih264_defs.h"
#include "ih264_trans_macros.h"
#include "ih264_macros.h"
#include "ih264_platform_macros.h"
#include "ih264_trans_data.h"
#include "ih264_size_defs.h"
#include "ih264_structs.h"
#include "ih264_trans_quant_itrans_iquant.h"
#include <immintrin.h>

/**
 ********************************************************************************
 *
 * @brief This function reconstructs a 4x4 sub block from quantized residue and
 * prediction buffer
 *
 * @par Description:
 *  The quantized residue is first inverse quantized, then inverse transformed.
 *  This inverse transformed content is added to the prediction buffer to recon-
 *  struct the end output
 *
 * @param[in] pi2_src
 *  quantized 4x4 block
 *
 * @param[in] pu1_pred
 *  prediction 4x4 block
 *
 * @param[out] pu1_out
 *  reconstructed 4x4 block
 *
 * @param[in] pred_strd
 *  prediction buffer stride
 *
 * @param[in] out_strd
 *  recon buffer stride
 *
 * @param[in] pu2_iscal_mat
 *  pointer to the inverse scaling matrix
 *
 * @param[in] pu2_weigh_mat
 *  pointer to the weight matrix
 *
 * @param[in] u4_qp_div_6
 *  Floor (qp/6)
 *
 * @param[in] pi2_tmp
 *  temporary buffer of size 1*16 (unused in this SIMD path)
 *
 * @param[in] iq_start_idx
 *  when 1, the DC coefficient is taken directly from pi2_src[0] instead of
 *  being inverse quantized
 *
 * @param[in] pi2_dc_ld_addr
 *  pointer to the inverse transformed DC value (unused in this SIMD path)
 *
 * @returns none
 *
 * @remarks none
 *
 *******************************************************************************
 */
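/*
 * Illustrative scalar sketch of the dequant stage vectorized below (not part
 * of the build; names mirror the function arguments):
 *
 *   for(i = 0; i < 16; i++)
 *   {
 *       WORD32 q = pi2_src[i] * pu2_iscal_mat[i] * pu2_weigh_mat[i];
 *       if(u4_qp_div_6 >= 4)
 *           q <<= (u4_qp_div_6 - 4);
 *       else
 *           q = (q + (1 << (3 - u4_qp_div_6))) >> (4 - u4_qp_div_6);
 *   }
 */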
void ih264_iquant_itrans_recon_4x4_ssse3(WORD16 *pi2_src,
                                         UWORD8 *pu1_pred,
                                         UWORD8 *pu1_out,
                                         WORD32 pred_strd,
                                         WORD32 out_strd,
                                         const UWORD16 *pu2_iscal_mat,
                                         const UWORD16 *pu2_weigh_mat,
                                         UWORD32 u4_qp_div_6,
                                         WORD16 *pi2_tmp,
                                         WORD32 iq_start_idx,
                                         WORD16 *pi2_dc_ld_addr)
{
    UWORD32 *pu4_out = (UWORD32 *) pu1_out;
    __m128i src_r0_r1, src_r2_r3;
    __m128i src_r0, src_r1, src_r2, src_r3;
    __m128i scalemat_r0_r1, scalemat_r2_r3, predload_r;
    __m128i pred_r0, pred_r1, pred_r2, pred_r3;
    __m128i sign_reg, dequant_r0_r1, dequant_r2_r3;
    __m128i zero_8x16b = _mm_setzero_si128();          // all bits reset to zero
    __m128i temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
    __m128i resq_r0, resq_r1, resq_r2, resq_r3;
    /* Rounding term for the dequant right shift; guard the shift amount so it
       is never evaluated as a negative value when u4_qp_div_6 >= 4 */
    __m128i add_rshift =
        _mm_set1_epi32((u4_qp_div_6 < 4) ? (1 << (3 - u4_qp_div_6)) : 0);
    __m128i value_32 = _mm_set1_epi32(32);
    UNUSED (pi2_tmp);
    UNUSED (pi2_dc_ld_addr);

    /*************************************************************/
    /* Dequantization of coefficients. Will be replaced by SIMD  */
    /* operations on platform                                    */
    /*************************************************************/
    src_r0_r1 = _mm_loadu_si128((__m128i *) (pi2_src)); //a00 a01 a02 a03 a10 a11 a12 a13 -- the source matrix 0th,1st row
    src_r2_r3 = _mm_loadu_si128((__m128i *) (pi2_src + 8)); //a20 a21 a22 a23 a30 a31 a32 a33 -- the source matrix 2nd,3rd row
    scalemat_r0_r1 = _mm_loadu_si128((__m128i *) (pu2_iscal_mat)); //b00 b01 b02 b03 b10 b11 b12 b13 -- the scaling matrix 0th,1st row
    scalemat_r2_r3 = _mm_loadu_si128((__m128i *) (pu2_iscal_mat + 8)); //b20 b21 b22 b23 b30 b31 b32 b33 -- the scaling matrix 2nd,3rd row
    dequant_r0_r1 = _mm_loadu_si128((__m128i *) (pu2_weigh_mat)); //q00 q01 q02 q03 q10 q11 q12 q13 -- all 16 bits
    dequant_r2_r3 = _mm_loadu_si128((__m128i *) (pu2_weigh_mat + 8)); //q20 q21 q22 q23 q30 q31 q32 q33 -- all 16 bits

    temp0 = _mm_mullo_epi16(scalemat_r0_r1, dequant_r0_r1); //b00*q00 b01*q01 b02*q02 b03*q03 b10*q10 b11*q11 b12*q12 b13*q13 -- 16 bit result
    temp1 = _mm_mullo_epi16(scalemat_r2_r3, dequant_r2_r3); //b20*q20 b21*q21 b22*q22 b23*q23 b30*q30 b31*q31 b32*q32 b33*q33 -- 16 bit result

    temp4 = _mm_unpacklo_epi16(temp0, zero_8x16b); // b00*q00 0 b01*q01 0 b02*q02 0 b03*q03 0 -- 16 bit long
    temp5 = _mm_unpackhi_epi16(temp0, zero_8x16b); // b10*q10 0 b11*q11 0 b12*q12 0 b13*q13 0 -- 16 bit long
    temp6 = _mm_unpacklo_epi16(temp1, zero_8x16b); // b20*q20 0 b21*q21 0 b22*q22 0 b23*q23 0 -- 16 bit long
    temp7 = _mm_unpackhi_epi16(temp1, zero_8x16b); // b30*q30 0 b31*q31 0 b32*q32 0 b33*q33 0 -- 16 bit long

    src_r0 = _mm_unpacklo_epi16(src_r0_r1, zero_8x16b); // a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r1 = _mm_unpackhi_epi16(src_r0_r1, zero_8x16b); // a10 0 a11 0 a12 0 a13 0 -- 16 bit long
    src_r2 = _mm_unpacklo_epi16(src_r2_r3, zero_8x16b); // a20 0 a21 0 a22 0 a23 0 -- 16 bit long
    src_r3 = _mm_unpackhi_epi16(src_r2_r3, zero_8x16b); // a30 0 a31 0 a32 0 a33 0 -- 16 bit long

    temp4 = _mm_madd_epi16(src_r0, temp4); //a00*b00*q00 a01*b01*q01 a02*b02*q02 a03*b03*q03 -- 32 bits long
    temp5 = _mm_madd_epi16(src_r1, temp5);
    temp6 = _mm_madd_epi16(src_r2, temp6);
    temp7 = _mm_madd_epi16(src_r3, temp7);

    if (u4_qp_div_6 >= 4) {
        resq_r0 = _mm_slli_epi32(temp4, u4_qp_div_6 - 4);
        resq_r1 = _mm_slli_epi32(temp5, u4_qp_div_6 - 4);
        resq_r2 = _mm_slli_epi32(temp6, u4_qp_div_6 - 4);
        resq_r3 = _mm_slli_epi32(temp7, u4_qp_div_6 - 4);
    } else {
        temp4 = _mm_add_epi32(temp4, add_rshift);
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp6 = _mm_add_epi32(temp6, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r0 = _mm_srai_epi32(temp4, 4 - u4_qp_div_6);
        resq_r1 = _mm_srai_epi32(temp5, 4 - u4_qp_div_6);
        resq_r2 = _mm_srai_epi32(temp6, 4 - u4_qp_div_6);
        resq_r3 = _mm_srai_epi32(temp7, 4 - u4_qp_div_6);
    }

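    /* When iq_start_idx == 1 the DC coefficient bypasses inverse quantization
       (it is expected to arrive in pi2_src[0] already processed): the low
       16 bits of lane 0 take the value and the high 16 bits its sign, forming
       a sign-extended 32-bit lane. */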
    if (iq_start_idx == 1)
    {
        resq_r0 = _mm_insert_epi16(resq_r0,(WORD32)pi2_src[0],0);
        if (pi2_src[0] >= 0)
            resq_r0 = _mm_insert_epi16(resq_r0,0,1);
        else
            resq_r0 = _mm_insert_epi16(resq_r0,-1,1);
    }
    /* Perform Inverse transform */
    /*-------------------------------------------------------------*/
    /* IDCT [ Horizontal transformation ]                          */
    /*-------------------------------------------------------------*/
    // Matrix transpose
    /*
     *  a0 a1 a2 a3
     *  b0 b1 b2 b3
     *  c0 c1 c2 c3
     *  d0 d1 d2 d3
     */
    temp1 = _mm_unpacklo_epi32(resq_r0, resq_r1);                  //a0 b0 a1 b1
    temp3 = _mm_unpacklo_epi32(resq_r2, resq_r3);                  //c0 d0 c1 d1
    temp2 = _mm_unpackhi_epi32(resq_r0, resq_r1);                  //a2 b2 a3 b3
    temp4 = _mm_unpackhi_epi32(resq_r2, resq_r3);                  //c2 d2 c3 d3
    resq_r0 = _mm_unpacklo_epi64(temp1, temp3);                    //a0 b0 c0 d0
    resq_r1 = _mm_unpackhi_epi64(temp1, temp3);                    //a1 b1 c1 d1
    resq_r2 = _mm_unpacklo_epi64(temp2, temp4);                    //a2 b2 c2 d2
    resq_r3 = _mm_unpackhi_epi64(temp2, temp4);                    //a3 b3 c3 d3
    //Transform starts -- horizontal transform
    /*------------------------------------------------------------------*/
    /* z0 = w0 + w2                                             */
    temp0 = _mm_add_epi32(resq_r0, resq_r2);
    /* z1 = w0 - w2                                             */
    temp1 = _mm_sub_epi32(resq_r0, resq_r2);
    /* z2 = (w1 >> 1) - w3                                      */
    temp2 = _mm_srai_epi32(resq_r1, 1);                         //(w1>>1)
    temp2 = _mm_sub_epi32(temp2, resq_r3);                      //(w1>>1) - w3
    /* z3 = w1 + (w3 >> 1)                                      */
    temp3 = _mm_srai_epi32(resq_r3, 1);                         //(w3>>1)
    temp3 = _mm_add_epi32(temp3, resq_r1);                      //(w3>>1) + w1
    /*----------------------------------------------------------*/
    /* x0 = z0 + z3                                             */
    resq_r0 = _mm_add_epi32(temp0, temp3);
    /* x1 = z1 + z2                                             */
    resq_r1 = _mm_add_epi32(temp1, temp2);
    /* x2 = z1 - z2                                             */
    resq_r2 = _mm_sub_epi32(temp1, temp2);
    /* x3 = z0 - z3                                             */
    resq_r3 = _mm_sub_epi32(temp0, temp3);
    // Matrix transpose
    /*
     *  a0 b0 c0 d0
     *  a1 b1 c1 d1
     *  a2 b2 c2 d2
     *  a3 b3 c3 d3
     */
    temp1 = _mm_unpacklo_epi32(resq_r0, resq_r1);                  //a0 a1 b0 b1
    temp3 = _mm_unpacklo_epi32(resq_r2, resq_r3);                  //a2 a3 b2 b3
    temp2 = _mm_unpackhi_epi32(resq_r0, resq_r1);                  //c0 c1 d0 d1
    temp4 = _mm_unpackhi_epi32(resq_r2, resq_r3);                  //c2 c3 d2 d3
    resq_r0 = _mm_unpacklo_epi64(temp1, temp3);                    //a0 a1 a2 a3
    resq_r1 = _mm_unpackhi_epi64(temp1, temp3);                    //b0 b1 b2 b3
    resq_r2 = _mm_unpacklo_epi64(temp2, temp4);                    //c0 c1 c2 c3
    resq_r3 = _mm_unpackhi_epi64(temp2, temp4);                    //d0 d1 d2 d3
    //Transform ends -- horizontal transform

    zero_8x16b = _mm_setzero_si128();                  // all bits reset to zero
    //Load pred buffer
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[0])); //p00 p01 p02 p03 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r0 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p00 p01 p02 p03 0 0 0 0 -- all 16 bits

    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[pred_strd])); //p10 p11 p12 p13 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r1 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p10 p11 p12 p13 0 0 0 0 -- all 16 bits

    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[2 * pred_strd])); //p20 p21 p22 p23 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r2 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p20 p21 p22 p23 0 0 0 0 -- all 16 bits

    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[3 * pred_strd])); //p30 p31 p32 p33 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r3 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p30 p31 p32 p33 0 0 0 0 -- all 16 bits
    pred_r0 = _mm_unpacklo_epi16(pred_r0, zero_8x16b); //p00 p01 p02 p03 -- 32 bits zero extended
    pred_r1 = _mm_unpacklo_epi16(pred_r1, zero_8x16b); //p10 p11 p12 p13 -- 32 bits zero extended
    pred_r2 = _mm_unpacklo_epi16(pred_r2, zero_8x16b); //p20 p21 p22 p23 -- 32 bits zero extended
    pred_r3 = _mm_unpacklo_epi16(pred_r3, zero_8x16b); //p30 p31 p32 p33 -- 32 bits zero extended

    /*--------------------------------------------------------------*/
    /* IDCT [ Vertical transformation] and Xij = (xij + 32)>>6      */
    /*                                                              */
    /* Add the prediction and store it back to same buffer          */
    /*--------------------------------------------------------------*/
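    /* Scalar sketch of the rounding and reconstruction performed below
       (illustrative; CLIP3 just denotes clamping to the given range):
       pu1_out[i][j] = CLIP3(0, 255, ((x[i][j] + 32) >> 6) + pu1_pred[i][j]) */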
    /* z0j = y0j + y2j                                                        */
    temp0 = _mm_add_epi32(resq_r0, resq_r2);
    /* z1j = y0j - y2j                                                        */
    temp1 = _mm_sub_epi32(resq_r0, resq_r2);
    /* z2j = (y1j>>1) - y3j                                                   */
    temp2 = _mm_srai_epi32(resq_r1, 1);                             //(y1j>>1)
    temp2 = _mm_sub_epi32(temp2, resq_r3);
    /* z3j = y1j + (y3j>>1)                                                   */
    temp3 = _mm_srai_epi32(resq_r3, 1);                             //(y3j>>1)
    temp3 = _mm_add_epi32(temp3, resq_r1);

    /* x0j = z0j + z3j                                                        */
    temp4 = _mm_add_epi32(temp0, temp3);
    temp4 = _mm_add_epi32(temp4, value_32);
    temp4 = _mm_srai_epi32(temp4, 6);
    temp4 = _mm_add_epi32(temp4, pred_r0);
    /* x1j = z1j + z2j                                                        */
    temp5 = _mm_add_epi32(temp1, temp2);
    temp5 = _mm_add_epi32(temp5, value_32);
    temp5 = _mm_srai_epi32(temp5, 6);
    temp5 = _mm_add_epi32(temp5, pred_r1);
    /* x2j = z1j - z2j                                                        */
    temp6 = _mm_sub_epi32(temp1, temp2);
    temp6 = _mm_add_epi32(temp6, value_32);
    temp6 = _mm_srai_epi32(temp6, 6);
    temp6 = _mm_add_epi32(temp6, pred_r2);
    /* x3j = z0j - z3j                                                        */
    temp7 = _mm_sub_epi32(temp0, temp3);
    temp7 = _mm_add_epi32(temp7, value_32);
    temp7 = _mm_srai_epi32(temp7, 6);
    temp7 = _mm_add_epi32(temp7, pred_r3);

    // 32-bit to 16-bit conversion
    temp0 = _mm_packs_epi32(temp4, temp5);
    temp1 = _mm_packs_epi32(temp6, temp7);
    /*------------------------------------------------------------------*/
    //Clipping the results to 8 bits
    sign_reg = _mm_cmpgt_epi16(temp0, zero_8x16b);      // sign check
    temp0 = _mm_and_si128(temp0, sign_reg);
    sign_reg = _mm_cmpgt_epi16(temp1, zero_8x16b);
    temp1 = _mm_and_si128(temp1, sign_reg);
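    /* The compare-and-mask above forces negative samples to zero; the unsigned
       pack below saturates anything above 255, completing the clip to [0, 255] */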

    resq_r0 = _mm_packus_epi16(temp0, temp1);
    resq_r1 = _mm_srli_si128(resq_r0, 4);
    resq_r2 = _mm_srli_si128(resq_r1, 4);
    resq_r3 = _mm_srli_si128(resq_r2, 4);

    *pu4_out = _mm_cvtsi128_si32(resq_r0);
    pu1_out += out_strd;
    pu4_out = (UWORD32 *) (pu1_out);
    *(pu4_out) = _mm_cvtsi128_si32(resq_r1);
    pu1_out += out_strd;
    pu4_out = (UWORD32 *) (pu1_out);
    *(pu4_out) = _mm_cvtsi128_si32(resq_r2);
    pu1_out += out_strd;
    pu4_out = (UWORD32 *) (pu1_out);
    *(pu4_out) = _mm_cvtsi128_si32(resq_r3);
}
/**
 *******************************************************************************
 *
 * @brief
 *  This function performs inverse quant and inverse transform type Ci8 on an
 *  8x8 block
 *
 * @par Description:
 *  Performs inverse transform Ci8 and adds the residue to get the
 *  reconstructed block
 *
 * @param[in] pi2_src
 *  Input 8x8 coefficients
 *
 * @param[in] pu1_pred
 *  Prediction 8x8 block
 *
 * @param[out] pu1_out
 *  Output 8x8 block
 *
 * @param[in] pred_strd
 *  Prediction stride
 *
 * @param[in] out_strd
 *  Output stride
 *
 * @param[in] pu2_iscale_mat
 *  Pointer to the inverse scaling matrix
 *
 * @param[in] pu2_weigh_mat
 *  Pointer to the weight matrix
 *
 * @param[in] qp_div
 *  QP/6
 *
 * @param[in] pi2_tmp
 *  temporary buffer of size 1*64 (unused in this SIMD path)
 *
 * @param[in] iq_start_idx
 *  unused in this function
 *
 * @param[in] pi2_dc_ld_addr
 *  unused in this function
 *
 * @returns  Void
 *
 * @remarks
 *  None
 *
 *******************************************************************************
 */
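/*
 * Illustrative scalar sketch of the 8x8 dequant vectorized below (not part of
 * the build; the shift base here is 6, against 4 in the 4x4 case):
 *
 *   for(i = 0; i < 64; i++)
 *   {
 *       WORD32 q = pi2_src[i] * pu2_iscale_mat[i] * pu2_weigh_mat[i];
 *       if(qp_div >= 6)
 *           q <<= (qp_div - 6);
 *       else
 *           q = (q + (1 << (5 - qp_div))) >> (6 - qp_div);
 *   }
 */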
void ih264_iquant_itrans_recon_8x8_ssse3(WORD16 *pi2_src,
                                         UWORD8 *pu1_pred,
                                         UWORD8 *pu1_out,
                                         WORD32 pred_strd,
                                         WORD32 out_strd,
                                         const UWORD16 *pu2_iscale_mat,
                                         const UWORD16 *pu2_weigh_mat,
                                         UWORD32 qp_div,
                                         WORD16 *pi2_tmp,
                                         WORD32 iq_start_idx,
                                         WORD16 *pi2_dc_ld_addr)
{
    __m128i src_r0;
    __m128i scalemat_r0;
    __m128i zero_8x16b = _mm_setzero_si128(); // all bits reset to zero
    // __m128i one_8x16b = _mm_set1_epi8(255); // all bits set to 1
    // __m128i one_zero_mask = _mm_unpacklo_epi16(one_8x16b, zero_8x16b); // 1 0 1 0 1 0 1 0 --- 16 bits size
    __m128i value_32 = _mm_set1_epi32(32);
    /* Rounding term for the dequant right shift; guard the shift amount so it
       is never evaluated as a negative value when qp_div >= 6 */
    __m128i add_rshift = _mm_set1_epi32((qp_div < 6) ? (1 << (5 - qp_div)) : 0);
    __m128i dequant_r0;
    __m128i predload_r;
    __m128i pred_r0_1, pred_r1_1, pred_r2_1, pred_r3_1, pred_r4_1, pred_r5_1,
            pred_r6_1, pred_r7_1;
    __m128i sign_reg;
    __m128i src_r0_1, src_r0_2;
    __m128i scalemat_r0_1, scalemat_r0_2;
    __m128i temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8;
    __m128i temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17,
            temp18, temp19, temp20;
    // To store dequantization results
    __m128i resq_r0_1, resq_r0_2, resq_r1_1, resq_r1_2, resq_r2_1, resq_r2_2,
            resq_r3_1, resq_r3_2, resq_r4_1, resq_r4_2, resq_r5_1, resq_r5_2,
            resq_r6_1, resq_r6_2, resq_r7_1, resq_r7_2;
    UNUSED (pi2_tmp);
    UNUSED (iq_start_idx);
    UNUSED (pi2_dc_ld_addr);

    /*************************************************************/
    /* Dequantization of coefficients. Will be replaced by SIMD  */
    /* operations on platform                                    */
    /*************************************************************/

    // Row 0 processing
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src)); //a00 a01 a02 a03 a04 a05 a06 a07 -- the source matrix 0th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat)); //b00 b01 b02 b03 b04 b05 b06 b07 -- the scaling matrix 0th row
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[0])); //q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b); //a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b); // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0); //b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b); // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b); // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long

    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1); // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2); // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long

    if (qp_div >= 6) {
        resq_r0_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r0_2 = _mm_slli_epi32(temp7, qp_div - 6);
    } else {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r0_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r0_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    resq_r0_1 = _mm_packs_epi32(resq_r0_1, resq_r0_2); //a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 16 bit long
    // Row 1 processing
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 8)); //a10 a11 a12 a13 a14 a15 a16 a17 -- the source matrix 1st row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 8)); //b10 b11 b12 b13 b14 b15 b16 b17 -- the scaling matrix 1st row
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[8])); //q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b); //a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b); // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0); //b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b); // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b); // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1); // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2); // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    if (qp_div >= 6) {
        resq_r1_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r1_2 = _mm_slli_epi32(temp7, qp_div - 6);
    } else {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r1_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r1_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    resq_r1_1 = _mm_packs_epi32(resq_r1_1, resq_r1_2); //a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 16 bit long
    // Row 2 processing
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 16)); //a20 a21 a22 a23 a24 a25 a26 a27 -- the source matrix 2nd row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 16)); //b20 b21 b22 b23 b24 b25 b26 b27 -- the scaling matrix 2nd row
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[16])); //q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b); //a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b); // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0); //b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b); // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b); // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1); // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2); // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    if (qp_div >= 6) {
        resq_r2_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r2_2 = _mm_slli_epi32(temp7, qp_div - 6);
    } else {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r2_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r2_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    resq_r2_1 = _mm_packs_epi32(resq_r2_1, resq_r2_2); //a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 16 bit long
    // Row 3 processing
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 24)); //a30 a31 a32 a33 a34 a35 a36 a37 -- the source matrix 3rd row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 24)); //b30 b31 b32 b33 b34 b35 b36 b37 -- the scaling matrix 3rd row
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[24])); //q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b); //a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b); // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0); //b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b); // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b); // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1); // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2); // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    if (qp_div >= 6) {
        resq_r3_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r3_2 = _mm_slli_epi32(temp7, qp_div - 6);
    } else {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r3_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r3_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    resq_r3_1 = _mm_packs_epi32(resq_r3_1, resq_r3_2); //a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 16 bit long
    // Row 4 processing
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 32)); //a40 a41 a42 a43 a44 a45 a46 a47 -- the source matrix 4th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 32)); //b40 b41 b42 b43 b44 b45 b46 b47 -- the scaling matrix 4th row
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[32])); //q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b); //a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b); // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0); //b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b); // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b); // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1); // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2); // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    if (qp_div >= 6) {
        resq_r4_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r4_2 = _mm_slli_epi32(temp7, qp_div - 6);
    } else {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r4_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r4_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    resq_r4_1 = _mm_packs_epi32(resq_r4_1, resq_r4_2); //a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 16 bit long
    // Row 5 processing
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 40)); //a50 a51 a52 a53 a54 a55 a56 a57 -- the source matrix 5th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 40)); //b50 b51 b52 b53 b54 b55 b56 b57 -- the scaling matrix 5th row
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[40])); //q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b); //a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b); // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0); //b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b); // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b); // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1); // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2); // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    if (qp_div >= 6) {
        resq_r5_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r5_2 = _mm_slli_epi32(temp7, qp_div - 6);
        //resq_r5_1 = _mm_and_si128(resq_r5_1,one_zero_mask);
        //resq_r5_2 = _mm_and_si128(resq_r5_2,one_zero_mask);
    } else {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r5_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r5_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    resq_r5_1 = _mm_packs_epi32(resq_r5_1, resq_r5_2); //a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 16 bit long
    // Row 6 processing
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 48)); //a60 a61 a62 a63 a64 a65 a66 a67 -- the source matrix 6th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 48)); //b60 b61 b62 b63 b64 b65 b66 b67 -- the scaling matrix 6th row
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[48])); //q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b); //a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b); // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0); //b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b); // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b); // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1); // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2); // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    if (qp_div >= 6) {
        resq_r6_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r6_2 = _mm_slli_epi32(temp7, qp_div - 6);
        //resq_r6_1 = _mm_and_si128(resq_r6_1,one_zero_mask);
        //resq_r6_2 = _mm_and_si128(resq_r6_2,one_zero_mask);
    } else {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r6_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r6_2 = _mm_srai_epi32(temp7, 6 - qp_div);
        //resq_r6_1 = _mm_and_si128(resq_r6_1,one_zero_mask);
        //resq_r6_2 = _mm_and_si128(resq_r6_2,one_zero_mask);
    }
    resq_r6_1 = _mm_packs_epi32(resq_r6_1, resq_r6_2); //a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 16 bit long
    // Row 7 processing
    src_r0 = _mm_loadu_si128((__m128i *) (pi2_src + 56)); //a70 a71 a72 a73 a74 a75 a76 a77 -- the source matrix 7th row
    scalemat_r0 = _mm_loadu_si128((__m128i *) (pu2_iscale_mat + 56)); //b70 b71 b72 b73 b74 b75 b76 b77 -- the scaling matrix 7th row
    dequant_r0 = _mm_loadu_si128((__m128i *) (&pu2_weigh_mat[56])); //q0 q1 q2 q3 q4 q5 q6 q7 -- all 16 bits
    src_r0_1 = _mm_unpacklo_epi16(src_r0, zero_8x16b); //a00 0 a01 0 a02 0 a03 0 -- 16 bit long
    src_r0_2 = _mm_unpackhi_epi16(src_r0, zero_8x16b); // a04 0 a05 0 a06 0 a07 0 -- 16 bit long
    temp10 = _mm_mullo_epi16(scalemat_r0, dequant_r0); //b00*q0 b01*q1 b02*q2 b03*q3 b04*q4 b05*q5 b06*q6 b07*q7 -- 16 bit result
    scalemat_r0_1 = _mm_unpacklo_epi16(temp10, zero_8x16b); // b00*q0 0 b01*q1 0 b02*q2 0 b03*q3 0 -- 16 bit long
    scalemat_r0_2 = _mm_unpackhi_epi16(temp10, zero_8x16b); // b04*q4 0 b05*q5 0 b06*q6 0 b07*q7 0 -- 16 bit long
    temp5 = _mm_madd_epi16(src_r0_1, scalemat_r0_1); // a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 -- 32 bits long
    temp7 = _mm_madd_epi16(src_r0_2, scalemat_r0_2); // a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 32 bits long
    if (qp_div >= 6) {
        resq_r7_1 = _mm_slli_epi32(temp5, qp_div - 6);
        resq_r7_2 = _mm_slli_epi32(temp7, qp_div - 6);
    } else {
        temp5 = _mm_add_epi32(temp5, add_rshift);
        temp7 = _mm_add_epi32(temp7, add_rshift);
        resq_r7_1 = _mm_srai_epi32(temp5, 6 - qp_div);
        resq_r7_2 = _mm_srai_epi32(temp7, 6 - qp_div);
    }
    resq_r7_1 = _mm_packs_epi32(resq_r7_1, resq_r7_2); //a00*b00*q0 a01*b01*q1 a02*b02*q2 a03*b03*q3 a04*b04*q4 a05*b05*q5 a06*b06*q6 a07*b07*q7 -- 16 bit long
    /* Perform Inverse transform */
    /*--------------------------------------------------------------------*/
    /* IDCT [ Horizontal transformation ]                                 */
    /*--------------------------------------------------------------------*/
    // Matrix transpose
    /*
     *  a0 a1 a2 a3 a4 a5 a6 a7
     *  b0 b1 b2 b3 b4 b5 b6 b7
     *  c0 c1 c2 c3 c4 c5 c6 c7
     *  d0 d1 d2 d3 d4 d5 d6 d7
     */
    temp1 = _mm_unpacklo_epi16(resq_r0_1, resq_r1_1); //a0 b0 a1 b1 a2 b2 a3 b3
    temp3 = _mm_unpacklo_epi16(resq_r2_1, resq_r3_1); //c0 d0 c1 d1 c2 d2 c3 d3
    temp2 = _mm_unpackhi_epi16(resq_r0_1, resq_r1_1); //a4 b4 a5 b5 a6 b6 a7 b7
    temp4 = _mm_unpackhi_epi16(resq_r2_1, resq_r3_1); //c4 d4 c5 d5 c6 d6 c7 d7
    resq_r0_1 = _mm_unpacklo_epi32(temp1, temp3); //a0 b0 c0 d0 a1 b1 c1 d1
    resq_r1_1 = _mm_unpackhi_epi32(temp1, temp3); //a2 b2 c2 d2 a3 b3 c3 d3
    resq_r2_1 = _mm_unpacklo_epi32(temp2, temp4); //a4 b4 c4 d4 a5 b5 c5 d5
    resq_r3_1 = _mm_unpackhi_epi32(temp2, temp4); //a6 b6 c6 d6 a7 b7 c7 d7
    /*
     * e0 e1 e2 e3 e4 e5 e6 e7
     * f0 f1 f2 f3 f4 f5 f6 f7
     * g0 g1 g2 g3 g4 g5 g6 g7
     * h0 h1 h2 h3 h4 h5 h6 h7
     */
    temp1 = _mm_unpacklo_epi16(resq_r4_1, resq_r5_1); //e0 f0 e1 f1 e2 f2 e3 f3
    temp3 = _mm_unpacklo_epi16(resq_r6_1, resq_r7_1); //g0 h0 g1 h1 g2 h2 g3 h3
    temp2 = _mm_unpackhi_epi16(resq_r4_1, resq_r5_1); //e4 f4 e5 f5 e6 f6 e7 f7
    temp4 = _mm_unpackhi_epi16(resq_r6_1, resq_r7_1); //g4 h4 g5 h5 g6 h6 g7 h7
    resq_r4_1 = _mm_unpacklo_epi32(temp1, temp3); //e0 f0 g0 h0 e1 f1 g1 h1
    resq_r5_1 = _mm_unpackhi_epi32(temp1, temp3); //e2 f2 g2 h2 e3 f3 g3 h3
    resq_r6_1 = _mm_unpacklo_epi32(temp2, temp4); //e4 f4 g4 h4 e5 f5 g5 h5
    resq_r7_1 = _mm_unpackhi_epi32(temp2, temp4); //e6 f6 g6 h6 e7 f7 g7 h7
    /*
     * a0 b0 c0 d0 a1 b1 c1 d1
     * a2 b2 c2 d2 a3 b3 c3 d3
     * a4 b4 c4 d4 a5 b5 c5 d5
     * a6 b6 c6 d6 a7 b7 c7 d7
     * e0 f0 g0 h0 e1 f1 g1 h1
     * e2 f2 g2 h2 e3 f3 g3 h3
     * e4 f4 g4 h4 e5 f5 g5 h5
     * e6 f6 g6 h6 e7 f7 g7 h7
     */
    resq_r0_2 = _mm_unpacklo_epi64(resq_r0_1, resq_r4_1); //a0 b0 c0 d0 e0 f0 g0 h0
    resq_r1_2 = _mm_unpackhi_epi64(resq_r0_1, resq_r4_1); //a1 b1 c1 d1 e1 f1 g1 h1
    resq_r2_2 = _mm_unpacklo_epi64(resq_r1_1, resq_r5_1); //a2 b2 c2 d2 e2 f2 g2 h2
    resq_r3_2 = _mm_unpackhi_epi64(resq_r1_1, resq_r5_1); //a3 b3 c3 d3 e3 f3 g3 h3
    resq_r4_2 = _mm_unpacklo_epi64(resq_r2_1, resq_r6_1); //a4 b4 c4 d4 e4 f4 g4 h4
    resq_r5_2 = _mm_unpackhi_epi64(resq_r2_1, resq_r6_1); //a5 b5 c5 d5 e5 f5 g5 h5
    resq_r6_2 = _mm_unpacklo_epi64(resq_r3_1, resq_r7_1); //a6 b6 c6 d6 e6 f6 g6 h6
    resq_r7_2 = _mm_unpackhi_epi64(resq_r3_1, resq_r7_1); //a7 b7 c7 d7 e7 f7 g7 h7

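    /* Widen the odd rows (w1, w3, w5, w7) to 32 bits before the transform:
       they feed the four-term odd-part sums below, which can overflow 16 bits;
       the even rows stay 16 bit throughout. */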
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r1_2);
    resq_r1_1 = _mm_unpacklo_epi16(resq_r1_2, sign_reg); //a1 b1 c1 d1 -- 32 bit
    resq_r1_2 = _mm_unpackhi_epi16(resq_r1_2, sign_reg); //e1 f1 g1 h1 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r3_2);
    resq_r3_1 = _mm_unpacklo_epi16(resq_r3_2, sign_reg); //a3 b3 c3 d3 -- 32 bit
    resq_r3_2 = _mm_unpackhi_epi16(resq_r3_2, sign_reg); //e3 f3 g3 h3 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r5_2);
    resq_r5_1 = _mm_unpacklo_epi16(resq_r5_2, sign_reg); //a5 b5 c5 d5 -- 32 bit
    resq_r5_2 = _mm_unpackhi_epi16(resq_r5_2, sign_reg); //e5 f5 g5 h5 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r7_2);
    resq_r7_1 = _mm_unpacklo_epi16(resq_r7_2, sign_reg); //a7 b7 c7 d7 -- 32 bit
    resq_r7_2 = _mm_unpackhi_epi16(resq_r7_2, sign_reg); //e7 f7 g7 h7 -- 32 bit
    //Transform starts -- horizontal transform
    /*------------------------------------------------------------------*/
    /* y0 = w0 + w4                                                     */
    temp1 = _mm_add_epi16(resq_r0_2, resq_r4_2);
    /* y2 = w0 - w4                                                     */
    temp3 = _mm_sub_epi16(resq_r0_2, resq_r4_2);
    /* y1 = -w3 + w5 - w7 - (w7 >> 1)                                   */
    temp2 = _mm_sub_epi32(resq_r5_1, resq_r3_1); //-w3+w5
    temp10 = _mm_sub_epi32(resq_r5_2, resq_r3_2);
    temp4 = _mm_sub_epi32(temp2, resq_r7_1); //-w3+w5-w7
    temp12 = _mm_sub_epi32(temp10, resq_r7_2);
    temp5 = _mm_srai_epi32(resq_r7_1, 1); //w7>>1
    temp13 = _mm_srai_epi32(resq_r7_2, 1);
    temp2 = _mm_sub_epi32(temp4, temp5); //-w3+w5-w7 -(w7>>1)
    temp10 = _mm_sub_epi32(temp12, temp13);
    temp2 = _mm_packs_epi32(temp2, temp10);
    /* y3 = w1 + w7 - w3 - (w3 >> 1)                                    */
    temp4 = _mm_add_epi32(resq_r1_1, resq_r7_1); //w1+w7
    temp12 = _mm_add_epi32(resq_r1_2, resq_r7_2);
    temp4 = _mm_sub_epi32(temp4, resq_r3_1); //w1+w7-w3
    temp12 = _mm_sub_epi32(temp12, resq_r3_2);
    temp5 = _mm_srai_epi32(resq_r3_1, 1); //w3>>1
    temp13 = _mm_srai_epi32(resq_r3_2, 1);
    temp4 = _mm_sub_epi32(temp4, temp5); //w1+w7-w3-(w3>>1)
    temp12 = _mm_sub_epi32(temp12, temp13);
    temp4 = _mm_packs_epi32(temp4, temp12);
    /* y4 = (w2 >> 1) - w6                                              */
    temp5 = _mm_srai_epi16(resq_r2_2, 1); //w2>>1
    temp5 = _mm_sub_epi16(temp5, resq_r6_2); //(w2>>1)-w6
    /* y5 = -w1 + w7 + w5 + (w5 >> 1)                                   */
    temp6 = _mm_sub_epi32(resq_r7_1, resq_r1_1); //w7-w1
    temp14 = _mm_sub_epi32(resq_r7_2, resq_r1_2);
    temp6 = _mm_add_epi32(temp6, resq_r5_1); //w7-w1+w5
    temp14 = _mm_add_epi32(temp14, resq_r5_2);
    temp7 = _mm_srai_epi32(resq_r5_1, 1); //w5>>1
    temp15 = _mm_srai_epi32(resq_r5_2, 1);
    temp6 = _mm_add_epi32(temp6, temp7); //w7-w1+w5+(w5>>1)
    temp14 = _mm_add_epi32(temp14, temp15);
    temp6 = _mm_packs_epi32(temp6, temp14);
    /* y6 = w2 + (w6 >> 1)                                              */
    temp7 = _mm_srai_epi16(resq_r6_2, 1); //w6>>1
    temp7 = _mm_add_epi16(temp7, resq_r2_2); //(w6>>1)+w2
    /* y7 = w3 + w5 + w1 + (w1 >> 1)                                    */
    temp8 = _mm_add_epi32(resq_r3_1, resq_r5_1); //w3+w5
    temp16 = _mm_add_epi32(resq_r3_2, resq_r5_2);
    temp8 = _mm_add_epi32(temp8, resq_r1_1); //w3+w5+w1
    temp16 = _mm_add_epi32(temp16, resq_r1_2);
    temp17 = _mm_srai_epi32(resq_r1_1, 1); //w1>>1
    temp18 = _mm_srai_epi32(resq_r1_2, 1);
    temp8 = _mm_add_epi32(temp8, temp17); //w3+w5+w1+(w1>>1)
    temp16 = _mm_add_epi32(temp16, temp18);
    temp8 = _mm_packs_epi32(temp8, temp16);
    /*------------------------------------------------------------------*/
    /*------------------------------------------------------------------*/
    /* z0 = y0 + y6                                                        */
    resq_r0_1 = _mm_add_epi16(temp1, temp7);
    /* z1 = y1 + (y7 >> 2)                                                */
    resq_r1_1 = _mm_srai_epi16(temp8, 2);
    resq_r1_1 = _mm_add_epi16(resq_r1_1, temp2);
    /* z2 = y2 + y4                                                        */
    resq_r2_1 = _mm_add_epi16(temp3, temp5);
    /* z3 = y3 + (y5 >> 2)                                                */
    resq_r3_1 = _mm_srai_epi16(temp6, 2);
    resq_r3_1 = _mm_add_epi16(resq_r3_1, temp4);
    /* z4 = y2 - y4                                                        */
    resq_r4_1 = _mm_sub_epi16(temp3, temp5);
    /* z5 = (y3 >> 2) - y5                                                 */
    resq_r5_1 = _mm_srai_epi16(temp4, 2);
    resq_r5_1 = _mm_sub_epi16(resq_r5_1, temp6);
    /* z6 = y0 - y6                                                     */
    resq_r6_1 = _mm_sub_epi16(temp1, temp7);
    /* z7 = y7 - (y1 >> 2)                                                 */
    resq_r7_1 = _mm_srai_epi16(temp2, 2);
    resq_r7_1 = _mm_sub_epi16(temp8, resq_r7_1);
    /*------------------------------------------------------------------*/
    /*------------------------------------------------------------------*/
    /* x0 = z0 + z7                                                        */
    temp1 = _mm_add_epi16(resq_r0_1, resq_r7_1);
    /* x1 = z2 + z5                                                        */
    temp2 = _mm_add_epi16(resq_r2_1, resq_r5_1);
    /* x2 = z4 + z3                                                        */
    temp3 = _mm_add_epi16(resq_r4_1, resq_r3_1);
    /* x3 = z6 + z1                                                        */
    temp4 = _mm_add_epi16(resq_r6_1, resq_r1_1);
    /* x4 = z6 - z1                                                        */
    temp5 = _mm_sub_epi16(resq_r6_1, resq_r1_1);
    /* x5 = z4 - z3                                                        */
    temp6 = _mm_sub_epi16(resq_r4_1, resq_r3_1);
    /* x6 = z2 - z5                                                        */
    temp7 = _mm_sub_epi16(resq_r2_1, resq_r5_1);
    /* x7 = z0 - z7                                                        */
    temp8 = _mm_sub_epi16(resq_r0_1, resq_r7_1);
    /*------------------------------------------------------------------*/
    // Matrix transpose
    /*
     *  a0 b0 c0 d0 e0 f0 g0 h0
     *  a1 b1 c1 d1 e1 f1 g1 h1
     *  a2 b2 c2 d2 e2 f2 g2 h2
     *  a3 b3 c3 d3 e3 f3 g3 h3
     */
    temp17 = _mm_unpacklo_epi16(temp1, temp2); //a0 a1 b0 b1 c0 c1 d0 d1
    temp19 = _mm_unpacklo_epi16(temp3, temp4); //a2 a3 b2 b3 c2 c3 d2 d3
    temp18 = _mm_unpackhi_epi16(temp1, temp2); //e0 e1 f0 f1 g0 g1 h0 h1
    temp20 = _mm_unpackhi_epi16(temp3, temp4); //e2 e3 f2 f3 g2 g3 h2 h3

    resq_r0_1 = _mm_unpacklo_epi32(temp17, temp19); //a0 a1 a2 a3 b0 b1 b2 b3
    resq_r1_1 = _mm_unpackhi_epi32(temp17, temp19); //c0 c1 c2 c3 d0 d1 d2 d3
    resq_r2_1 = _mm_unpacklo_epi32(temp18, temp20); //e0 e1 e2 e3 f0 f1 f2 f3
    resq_r3_1 = _mm_unpackhi_epi32(temp18, temp20); //g0 g1 g2 g3 h0 h1 h2 h3
    /*
     *  a4 b4 c4 d4 e4 f4 g4 h4
     *  a5 b5 c5 d5 e5 f5 g5 h5
     *  a6 b6 c6 d6 e6 f6 g6 h6
     *  a7 b7 c7 d7 e7 f7 g7 h7
     */
    temp17 = _mm_unpacklo_epi16(temp5, temp6); //a4 a5 b4 b5 c4 c5 d4 d5
    temp19 = _mm_unpacklo_epi16(temp7, temp8); //a6 a7 b6 b7 c6 c7 d6 d7
    temp18 = _mm_unpackhi_epi16(temp5, temp6); //e4 e5 f4 f5 g4 g5 h4 h5
    temp20 = _mm_unpackhi_epi16(temp7, temp8); //e6 e7 f6 f7 g6 g7 h6 h7

    resq_r4_1 = _mm_unpacklo_epi32(temp17, temp19); //a4 a5 a6 a7 b4 b5 b6 b7
    resq_r5_1 = _mm_unpackhi_epi32(temp17, temp19); //c4 c5 c6 c7 d4 d5 d6 d7
    resq_r6_1 = _mm_unpacklo_epi32(temp18, temp20); //e4 e5 e6 e7 f4 f5 f6 f7
    resq_r7_1 = _mm_unpackhi_epi32(temp18, temp20); //g4 g5 g6 g7 h4 h5 h6 h7
    /*  a0 a1 a2 a3 b0 b1 b2 b3
     *  c0 c1 c2 c3 d0 d1 d2 d3
     *  e0 e1 e2 e3 f0 f1 f2 f3
     *  g0 g1 g2 g3 h0 h1 h2 h3
     *  a4 a5 a6 a7 b4 b5 b6 b7
     *  c4 c5 c6 c7 d4 d5 d6 d7
     *  e4 e5 e6 e7 f4 f5 f6 f7
     *  g4 g5 g6 g7 h4 h5 h6 h7
     */
    resq_r0_2 = _mm_unpacklo_epi64(resq_r0_1, resq_r4_1); //a0 a1 a2 a3 a4 a5 a6 a7
    resq_r1_2 = _mm_unpackhi_epi64(resq_r0_1, resq_r4_1); //b0 b1 b2 b3 b4 b5 b6 b7
    resq_r2_2 = _mm_unpacklo_epi64(resq_r1_1, resq_r5_1); //c0 c1 c2 c3 c4 c5 c6 c7
    resq_r3_2 = _mm_unpackhi_epi64(resq_r1_1, resq_r5_1); //d0 d1 d2 d3 d4 d5 d6 d7
    resq_r4_2 = _mm_unpacklo_epi64(resq_r2_1, resq_r6_1); //e0 e1 e2 e3 e4 e5 e6 e7
    resq_r5_2 = _mm_unpackhi_epi64(resq_r2_1, resq_r6_1); //f0 f1 f2 f3 f4 f5 f6 f7
    resq_r6_2 = _mm_unpacklo_epi64(resq_r3_1, resq_r7_1); //g0 g1 g2 g3 g4 g5 g6 g7
    resq_r7_2 = _mm_unpackhi_epi64(resq_r3_1, resq_r7_1); //h0 h1 h2 h3 h4 h5 h6 h7

    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r1_2);
    resq_r1_1 = _mm_unpacklo_epi16(resq_r1_2, sign_reg); //a1 b1 c1 d1 -- 32 bit
    resq_r1_2 = _mm_unpackhi_epi16(resq_r1_2, sign_reg); //e1 f1 g1 h1 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r3_2);
    resq_r3_1 = _mm_unpacklo_epi16(resq_r3_2, sign_reg); //a3 b3 c3 d3 -- 32 bit
    resq_r3_2 = _mm_unpackhi_epi16(resq_r3_2, sign_reg); //e3 f3 g3 h3 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r5_2);
    resq_r5_1 = _mm_unpacklo_epi16(resq_r5_2, sign_reg); //a5 b5 c5 d5 -- 32 bit
    resq_r5_2 = _mm_unpackhi_epi16(resq_r5_2, sign_reg); //e5 f5 g5 h5 -- 32 bit
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, resq_r7_2);
    resq_r7_1 = _mm_unpacklo_epi16(resq_r7_2, sign_reg); //a7 b7 c7 d7 -- 32 bit
    resq_r7_2 = _mm_unpackhi_epi16(resq_r7_2, sign_reg); //e7 f7 g7 h7 -- 32 bit
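    /* Odd rows widened to 32 bits again for the vertical pass, for the same
       overflow reason as in the horizontal pass */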

    zero_8x16b = _mm_setzero_si128(); // all bits reset to zero
    //Load pred buffer row 0
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[0])); //p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r0_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    //Load pred buffer row 1
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[pred_strd])); //p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r1_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    //Load pred buffer row 2
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[2 * pred_strd])); //p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r2_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    //Load pred buffer row 3
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[3 * pred_strd])); //p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r3_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    //Load pred buffer row 4
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[4 * pred_strd])); //p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r4_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    //Load pred buffer row 5
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[5 * pred_strd])); //p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r5_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    //Load pred buffer row 6
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[6 * pred_strd])); //p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r6_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits
    //Load pred buffer row 7
    predload_r = _mm_loadl_epi64((__m128i *) (&pu1_pred[7 * pred_strd])); //p0 p1 p2 p3 p4 p5 p6 p7 0 0 0 0 0 0 0 0 -- all 8 bits
    pred_r7_1 = _mm_unpacklo_epi8(predload_r, zero_8x16b); //p0 p1 p2 p3 p4 p5 p6 p7 -- all 16 bits

    /*--------------------------------------------------------------------*/
    /* IDCT [ Vertical transformation] and Xij = (xij + 32)>>6            */
    /*                                                                    */
    /* Add the prediction and store it back to reconstructed frame buffer */
    /* [Prediction buffer itself in this case]                            */
    /*--------------------------------------------------------------------*/

    /* y0j = w0j + w4j                                                     */
    temp1 = _mm_add_epi16(resq_r0_2, resq_r4_2);
    /* y2j = w0j - w4j                                                     */
    temp3 = _mm_sub_epi16(resq_r0_2, resq_r4_2);
    /* y1j = -w3j + w5j - w7j - (w7j >> 1)                                  */
    temp2 = _mm_sub_epi32(resq_r5_1, resq_r3_1); //-w3+w5
    temp10 = _mm_sub_epi32(resq_r5_2, resq_r3_2);
    temp4 = _mm_sub_epi32(temp2, resq_r7_1); //-w3+w5-w7
    temp12 = _mm_sub_epi32(temp10, resq_r7_2);
    temp5 = _mm_srai_epi32(resq_r7_1, 1); //w7>>1
    temp13 = _mm_srai_epi32(resq_r7_2, 1);
    temp2 = _mm_sub_epi32(temp4, temp5); //-w3+w5-w7 -(w7>>1)
    temp10 = _mm_sub_epi32(temp12, temp13);
    temp2 = _mm_packs_epi32(temp2, temp10);
    /* y3j = w1j + w7j - w3j - (w3j >> 1)                                   */
    temp4 = _mm_add_epi32(resq_r1_1, resq_r7_1); //w1+w7
    temp12 = _mm_add_epi32(resq_r1_2, resq_r7_2);
    temp4 = _mm_sub_epi32(temp4, resq_r3_1); //w1+w7-w3
    temp12 = _mm_sub_epi32(temp12, resq_r3_2);
    temp5 = _mm_srai_epi32(resq_r3_1, 1); //w3>>1
    temp13 = _mm_srai_epi32(resq_r3_2, 1);
    temp4 = _mm_sub_epi32(temp4, temp5); //w1+w7-w3-(w3>>1)
    temp12 = _mm_sub_epi32(temp12, temp13);
    temp4 = _mm_packs_epi32(temp4, temp12);
    /* y4j = (w2j >> 1) - w6j                                              */
    temp5 = _mm_srai_epi16(resq_r2_2, 1); //w2>>1
    temp5 = _mm_sub_epi16(temp5, resq_r6_2); //(w2>>1)-w6
    /* y5j = -w1j + w7j + w5j + (w5j >> 1)                                  */
    temp6 = _mm_sub_epi32(resq_r7_1, resq_r1_1); //w7-w1
    temp14 = _mm_sub_epi32(resq_r7_2, resq_r1_2);
    temp6 = _mm_add_epi32(temp6, resq_r5_1); //w7-w1+w5
    temp14 = _mm_add_epi32(temp14, resq_r5_2);
    temp7 = _mm_srai_epi32(resq_r5_1, 1); //w5>>1
    temp15 = _mm_srai_epi32(resq_r5_2, 1);
    temp6 = _mm_add_epi32(temp6, temp7); //w7-w1+w5+(w5>>1)
    temp14 = _mm_add_epi32(temp14, temp15);
    temp6 = _mm_packs_epi32(temp6, temp14);
    /* y6j = w2j + (w6j >> 1)                                              */
    temp7 = _mm_srai_epi16(resq_r6_2, 1); //w6>>1
    temp7 = _mm_add_epi16(temp7, resq_r2_2); //(w6>>1)+w2
    /* y7j = w3j + w5j + w1j + (w1j >> 1)                                   */
    temp8 = _mm_add_epi32(resq_r3_1, resq_r5_1); //w3+w5
    temp16 = _mm_add_epi32(resq_r3_2, resq_r5_2);
    temp8 = _mm_add_epi32(temp8, resq_r1_1); //w3+w5+w1
    temp16 = _mm_add_epi32(temp16, resq_r1_2);
    temp17 = _mm_srai_epi32(resq_r1_1, 1); //w1>>1
    temp18 = _mm_srai_epi32(resq_r1_2, 1);
    temp8 = _mm_add_epi32(temp8, temp17); //w3+w5+w1+(w1>>1)
    temp16 = _mm_add_epi32(temp16, temp18);
    temp8 = _mm_packs_epi32(temp8, temp16);
    /*------------------------------------------------------------------*/
    /*------------------------------------------------------------------*/
    /* z0j = y0j + y6j                                                        */
    resq_r0_1 = _mm_add_epi16(temp1, temp7);
    /* z1j = y1j + (y7j >> 2)                                                */
    resq_r1_1 = _mm_srai_epi16(temp8, 2);
    resq_r1_1 = _mm_add_epi16(resq_r1_1, temp2);
    /* z2j = y2j + y4j                                                        */
    resq_r2_1 = _mm_add_epi16(temp3, temp5);
    /* z3j = y3j + (y5j >> 2)                                                */
    resq_r3_1 = _mm_srai_epi16(temp6, 2);
    resq_r3_1 = _mm_add_epi16(resq_r3_1, temp4);
    /* z4j = y2j - y4j                                                        */
    resq_r4_1 = _mm_sub_epi16(temp3, temp5);
    /* z5j = (y3j >> 2) - y5j                                                 */
    resq_r5_1 = _mm_srai_epi16(temp4, 2);
    resq_r5_1 = _mm_sub_epi16(resq_r5_1, temp6);
    /* z6j = y0j - y6j                                                     */
    resq_r6_1 = _mm_sub_epi16(temp1, temp7);
    /* z7j = y7j - (y1j >> 2)                                                 */
    resq_r7_1 = _mm_srai_epi16(temp2, 2);
    resq_r7_1 = _mm_sub_epi16(temp8, resq_r7_1);
    /*------------------------------------------------------------------*/

    /*------------------------------------------------------------------*/
    /* x0j = z0j + z7j                                                        */
    temp1 = _mm_add_epi16(resq_r0_1, resq_r7_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp1);
    temp10 = _mm_unpacklo_epi16(temp1, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp1, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    temp1 = _mm_add_epi16(temp10, pred_r0_1);
    /* x1j = z2j + z5j                                                        */
    temp2 = _mm_add_epi16(resq_r2_1, resq_r5_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp2);
    temp10 = _mm_unpacklo_epi16(temp2, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp2, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    temp2 = _mm_add_epi16(temp10, pred_r1_1);
    /* x2j = z4j + z3j                                                        */
    temp3 = _mm_add_epi16(resq_r4_1, resq_r3_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp3);
    temp10 = _mm_unpacklo_epi16(temp3, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp3, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    temp3 = _mm_add_epi16(temp10, pred_r2_1);
    /* x3j = z6j + z1j                                                        */
    temp4 = _mm_add_epi16(resq_r6_1, resq_r1_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp4);
    temp10 = _mm_unpacklo_epi16(temp4, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp4, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    temp4 = _mm_add_epi16(temp10, pred_r3_1);
    /* x4j = z6j - z1j                                                        */
    temp5 = _mm_sub_epi16(resq_r6_1, resq_r1_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp5);
    temp10 = _mm_unpacklo_epi16(temp5, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp5, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    temp5 = _mm_add_epi16(temp10, pred_r4_1);
    /* x5j = z4j - z3j                                                        */
    temp6 = _mm_sub_epi16(resq_r4_1, resq_r3_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp6);
    temp10 = _mm_unpacklo_epi16(temp6, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp6, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    temp6 = _mm_add_epi16(temp10, pred_r5_1);
    /* x6j = z2j - z5j                                                        */
    temp7 = _mm_sub_epi16(resq_r2_1, resq_r5_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp7);
    temp10 = _mm_unpacklo_epi16(temp7, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp7, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    temp7 = _mm_add_epi16(temp10, pred_r6_1);
    /* x7j = z0j - z7j                                                        */
    temp8 = _mm_sub_epi16(resq_r0_1, resq_r7_1);
    sign_reg = _mm_cmpgt_epi16(zero_8x16b, temp8);
    temp10 = _mm_unpacklo_epi16(temp8, sign_reg);
    temp11 = _mm_unpackhi_epi16(temp8, sign_reg);
    temp10 = _mm_add_epi32(temp10, value_32);
    temp11 = _mm_add_epi32(temp11, value_32);
    temp10 = _mm_srai_epi32(temp10, 6);
    temp11 = _mm_srai_epi32(temp11, 6);
    temp10 = _mm_packs_epi32(temp10, temp11);
    temp8 = _mm_add_epi16(temp10, pred_r7_1);
    /*------------------------------------------------------------------*/
    //Clipping the results to 8 bits
    sign_reg = _mm_cmpgt_epi16(temp1, zero_8x16b); // sign check
    temp1 = _mm_and_si128(temp1, sign_reg);
    sign_reg = _mm_cmpgt_epi16(temp2, zero_8x16b); // sign check
    temp2 = _mm_and_si128(temp2, sign_reg);
    sign_reg = _mm_cmpgt_epi16(temp3, zero_8x16b); // sign check
    temp3 = _mm_and_si128(temp3, sign_reg);
    sign_reg = _mm_cmpgt_epi16(temp4, zero_8x16b); // sign check
    temp4 = _mm_and_si128(temp4, sign_reg);
    sign_reg = _mm_cmpgt_epi16(temp5, zero_8x16b); // sign check
    temp5 = _mm_and_si128(temp5, sign_reg);
    sign_reg = _mm_cmpgt_epi16(temp6, zero_8x16b); // sign check
    temp6 = _mm_and_si128(temp6, sign_reg);
    sign_reg = _mm_cmpgt_epi16(temp7, zero_8x16b); // sign check
    temp7 = _mm_and_si128(temp7, sign_reg);
    sign_reg = _mm_cmpgt_epi16(temp8, zero_8x16b); // sign check
    temp8 = _mm_and_si128(temp8, sign_reg);
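    /* As in the 4x4 path, the masks above zero the negative samples and the
       unsigned packs below saturate values above 255 */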

    resq_r0_2 = _mm_packus_epi16(temp1, zero_8x16b);
    resq_r1_2 = _mm_packus_epi16(temp2, zero_8x16b);
    resq_r2_2 = _mm_packus_epi16(temp3, zero_8x16b);
    resq_r3_2 = _mm_packus_epi16(temp4, zero_8x16b);
    resq_r4_2 = _mm_packus_epi16(temp5, zero_8x16b);
    resq_r5_2 = _mm_packus_epi16(temp6, zero_8x16b);
    resq_r6_2 = _mm_packus_epi16(temp7, zero_8x16b);
    resq_r7_2 = _mm_packus_epi16(temp8, zero_8x16b);

    _mm_storel_epi64((__m128i *) (&pu1_out[0]), resq_r0_2);
    _mm_storel_epi64((__m128i *) (&pu1_out[out_strd]), resq_r1_2);
    _mm_storel_epi64((__m128i *) (&pu1_out[2 * out_strd]), resq_r2_2);
    _mm_storel_epi64((__m128i *) (&pu1_out[3 * out_strd]), resq_r3_2);
    _mm_storel_epi64((__m128i *) (&pu1_out[4 * out_strd]), resq_r4_2);
    _mm_storel_epi64((__m128i *) (&pu1_out[5 * out_strd]), resq_r5_2);
    _mm_storel_epi64((__m128i *) (&pu1_out[6 * out_strd]), resq_r6_2);
    _mm_storel_epi64((__m128i *) (&pu1_out[7 * out_strd]), resq_r7_2);
}