/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <immintrin.h>  // AVX2

#include "vpx/vpx_integer.h"

int64_t vp9_block_error_avx2(const int16_t *coeff,
                             const int16_t *dqcoeff,
                             intptr_t block_size,
                             int64_t *ssz) {
  __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
  __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
  __m256i sse_reg_64hi, ssz_reg_64hi;
  __m128i sse_reg128, ssz_reg128;
  int64_t sse;
  int i;
  const __m256i zero_reg = _mm256_set1_epi16(0);

  // initialize the sse and ssz accumulators to zero
  sse_reg = _mm256_set1_epi16(0);
  ssz_reg = _mm256_set1_epi16(0);

  // process 16 16-bit coefficients (32 bytes) per iteration
  for (i = 0; i < block_size; i += 16) {
    // load 32 bytes from coeff and dqcoeff
    coeff_reg = _mm256_loadu_si256((const __m256i *)(coeff + i));
    dqcoeff_reg = _mm256_loadu_si256((const __m256i *)(dqcoeff + i));
    // dqcoeff - coeff
    dqcoeff_reg = _mm256_sub_epi16(dqcoeff_reg, coeff_reg);
    // square each 16-bit difference and sum adjacent pairs into 32-bit words
    dqcoeff_reg = _mm256_madd_epi16(dqcoeff_reg, dqcoeff_reg);
    // square each 16-bit coeff and sum adjacent pairs into 32-bit words
    coeff_reg = _mm256_madd_epi16(coeff_reg, coeff_reg);
    // zero-extend each 32-bit partial sum of (dqcoeff - coeff)^2 to 64 bits
    exp_dqcoeff_lo = _mm256_unpacklo_epi32(dqcoeff_reg, zero_reg);
    exp_dqcoeff_hi = _mm256_unpackhi_epi32(dqcoeff_reg, zero_reg);
    // zero-extend each 32-bit partial sum of coeff^2 to 64 bits
    exp_coeff_lo = _mm256_unpacklo_epi32(coeff_reg, zero_reg);
    exp_coeff_hi = _mm256_unpackhi_epi32(coeff_reg, zero_reg);
    // accumulate the 64-bit partial sums into sse and ssz
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_lo);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_lo);
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_hi);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_hi);
  }
  // shift the upper 64 bits of each 128-bit lane down to the lower half
  sse_reg_64hi = _mm256_srli_si256(sse_reg, 8);
  ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8);
  // add the upper 64 bits to the lower 64 bits within each lane
  sse_reg = _mm256_add_epi64(sse_reg, sse_reg_64hi);
  ssz_reg = _mm256_add_epi64(ssz_reg, ssz_reg_64hi);

  // fold the two 128-bit lanes of each 256-bit accumulator together
  sse_reg128 = _mm_add_epi64(_mm256_castsi256_si128(sse_reg),
                             _mm256_extractf128_si256(sse_reg, 1));

  ssz_reg128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_reg),
                             _mm256_extractf128_si256(ssz_reg, 1));

  // store the low 64 bits of each result
  _mm_storel_epi64((__m128i *)(&sse), sse_reg128);

  _mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
  return sse;
}
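
// The block below is not part of the original file: it is a minimal scalar
// sketch, for illustration only, of what the AVX2 kernel above computes. It
// returns the sum of squared differences between dqcoeff and coeff and
// writes the sum of squared coeff values to *ssz. The function name is
// hypothetical; unlike the vector loop, which consumes 16 coefficients per
// iteration, this sketch accepts any block_size.
#if 0
static int64_t block_error_scalar_sketch(const int16_t *coeff,
                                         const int16_t *dqcoeff,
                                         intptr_t block_size, int64_t *ssz) {
  int64_t error = 0, sqcoeff = 0;
  intptr_t i;
  for (i = 0; i < block_size; i++) {
    const int diff = dqcoeff[i] - coeff[i];
    error += (int64_t)diff * diff;
    sqcoeff += (int64_t)coeff[i] * coeff[i];
  }
  *ssz = sqcoeff;
  return error;
}
#endif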