Searched refs:_mm256_add_epi32 (Results 1 - 10 of 10) sorted by relevance

/external/chromium_org/third_party/libvpx/source/libvpx/vp9/encoder/x86/
vp9_dct32x32_avx2.c
355 const __m256i s2_20_4 = _mm256_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
356 const __m256i s2_20_5 = _mm256_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
357 const __m256i s2_21_4 = _mm256_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
358 const __m256i s2_21_5 = _mm256_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
359 const __m256i s2_22_4 = _mm256_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
360 const __m256i s2_22_5 = _mm256_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
361 const __m256i s2_23_4 = _mm256_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
362 const __m256i s2_23_5 = _mm256_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
363 const __m256i s2_24_4 = _mm256_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
364 const __m256i s2_24_5 = _mm256_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
[all...]
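
The matches above are libvpx's fixed-point rounding idiom: each 32-bit butterfly product is offset by k__DCT_CONST_ROUNDING before being shifted back down by DCT_CONST_BITS. A minimal sketch of that step, assuming the usual libvpx values (DCT_CONST_BITS = 14, rounding constant 2^13); the helper name round_shift_avx2 is illustrative, not from the source:

#include <immintrin.h>

#define DCT_CONST_BITS 14
#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

static __m256i round_shift_avx2(__m256i v) {
  /* Assumed scaling: libvpx multiplies DCT cosines by 2^14, so adding
   * 2^13 before the arithmetic shift implements round-to-nearest. */
  const __m256i k__DCT_CONST_ROUNDING = _mm256_set1_epi32(DCT_CONST_ROUNDING);
  const __m256i rounded = _mm256_add_epi32(v, k__DCT_CONST_ROUNDING);
  return _mm256_srai_epi32(rounded, DCT_CONST_BITS);
}
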
vp9_sad4d_intrin_avx2.c
45 sum_ref0 = _mm256_add_epi32(sum_ref0, ref0_reg);
46 sum_ref1 = _mm256_add_epi32(sum_ref1, ref1_reg);
47 sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg);
48 sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg);
73 sum_mlow = _mm256_add_epi32(sum_mlow, sum_mhigh);
127 sum_ref0 = _mm256_add_epi32(sum_ref0, ref0_reg);
128 sum_ref1 = _mm256_add_epi32(sum_ref1, ref1_reg);
129 sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg);
130 sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg);
131 sum_ref0 = _mm256_add_epi32(sum_ref
[all...]
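
These hits accumulate running SAD totals: _mm256_sad_epu8 leaves four 64-bit partial sums per register, and because each partial fits comfortably in 32 bits, the code folds them in with a plain 32-bit lane add. A sketch of one row of that pattern, assuming 32-pixel-wide rows; the helper name and signature are hypothetical:

#include <immintrin.h>
#include <stdint.h>

/* One row of the running-SAD accumulation (illustrative helper). */
static void sad_accumulate_row(const uint8_t *src, const uint8_t *ref,
                               __m256i *sum_ref0) {
  const __m256i src_reg = _mm256_loadu_si256((const __m256i *)src);
  __m256i ref0_reg = _mm256_loadu_si256((const __m256i *)ref);
  /* Four 64-bit SADs, one per 8-byte group. */
  ref0_reg = _mm256_sad_epu8(ref0_reg, src_reg);
  /* The upper 32 bits of each SAD are zero, so a 32-bit add is safe. */
  *sum_ref0 = _mm256_add_epi32(*sum_ref0, ref0_reg);
}
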
vp9_variance_impl_intrin_avx2.c
64 madd_ref_src = _mm256_add_epi32(madd_ref_src,
65 _mm256_add_epi32(madd_low, madd_high));
166 madd_ref_src = _mm256_add_epi32(madd_ref_src,
167 _mm256_add_epi32(madd_low, madd_high));
186 expand_sum = _mm256_add_epi32(expand_sum_low, expand_sum_high);
192 expand_madd = _mm256_add_epi32(expand_madd_low, expand_madd_high);
197 ex_expand_sum = _mm256_add_epi32(ex_expand_sum_low, ex_expand_sum_high);
203 madd_ref_src = _mm256_add_epi32(madd_ref_src, expand_madd);
204 sum_ref_src = _mm256_add_epi32(sum_ref_src, ex_expand_sum);
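
Lines 64-65 and 166-167 above fold two _mm256_madd_epi16 results into a 32-bit sum-of-squares accumulator with a single nested add. A sketch of that accumulation step, assuming diff_lo and diff_hi already hold sign-extended 16-bit source/reference differences (the function name is illustrative):

#include <immintrin.h>

/* Fold two madd results into the 32-bit sum-of-squares accumulator. */
static __m256i accumulate_sse(__m256i madd_ref_src,
                              __m256i diff_lo, __m256i diff_hi) {
  /* Each madd lane holds d0*d0 + d1*d1 for one pair of differences. */
  const __m256i madd_low  = _mm256_madd_epi16(diff_lo, diff_lo);
  const __m256i madd_high = _mm256_madd_epi16(diff_hi, diff_hi);
  /* Mirrors the nested add in the matches above. */
  return _mm256_add_epi32(madd_ref_src,
                          _mm256_add_epi32(madd_low, madd_high));
}
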
vp9_subpel_variance_impl_intrin_avx2.c
96 sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo); \
97 sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
105 sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \
106 sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi); \
111 sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \
112 sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \
116 sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \
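
The macro lines above (105-116) are stages of a fold-and-add horizontal reduction: the high half of a register is repeatedly added into the low half until one total remains. A generic sketch of the same reduction taken all the way to a scalar, not the literal macro bodies:

#include <immintrin.h>
#include <stdint.h>

/* Horizontal sum of eight 32-bit lanes. */
static int32_t hsum_epi32_avx2(__m256i v) {
  /* Fold the upper 128-bit lane into the lower one. */
  const __m128i lo = _mm256_castsi256_si128(v);
  const __m128i hi = _mm256_extracti128_si256(v, 1);
  __m128i sum = _mm_add_epi32(lo, hi);
  /* 4 lanes -> 2 lanes -> 1 lane. */
  sum = _mm_add_epi32(sum, _mm_srli_si128(sum, 8));
  sum = _mm_add_epi32(sum, _mm_srli_si128(sum, 4));
  return _mm_cvtsi128_si32(sum);
}
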
/external/libvpx/libvpx/vp9/encoder/x86/
vp9_dct32x32_avx2.c
355 const __m256i s2_20_4 = _mm256_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING);
356 const __m256i s2_20_5 = _mm256_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING);
357 const __m256i s2_21_4 = _mm256_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING);
358 const __m256i s2_21_5 = _mm256_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING);
359 const __m256i s2_22_4 = _mm256_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING);
360 const __m256i s2_22_5 = _mm256_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING);
361 const __m256i s2_23_4 = _mm256_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING);
362 const __m256i s2_23_5 = _mm256_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING);
363 const __m256i s2_24_4 = _mm256_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING);
364 const __m256i s2_24_5 = _mm256_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING);
[all...]
vp9_sad4d_intrin_avx2.c
45 sum_ref0 = _mm256_add_epi32(sum_ref0, ref0_reg);
46 sum_ref1 = _mm256_add_epi32(sum_ref1, ref1_reg);
47 sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg);
48 sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg);
73 sum_mlow = _mm256_add_epi32(sum_mlow, sum_mhigh);
127 sum_ref0 = _mm256_add_epi32(sum_ref0, ref0_reg);
128 sum_ref1 = _mm256_add_epi32(sum_ref1, ref1_reg);
129 sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg);
130 sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg);
131 sum_ref0 = _mm256_add_epi32(sum_ref
[all...]
vp9_variance_impl_intrin_avx2.c
64 madd_ref_src = _mm256_add_epi32(madd_ref_src,
65 _mm256_add_epi32(madd_low, madd_high));
166 madd_ref_src = _mm256_add_epi32(madd_ref_src,
167 _mm256_add_epi32(madd_low, madd_high));
186 expand_sum = _mm256_add_epi32(expand_sum_low, expand_sum_high);
192 expand_madd = _mm256_add_epi32(expand_madd_low, expand_madd_high);
197 ex_expand_sum = _mm256_add_epi32(ex_expand_sum_low, ex_expand_sum_high);
203 madd_ref_src = _mm256_add_epi32(madd_ref_src, expand_madd);
204 sum_ref_src = _mm256_add_epi32(sum_ref_src, ex_expand_sum);
vp9_subpel_variance_impl_intrin_avx2.c
96 sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo); \
97 sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi);
105 sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \
106 sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi); \
111 sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \
112 sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \
116 sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \
/external/clang/test/CodeGen/
avx2-builtins.c
65 return _mm256_add_epi32(a, b);
/external/clang/lib/Headers/
avx2intrin.h
89 _mm256_add_epi32(__m256i __a, __m256i __b) function
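
The last two results are the intrinsic's definition in clang's avx2intrin.h and its CodeGen test: a lane-wise add of eight packed 32-bit integers. A minimal standalone usage sketch (compile with -mavx2):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  const __m256i a = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8);
  const __m256i b = _mm256_set1_epi32(10);
  const __m256i c = _mm256_add_epi32(a, b);  /* 11, 12, ..., 18 */
  int out[8];
  _mm256_storeu_si256((__m256i *)out, c);
  for (int i = 0; i < 8; ++i) printf("%d ", out[i]);
  printf("\n");
  return 0;
}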

Completed in 800 milliseconds