enc_sse2.c revision a2415724fb3466168b2af5b08bd94ba732c0e753
// Copyright 2011 Google Inc. All Rights Reserved.
//
// This code is licensed under the same terms as WebM:
// Software License Agreement: http://www.webmproject.org/license/software/
// Additional IP Rights Grant: http://www.webmproject.org/license/additional/
// -----------------------------------------------------------------------------
//
// SSE2 version of speed-critical encoding functions.
//
// Author: Christian Duvivier (cduvivier@google.com)

#include "./dsp.h"

#if defined(WEBP_USE_SSE2)
#include <stdlib.h>  // for abs()
#include <emmintrin.h>

#include "../enc/vp8enci.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.

static void CollectHistogramSSE2(const uint8_t* ref, const uint8_t* pred,
                                 int start_block, int end_block,
                                 VP8Histogram* const histo) {
  const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
  int j;
  for (j = start_block; j < end_block; ++j) {
    int16_t out[16];
    int k;

    VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);

    // Convert coefficients to bin (within out[]).
    {
      // Load.
      const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]);
      const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]);
      // sign(out) = out >> 15  (0x0000 if positive, 0xffff if negative)
      const __m128i sign0 = _mm_srai_epi16(out0, 15);
      const __m128i sign1 = _mm_srai_epi16(out1, 15);
      // abs(out) = (out ^ sign) - sign
      const __m128i xor0 = _mm_xor_si128(out0, sign0);
      const __m128i xor1 = _mm_xor_si128(out1, sign1);
      const __m128i abs0 = _mm_sub_epi16(xor0, sign0);
      const __m128i abs1 = _mm_sub_epi16(xor1, sign1);
      // v = abs(out) >> 3
      const __m128i v0 = _mm_srai_epi16(abs0, 3);
      const __m128i v1 = _mm_srai_epi16(abs1, 3);
      // bin = min(v, MAX_COEFF_THRESH)
      const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
      const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
      // Store.
      _mm_storeu_si128((__m128i*)&out[0], bin0);
      _mm_storeu_si128((__m128i*)&out[8], bin1);
    }

    // Use the resulting bins to update the histogram.
    for (k = 0; k < 16; ++k) {
      histo->distribution[out[k]]++;
    }
  }
}

//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)

// Does one or two inverse transforms.
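// For reference, the "trick" multiply described below can be checked in
// scalar form (MUL is a hypothetical helper, not part of this file):
//   MUL(x, K) = (x * K) >> 16 = ((x * k) >> 16) + x,   with k = K - (1 << 16)
// e.g. for x = 100 and K1 = 85627:
//   (100 * 85627) >> 16 = 130  and  ((100 * 20091) >> 16) + 100 = 30 + 100.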
static void ITransformSSE2(const uint8_t* ref, const int16_t* in, uint8_t* dst,
                           int do_two) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 = 20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant becomes the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k) >> 16) + x
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;

  // Load and concatenate the transform coefficients (we'll do two inverse
  // transforms in parallel). In the case of only one inverse transform, the
  // second half of the vectors will just contain random values we'll never
  // use nor store.
  __m128i in0, in1, in2, in3;
  {
    in0 = _mm_loadl_epi64((__m128i*)&in[0]);
    in1 = _mm_loadl_epi64((__m128i*)&in[4]);
    in2 = _mm_loadl_epi64((__m128i*)&in[8]);
    in3 = _mm_loadl_epi64((__m128i*)&in[12]);
    // a00 a10 a20 a30   x x x x
    // a01 a11 a21 a31   x x x x
    // a02 a12 a22 a32   x x x x
    // a03 a13 a23 a33   x x x x
    if (do_two) {
      const __m128i inB0 = _mm_loadl_epi64((__m128i*)&in[16]);
      const __m128i inB1 = _mm_loadl_epi64((__m128i*)&in[20]);
      const __m128i inB2 = _mm_loadl_epi64((__m128i*)&in[24]);
      const __m128i inB3 = _mm_loadl_epi64((__m128i*)&in[28]);
      in0 = _mm_unpacklo_epi64(in0, inB0);
      in1 = _mm_unpacklo_epi64(in1, inB1);
      in2 = _mm_unpacklo_epi64(in2, inB2);
      in3 = _mm_unpacklo_epi64(in3, inB3);
      // a00 a10 a20 a30   b00 b10 b20 b30
      // a01 a11 a21 a31   b01 b11 b21 b31
      // a02 a12 a22 a32   b02 b12 b22 b32
      // a03 a13 a23 a33   b03 b13 b23 b33
    }
  }

  // Vertical pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(tmp0, tmp1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(tmp2, tmp3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(tmp0, tmp1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(tmp2, tmp3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }
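  // Note (for reference): in the horizontal pass below, the rounding constant
  // 4 is folded into the DC term ('dc = T0 + 4'), so the usual '(x + 4) >> 3'
  // of the last stage reduces to the plain '>> 3' applied to tmp0..tmp3.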
  // Horizontal pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i four = _mm_set1_epi16(4);
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a = _mm_add_epi16(dc, T2);
    const __m128i b = _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);

    // Transpose the two 4x4.
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(shifted0, shifted1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(shifted2, shifted3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(shifted0, shifted1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(shifted2, shifted3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    T0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    T1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    T2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    T3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Add inverse transform to 'ref' and store.
  {
    const __m128i zero = _mm_set1_epi16(0);
    // Load the reference(s).
    __m128i ref0, ref1, ref2, ref3;
    if (do_two) {
      // Load eight bytes/pixels per line.
      ref0 = _mm_loadl_epi64((__m128i*)&ref[0 * BPS]);
      ref1 = _mm_loadl_epi64((__m128i*)&ref[1 * BPS]);
      ref2 = _mm_loadl_epi64((__m128i*)&ref[2 * BPS]);
      ref3 = _mm_loadl_epi64((__m128i*)&ref[3 * BPS]);
    } else {
      // Load four bytes/pixels per line.
      ref0 = _mm_cvtsi32_si128(*(int*)&ref[0 * BPS]);
      ref1 = _mm_cvtsi32_si128(*(int*)&ref[1 * BPS]);
      ref2 = _mm_cvtsi32_si128(*(int*)&ref[2 * BPS]);
      ref3 = _mm_cvtsi32_si128(*(int*)&ref[3 * BPS]);
    }
    // Convert to 16b.
    ref0 = _mm_unpacklo_epi8(ref0, zero);
    ref1 = _mm_unpacklo_epi8(ref1, zero);
    ref2 = _mm_unpacklo_epi8(ref2, zero);
    ref3 = _mm_unpacklo_epi8(ref3, zero);
    // Add the inverse transform(s).
    ref0 = _mm_add_epi16(ref0, T0);
    ref1 = _mm_add_epi16(ref1, T1);
    ref2 = _mm_add_epi16(ref2, T2);
    ref3 = _mm_add_epi16(ref3, T3);
    // Unsigned saturate to 8b.
    ref0 = _mm_packus_epi16(ref0, ref0);
    ref1 = _mm_packus_epi16(ref1, ref1);
    ref2 = _mm_packus_epi16(ref2, ref2);
    ref3 = _mm_packus_epi16(ref3, ref3);
    // Store the results.
    if (do_two) {
      // Store eight bytes/pixels per line.
      _mm_storel_epi64((__m128i*)&dst[0 * BPS], ref0);
      _mm_storel_epi64((__m128i*)&dst[1 * BPS], ref1);
      _mm_storel_epi64((__m128i*)&dst[2 * BPS], ref2);
      _mm_storel_epi64((__m128i*)&dst[3 * BPS], ref3);
    } else {
      // Store four bytes/pixels per line.
      *((int32_t*)&dst[0 * BPS]) = _mm_cvtsi128_si32(ref0);
      *((int32_t*)&dst[1 * BPS]) = _mm_cvtsi128_si32(ref1);
      *((int32_t*)&dst[2 * BPS]) = _mm_cvtsi128_si32(ref2);
      *((int32_t*)&dst[3 * BPS]) = _mm_cvtsi128_si32(ref3);
    }
  }
}
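// For reference, a scalar sketch of the forward transform that FTransformSSE2
// parallelizes (hypothetical FTransform_C helper, following the per-pass
// formulas quoted in the comments below; not part of this file):
//   static void FTransform_C(const uint8_t* src, const uint8_t* ref,
//                            int16_t* out) {
//     int i, tmp[16];
//     for (i = 0; i < 4; ++i, src += BPS, ref += BPS) {   // horizontal pass
//       const int a0 = src[0] - ref[0], a1 = src[1] - ref[1];
//       const int a2 = src[2] - ref[2], a3 = src[3] - ref[3];
//       const int b0 = (a0 + a3) << 3, b1 = (a1 + a2) << 3;
//       const int b3 = (a0 - a3) << 3, b2 = (a1 - a2) << 3;
//       tmp[0 + i * 4] = b0 + b1;
//       tmp[1 + i * 4] = (b3 * 5352 + b2 * 2217 + 14500) >> 12;
//       tmp[2 + i * 4] = b0 - b1;
//       tmp[3 + i * 4] = (b3 * 2217 - b2 * 5352 +  7500) >> 12;
//     }
//     for (i = 0; i < 4; ++i) {                           // vertical pass
//       const int v0 = tmp[0 + i], v1 = tmp[4 + i];
//       const int v2 = tmp[8 + i], v3 = tmp[12 + i];
//       const int a0 = v0 + v3, a1 = v1 + v2;
//       const int a3 = v0 - v3, a2 = v1 - v2;
//       out[0 + i] = (a0 + a1 + 7) >> 4;
//       out[4 + i] = ((a3 * 5352 + a2 * 2217 + 12000) >> 16) + (a3 != 0);
//       out[8 + i] = (a0 - a1 + 7) >> 4;
//       out[12 + i] = (a3 * 2217 - a2 * 5352 + 51000) >> 16;
//     }
//   }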
static void FTransformSSE2(const uint8_t* src, const uint8_t* ref,
                           int16_t* out) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i seven = _mm_set1_epi16(7);
  const __m128i k7500 = _mm_set1_epi32(7500);
  const __m128i k14500 = _mm_set1_epi32(14500);
  const __m128i k51000 = _mm_set1_epi32(51000);
  const __m128i k12000_plus_one = _mm_set1_epi32(12000 + (1 << 16));
  const __m128i k5352_2217 = _mm_set_epi16(5352, 2217, 5352, 2217,
                                           5352, 2217, 5352, 2217);
  const __m128i k2217_5352 = _mm_set_epi16(2217, -5352, 2217, -5352,
                                           2217, -5352, 2217, -5352);

  __m128i v01, v32;

  // Difference between src and ref and initial transpose.
  {
    // Load src and convert to 16b.
    const __m128i src0 = _mm_loadl_epi64((__m128i*)&src[0 * BPS]);
    const __m128i src1 = _mm_loadl_epi64((__m128i*)&src[1 * BPS]);
    const __m128i src2 = _mm_loadl_epi64((__m128i*)&src[2 * BPS]);
    const __m128i src3 = _mm_loadl_epi64((__m128i*)&src[3 * BPS]);
    const __m128i src_0 = _mm_unpacklo_epi8(src0, zero);
    const __m128i src_1 = _mm_unpacklo_epi8(src1, zero);
    const __m128i src_2 = _mm_unpacklo_epi8(src2, zero);
    const __m128i src_3 = _mm_unpacklo_epi8(src3, zero);
    // Load ref and convert to 16b.
    const __m128i ref0 = _mm_loadl_epi64((__m128i*)&ref[0 * BPS]);
    const __m128i ref1 = _mm_loadl_epi64((__m128i*)&ref[1 * BPS]);
    const __m128i ref2 = _mm_loadl_epi64((__m128i*)&ref[2 * BPS]);
    const __m128i ref3 = _mm_loadl_epi64((__m128i*)&ref[3 * BPS]);
    const __m128i ref_0 = _mm_unpacklo_epi8(ref0, zero);
    const __m128i ref_1 = _mm_unpacklo_epi8(ref1, zero);
    const __m128i ref_2 = _mm_unpacklo_epi8(ref2, zero);
    const __m128i ref_3 = _mm_unpacklo_epi8(ref3, zero);
    // Compute difference.
    const __m128i diff0 = _mm_sub_epi16(src_0, ref_0);
    const __m128i diff1 = _mm_sub_epi16(src_1, ref_1);
    const __m128i diff2 = _mm_sub_epi16(src_2, ref_2);
    const __m128i diff3 = _mm_sub_epi16(src_3, ref_3);

    // Transpose.
    // 00 01 02 03   0 0 0 0
    // 10 11 12 13   0 0 0 0
    // 20 21 22 23   0 0 0 0
    // 30 31 32 33   0 0 0 0
    const __m128i transpose0_0 = _mm_unpacklo_epi16(diff0, diff1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(diff2, diff3);
    // 00 10 01 11   02 12 03 13
    // 20 30 21 31   22 32 23 33
    const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
    // a02 a12 a22 a32   a03 a13 a23 a33
    // a00 a10 a20 a30   a01 a11 a21 a31
    // a03 a13 a23 a33   a02 a12 a22 a32
  }

  // First pass and subsequent transpose.
  {
    // Same operations are done on the (0,3) and (1,2) pairs.
    // b0 = (a0 + a3) << 3
    // b1 = (a1 + a2) << 3
    // b3 = (a0 - a3) << 3
    // b2 = (a1 - a2) << 3
    const __m128i a01 = _mm_add_epi16(v01, v32);
    const __m128i a32 = _mm_sub_epi16(v01, v32);
    const __m128i b01 = _mm_slli_epi16(a01, 3);
    const __m128i b32 = _mm_slli_epi16(a32, 3);
    const __m128i b11 = _mm_unpackhi_epi64(b01, b01);
    const __m128i b22 = _mm_unpackhi_epi64(b32, b32);

    // e0 = b0 + b1
    // e2 = b0 - b1
    const __m128i e0 = _mm_add_epi16(b01, b11);
    const __m128i e2 = _mm_sub_epi16(b01, b11);
    const __m128i e02 = _mm_unpacklo_epi64(e0, e2);

    // e1 = (b3 * 5352 + b2 * 2217 + 14500) >> 12
    // e3 = (b3 * 2217 - b2 * 5352 +  7500) >> 12
    const __m128i b23 = _mm_unpacklo_epi16(b22, b32);
    const __m128i c1 = _mm_madd_epi16(b23, k5352_2217);
    const __m128i c3 = _mm_madd_epi16(b23, k2217_5352);
    const __m128i d1 = _mm_add_epi32(c1, k14500);
    const __m128i d3 = _mm_add_epi32(c3, k7500);
    const __m128i e1 = _mm_srai_epi32(d1, 12);
    const __m128i e3 = _mm_srai_epi32(d3, 12);
    const __m128i e13 = _mm_packs_epi32(e1, e3);

    // Transpose.
    // 00 01 02 03   20 21 22 23
    // 10 11 12 13   30 31 32 33
    const __m128i transpose0_0 = _mm_unpacklo_epi16(e02, e13);
    const __m128i transpose0_1 = _mm_unpackhi_epi16(e02, e13);
    // 00 10 01 11   02 12 03 13
    // 20 30 21 31   22 32 23 33
    const __m128i v23 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    v01 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    v32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2));
    // 02 12 22 32   03 13 23 33
    // 00 10 20 30   01 11 21 31
    // 03 13 23 33   02 12 22 32
  }

  // Second pass.
  {
    // Same operations are done on the (0,3) and (1,2) pairs.
    // a0 = v0 + v3
    // a1 = v1 + v2
    // a3 = v0 - v3
    // a2 = v1 - v2
    const __m128i a01 = _mm_add_epi16(v01, v32);
    const __m128i a32 = _mm_sub_epi16(v01, v32);
    const __m128i a11 = _mm_unpackhi_epi64(a01, a01);
    const __m128i a22 = _mm_unpackhi_epi64(a32, a32);

    // d0 = (a0 + a1 + 7) >> 4;
    // d2 = (a0 - a1 + 7) >> 4;
    const __m128i b0 = _mm_add_epi16(a01, a11);
    const __m128i b2 = _mm_sub_epi16(a01, a11);
    const __m128i c0 = _mm_add_epi16(b0, seven);
    const __m128i c2 = _mm_add_epi16(b2, seven);
    const __m128i d0 = _mm_srai_epi16(c0, 4);
    const __m128i d2 = _mm_srai_epi16(c2, 4);

    // f1 = (a3 * 5352 + a2 * 2217 + 12000) >> 16
    // f3 = (a3 * 2217 - a2 * 5352 + 51000) >> 16
    const __m128i b23 = _mm_unpacklo_epi16(a22, a32);
    const __m128i c1 = _mm_madd_epi16(b23, k5352_2217);
    const __m128i c3 = _mm_madd_epi16(b23, k2217_5352);
    const __m128i d1 = _mm_add_epi32(c1, k12000_plus_one);
    const __m128i d3 = _mm_add_epi32(c3, k51000);
    const __m128i e1 = _mm_srai_epi32(d1, 16);
    const __m128i e3 = _mm_srai_epi32(d3, 16);
    const __m128i f1 = _mm_packs_epi32(e1, e1);
    const __m128i f3 = _mm_packs_epi32(e3, e3);
    // f1 = f1 + (a3 != 0);
    // The compare will return (0xffff, 0) for (==0, !=0). To turn that into
    // the desired (0, 1), we add one earlier through k12000_plus_one.
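// For reference, a scalar sketch of the same 4x4 sum of squared errors
// (hypothetical SSE4x4_C helper, not part of this file):
//   static int SSE4x4_C(const uint8_t* a, const uint8_t* b) {
//     int x, y, sum = 0;
//     for (y = 0; y < 4; ++y) {
//       for (x = 0; x < 4; ++x) {
//         const int diff = a[x + y * BPS] - b[x + y * BPS];
//         sum += diff * diff;
//       }
//     }
//     return sum;
//   }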
    const __m128i g1 = _mm_add_epi16(f1, _mm_cmpeq_epi16(a32, zero));

    _mm_storel_epi64((__m128i*)&out[ 0], d0);
    _mm_storel_epi64((__m128i*)&out[ 4], g1);
    _mm_storel_epi64((__m128i*)&out[ 8], d2);
    _mm_storel_epi64((__m128i*)&out[12], f3);
  }
}

//------------------------------------------------------------------------------
// Metric

static int SSE4x4SSE2(const uint8_t* a, const uint8_t* b) {
  const __m128i zero = _mm_set1_epi16(0);

  // Load values.
  const __m128i a0 = _mm_loadl_epi64((__m128i*)&a[BPS * 0]);
  const __m128i a1 = _mm_loadl_epi64((__m128i*)&a[BPS * 1]);
  const __m128i a2 = _mm_loadl_epi64((__m128i*)&a[BPS * 2]);
  const __m128i a3 = _mm_loadl_epi64((__m128i*)&a[BPS * 3]);
  const __m128i b0 = _mm_loadl_epi64((__m128i*)&b[BPS * 0]);
  const __m128i b1 = _mm_loadl_epi64((__m128i*)&b[BPS * 1]);
  const __m128i b2 = _mm_loadl_epi64((__m128i*)&b[BPS * 2]);
  const __m128i b3 = _mm_loadl_epi64((__m128i*)&b[BPS * 3]);

  // Combine pair of lines and convert to 16b.
  const __m128i a01 = _mm_unpacklo_epi32(a0, a1);
  const __m128i a23 = _mm_unpacklo_epi32(a2, a3);
  const __m128i b01 = _mm_unpacklo_epi32(b0, b1);
  const __m128i b23 = _mm_unpacklo_epi32(b2, b3);
  const __m128i a01s = _mm_unpacklo_epi8(a01, zero);
  const __m128i a23s = _mm_unpacklo_epi8(a23, zero);
  const __m128i b01s = _mm_unpacklo_epi8(b01, zero);
  const __m128i b23s = _mm_unpacklo_epi8(b23, zero);

  // Compute differences; (a-b)^2 = (abs(a-b))^2 = (sat8(a-b) + sat8(b-a))^2
  // For each lane, one of d0/d1 (resp. d2/d3) below is zero, so summing the
  // two madd results still yields the plain squared difference.
  // TODO(cduvivier): Disassemble and figure out why this is fastest. We don't
  //                  need absolute values, there is no need to do calculation
  //                  in 8bit as we are already in 16bit, ... Yet this is what
  //                  benchmarks the fastest!
  const __m128i d0 = _mm_subs_epu8(a01s, b01s);
  const __m128i d1 = _mm_subs_epu8(b01s, a01s);
  const __m128i d2 = _mm_subs_epu8(a23s, b23s);
  const __m128i d3 = _mm_subs_epu8(b23s, a23s);

  // Square and add them all together.
  const __m128i madd0 = _mm_madd_epi16(d0, d0);
  const __m128i madd1 = _mm_madd_epi16(d1, d1);
  const __m128i madd2 = _mm_madd_epi16(d2, d2);
  const __m128i madd3 = _mm_madd_epi16(d3, d3);
  const __m128i sum0 = _mm_add_epi32(madd0, madd1);
  const __m128i sum1 = _mm_add_epi32(madd2, madd3);
  const __m128i sum2 = _mm_add_epi32(sum0, sum1);
  int32_t tmp[4];
  _mm_storeu_si128((__m128i*)tmp, sum2);
  return (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
}

//------------------------------------------------------------------------------
// Texture distortion
//
// We try to match the spectral content (weighted) between source and
// reconstructed samples.

// Hadamard transform
// Returns the difference between the weighted sums of the absolute values of
// the transformed coefficients of 'inA' and 'inB'.
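// For reference, the per-block scalar pipeline that the SSE2 code below
// mirrors is roughly (a sketch based on this function's step comments, not
// part of this file):
//   horizontal pass:  a0 = (in[0] + in[2]) << 2, ... ,
//                     b0 = a0 + a1 + (a0 != 0),  b1 = a3 + a2,
//                     b2 = a3 - a2,              b3 = a0 - a1;
//   vertical pass:    same butterflies, without the shift and without the
//                     (a0 != 0) correction;
//   each coefficient then contributes  w[j] * ((abs(b) + 3) >> 3)  and the
//   function returns  sum_A - sum_B.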
static int TTransformSSE2(const uint8_t* inA, const uint8_t* inB,
                          const uint16_t* const w) {
  int32_t sum[4];
  __m128i tmp_0, tmp_1, tmp_2, tmp_3;
  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  const __m128i three = _mm_set1_epi16(3);

  // Load, combine and transpose inputs.
  {
    const __m128i inA_0 = _mm_loadl_epi64((__m128i*)&inA[BPS * 0]);
    const __m128i inA_1 = _mm_loadl_epi64((__m128i*)&inA[BPS * 1]);
    const __m128i inA_2 = _mm_loadl_epi64((__m128i*)&inA[BPS * 2]);
    const __m128i inA_3 = _mm_loadl_epi64((__m128i*)&inA[BPS * 3]);
    const __m128i inB_0 = _mm_loadl_epi64((__m128i*)&inB[BPS * 0]);
    const __m128i inB_1 = _mm_loadl_epi64((__m128i*)&inB[BPS * 1]);
    const __m128i inB_2 = _mm_loadl_epi64((__m128i*)&inB[BPS * 2]);
    const __m128i inB_3 = _mm_loadl_epi64((__m128i*)&inB[BPS * 3]);

    // Combine inA and inB (we'll do two transforms in parallel).
    const __m128i inAB_0 = _mm_unpacklo_epi8(inA_0, inB_0);
    const __m128i inAB_1 = _mm_unpacklo_epi8(inA_1, inB_1);
    const __m128i inAB_2 = _mm_unpacklo_epi8(inA_2, inB_2);
    const __m128i inAB_3 = _mm_unpacklo_epi8(inA_3, inB_3);
    // a00 b00 a01 b01 a02 b02 a03 b03   0 0 0 0 0 0 0 0
    // a10 b10 a11 b11 a12 b12 a13 b13   0 0 0 0 0 0 0 0
    // a20 b20 a21 b21 a22 b22 a23 b23   0 0 0 0 0 0 0 0
    // a30 b30 a31 b31 a32 b32 a33 b33   0 0 0 0 0 0 0 0

    // Transpose the two 4x4, discarding the filling zeroes.
    const __m128i transpose0_0 = _mm_unpacklo_epi8(inAB_0, inAB_2);
    const __m128i transpose0_1 = _mm_unpacklo_epi8(inAB_1, inAB_3);
    // a00 a20 b00 b20 a01 a21 b01 b21 a02 a22 b02 b22 a03 a23 b03 b23
    // a10 a30 b10 b30 a11 a31 b11 b31 a12 a32 b12 b32 a13 a33 b13 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi8(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpackhi_epi8(transpose0_0, transpose0_1);
    // a00 a10 a20 a30 b00 b10 b20 b30 a01 a11 a21 a31 b01 b11 b21 b31
    // a02 a12 a22 a32 b02 b12 b22 b32 a03 a13 a23 a33 b03 b13 b23 b33

    // Convert to 16b.
    tmp_0 = _mm_unpacklo_epi8(transpose1_0, zero);
    tmp_1 = _mm_unpackhi_epi8(transpose1_0, zero);
    tmp_2 = _mm_unpacklo_epi8(transpose1_1, zero);
    tmp_3 = _mm_unpackhi_epi8(transpose1_1, zero);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Horizontal pass and subsequent transpose.
  {
    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_slli_epi16(_mm_add_epi16(tmp_0, tmp_2), 2);
    const __m128i a1 = _mm_slli_epi16(_mm_add_epi16(tmp_1, tmp_3), 2);
    const __m128i a2 = _mm_slli_epi16(_mm_sub_epi16(tmp_1, tmp_3), 2);
    const __m128i a3 = _mm_slli_epi16(_mm_sub_epi16(tmp_0, tmp_2), 2);
    // b0_extra = (a0 != 0);
    const __m128i b0_extra = _mm_andnot_si128(_mm_cmpeq_epi16(a0, zero), one);
    const __m128i b0_base = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);
    const __m128i b0 = _mm_add_epi16(b0_base, b0_extra);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33

    // Transpose the two 4x4.
    const __m128i transpose0_0 = _mm_unpacklo_epi16(b0, b1);
    const __m128i transpose0_1 = _mm_unpacklo_epi16(b2, b3);
    const __m128i transpose0_2 = _mm_unpackhi_epi16(b0, b1);
    const __m128i transpose0_3 = _mm_unpackhi_epi16(b2, b3);
    // a00 a10 a01 a11   a02 a12 a03 a13
    // a20 a30 a21 a31   a22 a32 a23 a33
    // b00 b10 b01 b11   b02 b12 b03 b13
    // b20 b30 b21 b31   b22 b32 b23 b33
    const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
    const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
    const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
    // a00 a10 a20 a30   a01 a11 a21 a31
    // b00 b10 b20 b30   b01 b11 b21 b31
    // a02 a12 a22 a32   a03 a13 a23 a33
    // b02 b12 b22 b32   b03 b13 b23 b33
    tmp_0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
    tmp_1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
    tmp_2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
    tmp_3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
    // a00 a10 a20 a30   b00 b10 b20 b30
    // a01 a11 a21 a31   b01 b11 b21 b31
    // a02 a12 a22 a32   b02 b12 b22 b32
    // a03 a13 a23 a33   b03 b13 b23 b33
  }

  // Vertical pass and difference of weighted sums.
  {
    // Load all inputs.
    // TODO(cduvivier): Make variable declarations and allocations aligned so
    //                  we can use _mm_load_si128 instead of _mm_loadu_si128.
    const __m128i w_0 = _mm_loadu_si128((__m128i*)&w[0]);
    const __m128i w_8 = _mm_loadu_si128((__m128i*)&w[8]);

    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);

    // Separate the transforms of inA and inB.
    __m128i A_b0 = _mm_unpacklo_epi64(b0, b1);
    __m128i A_b2 = _mm_unpacklo_epi64(b2, b3);
    __m128i B_b0 = _mm_unpackhi_epi64(b0, b1);
    __m128i B_b2 = _mm_unpackhi_epi64(b2, b3);

    {
      // sign(b) = b >> 15  (0x0000 if positive, 0xffff if negative)
      const __m128i sign_A_b0 = _mm_srai_epi16(A_b0, 15);
      const __m128i sign_A_b2 = _mm_srai_epi16(A_b2, 15);
      const __m128i sign_B_b0 = _mm_srai_epi16(B_b0, 15);
      const __m128i sign_B_b2 = _mm_srai_epi16(B_b2, 15);

      // b = abs(b) = (b ^ sign) - sign
      A_b0 = _mm_xor_si128(A_b0, sign_A_b0);
      A_b2 = _mm_xor_si128(A_b2, sign_A_b2);
      B_b0 = _mm_xor_si128(B_b0, sign_B_b0);
      B_b2 = _mm_xor_si128(B_b2, sign_B_b2);
      A_b0 = _mm_sub_epi16(A_b0, sign_A_b0);
      A_b2 = _mm_sub_epi16(A_b2, sign_A_b2);
      B_b0 = _mm_sub_epi16(B_b0, sign_B_b0);
      B_b2 = _mm_sub_epi16(B_b2, sign_B_b2);
    }

    // b = abs(b) + 3
    A_b0 = _mm_add_epi16(A_b0, three);
    A_b2 = _mm_add_epi16(A_b2, three);
    B_b0 = _mm_add_epi16(B_b0, three);
    B_b2 = _mm_add_epi16(B_b2, three);

    // abs((b + (b<0) + 3) >> 3) = (abs(b) + 3) >> 3
    // b = (abs(b) + 3) >> 3
    A_b0 = _mm_srai_epi16(A_b0, 3);
    A_b2 = _mm_srai_epi16(A_b2, 3);
    B_b0 = _mm_srai_epi16(B_b0, 3);
    B_b2 = _mm_srai_epi16(B_b2, 3);

    // weighted sums
    A_b0 = _mm_madd_epi16(A_b0, w_0);
    A_b2 = _mm_madd_epi16(A_b2, w_8);
    B_b0 = _mm_madd_epi16(B_b0, w_0);
    B_b2 = _mm_madd_epi16(B_b2, w_8);
    A_b0 = _mm_add_epi32(A_b0, A_b2);
    B_b0 = _mm_add_epi32(B_b0, B_b2);

    // difference of weighted sums
    A_b0 = _mm_sub_epi32(A_b0, B_b0);
    _mm_storeu_si128((__m128i*)&sum[0], A_b0);
  }
  return sum[0] + sum[1] + sum[2] + sum[3];
}

static int Disto4x4SSE2(const uint8_t* const a, const uint8_t* const b,
                        const uint16_t* const w) {
  const int diff_sum = TTransformSSE2(a, b, w);
  return (abs(diff_sum) + 8) >> 4;
}

static int Disto16x16SSE2(const uint8_t* const a, const uint8_t* const b,
                          const uint16_t* const w) {
  int D = 0;
  int x, y;
  for (y = 0; y < 16 * BPS; y += 4 * BPS) {
    for (x = 0; x < 16; x += 4) {
      D += Disto4x4SSE2(a + x + y, b + x + y, w);
    }
  }
  return D;
}

//------------------------------------------------------------------------------
// Quantization
//
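// For reference, the per-coefficient scalar quantization that the SSE2 code
// below vectorizes is roughly (a sketch following the step comments in
// QuantizeBlockSSE2; not part of this file):
//   sign  = (in[j] < 0);
//   coeff = (sign ? -in[j] : in[j]) + mtx->sharpen_[j];
//   if (coeff > 2047) coeff = 2047;
//   out[j] = (coeff * mtx->iq_[j] + mtx->bias_[j]) >> QFIX;
//   if (sign) out[j] = -out[j];
//   in[j] = out[j] * mtx->q_[j];
//   if (coeff <= mtx->zthresh_[j]) in[j] = out[j] = 0;
// followed by zigzag reordering of out[] and a test for remaining non-zero
// levels.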
// Simple quantization
static int QuantizeBlockSSE2(int16_t in[16], int16_t out[16],
                             int n, const VP8Matrix* const mtx) {
  const __m128i max_coeff_2047 = _mm_set1_epi16(2047);
  const __m128i zero = _mm_set1_epi16(0);
  __m128i sign0, sign8;
  __m128i coeff0, coeff8;
  __m128i out0, out8;
  __m128i packed_out;

  // Load all inputs.
  // TODO(cduvivier): Make variable declarations and allocations aligned so
  //                  that we can use _mm_load_si128 instead of _mm_loadu_si128.
  __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
  __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
  const __m128i sharpen0 = _mm_loadu_si128((__m128i*)&mtx->sharpen_[0]);
  const __m128i sharpen8 = _mm_loadu_si128((__m128i*)&mtx->sharpen_[8]);
  const __m128i iq0 = _mm_loadu_si128((__m128i*)&mtx->iq_[0]);
  const __m128i iq8 = _mm_loadu_si128((__m128i*)&mtx->iq_[8]);
  const __m128i bias0 = _mm_loadu_si128((__m128i*)&mtx->bias_[0]);
  const __m128i bias8 = _mm_loadu_si128((__m128i*)&mtx->bias_[8]);
  const __m128i q0 = _mm_loadu_si128((__m128i*)&mtx->q_[0]);
  const __m128i q8 = _mm_loadu_si128((__m128i*)&mtx->q_[8]);
  const __m128i zthresh0 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[0]);
  const __m128i zthresh8 = _mm_loadu_si128((__m128i*)&mtx->zthresh_[8]);

  // sign(in) = in >> 15  (0x0000 if positive, 0xffff if negative)
  sign0 = _mm_srai_epi16(in0, 15);
  sign8 = _mm_srai_epi16(in8, 15);

  // coeff = abs(in) = (in ^ sign) - sign
  coeff0 = _mm_xor_si128(in0, sign0);
  coeff8 = _mm_xor_si128(in8, sign8);
  coeff0 = _mm_sub_epi16(coeff0, sign0);
  coeff8 = _mm_sub_epi16(coeff8, sign8);

  // coeff = abs(in) + sharpen
  coeff0 = _mm_add_epi16(coeff0, sharpen0);
  coeff8 = _mm_add_epi16(coeff8, sharpen8);

  // if (coeff > 2047) coeff = 2047
  coeff0 = _mm_min_epi16(coeff0, max_coeff_2047);
  coeff8 = _mm_min_epi16(coeff8, max_coeff_2047);

  // out = (coeff * iQ + B) >> QFIX;
  {
    // doing calculations with 32b precision (QFIX=17)
    // out = (coeff * iQ)
    __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);
    __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);
    __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);
    __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);
    __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
    __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
    // expand bias from 16b to 32b
    __m128i bias_00 = _mm_unpacklo_epi16(bias0, zero);
    __m128i bias_04 = _mm_unpackhi_epi16(bias0, zero);
    __m128i bias_08 = _mm_unpacklo_epi16(bias8, zero);
    __m128i bias_12 = _mm_unpackhi_epi16(bias8, zero);
    // out = (coeff * iQ + B)
    out_00 = _mm_add_epi32(out_00, bias_00);
    out_04 = _mm_add_epi32(out_04, bias_04);
    out_08 = _mm_add_epi32(out_08, bias_08);
    out_12 = _mm_add_epi32(out_12, bias_12);
    // out = (coeff * iQ + B) >> QFIX;
    out_00 = _mm_srai_epi32(out_00, QFIX);
    out_04 = _mm_srai_epi32(out_04, QFIX);
    out_08 = _mm_srai_epi32(out_08, QFIX);
    out_12 = _mm_srai_epi32(out_12, QFIX);
    // pack result as 16b
    out0 = _mm_packs_epi32(out_00, out_04);
    out8 = _mm_packs_epi32(out_08, out_12);
  }

  // get sign back (if (sign[j]) out_n = -out_n)
  out0 = _mm_xor_si128(out0, sign0);
  out8 = _mm_xor_si128(out8, sign8);
  out0 = _mm_sub_epi16(out0, sign0);
  out8 = _mm_sub_epi16(out8, sign8);

  // in = out * Q
  in0 = _mm_mullo_epi16(out0, q0);
  in8 = _mm_mullo_epi16(out8, q8);

  // if (coeff <= mtx->zthresh_) {in=0; out=0;}
  {
    __m128i cmp0 = _mm_cmpgt_epi16(coeff0, zthresh0);
    __m128i cmp8 = _mm_cmpgt_epi16(coeff8, zthresh8);
    in0 = _mm_and_si128(in0, cmp0);
    in8 = _mm_and_si128(in8, cmp8);
    _mm_storeu_si128((__m128i*)&in[0], in0);
    _mm_storeu_si128((__m128i*)&in[8], in8);
    out0 = _mm_and_si128(out0, cmp0);
    out8 = _mm_and_si128(out8, cmp8);
  }
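  // For reference, the scan order produced below is the usual VP8 zigzag
  // table (quoted from the VP8 spec rather than from this file):
  //   zigzag = {0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15},
  // with out[n] receiving coefficient zigzag[n].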
  // zigzag the output before storing it.
  //
  // The zigzag pattern can almost be reproduced with a small sequence of
  // shuffles. After it, we only need to swap values 7 and 8 (value 7 ends up
  // in position 3 instead of 12, and vice versa).
  {
    __m128i outZ0, outZ8;
    outZ0 = _mm_shufflehi_epi16(out0,  _MM_SHUFFLE(2, 1, 3, 0));
    outZ0 = _mm_shuffle_epi32  (outZ0, _MM_SHUFFLE(3, 1, 2, 0));
    outZ0 = _mm_shufflehi_epi16(outZ0, _MM_SHUFFLE(3, 1, 0, 2));
    outZ8 = _mm_shufflelo_epi16(out8,  _MM_SHUFFLE(3, 0, 2, 1));
    outZ8 = _mm_shuffle_epi32  (outZ8, _MM_SHUFFLE(3, 1, 2, 0));
    outZ8 = _mm_shufflelo_epi16(outZ8, _MM_SHUFFLE(1, 3, 2, 0));
    _mm_storeu_si128((__m128i*)&out[0], outZ0);
    _mm_storeu_si128((__m128i*)&out[8], outZ8);
    packed_out = _mm_packs_epi16(outZ0, outZ8);
  }
  {
    // Finish the zigzag with the remaining 3 <-> 12 swap.
    const int16_t outZ_12 = out[12];
    const int16_t outZ_3 = out[3];
    out[3] = outZ_12;
    out[12] = outZ_3;
  }

  // detect if all 'out' values are zeroes or not
  {
    int32_t tmp[4];
    _mm_storeu_si128((__m128i*)tmp, packed_out);
    if (n) {
      tmp[0] &= ~0xff;   // ignore the DC coefficient when n == 1
    }
    return (tmp[3] || tmp[2] || tmp[1] || tmp[0]);
  }
}

extern void VP8EncDspInitSSE2(void);
void VP8EncDspInitSSE2(void) {
  VP8CollectHistogram = CollectHistogramSSE2;
  VP8EncQuantizeBlock = QuantizeBlockSSE2;
  VP8ITransform = ITransformSSE2;
  VP8FTransform = FTransformSSE2;
  VP8SSE4x4 = SSE4x4SSE2;
  VP8TDisto4x4 = Disto4x4SSE2;
  VP8TDisto16x16 = Disto16x16SSE2;
}

#if defined(__cplusplus) || defined(c_plusplus)
}  // extern "C"
#endif

#endif  // WEBP_USE_SSE2