1// Copyright 2011 Google Inc. All Rights Reserved. 2// 3// Use of this source code is governed by a BSD-style license 4// that can be found in the COPYING file in the root of the source 5// tree. An additional intellectual property rights grant can be found 6// in the file PATENTS. All contributing project authors may 7// be found in the AUTHORS file in the root of the source tree. 8// ----------------------------------------------------------------------------- 9// 10// SSE2 version of some decoding functions (idct, loop filtering). 11// 12// Author: somnath@google.com (Somnath Banerjee) 13// cduvivier@google.com (Christian Duvivier) 14 15#include "src/dsp/dsp.h" 16 17#if defined(WEBP_USE_SSE2) 18 19// The 3-coeff sparse transform in SSE2 is not really faster than the plain-C 20// one it seems => disable it by default. Uncomment the following to enable: 21#if !defined(USE_TRANSFORM_AC3) 22#define USE_TRANSFORM_AC3 0 // ALTERNATE_CODE 23#endif 24 25#include <emmintrin.h> 26#include "src/dsp/common_sse2.h" 27#include "src/dec/vp8i_dec.h" 28#include "src/utils/utils.h" 29 30//------------------------------------------------------------------------------ 31// Transforms (Paragraph 14.4) 32 33static void Transform_SSE2(const int16_t* in, uint8_t* dst, int do_two) { 34 // This implementation makes use of 16-bit fixed point versions of two 35 // multiply constants: 36 // K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16 37 // K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16 38 // 39 // To be able to use signed 16-bit integers, we use the following trick to 40 // have constants within range: 41 // - Associated constants are obtained by subtracting the 16-bit fixed point 42 // version of one: 43 // k = K - (1 << 16) => K = k + (1 << 16) 44 // K1 = 85267 => k1 = 20091 45 // K2 = 35468 => k2 = -30068 46 // - The multiplication of a variable by a constant become the sum of the 47 // variable and the multiplication of that variable by the associated 48 // constant: 49 // (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x 50 const __m128i k1 = _mm_set1_epi16(20091); 51 const __m128i k2 = _mm_set1_epi16(-30068); 52 __m128i T0, T1, T2, T3; 53 54 // Load and concatenate the transform coefficients (we'll do two transforms 55 // in parallel). In the case of only one transform, the second half of the 56 // vectors will just contain random value we'll never use nor store. 57 __m128i in0, in1, in2, in3; 58 { 59 in0 = _mm_loadl_epi64((const __m128i*)&in[0]); 60 in1 = _mm_loadl_epi64((const __m128i*)&in[4]); 61 in2 = _mm_loadl_epi64((const __m128i*)&in[8]); 62 in3 = _mm_loadl_epi64((const __m128i*)&in[12]); 63 // a00 a10 a20 a30 x x x x 64 // a01 a11 a21 a31 x x x x 65 // a02 a12 a22 a32 x x x x 66 // a03 a13 a23 a33 x x x x 67 if (do_two) { 68 const __m128i inB0 = _mm_loadl_epi64((const __m128i*)&in[16]); 69 const __m128i inB1 = _mm_loadl_epi64((const __m128i*)&in[20]); 70 const __m128i inB2 = _mm_loadl_epi64((const __m128i*)&in[24]); 71 const __m128i inB3 = _mm_loadl_epi64((const __m128i*)&in[28]); 72 in0 = _mm_unpacklo_epi64(in0, inB0); 73 in1 = _mm_unpacklo_epi64(in1, inB1); 74 in2 = _mm_unpacklo_epi64(in2, inB2); 75 in3 = _mm_unpacklo_epi64(in3, inB3); 76 // a00 a10 a20 a30 b00 b10 b20 b30 77 // a01 a11 a21 a31 b01 b11 b21 b31 78 // a02 a12 a22 a32 b02 b12 b22 b32 79 // a03 a13 a23 a33 b03 b13 b23 b33 80 } 81 } 82 83 // Vertical pass and subsequent transpose. 84 { 85 // First pass, c and d calculations are longer because of the "trick" 86 // multiplications. 87 const __m128i a = _mm_add_epi16(in0, in2); 88 const __m128i b = _mm_sub_epi16(in0, in2); 89 // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3 90 const __m128i c1 = _mm_mulhi_epi16(in1, k2); 91 const __m128i c2 = _mm_mulhi_epi16(in3, k1); 92 const __m128i c3 = _mm_sub_epi16(in1, in3); 93 const __m128i c4 = _mm_sub_epi16(c1, c2); 94 const __m128i c = _mm_add_epi16(c3, c4); 95 // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3 96 const __m128i d1 = _mm_mulhi_epi16(in1, k1); 97 const __m128i d2 = _mm_mulhi_epi16(in3, k2); 98 const __m128i d3 = _mm_add_epi16(in1, in3); 99 const __m128i d4 = _mm_add_epi16(d1, d2); 100 const __m128i d = _mm_add_epi16(d3, d4); 101 102 // Second pass. 103 const __m128i tmp0 = _mm_add_epi16(a, d); 104 const __m128i tmp1 = _mm_add_epi16(b, c); 105 const __m128i tmp2 = _mm_sub_epi16(b, c); 106 const __m128i tmp3 = _mm_sub_epi16(a, d); 107 108 // Transpose the two 4x4. 109 VP8Transpose_2_4x4_16b(&tmp0, &tmp1, &tmp2, &tmp3, &T0, &T1, &T2, &T3); 110 } 111 112 // Horizontal pass and subsequent transpose. 113 { 114 // First pass, c and d calculations are longer because of the "trick" 115 // multiplications. 116 const __m128i four = _mm_set1_epi16(4); 117 const __m128i dc = _mm_add_epi16(T0, four); 118 const __m128i a = _mm_add_epi16(dc, T2); 119 const __m128i b = _mm_sub_epi16(dc, T2); 120 // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3 121 const __m128i c1 = _mm_mulhi_epi16(T1, k2); 122 const __m128i c2 = _mm_mulhi_epi16(T3, k1); 123 const __m128i c3 = _mm_sub_epi16(T1, T3); 124 const __m128i c4 = _mm_sub_epi16(c1, c2); 125 const __m128i c = _mm_add_epi16(c3, c4); 126 // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3 127 const __m128i d1 = _mm_mulhi_epi16(T1, k1); 128 const __m128i d2 = _mm_mulhi_epi16(T3, k2); 129 const __m128i d3 = _mm_add_epi16(T1, T3); 130 const __m128i d4 = _mm_add_epi16(d1, d2); 131 const __m128i d = _mm_add_epi16(d3, d4); 132 133 // Second pass. 134 const __m128i tmp0 = _mm_add_epi16(a, d); 135 const __m128i tmp1 = _mm_add_epi16(b, c); 136 const __m128i tmp2 = _mm_sub_epi16(b, c); 137 const __m128i tmp3 = _mm_sub_epi16(a, d); 138 const __m128i shifted0 = _mm_srai_epi16(tmp0, 3); 139 const __m128i shifted1 = _mm_srai_epi16(tmp1, 3); 140 const __m128i shifted2 = _mm_srai_epi16(tmp2, 3); 141 const __m128i shifted3 = _mm_srai_epi16(tmp3, 3); 142 143 // Transpose the two 4x4. 144 VP8Transpose_2_4x4_16b(&shifted0, &shifted1, &shifted2, &shifted3, &T0, &T1, 145 &T2, &T3); 146 } 147 148 // Add inverse transform to 'dst' and store. 149 { 150 const __m128i zero = _mm_setzero_si128(); 151 // Load the reference(s). 152 __m128i dst0, dst1, dst2, dst3; 153 if (do_two) { 154 // Load eight bytes/pixels per line. 155 dst0 = _mm_loadl_epi64((__m128i*)(dst + 0 * BPS)); 156 dst1 = _mm_loadl_epi64((__m128i*)(dst + 1 * BPS)); 157 dst2 = _mm_loadl_epi64((__m128i*)(dst + 2 * BPS)); 158 dst3 = _mm_loadl_epi64((__m128i*)(dst + 3 * BPS)); 159 } else { 160 // Load four bytes/pixels per line. 161 dst0 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 0 * BPS)); 162 dst1 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 1 * BPS)); 163 dst2 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 2 * BPS)); 164 dst3 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 3 * BPS)); 165 } 166 // Convert to 16b. 167 dst0 = _mm_unpacklo_epi8(dst0, zero); 168 dst1 = _mm_unpacklo_epi8(dst1, zero); 169 dst2 = _mm_unpacklo_epi8(dst2, zero); 170 dst3 = _mm_unpacklo_epi8(dst3, zero); 171 // Add the inverse transform(s). 172 dst0 = _mm_add_epi16(dst0, T0); 173 dst1 = _mm_add_epi16(dst1, T1); 174 dst2 = _mm_add_epi16(dst2, T2); 175 dst3 = _mm_add_epi16(dst3, T3); 176 // Unsigned saturate to 8b. 177 dst0 = _mm_packus_epi16(dst0, dst0); 178 dst1 = _mm_packus_epi16(dst1, dst1); 179 dst2 = _mm_packus_epi16(dst2, dst2); 180 dst3 = _mm_packus_epi16(dst3, dst3); 181 // Store the results. 182 if (do_two) { 183 // Store eight bytes/pixels per line. 184 _mm_storel_epi64((__m128i*)(dst + 0 * BPS), dst0); 185 _mm_storel_epi64((__m128i*)(dst + 1 * BPS), dst1); 186 _mm_storel_epi64((__m128i*)(dst + 2 * BPS), dst2); 187 _mm_storel_epi64((__m128i*)(dst + 3 * BPS), dst3); 188 } else { 189 // Store four bytes/pixels per line. 190 WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0)); 191 WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1)); 192 WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2)); 193 WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3)); 194 } 195 } 196} 197 198#if (USE_TRANSFORM_AC3 == 1) 199#define MUL(a, b) (((a) * (b)) >> 16) 200static void TransformAC3(const int16_t* in, uint8_t* dst) { 201 static const int kC1 = 20091 + (1 << 16); 202 static const int kC2 = 35468; 203 const __m128i A = _mm_set1_epi16(in[0] + 4); 204 const __m128i c4 = _mm_set1_epi16(MUL(in[4], kC2)); 205 const __m128i d4 = _mm_set1_epi16(MUL(in[4], kC1)); 206 const int c1 = MUL(in[1], kC2); 207 const int d1 = MUL(in[1], kC1); 208 const __m128i CD = _mm_set_epi16(0, 0, 0, 0, -d1, -c1, c1, d1); 209 const __m128i B = _mm_adds_epi16(A, CD); 210 const __m128i m0 = _mm_adds_epi16(B, d4); 211 const __m128i m1 = _mm_adds_epi16(B, c4); 212 const __m128i m2 = _mm_subs_epi16(B, c4); 213 const __m128i m3 = _mm_subs_epi16(B, d4); 214 const __m128i zero = _mm_setzero_si128(); 215 // Load the source pixels. 216 __m128i dst0 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 0 * BPS)); 217 __m128i dst1 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 1 * BPS)); 218 __m128i dst2 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 2 * BPS)); 219 __m128i dst3 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 3 * BPS)); 220 // Convert to 16b. 221 dst0 = _mm_unpacklo_epi8(dst0, zero); 222 dst1 = _mm_unpacklo_epi8(dst1, zero); 223 dst2 = _mm_unpacklo_epi8(dst2, zero); 224 dst3 = _mm_unpacklo_epi8(dst3, zero); 225 // Add the inverse transform. 226 dst0 = _mm_adds_epi16(dst0, _mm_srai_epi16(m0, 3)); 227 dst1 = _mm_adds_epi16(dst1, _mm_srai_epi16(m1, 3)); 228 dst2 = _mm_adds_epi16(dst2, _mm_srai_epi16(m2, 3)); 229 dst3 = _mm_adds_epi16(dst3, _mm_srai_epi16(m3, 3)); 230 // Unsigned saturate to 8b. 231 dst0 = _mm_packus_epi16(dst0, dst0); 232 dst1 = _mm_packus_epi16(dst1, dst1); 233 dst2 = _mm_packus_epi16(dst2, dst2); 234 dst3 = _mm_packus_epi16(dst3, dst3); 235 // Store the results. 236 WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0)); 237 WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1)); 238 WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2)); 239 WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3)); 240} 241#undef MUL 242#endif // USE_TRANSFORM_AC3 243 244//------------------------------------------------------------------------------ 245// Loop Filter (Paragraph 15) 246 247// Compute abs(p - q) = subs(p - q) OR subs(q - p) 248#define MM_ABS(p, q) _mm_or_si128( \ 249 _mm_subs_epu8((q), (p)), \ 250 _mm_subs_epu8((p), (q))) 251 252// Shift each byte of "x" by 3 bits while preserving by the sign bit. 253static WEBP_INLINE void SignedShift8b_SSE2(__m128i* const x) { 254 const __m128i zero = _mm_setzero_si128(); 255 const __m128i lo_0 = _mm_unpacklo_epi8(zero, *x); 256 const __m128i hi_0 = _mm_unpackhi_epi8(zero, *x); 257 const __m128i lo_1 = _mm_srai_epi16(lo_0, 3 + 8); 258 const __m128i hi_1 = _mm_srai_epi16(hi_0, 3 + 8); 259 *x = _mm_packs_epi16(lo_1, hi_1); 260} 261 262#define FLIP_SIGN_BIT2(a, b) { \ 263 (a) = _mm_xor_si128(a, sign_bit); \ 264 (b) = _mm_xor_si128(b, sign_bit); \ 265} 266 267#define FLIP_SIGN_BIT4(a, b, c, d) { \ 268 FLIP_SIGN_BIT2(a, b); \ 269 FLIP_SIGN_BIT2(c, d); \ 270} 271 272// input/output is uint8_t 273static WEBP_INLINE void GetNotHEV_SSE2(const __m128i* const p1, 274 const __m128i* const p0, 275 const __m128i* const q0, 276 const __m128i* const q1, 277 int hev_thresh, __m128i* const not_hev) { 278 const __m128i zero = _mm_setzero_si128(); 279 const __m128i t_1 = MM_ABS(*p1, *p0); 280 const __m128i t_2 = MM_ABS(*q1, *q0); 281 282 const __m128i h = _mm_set1_epi8(hev_thresh); 283 const __m128i t_max = _mm_max_epu8(t_1, t_2); 284 285 const __m128i t_max_h = _mm_subs_epu8(t_max, h); 286 *not_hev = _mm_cmpeq_epi8(t_max_h, zero); // not_hev <= t1 && not_hev <= t2 287} 288 289// input pixels are int8_t 290static WEBP_INLINE void GetBaseDelta_SSE2(const __m128i* const p1, 291 const __m128i* const p0, 292 const __m128i* const q0, 293 const __m128i* const q1, 294 __m128i* const delta) { 295 // beware of addition order, for saturation! 296 const __m128i p1_q1 = _mm_subs_epi8(*p1, *q1); // p1 - q1 297 const __m128i q0_p0 = _mm_subs_epi8(*q0, *p0); // q0 - p0 298 const __m128i s1 = _mm_adds_epi8(p1_q1, q0_p0); // p1 - q1 + 1 * (q0 - p0) 299 const __m128i s2 = _mm_adds_epi8(q0_p0, s1); // p1 - q1 + 2 * (q0 - p0) 300 const __m128i s3 = _mm_adds_epi8(q0_p0, s2); // p1 - q1 + 3 * (q0 - p0) 301 *delta = s3; 302} 303 304// input and output are int8_t 305static WEBP_INLINE void DoSimpleFilter_SSE2(__m128i* const p0, 306 __m128i* const q0, 307 const __m128i* const fl) { 308 const __m128i k3 = _mm_set1_epi8(3); 309 const __m128i k4 = _mm_set1_epi8(4); 310 __m128i v3 = _mm_adds_epi8(*fl, k3); 311 __m128i v4 = _mm_adds_epi8(*fl, k4); 312 313 SignedShift8b_SSE2(&v4); // v4 >> 3 314 SignedShift8b_SSE2(&v3); // v3 >> 3 315 *q0 = _mm_subs_epi8(*q0, v4); // q0 -= v4 316 *p0 = _mm_adds_epi8(*p0, v3); // p0 += v3 317} 318 319// Updates values of 2 pixels at MB edge during complex filtering. 320// Update operations: 321// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)] 322// Pixels 'pi' and 'qi' are int8_t on input, uint8_t on output (sign flip). 323static WEBP_INLINE void Update2Pixels_SSE2(__m128i* const pi, __m128i* const qi, 324 const __m128i* const a0_lo, 325 const __m128i* const a0_hi) { 326 const __m128i a1_lo = _mm_srai_epi16(*a0_lo, 7); 327 const __m128i a1_hi = _mm_srai_epi16(*a0_hi, 7); 328 const __m128i delta = _mm_packs_epi16(a1_lo, a1_hi); 329 const __m128i sign_bit = _mm_set1_epi8(0x80); 330 *pi = _mm_adds_epi8(*pi, delta); 331 *qi = _mm_subs_epi8(*qi, delta); 332 FLIP_SIGN_BIT2(*pi, *qi); 333} 334 335// input pixels are uint8_t 336static WEBP_INLINE void NeedsFilter_SSE2(const __m128i* const p1, 337 const __m128i* const p0, 338 const __m128i* const q0, 339 const __m128i* const q1, 340 int thresh, __m128i* const mask) { 341 const __m128i m_thresh = _mm_set1_epi8(thresh); 342 const __m128i t1 = MM_ABS(*p1, *q1); // abs(p1 - q1) 343 const __m128i kFE = _mm_set1_epi8(0xFE); 344 const __m128i t2 = _mm_and_si128(t1, kFE); // set lsb of each byte to zero 345 const __m128i t3 = _mm_srli_epi16(t2, 1); // abs(p1 - q1) / 2 346 347 const __m128i t4 = MM_ABS(*p0, *q0); // abs(p0 - q0) 348 const __m128i t5 = _mm_adds_epu8(t4, t4); // abs(p0 - q0) * 2 349 const __m128i t6 = _mm_adds_epu8(t5, t3); // abs(p0-q0)*2 + abs(p1-q1)/2 350 351 const __m128i t7 = _mm_subs_epu8(t6, m_thresh); // mask <= m_thresh 352 *mask = _mm_cmpeq_epi8(t7, _mm_setzero_si128()); 353} 354 355//------------------------------------------------------------------------------ 356// Edge filtering functions 357 358// Applies filter on 2 pixels (p0 and q0) 359static WEBP_INLINE void DoFilter2_SSE2(__m128i* const p1, __m128i* const p0, 360 __m128i* const q0, __m128i* const q1, 361 int thresh) { 362 __m128i a, mask; 363 const __m128i sign_bit = _mm_set1_epi8(0x80); 364 // convert p1/q1 to int8_t (for GetBaseDelta_SSE2) 365 const __m128i p1s = _mm_xor_si128(*p1, sign_bit); 366 const __m128i q1s = _mm_xor_si128(*q1, sign_bit); 367 368 NeedsFilter_SSE2(p1, p0, q0, q1, thresh, &mask); 369 370 FLIP_SIGN_BIT2(*p0, *q0); 371 GetBaseDelta_SSE2(&p1s, p0, q0, &q1s, &a); 372 a = _mm_and_si128(a, mask); // mask filter values we don't care about 373 DoSimpleFilter_SSE2(p0, q0, &a); 374 FLIP_SIGN_BIT2(*p0, *q0); 375} 376 377// Applies filter on 4 pixels (p1, p0, q0 and q1) 378static WEBP_INLINE void DoFilter4_SSE2(__m128i* const p1, __m128i* const p0, 379 __m128i* const q0, __m128i* const q1, 380 const __m128i* const mask, 381 int hev_thresh) { 382 const __m128i zero = _mm_setzero_si128(); 383 const __m128i sign_bit = _mm_set1_epi8(0x80); 384 const __m128i k64 = _mm_set1_epi8(64); 385 const __m128i k3 = _mm_set1_epi8(3); 386 const __m128i k4 = _mm_set1_epi8(4); 387 __m128i not_hev; 388 __m128i t1, t2, t3; 389 390 // compute hev mask 391 GetNotHEV_SSE2(p1, p0, q0, q1, hev_thresh, ¬_hev); 392 393 // convert to signed values 394 FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1); 395 396 t1 = _mm_subs_epi8(*p1, *q1); // p1 - q1 397 t1 = _mm_andnot_si128(not_hev, t1); // hev(p1 - q1) 398 t2 = _mm_subs_epi8(*q0, *p0); // q0 - p0 399 t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 1 * (q0 - p0) 400 t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 2 * (q0 - p0) 401 t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 3 * (q0 - p0) 402 t1 = _mm_and_si128(t1, *mask); // mask filter values we don't care about 403 404 t2 = _mm_adds_epi8(t1, k3); // 3 * (q0 - p0) + hev(p1 - q1) + 3 405 t3 = _mm_adds_epi8(t1, k4); // 3 * (q0 - p0) + hev(p1 - q1) + 4 406 SignedShift8b_SSE2(&t2); // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3 407 SignedShift8b_SSE2(&t3); // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3 408 *p0 = _mm_adds_epi8(*p0, t2); // p0 += t2 409 *q0 = _mm_subs_epi8(*q0, t3); // q0 -= t3 410 FLIP_SIGN_BIT2(*p0, *q0); 411 412 // this is equivalent to signed (a + 1) >> 1 calculation 413 t2 = _mm_add_epi8(t3, sign_bit); 414 t3 = _mm_avg_epu8(t2, zero); 415 t3 = _mm_sub_epi8(t3, k64); 416 417 t3 = _mm_and_si128(not_hev, t3); // if !hev 418 *q1 = _mm_subs_epi8(*q1, t3); // q1 -= t3 419 *p1 = _mm_adds_epi8(*p1, t3); // p1 += t3 420 FLIP_SIGN_BIT2(*p1, *q1); 421} 422 423// Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2) 424static WEBP_INLINE void DoFilter6_SSE2(__m128i* const p2, __m128i* const p1, 425 __m128i* const p0, __m128i* const q0, 426 __m128i* const q1, __m128i* const q2, 427 const __m128i* const mask, 428 int hev_thresh) { 429 const __m128i zero = _mm_setzero_si128(); 430 const __m128i sign_bit = _mm_set1_epi8(0x80); 431 __m128i a, not_hev; 432 433 // compute hev mask 434 GetNotHEV_SSE2(p1, p0, q0, q1, hev_thresh, ¬_hev); 435 436 FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1); 437 FLIP_SIGN_BIT2(*p2, *q2); 438 GetBaseDelta_SSE2(p1, p0, q0, q1, &a); 439 440 { // do simple filter on pixels with hev 441 const __m128i m = _mm_andnot_si128(not_hev, *mask); 442 const __m128i f = _mm_and_si128(a, m); 443 DoSimpleFilter_SSE2(p0, q0, &f); 444 } 445 446 { // do strong filter on pixels with not hev 447 const __m128i k9 = _mm_set1_epi16(0x0900); 448 const __m128i k63 = _mm_set1_epi16(63); 449 450 const __m128i m = _mm_and_si128(not_hev, *mask); 451 const __m128i f = _mm_and_si128(a, m); 452 453 const __m128i f_lo = _mm_unpacklo_epi8(zero, f); 454 const __m128i f_hi = _mm_unpackhi_epi8(zero, f); 455 456 const __m128i f9_lo = _mm_mulhi_epi16(f_lo, k9); // Filter (lo) * 9 457 const __m128i f9_hi = _mm_mulhi_epi16(f_hi, k9); // Filter (hi) * 9 458 459 const __m128i a2_lo = _mm_add_epi16(f9_lo, k63); // Filter * 9 + 63 460 const __m128i a2_hi = _mm_add_epi16(f9_hi, k63); // Filter * 9 + 63 461 462 const __m128i a1_lo = _mm_add_epi16(a2_lo, f9_lo); // Filter * 18 + 63 463 const __m128i a1_hi = _mm_add_epi16(a2_hi, f9_hi); // Filter * 18 + 63 464 465 const __m128i a0_lo = _mm_add_epi16(a1_lo, f9_lo); // Filter * 27 + 63 466 const __m128i a0_hi = _mm_add_epi16(a1_hi, f9_hi); // Filter * 27 + 63 467 468 Update2Pixels_SSE2(p2, q2, &a2_lo, &a2_hi); 469 Update2Pixels_SSE2(p1, q1, &a1_lo, &a1_hi); 470 Update2Pixels_SSE2(p0, q0, &a0_lo, &a0_hi); 471 } 472} 473 474// reads 8 rows across a vertical edge. 475static WEBP_INLINE void Load8x4_SSE2(const uint8_t* const b, int stride, 476 __m128i* const p, __m128i* const q) { 477 // A0 = 63 62 61 60 23 22 21 20 43 42 41 40 03 02 01 00 478 // A1 = 73 72 71 70 33 32 31 30 53 52 51 50 13 12 11 10 479 const __m128i A0 = _mm_set_epi32( 480 WebPMemToUint32(&b[6 * stride]), WebPMemToUint32(&b[2 * stride]), 481 WebPMemToUint32(&b[4 * stride]), WebPMemToUint32(&b[0 * stride])); 482 const __m128i A1 = _mm_set_epi32( 483 WebPMemToUint32(&b[7 * stride]), WebPMemToUint32(&b[3 * stride]), 484 WebPMemToUint32(&b[5 * stride]), WebPMemToUint32(&b[1 * stride])); 485 486 // B0 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00 487 // B1 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20 488 const __m128i B0 = _mm_unpacklo_epi8(A0, A1); 489 const __m128i B1 = _mm_unpackhi_epi8(A0, A1); 490 491 // C0 = 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00 492 // C1 = 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40 493 const __m128i C0 = _mm_unpacklo_epi16(B0, B1); 494 const __m128i C1 = _mm_unpackhi_epi16(B0, B1); 495 496 // *p = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00 497 // *q = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02 498 *p = _mm_unpacklo_epi32(C0, C1); 499 *q = _mm_unpackhi_epi32(C0, C1); 500} 501 502static WEBP_INLINE void Load16x4_SSE2(const uint8_t* const r0, 503 const uint8_t* const r8, 504 int stride, 505 __m128i* const p1, __m128i* const p0, 506 __m128i* const q0, __m128i* const q1) { 507 // Assume the pixels around the edge (|) are numbered as follows 508 // 00 01 | 02 03 509 // 10 11 | 12 13 510 // ... | ... 511 // e0 e1 | e2 e3 512 // f0 f1 | f2 f3 513 // 514 // r0 is pointing to the 0th row (00) 515 // r8 is pointing to the 8th row (80) 516 517 // Load 518 // p1 = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00 519 // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02 520 // p0 = f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80 521 // q1 = f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82 522 Load8x4_SSE2(r0, stride, p1, q0); 523 Load8x4_SSE2(r8, stride, p0, q1); 524 525 { 526 // p1 = f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00 527 // p0 = f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01 528 // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02 529 // q1 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03 530 const __m128i t1 = *p1; 531 const __m128i t2 = *q0; 532 *p1 = _mm_unpacklo_epi64(t1, *p0); 533 *p0 = _mm_unpackhi_epi64(t1, *p0); 534 *q0 = _mm_unpacklo_epi64(t2, *q1); 535 *q1 = _mm_unpackhi_epi64(t2, *q1); 536 } 537} 538 539static WEBP_INLINE void Store4x4_SSE2(__m128i* const x, 540 uint8_t* dst, int stride) { 541 int i; 542 for (i = 0; i < 4; ++i, dst += stride) { 543 WebPUint32ToMem(dst, _mm_cvtsi128_si32(*x)); 544 *x = _mm_srli_si128(*x, 4); 545 } 546} 547 548// Transpose back and store 549static WEBP_INLINE void Store16x4_SSE2(const __m128i* const p1, 550 const __m128i* const p0, 551 const __m128i* const q0, 552 const __m128i* const q1, 553 uint8_t* r0, uint8_t* r8, 554 int stride) { 555 __m128i t1, p1_s, p0_s, q0_s, q1_s; 556 557 // p0 = 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00 558 // p1 = f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80 559 t1 = *p0; 560 p0_s = _mm_unpacklo_epi8(*p1, t1); 561 p1_s = _mm_unpackhi_epi8(*p1, t1); 562 563 // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02 564 // q1 = f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82 565 t1 = *q0; 566 q0_s = _mm_unpacklo_epi8(t1, *q1); 567 q1_s = _mm_unpackhi_epi8(t1, *q1); 568 569 // p0 = 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00 570 // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40 571 t1 = p0_s; 572 p0_s = _mm_unpacklo_epi16(t1, q0_s); 573 q0_s = _mm_unpackhi_epi16(t1, q0_s); 574 575 // p1 = b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80 576 // q1 = f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0 577 t1 = p1_s; 578 p1_s = _mm_unpacklo_epi16(t1, q1_s); 579 q1_s = _mm_unpackhi_epi16(t1, q1_s); 580 581 Store4x4_SSE2(&p0_s, r0, stride); 582 r0 += 4 * stride; 583 Store4x4_SSE2(&q0_s, r0, stride); 584 585 Store4x4_SSE2(&p1_s, r8, stride); 586 r8 += 4 * stride; 587 Store4x4_SSE2(&q1_s, r8, stride); 588} 589 590//------------------------------------------------------------------------------ 591// Simple In-loop filtering (Paragraph 15.2) 592 593static void SimpleVFilter16_SSE2(uint8_t* p, int stride, int thresh) { 594 // Load 595 __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]); 596 __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]); 597 __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]); 598 __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]); 599 600 DoFilter2_SSE2(&p1, &p0, &q0, &q1, thresh); 601 602 // Store 603 _mm_storeu_si128((__m128i*)&p[-stride], p0); 604 _mm_storeu_si128((__m128i*)&p[0], q0); 605} 606 607static void SimpleHFilter16_SSE2(uint8_t* p, int stride, int thresh) { 608 __m128i p1, p0, q0, q1; 609 610 p -= 2; // beginning of p1 611 612 Load16x4_SSE2(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1); 613 DoFilter2_SSE2(&p1, &p0, &q0, &q1, thresh); 614 Store16x4_SSE2(&p1, &p0, &q0, &q1, p, p + 8 * stride, stride); 615} 616 617static void SimpleVFilter16i_SSE2(uint8_t* p, int stride, int thresh) { 618 int k; 619 for (k = 3; k > 0; --k) { 620 p += 4 * stride; 621 SimpleVFilter16_SSE2(p, stride, thresh); 622 } 623} 624 625static void SimpleHFilter16i_SSE2(uint8_t* p, int stride, int thresh) { 626 int k; 627 for (k = 3; k > 0; --k) { 628 p += 4; 629 SimpleHFilter16_SSE2(p, stride, thresh); 630 } 631} 632 633//------------------------------------------------------------------------------ 634// Complex In-loop filtering (Paragraph 15.3) 635 636#define MAX_DIFF1(p3, p2, p1, p0, m) do { \ 637 (m) = MM_ABS(p1, p0); \ 638 (m) = _mm_max_epu8(m, MM_ABS(p3, p2)); \ 639 (m) = _mm_max_epu8(m, MM_ABS(p2, p1)); \ 640} while (0) 641 642#define MAX_DIFF2(p3, p2, p1, p0, m) do { \ 643 (m) = _mm_max_epu8(m, MM_ABS(p1, p0)); \ 644 (m) = _mm_max_epu8(m, MM_ABS(p3, p2)); \ 645 (m) = _mm_max_epu8(m, MM_ABS(p2, p1)); \ 646} while (0) 647 648#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) { \ 649 (e1) = _mm_loadu_si128((__m128i*)&(p)[0 * (stride)]); \ 650 (e2) = _mm_loadu_si128((__m128i*)&(p)[1 * (stride)]); \ 651 (e3) = _mm_loadu_si128((__m128i*)&(p)[2 * (stride)]); \ 652 (e4) = _mm_loadu_si128((__m128i*)&(p)[3 * (stride)]); \ 653} 654 655#define LOADUV_H_EDGE(p, u, v, stride) do { \ 656 const __m128i U = _mm_loadl_epi64((__m128i*)&(u)[(stride)]); \ 657 const __m128i V = _mm_loadl_epi64((__m128i*)&(v)[(stride)]); \ 658 (p) = _mm_unpacklo_epi64(U, V); \ 659} while (0) 660 661#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) { \ 662 LOADUV_H_EDGE(e1, u, v, 0 * (stride)); \ 663 LOADUV_H_EDGE(e2, u, v, 1 * (stride)); \ 664 LOADUV_H_EDGE(e3, u, v, 2 * (stride)); \ 665 LOADUV_H_EDGE(e4, u, v, 3 * (stride)); \ 666} 667 668#define STOREUV(p, u, v, stride) { \ 669 _mm_storel_epi64((__m128i*)&(u)[(stride)], p); \ 670 (p) = _mm_srli_si128(p, 8); \ 671 _mm_storel_epi64((__m128i*)&(v)[(stride)], p); \ 672} 673 674static WEBP_INLINE void ComplexMask_SSE2(const __m128i* const p1, 675 const __m128i* const p0, 676 const __m128i* const q0, 677 const __m128i* const q1, 678 int thresh, int ithresh, 679 __m128i* const mask) { 680 const __m128i it = _mm_set1_epi8(ithresh); 681 const __m128i diff = _mm_subs_epu8(*mask, it); 682 const __m128i thresh_mask = _mm_cmpeq_epi8(diff, _mm_setzero_si128()); 683 __m128i filter_mask; 684 NeedsFilter_SSE2(p1, p0, q0, q1, thresh, &filter_mask); 685 *mask = _mm_and_si128(thresh_mask, filter_mask); 686} 687 688// on macroblock edges 689static void VFilter16_SSE2(uint8_t* p, int stride, 690 int thresh, int ithresh, int hev_thresh) { 691 __m128i t1; 692 __m128i mask; 693 __m128i p2, p1, p0, q0, q1, q2; 694 695 // Load p3, p2, p1, p0 696 LOAD_H_EDGES4(p - 4 * stride, stride, t1, p2, p1, p0); 697 MAX_DIFF1(t1, p2, p1, p0, mask); 698 699 // Load q0, q1, q2, q3 700 LOAD_H_EDGES4(p, stride, q0, q1, q2, t1); 701 MAX_DIFF2(t1, q2, q1, q0, mask); 702 703 ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask); 704 DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); 705 706 // Store 707 _mm_storeu_si128((__m128i*)&p[-3 * stride], p2); 708 _mm_storeu_si128((__m128i*)&p[-2 * stride], p1); 709 _mm_storeu_si128((__m128i*)&p[-1 * stride], p0); 710 _mm_storeu_si128((__m128i*)&p[+0 * stride], q0); 711 _mm_storeu_si128((__m128i*)&p[+1 * stride], q1); 712 _mm_storeu_si128((__m128i*)&p[+2 * stride], q2); 713} 714 715static void HFilter16_SSE2(uint8_t* p, int stride, 716 int thresh, int ithresh, int hev_thresh) { 717 __m128i mask; 718 __m128i p3, p2, p1, p0, q0, q1, q2, q3; 719 720 uint8_t* const b = p - 4; 721 Load16x4_SSE2(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0); 722 MAX_DIFF1(p3, p2, p1, p0, mask); 723 724 Load16x4_SSE2(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3); 725 MAX_DIFF2(q3, q2, q1, q0, mask); 726 727 ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask); 728 DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); 729 730 Store16x4_SSE2(&p3, &p2, &p1, &p0, b, b + 8 * stride, stride); 731 Store16x4_SSE2(&q0, &q1, &q2, &q3, p, p + 8 * stride, stride); 732} 733 734// on three inner edges 735static void VFilter16i_SSE2(uint8_t* p, int stride, 736 int thresh, int ithresh, int hev_thresh) { 737 int k; 738 __m128i p3, p2, p1, p0; // loop invariants 739 740 LOAD_H_EDGES4(p, stride, p3, p2, p1, p0); // prologue 741 742 for (k = 3; k > 0; --k) { 743 __m128i mask, tmp1, tmp2; 744 uint8_t* const b = p + 2 * stride; // beginning of p1 745 p += 4 * stride; 746 747 MAX_DIFF1(p3, p2, p1, p0, mask); // compute partial mask 748 LOAD_H_EDGES4(p, stride, p3, p2, tmp1, tmp2); 749 MAX_DIFF2(p3, p2, tmp1, tmp2, mask); 750 751 // p3 and p2 are not just temporary variables here: they will be 752 // re-used for next span. And q2/q3 will become p1/p0 accordingly. 753 ComplexMask_SSE2(&p1, &p0, &p3, &p2, thresh, ithresh, &mask); 754 DoFilter4_SSE2(&p1, &p0, &p3, &p2, &mask, hev_thresh); 755 756 // Store 757 _mm_storeu_si128((__m128i*)&b[0 * stride], p1); 758 _mm_storeu_si128((__m128i*)&b[1 * stride], p0); 759 _mm_storeu_si128((__m128i*)&b[2 * stride], p3); 760 _mm_storeu_si128((__m128i*)&b[3 * stride], p2); 761 762 // rotate samples 763 p1 = tmp1; 764 p0 = tmp2; 765 } 766} 767 768static void HFilter16i_SSE2(uint8_t* p, int stride, 769 int thresh, int ithresh, int hev_thresh) { 770 int k; 771 __m128i p3, p2, p1, p0; // loop invariants 772 773 Load16x4_SSE2(p, p + 8 * stride, stride, &p3, &p2, &p1, &p0); // prologue 774 775 for (k = 3; k > 0; --k) { 776 __m128i mask, tmp1, tmp2; 777 uint8_t* const b = p + 2; // beginning of p1 778 779 p += 4; // beginning of q0 (and next span) 780 781 MAX_DIFF1(p3, p2, p1, p0, mask); // compute partial mask 782 Load16x4_SSE2(p, p + 8 * stride, stride, &p3, &p2, &tmp1, &tmp2); 783 MAX_DIFF2(p3, p2, tmp1, tmp2, mask); 784 785 ComplexMask_SSE2(&p1, &p0, &p3, &p2, thresh, ithresh, &mask); 786 DoFilter4_SSE2(&p1, &p0, &p3, &p2, &mask, hev_thresh); 787 788 Store16x4_SSE2(&p1, &p0, &p3, &p2, b, b + 8 * stride, stride); 789 790 // rotate samples 791 p1 = tmp1; 792 p0 = tmp2; 793 } 794} 795 796// 8-pixels wide variant, for chroma filtering 797static void VFilter8_SSE2(uint8_t* u, uint8_t* v, int stride, 798 int thresh, int ithresh, int hev_thresh) { 799 __m128i mask; 800 __m128i t1, p2, p1, p0, q0, q1, q2; 801 802 // Load p3, p2, p1, p0 803 LOADUV_H_EDGES4(u - 4 * stride, v - 4 * stride, stride, t1, p2, p1, p0); 804 MAX_DIFF1(t1, p2, p1, p0, mask); 805 806 // Load q0, q1, q2, q3 807 LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1); 808 MAX_DIFF2(t1, q2, q1, q0, mask); 809 810 ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask); 811 DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); 812 813 // Store 814 STOREUV(p2, u, v, -3 * stride); 815 STOREUV(p1, u, v, -2 * stride); 816 STOREUV(p0, u, v, -1 * stride); 817 STOREUV(q0, u, v, 0 * stride); 818 STOREUV(q1, u, v, 1 * stride); 819 STOREUV(q2, u, v, 2 * stride); 820} 821 822static void HFilter8_SSE2(uint8_t* u, uint8_t* v, int stride, 823 int thresh, int ithresh, int hev_thresh) { 824 __m128i mask; 825 __m128i p3, p2, p1, p0, q0, q1, q2, q3; 826 827 uint8_t* const tu = u - 4; 828 uint8_t* const tv = v - 4; 829 Load16x4_SSE2(tu, tv, stride, &p3, &p2, &p1, &p0); 830 MAX_DIFF1(p3, p2, p1, p0, mask); 831 832 Load16x4_SSE2(u, v, stride, &q0, &q1, &q2, &q3); 833 MAX_DIFF2(q3, q2, q1, q0, mask); 834 835 ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask); 836 DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); 837 838 Store16x4_SSE2(&p3, &p2, &p1, &p0, tu, tv, stride); 839 Store16x4_SSE2(&q0, &q1, &q2, &q3, u, v, stride); 840} 841 842static void VFilter8i_SSE2(uint8_t* u, uint8_t* v, int stride, 843 int thresh, int ithresh, int hev_thresh) { 844 __m128i mask; 845 __m128i t1, t2, p1, p0, q0, q1; 846 847 // Load p3, p2, p1, p0 848 LOADUV_H_EDGES4(u, v, stride, t2, t1, p1, p0); 849 MAX_DIFF1(t2, t1, p1, p0, mask); 850 851 u += 4 * stride; 852 v += 4 * stride; 853 854 // Load q0, q1, q2, q3 855 LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2); 856 MAX_DIFF2(t2, t1, q1, q0, mask); 857 858 ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask); 859 DoFilter4_SSE2(&p1, &p0, &q0, &q1, &mask, hev_thresh); 860 861 // Store 862 STOREUV(p1, u, v, -2 * stride); 863 STOREUV(p0, u, v, -1 * stride); 864 STOREUV(q0, u, v, 0 * stride); 865 STOREUV(q1, u, v, 1 * stride); 866} 867 868static void HFilter8i_SSE2(uint8_t* u, uint8_t* v, int stride, 869 int thresh, int ithresh, int hev_thresh) { 870 __m128i mask; 871 __m128i t1, t2, p1, p0, q0, q1; 872 Load16x4_SSE2(u, v, stride, &t2, &t1, &p1, &p0); // p3, p2, p1, p0 873 MAX_DIFF1(t2, t1, p1, p0, mask); 874 875 u += 4; // beginning of q0 876 v += 4; 877 Load16x4_SSE2(u, v, stride, &q0, &q1, &t1, &t2); // q0, q1, q2, q3 878 MAX_DIFF2(t2, t1, q1, q0, mask); 879 880 ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask); 881 DoFilter4_SSE2(&p1, &p0, &q0, &q1, &mask, hev_thresh); 882 883 u -= 2; // beginning of p1 884 v -= 2; 885 Store16x4_SSE2(&p1, &p0, &q0, &q1, u, v, stride); 886} 887 888//------------------------------------------------------------------------------ 889// 4x4 predictions 890 891#define DST(x, y) dst[(x) + (y) * BPS] 892#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2) 893 894// We use the following 8b-arithmetic tricks: 895// (a + 2 * b + c + 2) >> 2 = (AC + b + 1) >> 1 896// where: AC = (a + c) >> 1 = [(a + c + 1) >> 1] - [(a^c) & 1] 897// and: 898// (a + 2 * b + c + 2) >> 2 = (AB + BC + 1) >> 1 - (ab|bc)&lsb 899// where: AC = (a + b + 1) >> 1, BC = (b + c + 1) >> 1 900// and ab = a ^ b, bc = b ^ c, lsb = (AC^BC)&1 901 902static void VE4_SSE2(uint8_t* dst) { // vertical 903 const __m128i one = _mm_set1_epi8(1); 904 const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS - 1)); 905 const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1); 906 const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2); 907 const __m128i a = _mm_avg_epu8(ABCDEFGH, CDEFGH00); 908 const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGH00), one); 909 const __m128i b = _mm_subs_epu8(a, lsb); 910 const __m128i avg = _mm_avg_epu8(b, BCDEFGH0); 911 const uint32_t vals = _mm_cvtsi128_si32(avg); 912 int i; 913 for (i = 0; i < 4; ++i) { 914 WebPUint32ToMem(dst + i * BPS, vals); 915 } 916} 917 918static void LD4_SSE2(uint8_t* dst) { // Down-Left 919 const __m128i one = _mm_set1_epi8(1); 920 const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS)); 921 const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1); 922 const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2); 923 const __m128i CDEFGHH0 = _mm_insert_epi16(CDEFGH00, dst[-BPS + 7], 3); 924 const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, CDEFGHH0); 925 const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGHH0), one); 926 const __m128i avg2 = _mm_subs_epu8(avg1, lsb); 927 const __m128i abcdefg = _mm_avg_epu8(avg2, BCDEFGH0); 928 WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( abcdefg )); 929 WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1))); 930 WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2))); 931 WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3))); 932} 933 934static void VR4_SSE2(uint8_t* dst) { // Vertical-Right 935 const __m128i one = _mm_set1_epi8(1); 936 const int I = dst[-1 + 0 * BPS]; 937 const int J = dst[-1 + 1 * BPS]; 938 const int K = dst[-1 + 2 * BPS]; 939 const int X = dst[-1 - BPS]; 940 const __m128i XABCD = _mm_loadl_epi64((__m128i*)(dst - BPS - 1)); 941 const __m128i ABCD0 = _mm_srli_si128(XABCD, 1); 942 const __m128i abcd = _mm_avg_epu8(XABCD, ABCD0); 943 const __m128i _XABCD = _mm_slli_si128(XABCD, 1); 944 const __m128i IXABCD = _mm_insert_epi16(_XABCD, I | (X << 8), 0); 945 const __m128i avg1 = _mm_avg_epu8(IXABCD, ABCD0); 946 const __m128i lsb = _mm_and_si128(_mm_xor_si128(IXABCD, ABCD0), one); 947 const __m128i avg2 = _mm_subs_epu8(avg1, lsb); 948 const __m128i efgh = _mm_avg_epu8(avg2, XABCD); 949 WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( abcd )); 950 WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32( efgh )); 951 WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(abcd, 1))); 952 WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(efgh, 1))); 953 954 // these two are hard to implement in SSE2, so we keep the C-version: 955 DST(0, 2) = AVG3(J, I, X); 956 DST(0, 3) = AVG3(K, J, I); 957} 958 959static void VL4_SSE2(uint8_t* dst) { // Vertical-Left 960 const __m128i one = _mm_set1_epi8(1); 961 const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS)); 962 const __m128i BCDEFGH_ = _mm_srli_si128(ABCDEFGH, 1); 963 const __m128i CDEFGH__ = _mm_srli_si128(ABCDEFGH, 2); 964 const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, BCDEFGH_); 965 const __m128i avg2 = _mm_avg_epu8(CDEFGH__, BCDEFGH_); 966 const __m128i avg3 = _mm_avg_epu8(avg1, avg2); 967 const __m128i lsb1 = _mm_and_si128(_mm_xor_si128(avg1, avg2), one); 968 const __m128i ab = _mm_xor_si128(ABCDEFGH, BCDEFGH_); 969 const __m128i bc = _mm_xor_si128(CDEFGH__, BCDEFGH_); 970 const __m128i abbc = _mm_or_si128(ab, bc); 971 const __m128i lsb2 = _mm_and_si128(abbc, lsb1); 972 const __m128i avg4 = _mm_subs_epu8(avg3, lsb2); 973 const uint32_t extra_out = _mm_cvtsi128_si32(_mm_srli_si128(avg4, 4)); 974 WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( avg1 )); 975 WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32( avg4 )); 976 WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg1, 1))); 977 WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg4, 1))); 978 979 // these two are hard to get and irregular 980 DST(3, 2) = (extra_out >> 0) & 0xff; 981 DST(3, 3) = (extra_out >> 8) & 0xff; 982} 983 984static void RD4_SSE2(uint8_t* dst) { // Down-right 985 const __m128i one = _mm_set1_epi8(1); 986 const __m128i XABCD = _mm_loadl_epi64((__m128i*)(dst - BPS - 1)); 987 const __m128i ____XABCD = _mm_slli_si128(XABCD, 4); 988 const uint32_t I = dst[-1 + 0 * BPS]; 989 const uint32_t J = dst[-1 + 1 * BPS]; 990 const uint32_t K = dst[-1 + 2 * BPS]; 991 const uint32_t L = dst[-1 + 3 * BPS]; 992 const __m128i LKJI_____ = 993 _mm_cvtsi32_si128(L | (K << 8) | (J << 16) | (I << 24)); 994 const __m128i LKJIXABCD = _mm_or_si128(LKJI_____, ____XABCD); 995 const __m128i KJIXABCD_ = _mm_srli_si128(LKJIXABCD, 1); 996 const __m128i JIXABCD__ = _mm_srli_si128(LKJIXABCD, 2); 997 const __m128i avg1 = _mm_avg_epu8(JIXABCD__, LKJIXABCD); 998 const __m128i lsb = _mm_and_si128(_mm_xor_si128(JIXABCD__, LKJIXABCD), one); 999 const __m128i avg2 = _mm_subs_epu8(avg1, lsb); 1000 const __m128i abcdefg = _mm_avg_epu8(avg2, KJIXABCD_); 1001 WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32( abcdefg )); 1002 WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1))); 1003 WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2))); 1004 WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3))); 1005} 1006 1007#undef DST 1008#undef AVG3 1009 1010//------------------------------------------------------------------------------ 1011// Luma 16x16 1012 1013static WEBP_INLINE void TrueMotion_SSE2(uint8_t* dst, int size) { 1014 const uint8_t* top = dst - BPS; 1015 const __m128i zero = _mm_setzero_si128(); 1016 int y; 1017 if (size == 4) { 1018 const __m128i top_values = _mm_cvtsi32_si128(WebPMemToUint32(top)); 1019 const __m128i top_base = _mm_unpacklo_epi8(top_values, zero); 1020 for (y = 0; y < 4; ++y, dst += BPS) { 1021 const int val = dst[-1] - top[-1]; 1022 const __m128i base = _mm_set1_epi16(val); 1023 const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero); 1024 WebPUint32ToMem(dst, _mm_cvtsi128_si32(out)); 1025 } 1026 } else if (size == 8) { 1027 const __m128i top_values = _mm_loadl_epi64((const __m128i*)top); 1028 const __m128i top_base = _mm_unpacklo_epi8(top_values, zero); 1029 for (y = 0; y < 8; ++y, dst += BPS) { 1030 const int val = dst[-1] - top[-1]; 1031 const __m128i base = _mm_set1_epi16(val); 1032 const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero); 1033 _mm_storel_epi64((__m128i*)dst, out); 1034 } 1035 } else { 1036 const __m128i top_values = _mm_loadu_si128((const __m128i*)top); 1037 const __m128i top_base_0 = _mm_unpacklo_epi8(top_values, zero); 1038 const __m128i top_base_1 = _mm_unpackhi_epi8(top_values, zero); 1039 for (y = 0; y < 16; ++y, dst += BPS) { 1040 const int val = dst[-1] - top[-1]; 1041 const __m128i base = _mm_set1_epi16(val); 1042 const __m128i out_0 = _mm_add_epi16(base, top_base_0); 1043 const __m128i out_1 = _mm_add_epi16(base, top_base_1); 1044 const __m128i out = _mm_packus_epi16(out_0, out_1); 1045 _mm_storeu_si128((__m128i*)dst, out); 1046 } 1047 } 1048} 1049 1050static void TM4_SSE2(uint8_t* dst) { TrueMotion_SSE2(dst, 4); } 1051static void TM8uv_SSE2(uint8_t* dst) { TrueMotion_SSE2(dst, 8); } 1052static void TM16_SSE2(uint8_t* dst) { TrueMotion_SSE2(dst, 16); } 1053 1054static void VE16_SSE2(uint8_t* dst) { 1055 const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS)); 1056 int j; 1057 for (j = 0; j < 16; ++j) { 1058 _mm_storeu_si128((__m128i*)(dst + j * BPS), top); 1059 } 1060} 1061 1062static void HE16_SSE2(uint8_t* dst) { // horizontal 1063 int j; 1064 for (j = 16; j > 0; --j) { 1065 const __m128i values = _mm_set1_epi8(dst[-1]); 1066 _mm_storeu_si128((__m128i*)dst, values); 1067 dst += BPS; 1068 } 1069} 1070 1071static WEBP_INLINE void Put16_SSE2(uint8_t v, uint8_t* dst) { 1072 int j; 1073 const __m128i values = _mm_set1_epi8(v); 1074 for (j = 0; j < 16; ++j) { 1075 _mm_storeu_si128((__m128i*)(dst + j * BPS), values); 1076 } 1077} 1078 1079static void DC16_SSE2(uint8_t* dst) { // DC 1080 const __m128i zero = _mm_setzero_si128(); 1081 const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS)); 1082 const __m128i sad8x2 = _mm_sad_epu8(top, zero); 1083 // sum the two sads: sad8x2[0:1] + sad8x2[8:9] 1084 const __m128i sum = _mm_add_epi16(sad8x2, _mm_shuffle_epi32(sad8x2, 2)); 1085 int left = 0; 1086 int j; 1087 for (j = 0; j < 16; ++j) { 1088 left += dst[-1 + j * BPS]; 1089 } 1090 { 1091 const int DC = _mm_cvtsi128_si32(sum) + left + 16; 1092 Put16_SSE2(DC >> 5, dst); 1093 } 1094} 1095 1096static void DC16NoTop_SSE2(uint8_t* dst) { // DC with top samples unavailable 1097 int DC = 8; 1098 int j; 1099 for (j = 0; j < 16; ++j) { 1100 DC += dst[-1 + j * BPS]; 1101 } 1102 Put16_SSE2(DC >> 4, dst); 1103} 1104 1105static void DC16NoLeft_SSE2(uint8_t* dst) { // DC with left samples unavailable 1106 const __m128i zero = _mm_setzero_si128(); 1107 const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS)); 1108 const __m128i sad8x2 = _mm_sad_epu8(top, zero); 1109 // sum the two sads: sad8x2[0:1] + sad8x2[8:9] 1110 const __m128i sum = _mm_add_epi16(sad8x2, _mm_shuffle_epi32(sad8x2, 2)); 1111 const int DC = _mm_cvtsi128_si32(sum) + 8; 1112 Put16_SSE2(DC >> 4, dst); 1113} 1114 1115static void DC16NoTopLeft_SSE2(uint8_t* dst) { // DC with no top & left samples 1116 Put16_SSE2(0x80, dst); 1117} 1118 1119//------------------------------------------------------------------------------ 1120// Chroma 1121 1122static void VE8uv_SSE2(uint8_t* dst) { // vertical 1123 int j; 1124 const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS)); 1125 for (j = 0; j < 8; ++j) { 1126 _mm_storel_epi64((__m128i*)(dst + j * BPS), top); 1127 } 1128} 1129 1130// helper for chroma-DC predictions 1131static WEBP_INLINE void Put8x8uv_SSE2(uint8_t v, uint8_t* dst) { 1132 int j; 1133 const __m128i values = _mm_set1_epi8(v); 1134 for (j = 0; j < 8; ++j) { 1135 _mm_storel_epi64((__m128i*)(dst + j * BPS), values); 1136 } 1137} 1138 1139static void DC8uv_SSE2(uint8_t* dst) { // DC 1140 const __m128i zero = _mm_setzero_si128(); 1141 const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS)); 1142 const __m128i sum = _mm_sad_epu8(top, zero); 1143 int left = 0; 1144 int j; 1145 for (j = 0; j < 8; ++j) { 1146 left += dst[-1 + j * BPS]; 1147 } 1148 { 1149 const int DC = _mm_cvtsi128_si32(sum) + left + 8; 1150 Put8x8uv_SSE2(DC >> 4, dst); 1151 } 1152} 1153 1154static void DC8uvNoLeft_SSE2(uint8_t* dst) { // DC with no left samples 1155 const __m128i zero = _mm_setzero_si128(); 1156 const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS)); 1157 const __m128i sum = _mm_sad_epu8(top, zero); 1158 const int DC = _mm_cvtsi128_si32(sum) + 4; 1159 Put8x8uv_SSE2(DC >> 3, dst); 1160} 1161 1162static void DC8uvNoTop_SSE2(uint8_t* dst) { // DC with no top samples 1163 int dc0 = 4; 1164 int i; 1165 for (i = 0; i < 8; ++i) { 1166 dc0 += dst[-1 + i * BPS]; 1167 } 1168 Put8x8uv_SSE2(dc0 >> 3, dst); 1169} 1170 1171static void DC8uvNoTopLeft_SSE2(uint8_t* dst) { // DC with nothing 1172 Put8x8uv_SSE2(0x80, dst); 1173} 1174 1175//------------------------------------------------------------------------------ 1176// Entry point 1177 1178extern void VP8DspInitSSE2(void); 1179 1180WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitSSE2(void) { 1181 VP8Transform = Transform_SSE2; 1182#if (USE_TRANSFORM_AC3 == 1) 1183 VP8TransformAC3 = TransformAC3_SSE2; 1184#endif 1185 1186 VP8VFilter16 = VFilter16_SSE2; 1187 VP8HFilter16 = HFilter16_SSE2; 1188 VP8VFilter8 = VFilter8_SSE2; 1189 VP8HFilter8 = HFilter8_SSE2; 1190 VP8VFilter16i = VFilter16i_SSE2; 1191 VP8HFilter16i = HFilter16i_SSE2; 1192 VP8VFilter8i = VFilter8i_SSE2; 1193 VP8HFilter8i = HFilter8i_SSE2; 1194 1195 VP8SimpleVFilter16 = SimpleVFilter16_SSE2; 1196 VP8SimpleHFilter16 = SimpleHFilter16_SSE2; 1197 VP8SimpleVFilter16i = SimpleVFilter16i_SSE2; 1198 VP8SimpleHFilter16i = SimpleHFilter16i_SSE2; 1199 1200 VP8PredLuma4[1] = TM4_SSE2; 1201 VP8PredLuma4[2] = VE4_SSE2; 1202 VP8PredLuma4[4] = RD4_SSE2; 1203 VP8PredLuma4[5] = VR4_SSE2; 1204 VP8PredLuma4[6] = LD4_SSE2; 1205 VP8PredLuma4[7] = VL4_SSE2; 1206 1207 VP8PredLuma16[0] = DC16_SSE2; 1208 VP8PredLuma16[1] = TM16_SSE2; 1209 VP8PredLuma16[2] = VE16_SSE2; 1210 VP8PredLuma16[3] = HE16_SSE2; 1211 VP8PredLuma16[4] = DC16NoTop_SSE2; 1212 VP8PredLuma16[5] = DC16NoLeft_SSE2; 1213 VP8PredLuma16[6] = DC16NoTopLeft_SSE2; 1214 1215 VP8PredChroma8[0] = DC8uv_SSE2; 1216 VP8PredChroma8[1] = TM8uv_SSE2; 1217 VP8PredChroma8[2] = VE8uv_SSE2; 1218 VP8PredChroma8[4] = DC8uvNoTop_SSE2; 1219 VP8PredChroma8[5] = DC8uvNoLeft_SSE2; 1220 VP8PredChroma8[6] = DC8uvNoTopLeft_SSE2; 1221} 1222 1223#else // !WEBP_USE_SSE2 1224 1225WEBP_DSP_INIT_STUB(VP8DspInitSSE2) 1226 1227#endif // WEBP_USE_SSE2 1228