highbd_loopfilter_sse2.c revision 7ce0a1d1337c01056ba24006efab21f00e179e04
/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>  // SSE2

#include "./vpx_dsp_rtcd.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/emmintrin_compat.h"

static INLINE __m128i signed_char_clamp_bd_sse2(__m128i value, int bd) {
  __m128i ubounded;
  __m128i lbounded;
  __m128i retval;

  const __m128i zero = _mm_set1_epi16(0);
  const __m128i one = _mm_set1_epi16(1);
  __m128i t80, max, min;

  if (bd == 8) {
    t80 = _mm_set1_epi16(0x80);
    max = _mm_subs_epi16(
        _mm_subs_epi16(_mm_slli_epi16(one, 8), one), t80);
  } else if (bd == 10) {
    t80 = _mm_set1_epi16(0x200);
    max = _mm_subs_epi16(
        _mm_subs_epi16(_mm_slli_epi16(one, 10), one), t80);
  } else {  // bd == 12
    t80 = _mm_set1_epi16(0x800);
    max = _mm_subs_epi16(
        _mm_subs_epi16(_mm_slli_epi16(one, 12), one), t80);
  }

  min = _mm_subs_epi16(zero, t80);

  ubounded = _mm_cmpgt_epi16(value, max);
  lbounded = _mm_cmplt_epi16(value, min);
  retval = _mm_andnot_si128(_mm_or_si128(ubounded, lbounded), value);
  ubounded = _mm_and_si128(ubounded, max);
  lbounded = _mm_and_si128(lbounded, min);
  retval = _mm_or_si128(retval, ubounded);
  retval = _mm_or_si128(retval, lbounded);
  return retval;
}

// TODO(debargha, peter): Break up large functions into smaller ones
// in this file.
static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s,
                                                   int p,
                                                   const uint8_t *_blimit,
                                                   const uint8_t *_limit,
                                                   const uint8_t *_thresh,
                                                   int bd) {
  const __m128i zero = _mm_set1_epi16(0);
  const __m128i one = _mm_set1_epi16(1);
  __m128i blimit, limit, thresh;
  __m128i q7, p7, q6, p6, q5, p5, q4, p4, q3, p3, q2, p2, q1, p1, q0, p0;
  __m128i mask, hev, flat, flat2, abs_p1p0, abs_q1q0;
  __m128i ps1, qs1, ps0, qs0;
  __m128i abs_p0q0, abs_p1q1, ffff, work;
  __m128i filt, work_a, filter1, filter2;
  __m128i flat2_q6, flat2_p6, flat2_q5, flat2_p5, flat2_q4, flat2_p4;
  __m128i flat2_q3, flat2_p3, flat2_q2, flat2_p2, flat2_q1, flat2_p1;
  __m128i flat2_q0, flat2_p0;
  __m128i flat_q2, flat_p2, flat_q1, flat_p1, flat_q0, flat_p0;
  __m128i pixelFilter_p, pixelFilter_q;
  __m128i pixetFilter_p2p1p0, pixetFilter_q2q1q0;
  __m128i sum_p7, sum_q7, sum_p3, sum_q3;
  __m128i t4, t3, t80, t1;
  __m128i eight, four;

  if (bd == 8) {
    blimit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero);
    limit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero);
    thresh = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero);
  } else if (bd == 10) {
    blimit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2);
    limit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2);
    thresh = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2);
  } else {  // bd == 12
    blimit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 4);
    limit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4);
    thresh = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4);
  }
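
  // The 8-bit blimit/limit/thresh values are rescaled to the working bit
  // depth by a left shift of (bd - 8). A minimal scalar sketch of the same
  // idea (illustrative only, not part of libvpx):
  //
  //   uint16_t scale_threshold(uint8_t t8, int bd) {
  //     return (uint16_t)t8 << (bd - 8);  // 8 -> <<0, 10 -> <<2, 12 -> <<4
  //   }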

  q4 = _mm_load_si128((__m128i *)(s + 4 * p));
  p4 = _mm_load_si128((__m128i *)(s - 5 * p));
  q3 = _mm_load_si128((__m128i *)(s + 3 * p));
  p3 = _mm_load_si128((__m128i *)(s - 4 * p));
  q2 = _mm_load_si128((__m128i *)(s + 2 * p));
  p2 = _mm_load_si128((__m128i *)(s - 3 * p));
  q1 = _mm_load_si128((__m128i *)(s + 1 * p));
  p1 = _mm_load_si128((__m128i *)(s - 2 * p));
  q0 = _mm_load_si128((__m128i *)(s + 0 * p));
  p0 = _mm_load_si128((__m128i *)(s - 1 * p));

  // highbd_filter_mask
  abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1));
  abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1));

  ffff = _mm_cmpeq_epi16(abs_p1p0, abs_p1p0);

  abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0), _mm_subs_epu16(q0, p0));
  abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1), _mm_subs_epu16(q1, p1));

  // highbd_hev_mask (in C code this is actually called from highbd_filter4)
  flat = _mm_max_epi16(abs_p1p0, abs_q1q0);
  hev = _mm_subs_epu16(flat, thresh);
  hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);

  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);  // abs(p0 - q0) * 2
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);  // abs(p1 - q1) / 2
  mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
  mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
  mask = _mm_and_si128(mask, _mm_adds_epu16(limit, one));
  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p1, p0),
                                    _mm_subs_epu16(p0, p1)),
                       _mm_or_si128(_mm_subs_epu16(q1, q0),
                                    _mm_subs_epu16(q0, q1)));
  mask = _mm_max_epi16(work, mask);
  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1),
                                    _mm_subs_epu16(p1, p2)),
                       _mm_or_si128(_mm_subs_epu16(q2, q1),
                                    _mm_subs_epu16(q1, q2)));
  mask = _mm_max_epi16(work, mask);
  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p2),
                                    _mm_subs_epu16(p2, p3)),
                       _mm_or_si128(_mm_subs_epu16(q3, q2),
                                    _mm_subs_epu16(q2, q3)));
  mask = _mm_max_epi16(work, mask);

  mask = _mm_subs_epu16(mask, limit);
  mask = _mm_cmpeq_epi16(mask, zero);  // return ~mask

  // lp filter
  // highbd_filter4
  t4 = _mm_set1_epi16(4);
  t3 = _mm_set1_epi16(3);
  if (bd == 8)
    t80 = _mm_set1_epi16(0x80);
  else if (bd == 10)
    t80 = _mm_set1_epi16(0x200);
  else  // bd == 12
    t80 = _mm_set1_epi16(0x800);

  t1 = _mm_set1_epi16(0x1);

  ps1 = _mm_subs_epi16(p1, t80);
  qs1 = _mm_subs_epi16(q1, t80);
  ps0 = _mm_subs_epi16(p0, t80);
  qs0 = _mm_subs_epi16(q0, t80);

  filt = _mm_and_si128(
      signed_char_clamp_bd_sse2(_mm_subs_epi16(ps1, qs1), bd), hev);
  work_a = _mm_subs_epi16(qs0, ps0);
  filt = _mm_adds_epi16(filt, work_a);
  filt = _mm_adds_epi16(filt, work_a);
  filt = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, work_a), bd);
  filt = _mm_and_si128(filt, mask);
  filter1 = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, t4), bd);
  filter2 = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, t3), bd);

  // Filter1 >> 3
  filter1 = _mm_srai_epi16(filter1, 0x3);
  filter2 = _mm_srai_epi16(filter2, 0x3);

  qs0 = _mm_adds_epi16(
      signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd),
      t80);
  ps0 = _mm_adds_epi16(
      signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd),
      t80);
  filt = _mm_adds_epi16(filter1, t1);
  filt = _mm_srai_epi16(filt, 1);
  filt = _mm_andnot_si128(hev, filt);
  qs1 = _mm_adds_epi16(
      signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd),
      t80);
  ps1 = _mm_adds_epi16(
      signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd),
      t80);

  // end highbd_filter4
  // loopfilter done
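
  // Per-lane scalar sketch of the filter4 step above (illustrative only;
  // ps*/qs* are the bias-removed pixels, hev/mask are 0 or all-ones):
  //
  //   filt = clamp(ps1 - qs1) & hev;
  //   filt = clamp(filt + 3 * (qs0 - ps0)) & mask;
  //   Filter1 = clamp(filt + 4) >> 3;  oq0 = clamp(qs0 - Filter1);
  //   Filter2 = clamp(filt + 3) >> 3;  op0 = clamp(ps0 + Filter2);
  //   filt = ((Filter1 + 1) >> 1) & ~hev;
  //   oq1 = clamp(qs1 - filt);         op1 = clamp(ps1 + filt);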

  // highbd_flat_mask4
  flat = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p0),
                                    _mm_subs_epu16(p0, p2)),
                       _mm_or_si128(_mm_subs_epu16(p3, p0),
                                    _mm_subs_epu16(p0, p3)));
  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(q2, q0),
                                    _mm_subs_epu16(q0, q2)),
                       _mm_or_si128(_mm_subs_epu16(q3, q0),
                                    _mm_subs_epu16(q0, q3)));
  flat = _mm_max_epi16(work, flat);
  work = _mm_max_epi16(abs_p1p0, abs_q1q0);
  flat = _mm_max_epi16(work, flat);

  if (bd == 8)
    flat = _mm_subs_epu16(flat, one);
  else if (bd == 10)
    flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, 2));
  else  // bd == 12
    flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, 4));

  flat = _mm_cmpeq_epi16(flat, zero);
  // end flat_mask4

  // flat & mask = flat && mask (as used in filter8)
  // (because, in both vars, each block of 16 bits is either all 1s or all 0s)
  flat = _mm_and_si128(flat, mask);

  p5 = _mm_load_si128((__m128i *)(s - 6 * p));
  q5 = _mm_load_si128((__m128i *)(s + 5 * p));
  p6 = _mm_load_si128((__m128i *)(s - 7 * p));
  q6 = _mm_load_si128((__m128i *)(s + 6 * p));
  p7 = _mm_load_si128((__m128i *)(s - 8 * p));
  q7 = _mm_load_si128((__m128i *)(s + 7 * p));

  // highbd_flat_mask5 (arguments passed in are p0, q0, p4-p7, q4-q7
  // but referred to as p0-p4 & q0-q4 in fn)
  flat2 = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p4, p0),
                                     _mm_subs_epu16(p0, p4)),
                        _mm_or_si128(_mm_subs_epu16(q4, q0),
                                     _mm_subs_epu16(q0, q4)));

  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p5, p0),
                                    _mm_subs_epu16(p0, p5)),
                       _mm_or_si128(_mm_subs_epu16(q5, q0),
                                    _mm_subs_epu16(q0, q5)));
  flat2 = _mm_max_epi16(work, flat2);

  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p6, p0),
                                    _mm_subs_epu16(p0, p6)),
                       _mm_or_si128(_mm_subs_epu16(q6, q0),
                                    _mm_subs_epu16(q0, q6)));
  flat2 = _mm_max_epi16(work, flat2);

  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p7, p0),
                                    _mm_subs_epu16(p0, p7)),
                       _mm_or_si128(_mm_subs_epu16(q7, q0),
                                    _mm_subs_epu16(q0, q7)));
  flat2 = _mm_max_epi16(work, flat2);

  if (bd == 8)
    flat2 = _mm_subs_epu16(flat2, one);
  else if (bd == 10)
    flat2 = _mm_subs_epu16(flat2, _mm_slli_epi16(one, 2));
  else  // bd == 12
    flat2 = _mm_subs_epu16(flat2, _mm_slli_epi16(one, 4));

  flat2 = _mm_cmpeq_epi16(flat2, zero);
  flat2 = _mm_and_si128(flat2, flat);  // flat2 & flat & mask
  // end highbd_flat_mask5

  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // flat and wide flat calculations
  eight = _mm_set1_epi16(8);
  four = _mm_set1_epi16(4);

  pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6, p5),
                                _mm_add_epi16(p4, p3));
  pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6, q5),
                                _mm_add_epi16(q4, q3));

  pixetFilter_p2p1p0 = _mm_add_epi16(p0, _mm_add_epi16(p2, p1));
  pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0);

  pixetFilter_q2q1q0 = _mm_add_epi16(q0, _mm_add_epi16(q2, q1));
  pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0);
  pixelFilter_p = _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p,
                                                     pixelFilter_q));
  pixetFilter_p2p1p0 = _mm_add_epi16(four,
                                     _mm_add_epi16(pixetFilter_p2p1p0,
                                                   pixetFilter_q2q1q0));
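
  // At this point pixelFilter_p holds 8 + sum(p0..p6) + sum(q0..q6) and
  // pixetFilter_p2p1p0 holds 4 + p2+p1+p0 + q2+q1+q0, so each output below
  // is a ROUND_POWER_OF_TWO of a centered window. Scalar sketch for the
  // first pair (illustrative only):
  //
  //   flat2_p0 = (p7 + p6 + p5 + p4 + p3 + p2 + p1 + 2 * p0 +
  //               q0 + q1 + q2 + q3 + q4 + q5 + q6 + 8) >> 4;
  //   flat_p0  = (p3 + p2 + p1 + 2 * p0 + q0 + q1 + q2 + 4) >> 3;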
  flat2_p0 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
                                          _mm_add_epi16(p7, p0)), 4);
  flat2_q0 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
                                          _mm_add_epi16(q7, q0)), 4);
  flat_p0 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
                                         _mm_add_epi16(p3, p0)), 3);
  flat_q0 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
                                         _mm_add_epi16(q3, q0)), 3);

  sum_p7 = _mm_add_epi16(p7, p7);
  sum_q7 = _mm_add_epi16(q7, q7);
  sum_p3 = _mm_add_epi16(p3, p3);
  sum_q3 = _mm_add_epi16(q3, q3);

  pixelFilter_q = _mm_sub_epi16(pixelFilter_p, p6);
  pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q6);
  flat2_p1 = _mm_srli_epi16(
      _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p1)), 4);
  flat2_q1 = _mm_srli_epi16(
      _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q1)), 4);

  pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2);
  pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2);
  flat_p1 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
                                         _mm_add_epi16(sum_p3, p1)), 3);
  flat_q1 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0,
                                         _mm_add_epi16(sum_q3, q1)), 3);

  sum_p7 = _mm_add_epi16(sum_p7, p7);
  sum_q7 = _mm_add_epi16(sum_q7, q7);
  sum_p3 = _mm_add_epi16(sum_p3, p3);
  sum_q3 = _mm_add_epi16(sum_q3, q3);

  pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5);
  pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5);
  flat2_p2 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
                                          _mm_add_epi16(sum_p7, p2)), 4);
  flat2_q2 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
                                          _mm_add_epi16(sum_q7, q2)), 4);

  pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1);
  pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1);
  flat_p2 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0,
                                         _mm_add_epi16(sum_p3, p2)), 3);
  flat_q2 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0,
                                         _mm_add_epi16(sum_q3, q2)), 3);

  sum_p7 = _mm_add_epi16(sum_p7, p7);
  sum_q7 = _mm_add_epi16(sum_q7, q7);
  pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4);
  pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4);
  flat2_p3 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
                                          _mm_add_epi16(sum_p7, p3)), 4);
  flat2_q3 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
                                          _mm_add_epi16(sum_q7, q3)), 4);

  sum_p7 = _mm_add_epi16(sum_p7, p7);
  sum_q7 = _mm_add_epi16(sum_q7, q7);
  pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3);
  pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3);
  flat2_p4 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
                                          _mm_add_epi16(sum_p7, p4)), 4);
  flat2_q4 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
                                          _mm_add_epi16(sum_q7, q4)), 4);

  sum_p7 = _mm_add_epi16(sum_p7, p7);
  sum_q7 = _mm_add_epi16(sum_q7, q7);
  pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2);
  pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2);
  flat2_p5 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
                                          _mm_add_epi16(sum_p7, p5)), 4);
  flat2_q5 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
                                          _mm_add_epi16(sum_q7, q5)), 4);

  sum_p7 = _mm_add_epi16(sum_p7, p7);
  sum_q7 = _mm_add_epi16(sum_q7, q7);
  pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1);
  pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1);
  flat2_p6 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p,
                                          _mm_add_epi16(sum_p7, p6)), 4);
  flat2_q6 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q,
                                          _mm_add_epi16(sum_q7, q6)), 4);

  // wide flat
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
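
  // Each flat2_pN/flat2_qN step above slides the 16-sample window by one
  // pixel: the farthest opposite-side sample is subtracted from the running
  // sum while p7 (or q7) is counted once more via sum_p7/sum_q7. The
  // blending below uses the usual SSE2 select idiom; a hedged generic
  // sketch (the helper name is hypothetical, not part of libvpx):
  //
  //   static INLINE __m128i blend_mask(__m128i m, __m128i if_set,
  //                                    __m128i if_clear) {
  //     // m is all-ones or all-zeros per 16-bit lane.
  //     return _mm_or_si128(_mm_and_si128(m, if_set),
  //                         _mm_andnot_si128(m, if_clear));
  //   }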

  // highbd_filter8
  p2 = _mm_andnot_si128(flat, p2);
  // p2 remains unchanged if !(flat && mask)
  flat_p2 = _mm_and_si128(flat, flat_p2);
  // when (flat && mask)
  p2 = _mm_or_si128(p2, flat_p2);  // full list of p2 values
  q2 = _mm_andnot_si128(flat, q2);
  flat_q2 = _mm_and_si128(flat, flat_q2);
  q2 = _mm_or_si128(q2, flat_q2);  // full list of q2 values

  ps1 = _mm_andnot_si128(flat, ps1);
  // p1 takes the value assigned to it in filter4 if !(flat && mask)
  flat_p1 = _mm_and_si128(flat, flat_p1);
  // when (flat && mask)
  p1 = _mm_or_si128(ps1, flat_p1);  // full list of p1 values
  qs1 = _mm_andnot_si128(flat, qs1);
  flat_q1 = _mm_and_si128(flat, flat_q1);
  q1 = _mm_or_si128(qs1, flat_q1);  // full list of q1 values

  ps0 = _mm_andnot_si128(flat, ps0);
  // p0 takes the value assigned to it in filter4 if !(flat && mask)
  flat_p0 = _mm_and_si128(flat, flat_p0);
  // when (flat && mask)
  p0 = _mm_or_si128(ps0, flat_p0);  // full list of p0 values
  qs0 = _mm_andnot_si128(flat, qs0);
  flat_q0 = _mm_and_si128(flat, flat_q0);
  q0 = _mm_or_si128(qs0, flat_q0);  // full list of q0 values
  // end highbd_filter8

  // highbd_filter16
  p6 = _mm_andnot_si128(flat2, p6);
  // p6 remains unchanged if !(flat2 && flat && mask)
  flat2_p6 = _mm_and_si128(flat2, flat2_p6);
  // get values for when (flat2 && flat && mask)
  p6 = _mm_or_si128(p6, flat2_p6);  // full list of p6 values
  q6 = _mm_andnot_si128(flat2, q6);
  // q6 remains unchanged if !(flat2 && flat && mask)
  flat2_q6 = _mm_and_si128(flat2, flat2_q6);
  // get values for when (flat2 && flat && mask)
  q6 = _mm_or_si128(q6, flat2_q6);  // full list of q6 values
  _mm_store_si128((__m128i *)(s - 7 * p), p6);
  _mm_store_si128((__m128i *)(s + 6 * p), q6);

  p5 = _mm_andnot_si128(flat2, p5);
  // p5 remains unchanged if !(flat2 && flat && mask)
  flat2_p5 = _mm_and_si128(flat2, flat2_p5);
  // get values for when (flat2 && flat && mask)
  p5 = _mm_or_si128(p5, flat2_p5);
  // full list of p5 values
  q5 = _mm_andnot_si128(flat2, q5);
  // q5 remains unchanged if !(flat2 && flat && mask)
  flat2_q5 = _mm_and_si128(flat2, flat2_q5);
  // get values for when (flat2 && flat && mask)
  q5 = _mm_or_si128(q5, flat2_q5);
  // full list of q5 values
  _mm_store_si128((__m128i *)(s - 6 * p), p5);
  _mm_store_si128((__m128i *)(s + 5 * p), q5);

  p4 = _mm_andnot_si128(flat2, p4);
  // p4 remains unchanged if !(flat2 && flat && mask)
  flat2_p4 = _mm_and_si128(flat2, flat2_p4);
  // get values for when (flat2 && flat && mask)
  p4 = _mm_or_si128(p4, flat2_p4);  // full list of p4 values
  q4 = _mm_andnot_si128(flat2, q4);
  // q4 remains unchanged if !(flat2 && flat && mask)
  flat2_q4 = _mm_and_si128(flat2, flat2_q4);
  // get values for when (flat2 && flat && mask)
  q4 = _mm_or_si128(q4, flat2_q4);  // full list of q4 values
  _mm_store_si128((__m128i *)(s - 5 * p), p4);
  _mm_store_si128((__m128i *)(s + 4 * p), q4);

  p3 = _mm_andnot_si128(flat2, p3);
  // p3 takes value from highbd_filter8 if !(flat2 && flat && mask)
  flat2_p3 = _mm_and_si128(flat2, flat2_p3);
  // get values for when (flat2 && flat && mask)
  p3 = _mm_or_si128(p3, flat2_p3);  // full list of p3 values
  q3 = _mm_andnot_si128(flat2, q3);
  // q3 takes value from highbd_filter8 if !(flat2 && flat && mask)
  flat2_q3 = _mm_and_si128(flat2, flat2_q3);
  // get values for when (flat2 && flat && mask)
  q3 = _mm_or_si128(q3, flat2_q3);  // full list of q3 values
  _mm_store_si128((__m128i *)(s - 4 * p), p3);
  _mm_store_si128((__m128i *)(s + 3 * p), q3);

  p2 = _mm_andnot_si128(flat2, p2);
  // p2 takes value from highbd_filter8 if !(flat2 && flat && mask)
  flat2_p2 = _mm_and_si128(flat2, flat2_p2);
  // get values for when (flat2 && flat && mask)
  p2 = _mm_or_si128(p2, flat2_p2);
  // full list of p2 values
  q2 = _mm_andnot_si128(flat2, q2);
  // q2 takes value from highbd_filter8 if !(flat2 && flat && mask)
  flat2_q2 = _mm_and_si128(flat2, flat2_q2);
  // get values for when (flat2 && flat && mask)
  q2 = _mm_or_si128(q2, flat2_q2);  // full list of q2 values
  _mm_store_si128((__m128i *)(s - 3 * p), p2);
  _mm_store_si128((__m128i *)(s + 2 * p), q2);

  p1 = _mm_andnot_si128(flat2, p1);
  // p1 takes value from highbd_filter8 if !(flat2 && flat && mask)
  flat2_p1 = _mm_and_si128(flat2, flat2_p1);
  // get values for when (flat2 && flat && mask)
  p1 = _mm_or_si128(p1, flat2_p1);  // full list of p1 values
  q1 = _mm_andnot_si128(flat2, q1);
  // q1 takes value from highbd_filter8 if !(flat2 && flat && mask)
  flat2_q1 = _mm_and_si128(flat2, flat2_q1);
  // get values for when (flat2 && flat && mask)
  q1 = _mm_or_si128(q1, flat2_q1);  // full list of q1 values
  _mm_store_si128((__m128i *)(s - 2 * p), p1);
  _mm_store_si128((__m128i *)(s + 1 * p), q1);

  p0 = _mm_andnot_si128(flat2, p0);
  // p0 takes value from highbd_filter8 if !(flat2 && flat && mask)
  flat2_p0 = _mm_and_si128(flat2, flat2_p0);
  // get values for when (flat2 && flat && mask)
  p0 = _mm_or_si128(p0, flat2_p0);  // full list of p0 values
  q0 = _mm_andnot_si128(flat2, q0);
  // q0 takes value from highbd_filter8 if !(flat2 && flat && mask)
  flat2_q0 = _mm_and_si128(flat2, flat2_q0);
  // get values for when (flat2 && flat && mask)
  q0 = _mm_or_si128(q0, flat2_q0);  // full list of q0 values
  _mm_store_si128((__m128i *)(s - 1 * p), p0);
  _mm_store_si128((__m128i *)(s - 0 * p), q0);
}

static void highbd_mb_lpf_horizontal_edge_w_sse2_16(uint16_t *s,
                                                    int p,
                                                    const uint8_t *_blimit,
                                                    const uint8_t *_limit,
                                                    const uint8_t *_thresh,
                                                    int bd) {
  highbd_mb_lpf_horizontal_edge_w_sse2_8(s, p, _blimit, _limit, _thresh, bd);
  highbd_mb_lpf_horizontal_edge_w_sse2_8(s + 8, p, _blimit, _limit, _thresh,
                                         bd);
}

// TODO(yunqingwang): remove count and call these 2 functions (8 or 16)
// directly.
void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p,
                                       const uint8_t *_blimit,
                                       const uint8_t *_limit,
                                       const uint8_t *_thresh,
                                       int count, int bd) {
  if (count == 1)
    highbd_mb_lpf_horizontal_edge_w_sse2_8(s, p, _blimit, _limit, _thresh, bd);
  else
    highbd_mb_lpf_horizontal_edge_w_sse2_16(s, p, _blimit, _limit, _thresh,
                                            bd);
}

void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p,
                                      const uint8_t *_blimit,
                                      const uint8_t *_limit,
                                      const uint8_t *_thresh,
                                      int count, int bd) {
  DECLARE_ALIGNED(16, uint16_t, flat_op2[16]);
  DECLARE_ALIGNED(16, uint16_t, flat_op1[16]);
  DECLARE_ALIGNED(16, uint16_t, flat_op0[16]);
  DECLARE_ALIGNED(16, uint16_t, flat_oq2[16]);
  DECLARE_ALIGNED(16, uint16_t, flat_oq1[16]);
  DECLARE_ALIGNED(16, uint16_t, flat_oq0[16]);
  const __m128i zero = _mm_set1_epi16(0);
  __m128i blimit, limit, thresh;
  __m128i mask, hev, flat;
  __m128i p3 = _mm_load_si128((__m128i *)(s - 4 * p));
  __m128i q3 = _mm_load_si128((__m128i *)(s + 3 * p));
  __m128i p2 = _mm_load_si128((__m128i *)(s - 3 * p));
  __m128i q2 = _mm_load_si128((__m128i *)(s + 2 * p));
  __m128i p1 = _mm_load_si128((__m128i *)(s - 2 * p));
  __m128i q1 = _mm_load_si128((__m128i *)(s + 1 * p));
  __m128i p0 = _mm_load_si128((__m128i *)(s - 1 * p));
  __m128i q0 = _mm_load_si128((__m128i *)(s + 0 * p));
  const __m128i one = _mm_set1_epi16(1);
  const __m128i ffff = _mm_cmpeq_epi16(one, one);
  __m128i abs_p1q1, abs_p0q0, abs_q1q0, abs_p1p0, work;
  const __m128i four = _mm_set1_epi16(4);
  __m128i workp_a, workp_b, workp_shft;

  const __m128i t4 = _mm_set1_epi16(4);
  const __m128i t3 = _mm_set1_epi16(3);
  __m128i t80;
  const __m128i t1 = _mm_set1_epi16(0x1);
  __m128i ps1, ps0, qs0, qs1;
  __m128i filt;
  __m128i work_a;
  __m128i filter1, filter2;

  (void)count;

  if (bd == 8) {
    blimit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero);
    limit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero);
    thresh = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero);
    t80 = _mm_set1_epi16(0x80);
  } else if (bd == 10) {
    blimit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2);
    limit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2);
    thresh = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2);
    t80 = _mm_set1_epi16(0x200);
  } else {  // bd == 12
    blimit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 4);
    limit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4);
    thresh = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4);
    t80 = _mm_set1_epi16(0x800);
  }

  ps1 = _mm_subs_epi16(p1, t80);
  ps0 = _mm_subs_epi16(p0, t80);
  qs0 = _mm_subs_epi16(q0, t80);
  qs1 = _mm_subs_epi16(q1, t80);

  // filter_mask and hev_mask
  abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0),
                          _mm_subs_epu16(p0, p1));
  abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0),
                          _mm_subs_epu16(q0, q1));

  abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0),
                          _mm_subs_epu16(q0, p0));
  abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1),
                          _mm_subs_epu16(q1, p1));
  flat = _mm_max_epi16(abs_p1p0, abs_q1q0);
  hev = _mm_subs_epu16(flat, thresh);
  hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);
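
  // Scalar view of the two masks built in this section (illustrative only;
  // each SSE2 lane ends up holding 0 or 0xffff):
  //
  //   hev  = -(max(|p1 - p0|, |q1 - q0|) > thresh);
  //   mask = -(|p0 - q0| * 2 + |p1 - q1| / 2 <= blimit &&
  //            every neighboring |p[i+1] - p[i]|, |q[i+1] - q[i]| <= limit);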

  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
  mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
  mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
  // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
  // So taking maximums continues to work:
  mask = _mm_and_si128(mask, _mm_adds_epu16(limit, one));
  mask = _mm_max_epi16(abs_p1p0, mask);
  // mask |= (abs(p1 - p0) > limit) * -1;
  mask = _mm_max_epi16(abs_q1q0, mask);
  // mask |= (abs(q1 - q0) > limit) * -1;

  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1),
                                    _mm_subs_epu16(p1, p2)),
                       _mm_or_si128(_mm_subs_epu16(q2, q1),
                                    _mm_subs_epu16(q1, q2)));
  mask = _mm_max_epi16(work, mask);
  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p2),
                                    _mm_subs_epu16(p2, p3)),
                       _mm_or_si128(_mm_subs_epu16(q3, q2),
                                    _mm_subs_epu16(q2, q3)));
  mask = _mm_max_epi16(work, mask);
  mask = _mm_subs_epu16(mask, limit);
  mask = _mm_cmpeq_epi16(mask, zero);

  // flat_mask4
  flat = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p0),
                                    _mm_subs_epu16(p0, p2)),
                       _mm_or_si128(_mm_subs_epu16(q2, q0),
                                    _mm_subs_epu16(q0, q2)));
  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p0),
                                    _mm_subs_epu16(p0, p3)),
                       _mm_or_si128(_mm_subs_epu16(q3, q0),
                                    _mm_subs_epu16(q0, q3)));
  flat = _mm_max_epi16(work, flat);
  flat = _mm_max_epi16(abs_p1p0, flat);
  flat = _mm_max_epi16(abs_q1q0, flat);

  if (bd == 8)
    flat = _mm_subs_epu16(flat, one);
  else if (bd == 10)
    flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, 2));
  else  // bd == 12
    flat = _mm_subs_epu16(flat, _mm_slli_epi16(one, 4));

  flat = _mm_cmpeq_epi16(flat, zero);
  flat = _mm_and_si128(flat, mask);  // flat & mask

  // Added before shift for rounding part of ROUND_POWER_OF_TWO

  workp_a = _mm_add_epi16(_mm_add_epi16(p3, p3), _mm_add_epi16(p2, p1));
  workp_a = _mm_add_epi16(_mm_add_epi16(workp_a, four), p0);
  workp_b = _mm_add_epi16(_mm_add_epi16(q0, p2), p3);
  workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
  _mm_store_si128((__m128i *)&flat_op2[0], workp_shft);

  workp_b = _mm_add_epi16(_mm_add_epi16(q0, q1), p1);
  workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
  _mm_store_si128((__m128i *)&flat_op1[0], workp_shft);

  workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q2);
  workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p1), p0);
  workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
  _mm_store_si128((__m128i *)&flat_op0[0], workp_shft);

  workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p3), q3);
  workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, p0), q0);
  workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
  _mm_store_si128((__m128i *)&flat_oq0[0], workp_shft);

  workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p2), q3);
  workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q0), q1);
  workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
  _mm_store_si128((__m128i *)&flat_oq1[0], workp_shft);

  workp_a = _mm_add_epi16(_mm_sub_epi16(workp_a, p1), q3);
  workp_b = _mm_add_epi16(_mm_sub_epi16(workp_b, q1), q2);
  workp_shft = _mm_srli_epi16(_mm_add_epi16(workp_a, workp_b), 3);
  _mm_store_si128((__m128i *)&flat_oq2[0], workp_shft);
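
  // Scalar taps of the 7-tap flat filter computed above (illustrative only;
  // the rounding +4 is already folded into workp_a):
  //
  //   flat_op2 = (3 * p3 + 2 * p2 + p1 + p0 + q0 + 4) >> 3;
  //   flat_op1 = (2 * p3 + p2 + 2 * p1 + p0 + q0 + q1 + 4) >> 3;
  //   ...each later output drops the farthest p sample and adds the next q.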

  // lp filter
  filt = signed_char_clamp_bd_sse2(_mm_subs_epi16(ps1, qs1), bd);
  filt = _mm_and_si128(filt, hev);
  work_a = _mm_subs_epi16(qs0, ps0);
  filt = _mm_adds_epi16(filt, work_a);
  filt = _mm_adds_epi16(filt, work_a);
  filt = _mm_adds_epi16(filt, work_a);
  // (vpx_filter + 3 * (qs0 - ps0)) & mask
  filt = signed_char_clamp_bd_sse2(filt, bd);
  filt = _mm_and_si128(filt, mask);

  filter1 = _mm_adds_epi16(filt, t4);
  filter2 = _mm_adds_epi16(filt, t3);

  // Filter1 >> 3
  filter1 = signed_char_clamp_bd_sse2(filter1, bd);
  filter1 = _mm_srai_epi16(filter1, 3);

  // Filter2 >> 3
  filter2 = signed_char_clamp_bd_sse2(filter2, bd);
  filter2 = _mm_srai_epi16(filter2, 3);

  // filt >> 1
  filt = _mm_adds_epi16(filter1, t1);
  filt = _mm_srai_epi16(filt, 1);
  // filter = ROUND_POWER_OF_TWO(filter1, 1) & ~hev;
  filt = _mm_andnot_si128(hev, filt);

  work_a = signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd);
  work_a = _mm_adds_epi16(work_a, t80);
  q0 = _mm_load_si128((__m128i *)flat_oq0);
  work_a = _mm_andnot_si128(flat, work_a);
  q0 = _mm_and_si128(flat, q0);
  q0 = _mm_or_si128(work_a, q0);

  work_a = signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd);
  work_a = _mm_adds_epi16(work_a, t80);
  q1 = _mm_load_si128((__m128i *)flat_oq1);
  work_a = _mm_andnot_si128(flat, work_a);
  q1 = _mm_and_si128(flat, q1);
  q1 = _mm_or_si128(work_a, q1);

  work_a = _mm_loadu_si128((__m128i *)(s + 2 * p));
  q2 = _mm_load_si128((__m128i *)flat_oq2);
  work_a = _mm_andnot_si128(flat, work_a);
  q2 = _mm_and_si128(flat, q2);
  q2 = _mm_or_si128(work_a, q2);

  work_a = signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd);
  work_a = _mm_adds_epi16(work_a, t80);
  p0 = _mm_load_si128((__m128i *)flat_op0);
  work_a = _mm_andnot_si128(flat, work_a);
  p0 = _mm_and_si128(flat, p0);
  p0 = _mm_or_si128(work_a, p0);

  work_a = signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd);
  work_a = _mm_adds_epi16(work_a, t80);
  p1 = _mm_load_si128((__m128i *)flat_op1);
  work_a = _mm_andnot_si128(flat, work_a);
  p1 = _mm_and_si128(flat, p1);
  p1 = _mm_or_si128(work_a, p1);

  work_a = _mm_loadu_si128((__m128i *)(s - 3 * p));
  p2 = _mm_load_si128((__m128i *)flat_op2);
  work_a = _mm_andnot_si128(flat, work_a);
  p2 = _mm_and_si128(flat, p2);
  p2 = _mm_or_si128(work_a, p2);

  _mm_store_si128((__m128i *)(s - 3 * p), p2);
  _mm_store_si128((__m128i *)(s - 2 * p), p1);
  _mm_store_si128((__m128i *)(s - 1 * p), p0);
  _mm_store_si128((__m128i *)(s + 0 * p), q0);
  _mm_store_si128((__m128i *)(s + 1 * p), q1);
  _mm_store_si128((__m128i *)(s + 2 * p), q2);
}

void vpx_highbd_lpf_horizontal_8_dual_sse2(uint16_t *s, int p,
                                           const uint8_t *_blimit0,
                                           const uint8_t *_limit0,
                                           const uint8_t *_thresh0,
                                           const uint8_t *_blimit1,
                                           const uint8_t *_limit1,
                                           const uint8_t *_thresh1,
                                           int bd) {
  vpx_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd);
  vpx_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1,
                                   1, bd);
}
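
// Hypothetical caller sketch (not from libvpx): the threshold pointers must
// reference 16-byte-aligned buffers whose low 8 bytes replicate the 8-bit
// value, since these functions do an aligned 128-bit load and unpack the
// low 8 bytes. The threshold values below are placeholders.
//
//   DECLARE_ALIGNED(16, uint8_t, blimit[16]);
//   DECLARE_ALIGNED(16, uint8_t, limit[16]);
//   DECLARE_ALIGNED(16, uint8_t, thresh[16]);
//   memset(blimit, 60, 16);
//   memset(limit, 10, 16);
//   memset(thresh, 4, 16);
//   vpx_highbd_lpf_horizontal_8_sse2(frame + row * stride + col, stride,
//                                    blimit, limit, thresh, 1, bd);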

void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p,
                                      const uint8_t *_blimit,
                                      const uint8_t *_limit,
                                      const uint8_t *_thresh,
                                      int count, int bd) {
  const __m128i zero = _mm_set1_epi16(0);
  __m128i blimit, limit, thresh;
  __m128i mask, hev, flat;
  __m128i p3 = _mm_loadu_si128((__m128i *)(s - 4 * p));
  __m128i p2 = _mm_loadu_si128((__m128i *)(s - 3 * p));
  __m128i p1 = _mm_loadu_si128((__m128i *)(s - 2 * p));
  __m128i p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));
  __m128i q0 = _mm_loadu_si128((__m128i *)(s - 0 * p));
  __m128i q1 = _mm_loadu_si128((__m128i *)(s + 1 * p));
  __m128i q2 = _mm_loadu_si128((__m128i *)(s + 2 * p));
  __m128i q3 = _mm_loadu_si128((__m128i *)(s + 3 * p));
  const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0),
                                        _mm_subs_epu16(p0, p1));
  const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0),
                                        _mm_subs_epu16(q0, q1));
  const __m128i ffff = _mm_cmpeq_epi16(abs_p1p0, abs_p1p0);
  const __m128i one = _mm_set1_epi16(1);
  __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0),
                                  _mm_subs_epu16(q0, p0));
  __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1),
                                  _mm_subs_epu16(q1, p1));
  __m128i work;
  const __m128i t4 = _mm_set1_epi16(4);
  const __m128i t3 = _mm_set1_epi16(3);
  __m128i t80;
  __m128i tff80;
  __m128i tffe0;
  __m128i t1f;
  // equivalent to shifting 0x1f left by bitdepth - 8
  // and setting new bits to 1
  const __m128i t1 = _mm_set1_epi16(0x1);
  __m128i t7f;
  // equivalent to shifting 0x7f left by bitdepth - 8
  // and setting new bits to 1
  __m128i ps1, ps0, qs0, qs1;
  __m128i filt;
  __m128i work_a;
  __m128i filter1, filter2;

  (void)count;

  if (bd == 8) {
    blimit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero);
    limit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero);
    thresh = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero);
    t80 = _mm_set1_epi16(0x80);
    tff80 = _mm_set1_epi16(0xff80);
    tffe0 = _mm_set1_epi16(0xffe0);
    t1f = _mm_srli_epi16(_mm_set1_epi16(0x1fff), 8);
    t7f = _mm_srli_epi16(_mm_set1_epi16(0x7fff), 8);
  } else if (bd == 10) {
    blimit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2);
    limit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2);
    thresh = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2);
    t80 = _mm_slli_epi16(_mm_set1_epi16(0x80), 2);
    tff80 = _mm_slli_epi16(_mm_set1_epi16(0xff80), 2);
    tffe0 = _mm_slli_epi16(_mm_set1_epi16(0xffe0), 2);
    t1f = _mm_srli_epi16(_mm_set1_epi16(0x1fff), 6);
    t7f = _mm_srli_epi16(_mm_set1_epi16(0x7fff), 6);
  } else {  // bd == 12
    blimit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 4);
    limit = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4);
    thresh = _mm_slli_epi16(
        _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4);
    t80 = _mm_slli_epi16(_mm_set1_epi16(0x80), 4);
    tff80 = _mm_slli_epi16(_mm_set1_epi16(0xff80), 4);
    tffe0 = _mm_slli_epi16(_mm_set1_epi16(0xffe0), 4);
    t1f = _mm_srli_epi16(_mm_set1_epi16(0x1fff), 4);
    t7f = _mm_srli_epi16(_mm_set1_epi16(0x7fff), 4);
  }

  ps1 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s - 2 * p)), t80);
  ps0 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s - 1 * p)), t80);
  qs0 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s + 0 * p)), t80);
  qs1 = _mm_subs_epi16(_mm_loadu_si128((__m128i *)(s + 1 * p)), t80);

  // filter_mask and hev_mask
  flat = _mm_max_epi16(abs_p1p0, abs_q1q0);
  hev = _mm_subs_epu16(flat, thresh);
  hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff);
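
  // The "Filter >> 3" blocks below emulate a per-lane arithmetic shift with
  // logical shifts, since the pre-shift values occupy only the low bits.
  // Scalar sketch for one lane (illustrative only):
  //
  //   sign = (filter1 < 0) ? 0xffff : 0x0000;
  //   filter1 = (((uint16_t)filter1 >> 3) & t1f)  // keep the in-range bits
  //             | (sign & tffe0);                 // reinsert the sign bits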

  abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0);
  abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1);
  mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit);
  mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff);
  // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;
  // So taking maximums continues to work:
  mask = _mm_and_si128(mask, _mm_adds_epu16(limit, one));
  mask = _mm_max_epi16(flat, mask);
  // mask |= (abs(p1 - p0) > limit) * -1;
  // mask |= (abs(q1 - q0) > limit) * -1;
  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1),
                                    _mm_subs_epu16(p1, p2)),
                       _mm_or_si128(_mm_subs_epu16(p3, p2),
                                    _mm_subs_epu16(p2, p3)));
  mask = _mm_max_epi16(work, mask);
  work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(q2, q1),
                                    _mm_subs_epu16(q1, q2)),
                       _mm_or_si128(_mm_subs_epu16(q3, q2),
                                    _mm_subs_epu16(q2, q3)));
  mask = _mm_max_epi16(work, mask);
  mask = _mm_subs_epu16(mask, limit);
  mask = _mm_cmpeq_epi16(mask, zero);

  // filter4
  filt = signed_char_clamp_bd_sse2(_mm_subs_epi16(ps1, qs1), bd);
  filt = _mm_and_si128(filt, hev);
  work_a = _mm_subs_epi16(qs0, ps0);
  filt = _mm_adds_epi16(filt, work_a);
  filt = _mm_adds_epi16(filt, work_a);
  filt = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, work_a), bd);

  // (vpx_filter + 3 * (qs0 - ps0)) & mask
  filt = _mm_and_si128(filt, mask);

  filter1 = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, t4), bd);
  filter2 = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, t3), bd);

  // Filter1 >> 3
  work_a = _mm_cmpgt_epi16(zero, filter1);  // get the values that are < 0
  filter1 = _mm_srli_epi16(filter1, 3);
  work_a = _mm_and_si128(work_a, tffe0);  // sign bits for the values < 0
  filter1 = _mm_and_si128(filter1, t1f);  // clamp the range
  filter1 = _mm_or_si128(filter1, work_a);  // reinsert the sign bits

  // Filter2 >> 3
  work_a = _mm_cmpgt_epi16(zero, filter2);
  filter2 = _mm_srli_epi16(filter2, 3);
  work_a = _mm_and_si128(work_a, tffe0);
  filter2 = _mm_and_si128(filter2, t1f);
  filter2 = _mm_or_si128(filter2, work_a);

  // filt >> 1
  filt = _mm_adds_epi16(filter1, t1);
  work_a = _mm_cmpgt_epi16(zero, filt);
  filt = _mm_srli_epi16(filt, 1);
  work_a = _mm_and_si128(work_a, tff80);
  filt = _mm_and_si128(filt, t7f);
  filt = _mm_or_si128(filt, work_a);

  filt = _mm_andnot_si128(hev, filt);

  q0 = _mm_adds_epi16(
      signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd), t80);
  q1 = _mm_adds_epi16(
      signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd), t80);
  p0 = _mm_adds_epi16(
      signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd), t80);
  p1 = _mm_adds_epi16(
      signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd), t80);

  _mm_storeu_si128((__m128i *)(s - 2 * p), p1);
  _mm_storeu_si128((__m128i *)(s - 1 * p), p0);
  _mm_storeu_si128((__m128i *)(s + 0 * p), q0);
  _mm_storeu_si128((__m128i *)(s + 1 * p), q1);
}

void vpx_highbd_lpf_horizontal_4_dual_sse2(uint16_t *s, int p,
                                           const uint8_t *_blimit0,
                                           const uint8_t *_limit0,
                                           const uint8_t *_thresh0,
                                           const uint8_t *_blimit1,
                                           const uint8_t *_limit1,
                                           const uint8_t *_thresh1,
                                           int bd) {
  vpx_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd);
  vpx_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, 1,
                                   bd);
}

static INLINE void highbd_transpose(uint16_t *src[], int in_p,
                                    uint16_t *dst[], int out_p,
                                    int num_8x8_to_transpose) {
  int idx8x8 = 0;
  __m128i p0, p1, p2, p3, p4, p5, p6, p7, x0, x1, x2, x3, x4, x5, x6, x7;
  do {
    uint16_t *in = src[idx8x8];
    uint16_t *out = dst[idx8x8];

    p0 = _mm_loadu_si128((__m128i *)(in + 0*in_p));  // 00 01 02 03 04 05 06 07
    p1 = _mm_loadu_si128((__m128i *)(in + 1*in_p));  // 10 11 12 13 14 15 16 17
    p2 = _mm_loadu_si128((__m128i *)(in + 2*in_p));  // 20 21 22 23 24 25 26 27
    p3 = _mm_loadu_si128((__m128i *)(in + 3*in_p));  // 30 31 32 33 34 35 36 37
    p4 = _mm_loadu_si128((__m128i *)(in + 4*in_p));  // 40 41 42 43 44 45 46 47
    p5 = _mm_loadu_si128((__m128i *)(in + 5*in_p));  // 50 51 52 53 54 55 56 57
    p6 = _mm_loadu_si128((__m128i *)(in + 6*in_p));  // 60 61 62 63 64 65 66 67
    p7 = _mm_loadu_si128((__m128i *)(in + 7*in_p));  // 70 71 72 73 74 75 76 77
    // 00 10 01 11 02 12 03 13
    x0 = _mm_unpacklo_epi16(p0, p1);
    // 20 30 21 31 22 32 23 33
    x1 = _mm_unpacklo_epi16(p2, p3);
    // 40 50 41 51 42 52 43 53
    x2 = _mm_unpacklo_epi16(p4, p5);
    // 60 70 61 71 62 72 63 73
    x3 = _mm_unpacklo_epi16(p6, p7);
    // 00 10 20 30 01 11 21 31
    x4 = _mm_unpacklo_epi32(x0, x1);
    // 40 50 60 70 41 51 61 71
    x5 = _mm_unpacklo_epi32(x2, x3);
    // 00 10 20 30 40 50 60 70
    x6 = _mm_unpacklo_epi64(x4, x5);
    // 01 11 21 31 41 51 61 71
    x7 = _mm_unpackhi_epi64(x4, x5);

    _mm_storeu_si128((__m128i *)(out + 0*out_p), x6);
    // 00 10 20 30 40 50 60 70
    _mm_storeu_si128((__m128i *)(out + 1*out_p), x7);
    // 01 11 21 31 41 51 61 71

    // 02 12 22 32 03 13 23 33
    x4 = _mm_unpackhi_epi32(x0, x1);
    // 42 52 62 72 43 53 63 73
    x5 = _mm_unpackhi_epi32(x2, x3);
    // 02 12 22 32 42 52 62 72
    x6 = _mm_unpacklo_epi64(x4, x5);
    // 03 13 23 33 43 53 63 73
    x7 = _mm_unpackhi_epi64(x4, x5);

    _mm_storeu_si128((__m128i *)(out + 2*out_p), x6);
    // 02 12 22 32 42 52 62 72
    _mm_storeu_si128((__m128i *)(out + 3*out_p), x7);
    // 03 13 23 33 43 53 63 73

    // 04 14 05 15 06 16 07 17
    x0 = _mm_unpackhi_epi16(p0, p1);
    // 24 34 25 35 26 36 27 37
    x1 = _mm_unpackhi_epi16(p2, p3);
    // 44 54 45 55 46 56 47 57
    x2 = _mm_unpackhi_epi16(p4, p5);
    // 64 74 65 75 66 76 67 77
    x3 = _mm_unpackhi_epi16(p6, p7);
    // 04 14 24 34 05 15 25 35
    x4 = _mm_unpacklo_epi32(x0, x1);
    // 44 54 64 74 45 55 65 75
    x5 = _mm_unpacklo_epi32(x2, x3);
    // 04 14 24 34 44 54 64 74
    x6 = _mm_unpacklo_epi64(x4, x5);
    // 05 15 25 35 45 55 65 75
    x7 = _mm_unpackhi_epi64(x4, x5);

    _mm_storeu_si128((__m128i *)(out + 4*out_p), x6);
    // 04 14 24 34 44 54 64 74
    _mm_storeu_si128((__m128i *)(out + 5*out_p), x7);
    // 05 15 25 35 45 55 65 75

    // 06 16 26 36 07 17 27 37
    x4 = _mm_unpackhi_epi32(x0, x1);
    // 46 56 66 76 47 57 67 77
    x5 = _mm_unpackhi_epi32(x2, x3);
    // 06 16 26 36 46 56 66 76
    x6 = _mm_unpacklo_epi64(x4, x5);
    // 07 17 27 37 47 57 67 77
    x7 = _mm_unpackhi_epi64(x4, x5);

    _mm_storeu_si128((__m128i *)(out + 6*out_p), x6);
    // 06 16 26 36 46 56 66 76
    _mm_storeu_si128((__m128i *)(out + 7*out_p), x7);
    // 07 17 27 37 47 57 67 77
  } while (++idx8x8 < num_8x8_to_transpose);
}
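
// Scalar reference for what highbd_transpose does per 8x8 tile (illustrative
// only): the three rounds of interleaves (epi16, epi32, epi64) implement
//
//   for (r = 0; r < 8; ++r)
//     for (c = 0; c < 8; ++c)
//       out[c * out_p + r] = in[r * in_p + c];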

static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1,
                                        int in_p, uint16_t *out, int out_p) {
  uint16_t *src0[1];
  uint16_t *src1[1];
  uint16_t *dest0[1];
  uint16_t *dest1[1];
  src0[0] = in0;
  src1[0] = in1;
  dest0[0] = out;
  dest1[0] = out + 8;
  highbd_transpose(src0, in_p, dest0, out_p, 1);
  highbd_transpose(src1, in_p, dest1, out_p, 1);
}

void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p,
                                    const uint8_t *blimit,
                                    const uint8_t *limit,
                                    const uint8_t *thresh,
                                    int count, int bd) {
  DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
  uint16_t *src[1];
  uint16_t *dst[1];
  (void)count;

  // Transpose 8x8
  src[0] = s - 4;
  dst[0] = t_dst;

  highbd_transpose(src, p, dst, 8, 1);

  // Loop filtering
  vpx_highbd_lpf_horizontal_4_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1,
                                   bd);

  src[0] = t_dst;
  dst[0] = s - 4;

  // Transpose back
  highbd_transpose(src, 8, dst, p, 1);
}

void vpx_highbd_lpf_vertical_4_dual_sse2(uint16_t *s, int p,
                                         const uint8_t *blimit0,
                                         const uint8_t *limit0,
                                         const uint8_t *thresh0,
                                         const uint8_t *blimit1,
                                         const uint8_t *limit1,
                                         const uint8_t *thresh1,
                                         int bd) {
  DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]);
  uint16_t *src[2];
  uint16_t *dst[2];

  // Transpose 8x16
  highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);

  // Loop filtering
  vpx_highbd_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
                                        thresh0, blimit1, limit1, thresh1, bd);
  src[0] = t_dst;
  src[1] = t_dst + 8;
  dst[0] = s - 4;
  dst[1] = s - 4 + p * 8;

  // Transpose back
  highbd_transpose(src, 16, dst, p, 2);
}

void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p,
                                    const uint8_t *blimit,
                                    const uint8_t *limit,
                                    const uint8_t *thresh,
                                    int count, int bd) {
  DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]);
  uint16_t *src[1];
  uint16_t *dst[1];
  (void)count;

  // Transpose 8x8
  src[0] = s - 4;
  dst[0] = t_dst;

  highbd_transpose(src, p, dst, 8, 1);

  // Loop filtering
  vpx_highbd_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1,
                                   bd);

  src[0] = t_dst;
  dst[0] = s - 4;

  // Transpose back
  highbd_transpose(src, 8, dst, p, 1);
}

void vpx_highbd_lpf_vertical_8_dual_sse2(uint16_t *s, int p,
                                         const uint8_t *blimit0,
                                         const uint8_t *limit0,
                                         const uint8_t *thresh0,
                                         const uint8_t *blimit1,
                                         const uint8_t *limit1,
                                         const uint8_t *thresh1,
                                         int bd) {
  DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]);
  uint16_t *src[2];
  uint16_t *dst[2];

  // Transpose 8x16
  highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16);

  // Loop filtering
  vpx_highbd_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0,
                                        thresh0, blimit1, limit1, thresh1, bd);
  src[0] = t_dst;
  src[1] = t_dst + 8;

  dst[0] = s - 4;
  dst[1] = s - 4 + p * 8;

  // Transpose back
  highbd_transpose(src, 16, dst, p, 2);
}

void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p,
                                     const uint8_t *blimit,
                                     const uint8_t *limit,
                                     const uint8_t *thresh,
                                     int bd) {
  DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 16]);
  uint16_t *src[2];
  uint16_t *dst[2];

  src[0] = s - 8;
  src[1] = s;
  dst[0] = t_dst;
  dst[1] = t_dst + 8 * 8;

  // Transpose 16x8
  highbd_transpose(src, p, dst, 8, 2);

  // Loop filtering
  highbd_mb_lpf_horizontal_edge_w_sse2_8(t_dst + 8 * 8, 8, blimit, limit,
                                         thresh, bd);
  src[0] = t_dst;
  src[1] = t_dst + 8 * 8;
  dst[0] = s - 8;
  dst[1] = s;

  // Transpose back
  highbd_transpose(src, 8, dst, p, 2);
}

void vpx_highbd_lpf_vertical_16_dual_sse2(uint16_t *s,
                                          int p,
                                          const uint8_t *blimit,
                                          const uint8_t *limit,
                                          const uint8_t *thresh,
                                          int bd) {
  DECLARE_ALIGNED(16, uint16_t, t_dst[256]);

  // Transpose 16x16
  highbd_transpose8x16(s - 8, s - 8 + 8 * p, p, t_dst, 16);
  highbd_transpose8x16(s, s + 8 * p, p, t_dst + 8 * 16, 16);

  // Loop filtering
  highbd_mb_lpf_horizontal_edge_w_sse2_16(t_dst + 8 * 16, 16, blimit, limit,
                                          thresh, bd);

  // Transpose back
  highbd_transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, p);
  highbd_transpose8x16(t_dst + 8, t_dst + 8 + 8 * 16, 16, s - 8 + 8 * p, p);
}