lp_bld_pack.c revision 37f4c2f906c8e2a6df609a190e4ca9ff028b265b
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper functions for packing/unpacking.
 *
 * Packing/unpacking is necessary for conversion between types of different
 * bit width.
 *
 * They are also commonly used when a computation needs higher
 * precision for the intermediate values. For example, if one needs the
 * function:
 *
 *   c = compute(a, b);
 *
 * to use more precision for intermediate results then one should implement it
 * as:
 *
 *   LLVMValueRef
 *   compute(LLVMBuilderRef builder, struct lp_type type, LLVMValueRef a, LLVMValueRef b)
 *   {
 *      struct lp_type wide_type = lp_wider_type(type);
 *      LLVMValueRef al, ah, bl, bh, cl, ch, c;
 *
 *      lp_build_unpack2(builder, type, wide_type, a, &al, &ah);
 *      lp_build_unpack2(builder, type, wide_type, b, &bl, &bh);
 *
 *      cl = compute_half(al, bl);
 *      ch = compute_half(ah, bh);
 *
 *      c = lp_build_pack2(builder, wide_type, type, cl, ch);
 *
 *      return c;
 *   }
 *
 * where compute_half() would do the computation for half the elements with
 * twice the precision.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_intr.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"


/**
 * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
 */
static LLVMValueRef
lp_build_const_unpack_shuffle(unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);
   assert(lo_hi < 2);

   /* TODO: cache results in a static table */

   for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
      elems[i + 0] = LLVMConstInt(LLVMInt32Type(), 0 + j, 0);
      elems[i + 1] = LLVMConstInt(LLVMInt32Type(), n + j, 0);
   }

   return LLVMConstVector(elems, n);
}
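
/*
 * For example, with n = 8 the masks built above select elements from the
 * concatenation of the two shuffle operands (a = 0..7, b = 8..15):
 *
 *    lo_hi = 0:  <0, 8, 1, 9, 2, 10, 3, 11>    (PUNPCKLxx)
 *    lo_hi = 1:  <4, 12, 5, 13, 6, 14, 7, 15>  (PUNPCKHxx)
 */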
/**
 * Build shuffle vectors that match PACKxx instructions.
 */
static LLVMValueRef
lp_build_const_pack_shuffle(unsigned n)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(n <= LP_MAX_VECTOR_LENGTH);

   /* TODO: cache results in a static table */

   for(i = 0; i < n; ++i)
      elems[i] = LLVMConstInt(LLVMInt32Type(), 2*i, 0);

   return LLVMConstVector(elems, n);
}


/**
 * Interleave vector elements.
 *
 * Matches the PUNPCKLxx and PUNPCKHxx SSE instructions.
 */
LLVMValueRef
lp_build_interleave2(LLVMBuilderRef builder,
                     struct lp_type type,
                     LLVMValueRef a,
                     LLVMValueRef b,
                     unsigned lo_hi)
{
   LLVMValueRef shuffle;

   shuffle = lp_build_const_unpack_shuffle(type.length, lo_hi);

   return LLVMBuildShuffleVector(builder, a, b, shuffle, "");
}


/**
 * Double the bit width.
 *
 * This will only change the number of bits used to represent the values, not
 * the values themselves.
 */
void
lp_build_unpack2(LLVMBuilderRef builder,
                 struct lp_type src_type,
                 struct lp_type dst_type,
                 LLVMValueRef src,
                 LLVMValueRef *dst_lo,
                 LLVMValueRef *dst_hi)
{
   LLVMValueRef msb;
   LLVMTypeRef dst_vec_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(dst_type.width == src_type.width * 2);
   assert(dst_type.length * 2 == src_type.length);

   if(dst_type.sign && src_type.sign) {
      /* Replicate the sign bit in the most significant bits */
      msb = LLVMBuildAShr(builder, src, lp_build_const_int_vec(src_type, src_type.width - 1), "");
   }
   else
      /* Most significant bits always zero */
      msb = lp_build_zero(src_type);

   /* Interleave bits */
   if(util_cpu_caps.little_endian) {
      *dst_lo = lp_build_interleave2(builder, src_type, src, msb, 0);
      *dst_hi = lp_build_interleave2(builder, src_type, src, msb, 1);
   }
   else {
      *dst_lo = lp_build_interleave2(builder, src_type, msb, src, 0);
      *dst_hi = lp_build_interleave2(builder, src_type, msb, src, 1);
   }

   /* Cast the result into the new type (twice as wide) */

   dst_vec_type = lp_build_vec_type(dst_type);

   *dst_lo = LLVMBuildBitCast(builder, *dst_lo, dst_vec_type, "");
   *dst_hi = LLVMBuildBitCast(builder, *dst_hi, dst_vec_type, "");
}


/**
 * Expand the bit width.
 *
 * This will only change the number of bits used to represent the values, not
 * the values themselves.
 */
void
lp_build_unpack(LLVMBuilderRef builder,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef src,
                LLVMValueRef *dst, unsigned num_dsts)
{
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length == dst_type.length * num_dsts);

   num_tmps = 1;
   dst[0] = src;

   while(src_type.width < dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width *= 2;
      tmp_type.length /= 2;

      for(i = num_tmps; i--; ) {
         lp_build_unpack2(builder, src_type, tmp_type, dst[i], &dst[2*i + 0], &dst[2*i + 1]);
      }

      src_type = tmp_type;

      num_tmps *= 2;
   }

   assert(num_tmps == num_dsts);
}
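
/*
 * Usage sketch: widening one vector of 16 x uint8 into four vectors of
 * 4 x uint32.  Both types span 128 bits, as the first assertion in
 * lp_build_unpack requires.  (Illustrative only; zero-initializing the
 * remaining struct lp_type fields is assumed to yield an unsigned
 * integer type.)
 *
 *    struct lp_type src_type, dst_type;
 *    LLVMValueRef wide[4];
 *
 *    memset(&src_type, 0, sizeof src_type);
 *    src_type.width  = 8;
 *    src_type.length = 16;
 *
 *    dst_type = src_type;
 *    dst_type.width  = 32;
 *    dst_type.length = 4;
 *
 *    lp_build_unpack(builder, src_type, dst_type, src, wide, 4);
 */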
/**
 * Non-interleaved pack.
 *
 * This will move values as
 *
 *   lo = __ l0 __ l1 __ l2 __.. __ ln
 *   hi = __ h0 __ h1 __ h2 __.. __ hn
 *   res = l0 l1 l2 .. ln h0 h1 h2 .. hn
 *
 * This will only change the number of bits used to represent the values, not
 * the values themselves.
 *
 * It is assumed the values are already clamped into the destination type
 * range. Values outside that range will produce undefined results. Use
 * lp_build_packs2 instead if the values may be out of range.
 */
LLVMValueRef
lp_build_pack2(LLVMBuilderRef builder,
               struct lp_type src_type,
               struct lp_type dst_type,
               LLVMValueRef lo,
               LLVMValueRef hi)
{
#if HAVE_LLVM < 0x0207
   LLVMTypeRef src_vec_type = lp_build_vec_type(src_type);
#endif
   LLVMTypeRef dst_vec_type = lp_build_vec_type(dst_type);
   LLVMValueRef shuffle;
   LLVMValueRef res;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   if(util_cpu_caps.has_sse2 && src_type.width * src_type.length == 128) {
      switch(src_type.width) {
      case 32:
         if(dst_type.sign) {
#if HAVE_LLVM >= 0x0207
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packssdw.128", dst_vec_type, lo, hi);
#else
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packssdw.128", src_vec_type, lo, hi);
#endif
         }
         else {
            if (util_cpu_caps.has_sse4_1) {
               return lp_build_intrinsic_binary(builder, "llvm.x86.sse41.packusdw", dst_vec_type, lo, hi);
            }
            else {
               assert(0);
               return LLVMGetUndef(dst_vec_type);
            }
         }
         break;

      case 16:
         if(dst_type.sign)
#if HAVE_LLVM >= 0x0207
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packsswb.128", dst_vec_type, lo, hi);
#else
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packsswb.128", src_vec_type, lo, hi);
#endif
         else
#if HAVE_LLVM >= 0x0207
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packuswb.128", dst_vec_type, lo, hi);
#else
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packuswb.128", src_vec_type, lo, hi);
#endif
         break;

      default:
         assert(0);
         return LLVMGetUndef(dst_vec_type);
         break;
      }

      res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
      return res;
   }

   lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
   hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");

   shuffle = lp_build_const_pack_shuffle(dst_type.length);

   res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");

   return res;
}
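
/*
 * Usage sketch: packing two vectors of 8 x int16 into one vector of
 * 16 x int8.  On an SSE2 target this maps to a single PACKSSWB, which
 * saturates; on the generic shuffle path the inputs must already be in
 * [-128, 127].  (Illustrative only; see lp_build_packs2 below for the
 * clamping variant.)
 *
 *    src_type:  width = 16, length = 8,  sign = TRUE
 *    dst_type:  width = 8,  length = 16, sign = TRUE
 *
 *    res = lp_build_pack2(builder, src_type, dst_type, lo, hi);
 */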
/**
 * Non-interleaved pack and saturate.
 *
 * Same as lp_build_pack2 but will saturate values so that they fit into the
 * destination type.
 */
LLVMValueRef
lp_build_packs2(LLVMBuilderRef builder,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef lo,
                LLVMValueRef hi)
{
   boolean clamp;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.sign == dst_type.sign);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   clamp = TRUE;

   /* All X86 SSE non-interleaved pack instructions take signed inputs and
    * saturate them, so no need to clamp for those cases. */
   if(util_cpu_caps.has_sse2 &&
      src_type.width * src_type.length == 128 &&
      src_type.sign)
      clamp = FALSE;

   if(clamp) {
      struct lp_build_context bld;
      unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
      LLVMValueRef dst_max = lp_build_const_int_vec(src_type, ((unsigned long long)1 << dst_bits) - 1);
      lp_build_context_init(&bld, builder, src_type);
      lo = lp_build_min(&bld, lo, dst_max);
      hi = lp_build_min(&bld, hi, dst_max);
      /* FIXME: What about the lower bound? */
   }

   return lp_build_pack2(builder, src_type, dst_type, lo, hi);
}


/**
 * Truncate the bit width.
 *
 * TODO: Handle saturation consistently.
 */
LLVMValueRef
lp_build_pack(LLVMBuilderRef builder,
              struct lp_type src_type,
              struct lp_type dst_type,
              boolean clamped,
              const LLVMValueRef *src, unsigned num_srcs)
{
   LLVMValueRef (*pack2)(LLVMBuilderRef builder,
                         struct lp_type src_type,
                         struct lp_type dst_type,
                         LLVMValueRef lo,
                         LLVMValueRef hi);
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;


   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length);

   if(clamped)
      pack2 = &lp_build_pack2;
   else
      pack2 = &lp_build_packs2;

   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];

   while(src_type.width > dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width /= 2;
      tmp_type.length *= 2;

      /* Take into consideration the sign changes only in the last step */
      if(tmp_type.width == dst_type.width)
         tmp_type.sign = dst_type.sign;

      num_srcs /= 2;

      for(i = 0; i < num_srcs; ++i)
         tmp[i] = pack2(builder, src_type, tmp_type, tmp[2*i + 0], tmp[2*i + 1]);

      src_type = tmp_type;
   }

   assert(num_srcs == 1);

   return tmp[0];
}
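
/*
 * Usage sketch: reducing four vectors of 4 x int32, with values already
 * clamped to [0, 255], down to a single vector of 16 x uint8 in two pack2
 * rounds (32 -> 16 -> 8).  The sign flag only switches to the destination's
 * on the last round, so on SSE2 this becomes PACKSSDW followed by PACKUSWB.
 * (Illustrative only.)
 *
 *    src_type:  width = 32, length = 4,  sign = TRUE
 *    dst_type:  width = 8,  length = 16, sign = FALSE
 *
 *    res = lp_build_pack(builder, src_type, dst_type, TRUE, src, 4);
 */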
489 */ 490 491 assert(src_type.length == dst_type.length); 492 tmp[0] = lp_build_undef(dst_type); 493 for (i = 0; i < dst_type.length; ++i) { 494 LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0); 495 LLVMValueRef val = LLVMBuildExtractElement(builder, src[0], index, ""); 496 val = LLVMBuildTrunc(builder, val, lp_build_elem_type(dst_type), ""); 497 tmp[0] = LLVMBuildInsertElement(builder, tmp[0], val, index, ""); 498 } 499 } 500 } 501 else if (src_type.width < dst_type.width) { 502 /* 503 * Expand bit width. 504 */ 505 506 assert(num_srcs == 1); 507 508 if (src_type.width * src_type.length == dst_type.width * dst_type.length) { 509 /* 510 * Register width remains constant -- use vector unpack intrinsics 511 */ 512 lp_build_unpack(builder, src_type, dst_type, src[0], tmp, num_dsts); 513 } 514 else { 515 /* 516 * Do it element-wise. 517 */ 518 519 assert(src_type.length == dst_type.length); 520 tmp[0] = lp_build_undef(dst_type); 521 for (i = 0; i < dst_type.length; ++i) { 522 LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0); 523 LLVMValueRef val = LLVMBuildExtractElement(builder, src[0], index, ""); 524 525 if (src_type.sign && dst_type.sign) { 526 val = LLVMBuildSExt(builder, val, lp_build_elem_type(dst_type), ""); 527 } else { 528 val = LLVMBuildZExt(builder, val, lp_build_elem_type(dst_type), ""); 529 } 530 tmp[0] = LLVMBuildInsertElement(builder, tmp[0], val, index, ""); 531 } 532 } 533 } 534 else { 535 /* 536 * No-op 537 */ 538 539 assert(num_srcs == 1); 540 assert(num_dsts == 1); 541 542 tmp[0] = src[0]; 543 } 544 545 for(i = 0; i < num_dsts; ++i) 546 dst[i] = tmp[i]; 547} 548 549 550