lp_bld_sample.c revision c4d2a14d6e98dae29a9a04da122521e25eaf7986
1/************************************************************************** 2 * 3 * Copyright 2009 VMware, Inc. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28/** 29 * @file 30 * Texture sampling -- common code. 31 * 32 * @author Jose Fonseca <jfonseca@vmware.com> 33 */ 34 35#include "pipe/p_defines.h" 36#include "pipe/p_state.h" 37#include "util/u_format.h" 38#include "util/u_math.h" 39#include "lp_bld_arit.h" 40#include "lp_bld_const.h" 41#include "lp_bld_debug.h" 42#include "lp_bld_printf.h" 43#include "lp_bld_flow.h" 44#include "lp_bld_sample.h" 45#include "lp_bld_swizzle.h" 46#include "lp_bld_type.h" 47#include "lp_bld_logic.h" 48#include "lp_bld_pack.h" 49 50 51/* 52 * Bri-linear factor. Should be greater than one. 
 */
#define BRILINEAR_FACTOR 2


/**
 * Does the given texture wrap mode allow sampling the texture border color?
 * XXX maybe move this into gallium util code.
 */
boolean
lp_sampler_wrap_mode_uses_border_color(unsigned mode,
                                       unsigned min_img_filter,
                                       unsigned mag_img_filter)
{
   switch (mode) {
   case PIPE_TEX_WRAP_REPEAT:
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
   case PIPE_TEX_WRAP_MIRROR_REPEAT:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
      /* these modes never read outside the texture proper */
      return FALSE;
   case PIPE_TEX_WRAP_CLAMP:
   case PIPE_TEX_WRAP_MIRROR_CLAMP:
      /*
       * For these modes the border can only be hit when a linear filter
       * is involved; pure nearest sampling never reaches it.
       */
      if (min_img_filter == PIPE_TEX_FILTER_NEAREST &&
          mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
         return FALSE;
      } else {
         return TRUE;
      }
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
      return TRUE;
   default:
      assert(0 && "unexpected wrap mode");
      return FALSE;
   }
}


/**
 * Initialize lp_sampler_static_state object with the gallium sampler
 * and texture state.
 * The former is considered to be static and the later dynamic.
 */
void
lp_sampler_static_state(struct lp_sampler_static_state *state,
                        const struct pipe_sampler_view *view,
                        const struct pipe_sampler_state *sampler)
{
   const struct pipe_resource *texture = view->texture;

   /* start from all-zero so unset fields hash/compare consistently
    * (this struct is part of the shader key) */
   memset(state, 0, sizeof *state);

   if(!texture)
      return;

   if(!sampler)
      return;

   /*
    * We don't copy sampler state over unless it is actually enabled, to avoid
    * spurious recompiles, as the sampler static state is part of the shader
    * key.
    *
    * Ideally the state tracker or cso_cache module would make all state
    * canonical, but until that happens it's better to be safe than sorry here.
    *
    * XXX: Actually there's much more than can be done here, especially
    * regarding 1D/2D/3D/CUBE textures, wrap modes, etc.
    */

   state->format = view->format;
   state->swizzle_r = view->swizzle_r;
   state->swizzle_g = view->swizzle_g;
   state->swizzle_b = view->swizzle_b;
   state->swizzle_a = view->swizzle_a;

   state->target = texture->target;
   /* power-of-two flags let the sampling code pick cheaper wrap math */
   state->pot_width = util_is_power_of_two(texture->width0);
   state->pot_height = util_is_power_of_two(texture->height0);
   state->pot_depth = util_is_power_of_two(texture->depth0);

   state->wrap_s = sampler->wrap_s;
   state->wrap_t = sampler->wrap_t;
   state->wrap_r = sampler->wrap_r;
   state->min_img_filter = sampler->min_img_filter;
   state->mag_img_filter = sampler->mag_img_filter;

   /* only keep a real mip filter if there are mip levels to choose from
    * and the lod range actually allows reaching them */
   if (view->u.tex.last_level && sampler->max_lod > 0.0f) {
      state->min_mip_filter = sampler->min_mip_filter;
   } else {
      state->min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   }

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
      if (sampler->lod_bias != 0.0f) {
         state->lod_bias_non_zero = 1;
      }

      /* If min_lod == max_lod we can greatly simplify mipmap selection.
       * This is a case that occurs during automatic mipmap generation.
       */
      if (sampler->min_lod == sampler->max_lod) {
         state->min_max_lod_equal = 1;
      } else {
         if (sampler->min_lod > 0.0f) {
            state->apply_min_lod = 1;
         }

         if (sampler->max_lod < (float)view->u.tex.last_level) {
            state->apply_max_lod = 1;
         }
      }
   }

   state->compare_mode = sampler->compare_mode;
   if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) {
      state->compare_func = sampler->compare_func;
   }

   state->normalized_coords = sampler->normalized_coords;

   /*
    * FIXME: Handle the remainder of pipe_sampler_view.
    */
}


/**
 * Generate code to compute coordinate gradient (rho).
 * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y
 *
 * The resulting rho is scalar per quad.
183 */ 184static LLVMValueRef 185lp_build_rho(struct lp_build_sample_context *bld, 186 unsigned unit, 187 const struct lp_derivatives *derivs) 188{ 189 struct gallivm_state *gallivm = bld->gallivm; 190 struct lp_build_context *int_size_bld = &bld->int_size_bld; 191 struct lp_build_context *float_size_bld = &bld->float_size_bld; 192 struct lp_build_context *float_bld = &bld->float_bld; 193 struct lp_build_context *coord_bld = &bld->coord_bld; 194 struct lp_build_context *perquadf_bld = &bld->perquadf_bld; 195 const LLVMValueRef *ddx_ddy = derivs->ddx_ddy; 196 const unsigned dims = bld->dims; 197 LLVMBuilderRef builder = bld->gallivm->builder; 198 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context); 199 LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0); 200 LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0); 201 LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0); 202 LLVMValueRef rho_vec; 203 LLVMValueRef int_size, float_size; 204 LLVMValueRef rho; 205 LLVMValueRef first_level, first_level_vec; 206 LLVMValueRef abs_ddx_ddy[2]; 207 unsigned length = coord_bld->type.length; 208 unsigned num_quads = length / 4; 209 unsigned i; 210 LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context)); 211 LLVMValueRef rho_xvec, rho_yvec; 212 213 abs_ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]); 214 if (dims > 2) { 215 abs_ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]); 216 } 217 else { 218 abs_ddx_ddy[1] = NULL; 219 } 220 221 if (dims == 1) { 222 static const unsigned char swizzle1[] = { 223 0, LP_BLD_SWIZZLE_DONTCARE, 224 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE 225 }; 226 static const unsigned char swizzle2[] = { 227 1, LP_BLD_SWIZZLE_DONTCARE, 228 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE 229 }; 230 rho_xvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle1); 231 rho_yvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle2); 232 } 233 else if (dims == 2) { 234 static const unsigned char swizzle1[] = { 235 0, 2, 236 
LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE 237 }; 238 static const unsigned char swizzle2[] = { 239 1, 3, 240 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE 241 }; 242 rho_xvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle1); 243 rho_yvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle2); 244 } 245 else { 246 LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH]; 247 LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH]; 248 assert(dims == 3); 249 for (i = 0; i < num_quads; i++) { 250 shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i); 251 shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2); 252 shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i); 253 shuffles1[4*i + 3] = i32undef; 254 shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1); 255 shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3); 256 shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 1); 257 shuffles2[4*i + 3] = i32undef; 258 } 259 rho_xvec = LLVMBuildShuffleVector(builder, abs_ddx_ddy[0], abs_ddx_ddy[1], 260 LLVMConstVector(shuffles1, length), ""); 261 rho_yvec = LLVMBuildShuffleVector(builder, abs_ddx_ddy[0], abs_ddx_ddy[1], 262 LLVMConstVector(shuffles2, length), ""); 263 } 264 265 rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec); 266 267 first_level = bld->dynamic_state->first_level(bld->dynamic_state, 268 bld->gallivm, unit); 269 first_level_vec = lp_build_broadcast_scalar(&bld->int_size_bld, first_level); 270 int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec); 271 float_size = lp_build_int_to_float(float_size_bld, int_size); 272 273 if (bld->coord_type.length > 4) { 274 /* expand size to each quad */ 275 if (dims > 1) { 276 /* could use some broadcast_vector helper for this? 
*/ 277 int num_quads = bld->coord_type.length / 4; 278 LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4]; 279 for (i = 0; i < num_quads; i++) { 280 src[i] = float_size; 281 } 282 float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads); 283 } 284 else { 285 float_size = lp_build_broadcast_scalar(coord_bld, float_size); 286 } 287 rho_vec = lp_build_mul(coord_bld, rho_vec, float_size); 288 289 if (dims <= 1) { 290 rho = rho_vec; 291 } 292 else { 293 if (dims >= 2) { 294 static const unsigned char swizzle1[] = { 295 0, LP_BLD_SWIZZLE_DONTCARE, 296 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE 297 }; 298 static const unsigned char swizzle2[] = { 299 1, LP_BLD_SWIZZLE_DONTCARE, 300 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE 301 }; 302 LLVMValueRef rho_s, rho_t, rho_r; 303 304 rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1); 305 rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2); 306 307 rho = lp_build_max(coord_bld, rho_s, rho_t); 308 309 if (dims >= 3) { 310 static const unsigned char swizzle3[] = { 311 2, LP_BLD_SWIZZLE_DONTCARE, 312 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE 313 }; 314 rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle3); 315 rho = lp_build_max(coord_bld, rho, rho_r); 316 } 317 } 318 } 319 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type, 320 perquadf_bld->type, rho); 321 } 322 else { 323 if (dims <= 1) { 324 rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, ""); 325 } 326 rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size); 327 328 if (dims <= 1) { 329 rho = rho_vec; 330 } 331 else { 332 if (dims >= 2) { 333 LLVMValueRef rho_s, rho_t, rho_r; 334 335 rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, ""); 336 rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, ""); 337 338 rho = lp_build_max(float_bld, rho_s, rho_t); 339 340 if (dims >= 3) { 341 rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, ""); 342 rho = lp_build_max(float_bld, rho, rho_r); 
343 } 344 } 345 } 346 } 347 348 return rho; 349} 350 351 352/* 353 * Bri-linear lod computation 354 * 355 * Use a piece-wise linear approximation of log2 such that: 356 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc. 357 * - linear approximation for values in the neighborhood of 0.5, 1.5., etc, 358 * with the steepness specified in 'factor' 359 * - exact result for 0.5, 1.5, etc. 360 * 361 * 362 * 1.0 - /----* 363 * / 364 * / 365 * / 366 * 0.5 - * 367 * / 368 * / 369 * / 370 * 0.0 - *----/ 371 * 372 * | | 373 * 2^0 2^1 374 * 375 * This is a technique also commonly used in hardware: 376 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html 377 * 378 * TODO: For correctness, this should only be applied when texture is known to 379 * have regular mipmaps, i.e., mipmaps derived from the base level. 380 * 381 * TODO: This could be done in fixed point, where applicable. 382 */ 383static void 384lp_build_brilinear_lod(struct lp_build_context *bld, 385 LLVMValueRef lod, 386 double factor, 387 LLVMValueRef *out_lod_ipart, 388 LLVMValueRef *out_lod_fpart) 389{ 390 LLVMValueRef lod_fpart; 391 double pre_offset = (factor - 0.5)/factor - 0.5; 392 double post_offset = 1 - factor; 393 394 if (0) { 395 lp_build_printf(bld->gallivm, "lod = %f\n", lod); 396 } 397 398 lod = lp_build_add(bld, lod, 399 lp_build_const_vec(bld->gallivm, bld->type, pre_offset)); 400 401 lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart); 402 403 lod_fpart = lp_build_mul(bld, lod_fpart, 404 lp_build_const_vec(bld->gallivm, bld->type, factor)); 405 406 lod_fpart = lp_build_add(bld, lod_fpart, 407 lp_build_const_vec(bld->gallivm, bld->type, post_offset)); 408 409 /* 410 * It's not necessary to clamp lod_fpart since: 411 * - the above expression will never produce numbers greater than one. 
412 * - the mip filtering branch is only taken if lod_fpart is positive 413 */ 414 415 *out_lod_fpart = lod_fpart; 416 417 if (0) { 418 lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart); 419 lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart); 420 } 421} 422 423 424/* 425 * Combined log2 and brilinear lod computation. 426 * 427 * It's in all identical to calling lp_build_fast_log2() and 428 * lp_build_brilinear_lod() above, but by combining we can compute the integer 429 * and fractional part independently. 430 */ 431static void 432lp_build_brilinear_rho(struct lp_build_context *bld, 433 LLVMValueRef rho, 434 double factor, 435 LLVMValueRef *out_lod_ipart, 436 LLVMValueRef *out_lod_fpart) 437{ 438 LLVMValueRef lod_ipart; 439 LLVMValueRef lod_fpart; 440 441 const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor); 442 const double post_offset = 1 - 2*factor; 443 444 assert(bld->type.floating); 445 446 assert(lp_check_value(bld->type, rho)); 447 448 /* 449 * The pre factor will make the intersections with the exact powers of two 450 * happen precisely where we want then to be, which means that the integer 451 * part will not need any post adjustments. 452 */ 453 rho = lp_build_mul(bld, rho, 454 lp_build_const_vec(bld->gallivm, bld->type, pre_factor)); 455 456 /* ipart = ifloor(log2(rho)) */ 457 lod_ipart = lp_build_extract_exponent(bld, rho, 0); 458 459 /* fpart = rho / 2**ipart */ 460 lod_fpart = lp_build_extract_mantissa(bld, rho); 461 462 lod_fpart = lp_build_mul(bld, lod_fpart, 463 lp_build_const_vec(bld->gallivm, bld->type, factor)); 464 465 lod_fpart = lp_build_add(bld, lod_fpart, 466 lp_build_const_vec(bld->gallivm, bld->type, post_offset)); 467 468 /* 469 * Like lp_build_brilinear_lod, it's not necessary to clamp lod_fpart since: 470 * - the above expression will never produce numbers greater than one. 
471 * - the mip filtering branch is only taken if lod_fpart is positive 472 */ 473 474 *out_lod_ipart = lod_ipart; 475 *out_lod_fpart = lod_fpart; 476} 477 478 479/** 480 * Generate code to compute texture level of detail (lambda). 481 * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y 482 * \param lod_bias optional float vector with the shader lod bias 483 * \param explicit_lod optional float vector with the explicit lod 484 * \param width scalar int texture width 485 * \param height scalar int texture height 486 * \param depth scalar int texture depth 487 * 488 * The resulting lod is scalar per quad, so only the first value per quad 489 * passed in from lod_bias, explicit_lod is used. 490 */ 491void 492lp_build_lod_selector(struct lp_build_sample_context *bld, 493 unsigned unit, 494 const struct lp_derivatives *derivs, 495 LLVMValueRef lod_bias, /* optional */ 496 LLVMValueRef explicit_lod, /* optional */ 497 unsigned mip_filter, 498 LLVMValueRef *out_lod_ipart, 499 LLVMValueRef *out_lod_fpart) 500 501{ 502 LLVMBuilderRef builder = bld->gallivm->builder; 503 struct lp_build_context *perquadf_bld = &bld->perquadf_bld; 504 LLVMValueRef lod; 505 506 *out_lod_ipart = bld->perquadi_bld.zero; 507 *out_lod_fpart = perquadf_bld->zero; 508 509 if (bld->static_state->min_max_lod_equal) { 510 /* User is forcing sampling from a particular mipmap level. 511 * This is hit during mipmap generation. 
512 */ 513 LLVMValueRef min_lod = 514 bld->dynamic_state->min_lod(bld->dynamic_state, bld->gallivm, unit); 515 516 lod = lp_build_broadcast_scalar(perquadf_bld, min_lod); 517 } 518 else { 519 if (explicit_lod) { 520 lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type, 521 perquadf_bld->type, explicit_lod); 522 } 523 else { 524 LLVMValueRef rho; 525 526 rho = lp_build_rho(bld, unit, derivs); 527 528 /* 529 * Compute lod = log2(rho) 530 */ 531 532 if (!lod_bias && 533 !bld->static_state->lod_bias_non_zero && 534 !bld->static_state->apply_max_lod && 535 !bld->static_state->apply_min_lod) { 536 /* 537 * Special case when there are no post-log2 adjustments, which 538 * saves instructions but keeping the integer and fractional lod 539 * computations separate from the start. 540 */ 541 542 if (mip_filter == PIPE_TEX_MIPFILTER_NONE || 543 mip_filter == PIPE_TEX_MIPFILTER_NEAREST) { 544 *out_lod_ipart = lp_build_ilog2(perquadf_bld, rho); 545 *out_lod_fpart = perquadf_bld->zero; 546 return; 547 } 548 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR && 549 !(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) { 550 lp_build_brilinear_rho(perquadf_bld, rho, BRILINEAR_FACTOR, 551 out_lod_ipart, out_lod_fpart); 552 return; 553 } 554 } 555 556 if (0) { 557 lod = lp_build_log2(perquadf_bld, rho); 558 } 559 else { 560 lod = lp_build_fast_log2(perquadf_bld, rho); 561 } 562 563 /* add shader lod bias */ 564 if (lod_bias) { 565 lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type, 566 perquadf_bld->type, lod_bias); 567 lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias"); 568 } 569 } 570 571 /* add sampler lod bias */ 572 if (bld->static_state->lod_bias_non_zero) { 573 LLVMValueRef sampler_lod_bias = 574 bld->dynamic_state->lod_bias(bld->dynamic_state, bld->gallivm, unit); 575 sampler_lod_bias = lp_build_broadcast_scalar(perquadf_bld, 576 sampler_lod_bias); 577 lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias"); 578 } 579 580 /* clamp 
lod */ 581 if (bld->static_state->apply_max_lod) { 582 LLVMValueRef max_lod = 583 bld->dynamic_state->max_lod(bld->dynamic_state, bld->gallivm, unit); 584 max_lod = lp_build_broadcast_scalar(perquadf_bld, max_lod); 585 586 lod = lp_build_min(perquadf_bld, lod, max_lod); 587 } 588 if (bld->static_state->apply_min_lod) { 589 LLVMValueRef min_lod = 590 bld->dynamic_state->min_lod(bld->dynamic_state, bld->gallivm, unit); 591 min_lod = lp_build_broadcast_scalar(perquadf_bld, min_lod); 592 593 lod = lp_build_max(perquadf_bld, lod, min_lod); 594 } 595 } 596 597 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) { 598 if (!(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) { 599 lp_build_brilinear_lod(perquadf_bld, lod, BRILINEAR_FACTOR, 600 out_lod_ipart, out_lod_fpart); 601 } 602 else { 603 lp_build_ifloor_fract(perquadf_bld, lod, out_lod_ipart, out_lod_fpart); 604 } 605 606 lp_build_name(*out_lod_fpart, "lod_fpart"); 607 } 608 else { 609 *out_lod_ipart = lp_build_iround(perquadf_bld, lod); 610 } 611 612 lp_build_name(*out_lod_ipart, "lod_ipart"); 613 614 return; 615} 616 617 618/** 619 * For PIPE_TEX_MIPFILTER_NEAREST, convert float LOD to integer 620 * mipmap level index. 621 * Note: this is all scalar per quad code. 
622 * \param lod_ipart int texture level of detail 623 * \param level_out returns integer 624 */ 625void 626lp_build_nearest_mip_level(struct lp_build_sample_context *bld, 627 unsigned unit, 628 LLVMValueRef lod_ipart, 629 LLVMValueRef *level_out) 630{ 631 struct lp_build_context *perquadi_bld = &bld->perquadi_bld; 632 LLVMValueRef first_level, last_level, level; 633 634 first_level = bld->dynamic_state->first_level(bld->dynamic_state, 635 bld->gallivm, unit); 636 last_level = bld->dynamic_state->last_level(bld->dynamic_state, 637 bld->gallivm, unit); 638 first_level = lp_build_broadcast_scalar(perquadi_bld, first_level); 639 last_level = lp_build_broadcast_scalar(perquadi_bld, last_level); 640 641 level = lp_build_add(perquadi_bld, lod_ipart, first_level); 642 643 /* clamp level to legal range of levels */ 644 *level_out = lp_build_clamp(perquadi_bld, level, first_level, last_level); 645} 646 647 648/** 649 * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad int LOD(s) to two (per-quad) 650 * (adjacent) mipmap level indexes, and fix up float lod part accordingly. 651 * Later, we'll sample from those two mipmap levels and interpolate between them. 
 */
void
lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
                           unsigned unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *lod_fpart_inout,
                           LLVMValueRef *level0_out,
                           LLVMValueRef *level1_out)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_build_context *perquadi_bld = &bld->perquadi_bld;
   struct lp_build_context *perquadf_bld = &bld->perquadf_bld;
   LLVMValueRef first_level, last_level;
   LLVMValueRef clamp_min;
   LLVMValueRef clamp_max;

   /* per-quad copies of the sampler's level range */
   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, unit);
   last_level = bld->dynamic_state->last_level(bld->dynamic_state,
                                               bld->gallivm, unit);
   first_level = lp_build_broadcast_scalar(perquadi_bld, first_level);
   last_level = lp_build_broadcast_scalar(perquadi_bld, last_level);

   /* the two adjacent levels to interpolate between */
   *level0_out = lp_build_add(perquadi_bld, lod_ipart, first_level);
   *level1_out = lp_build_add(perquadi_bld, *level0_out, perquadi_bld->one);

   /*
    * Clamp both *level0_out and *level1_out to [first_level, last_level], with
    * the minimum number of comparisons, and zeroing lod_fpart in the extreme
    * ends in the process.
    */

   /*
    * This code (vector select in particular) only works with llvm 3.1
    * (if there's more than one quad, with x86 backend). Might consider
    * converting to our lp_bld_logic helpers.
    */
#if HAVE_LLVM < 0x0301
   assert(perquadi_bld->type.length == 1);
#endif

   /* *level0_out < first_level */
   clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
                             *level0_out, first_level,
                             "clamp_lod_to_first");

   *level0_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level1_out, "");

   /* both levels clamped to first: no interpolation weight left */
   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
                                      perquadf_bld->zero, *lod_fpart_inout, "");

   /* *level0_out >= last_level */
   clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
                             *level0_out, last_level,
                             "clamp_lod_to_last");

   *level0_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level1_out, "");

   /* both levels clamped to last: interpolation weight zeroed as well */
   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
                                      perquadf_bld->zero, *lod_fpart_inout, "");

   lp_build_name(*level0_out, "sampler%u_miplevel0", unit);
   lp_build_name(*level1_out, "sampler%u_miplevel1", unit);
   lp_build_name(*lod_fpart_inout, "sampler%u_mipweight", unit);
}


/**
 * Return pointer to a single mipmap level.
 * \param data_array  array of pointers to mipmap levels
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
                          LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], data_ptr;

   /* data_ptr = load(&bld->data_array[0][level]) */
   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   indexes[1] = level;
   data_ptr = LLVMBuildGEP(builder, bld->data_array, indexes, 2, "");
   data_ptr = LLVMBuildLoad(builder, data_ptr, "");
   return data_ptr;
}


/**
 * Codegen equivalent for u_minify().
749 * Return max(1, base_size >> level); 750 */ 751LLVMValueRef 752lp_build_minify(struct lp_build_context *bld, 753 LLVMValueRef base_size, 754 LLVMValueRef level) 755{ 756 LLVMBuilderRef builder = bld->gallivm->builder; 757 assert(lp_check_value(bld->type, base_size)); 758 assert(lp_check_value(bld->type, level)); 759 760 if (level == bld->zero) { 761 /* if we're using mipmap level zero, no minification is needed */ 762 return base_size; 763 } 764 else { 765 LLVMValueRef size = 766 LLVMBuildLShr(builder, base_size, level, "minify"); 767 assert(bld->type.sign); 768 size = lp_build_max(bld, size, bld->one); 769 return size; 770 } 771} 772 773 774/** 775 * Dereference stride_array[mipmap_level] array to get a stride. 776 * Return stride as a vector. 777 */ 778static LLVMValueRef 779lp_build_get_level_stride_vec(struct lp_build_sample_context *bld, 780 LLVMValueRef stride_array, LLVMValueRef level) 781{ 782 LLVMBuilderRef builder = bld->gallivm->builder; 783 LLVMValueRef indexes[2], stride; 784 indexes[0] = lp_build_const_int32(bld->gallivm, 0); 785 indexes[1] = level; 786 stride = LLVMBuildGEP(builder, stride_array, indexes, 2, ""); 787 stride = LLVMBuildLoad(builder, stride, ""); 788 stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride); 789 return stride; 790} 791 792 793/** 794 * When sampling a mipmap, we need to compute the width, height, depth 795 * of the source levels from the level indexes. This helper function 796 * does that. 
797 */ 798void 799lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld, 800 LLVMValueRef ilevel, 801 LLVMValueRef *out_size, 802 LLVMValueRef *row_stride_vec, 803 LLVMValueRef *img_stride_vec) 804{ 805 const unsigned dims = bld->dims; 806 LLVMValueRef ilevel_vec; 807 808 ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel); 809 810 /* 811 * Compute width, height, depth at mipmap level 'ilevel' 812 */ 813 *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec); 814 815 if (dims >= 2) { 816 *row_stride_vec = lp_build_get_level_stride_vec(bld, 817 bld->row_stride_array, 818 ilevel); 819 if (dims == 3 || bld->static_state->target == PIPE_TEXTURE_CUBE) { 820 *img_stride_vec = lp_build_get_level_stride_vec(bld, 821 bld->img_stride_array, 822 ilevel); 823 } 824 } 825} 826 827 828/** 829 * Extract and broadcast texture size. 830 * 831 * @param size_type type of the texture size vector (either 832 * bld->int_size_type or bld->float_size_type) 833 * @param coord_type type of the texture size vector (either 834 * bld->int_coord_type or bld->coord_type) 835 * @param size vector with the texture size (width, height, depth) 836 */ 837void 838lp_build_extract_image_sizes(struct lp_build_sample_context *bld, 839 struct lp_type size_type, 840 struct lp_type coord_type, 841 LLVMValueRef size, 842 LLVMValueRef *out_width, 843 LLVMValueRef *out_height, 844 LLVMValueRef *out_depth) 845{ 846 const unsigned dims = bld->dims; 847 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context); 848 849 *out_width = lp_build_extract_broadcast(bld->gallivm, 850 size_type, 851 coord_type, 852 size, 853 LLVMConstInt(i32t, 0, 0)); 854 if (dims >= 2) { 855 *out_height = lp_build_extract_broadcast(bld->gallivm, 856 size_type, 857 coord_type, 858 size, 859 LLVMConstInt(i32t, 1, 0)); 860 if (dims == 3) { 861 *out_depth = lp_build_extract_broadcast(bld->gallivm, 862 size_type, 863 coord_type, 864 size, 865 LLVMConstInt(i32t, 2, 0)); 866 } 867 } 868} 869 
870 871/** 872 * Unnormalize coords. 873 * 874 * @param flt_size vector with the integer texture size (width, height, depth) 875 */ 876void 877lp_build_unnormalized_coords(struct lp_build_sample_context *bld, 878 LLVMValueRef flt_size, 879 LLVMValueRef *s, 880 LLVMValueRef *t, 881 LLVMValueRef *r) 882{ 883 const unsigned dims = bld->dims; 884 LLVMValueRef width; 885 LLVMValueRef height; 886 LLVMValueRef depth; 887 888 lp_build_extract_image_sizes(bld, 889 bld->float_size_type, 890 bld->coord_type, 891 flt_size, 892 &width, 893 &height, 894 &depth); 895 896 /* s = s * width, t = t * height */ 897 *s = lp_build_mul(&bld->coord_bld, *s, width); 898 if (dims >= 2) { 899 *t = lp_build_mul(&bld->coord_bld, *t, height); 900 if (dims >= 3) { 901 *r = lp_build_mul(&bld->coord_bld, *r, depth); 902 } 903 } 904} 905 906 907/** Helper used by lp_build_cube_lookup() */ 908static LLVMValueRef 909lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord) 910{ 911 /* ima = +0.5 / abs(coord); */ 912 LLVMValueRef posHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5); 913 LLVMValueRef absCoord = lp_build_abs(coord_bld, coord); 914 LLVMValueRef ima = lp_build_div(coord_bld, posHalf, absCoord); 915 return ima; 916} 917 918/** Helper used by lp_build_cube_lookup() */ 919static LLVMValueRef 920lp_build_cube_imaneg(struct lp_build_context *coord_bld, LLVMValueRef coord) 921{ 922 /* ima = -0.5 / abs(coord); */ 923 LLVMValueRef negHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, -0.5); 924 LLVMValueRef absCoord = lp_build_abs(coord_bld, coord); 925 LLVMValueRef ima = lp_build_div(coord_bld, negHalf, absCoord); 926 return ima; 927} 928 929/** 930 * Helper used by lp_build_cube_lookup() 931 * FIXME: the sign here can also be 0. 932 * Arithmetically this could definitely make a difference. Either 933 * fix the comment or use other (simpler) sign function, not sure 934 * which one it should be. 
935 * \param sign scalar +1 or -1 936 * \param coord float vector 937 * \param ima float vector 938 */ 939static LLVMValueRef 940lp_build_cube_coord(struct lp_build_context *coord_bld, 941 LLVMValueRef sign, int negate_coord, 942 LLVMValueRef coord, LLVMValueRef ima) 943{ 944 /* return negate(coord) * ima * sign + 0.5; */ 945 LLVMValueRef half = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5); 946 LLVMValueRef res; 947 948 assert(negate_coord == +1 || negate_coord == -1); 949 950 if (negate_coord == -1) { 951 coord = lp_build_negate(coord_bld, coord); 952 } 953 954 res = lp_build_mul(coord_bld, coord, ima); 955 if (sign) { 956 sign = lp_build_broadcast_scalar(coord_bld, sign); 957 res = lp_build_mul(coord_bld, res, sign); 958 } 959 res = lp_build_add(coord_bld, res, half); 960 961 return res; 962} 963 964 965/** Helper used by lp_build_cube_lookup() 966 * Return (major_coord >= 0) ? pos_face : neg_face; 967 */ 968static LLVMValueRef 969lp_build_cube_face(struct lp_build_sample_context *bld, 970 LLVMValueRef major_coord, 971 unsigned pos_face, unsigned neg_face) 972{ 973 struct gallivm_state *gallivm = bld->gallivm; 974 LLVMBuilderRef builder = gallivm->builder; 975 LLVMValueRef cmp = LLVMBuildFCmp(builder, LLVMRealUGE, 976 major_coord, 977 bld->float_bld.zero, ""); 978 LLVMValueRef pos = lp_build_const_int32(gallivm, pos_face); 979 LLVMValueRef neg = lp_build_const_int32(gallivm, neg_face); 980 LLVMValueRef res = LLVMBuildSelect(builder, cmp, pos, neg, ""); 981 return res; 982} 983 984 985 986/** 987 * Generate code to do cube face selection and compute per-face texcoords. 
 */
void
lp_build_cube_lookup(struct lp_build_sample_context *bld,
                     LLVMValueRef s,
                     LLVMValueRef t,
                     LLVMValueRef r,
                     LLVMValueRef *face,
                     LLVMValueRef *face_s,
                     LLVMValueRef *face_t)
{
   struct lp_build_context *coord_bld = &bld->coord_bld;
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct gallivm_state *gallivm = bld->gallivm;
   LLVMValueRef rx, ry, rz;
   LLVMValueRef tmp[4], rxyz, arxyz;

   /*
    * Use the average of the four pixel's texcoords to choose the face.
    * Slight simplification just calculate the sum, skip scaling.
    */
   tmp[0] = s;
   tmp[1] = t;
   tmp[2] = r;
   rxyz = lp_build_hadd_partial4(&bld->coord_bld, tmp, 3);
   arxyz = lp_build_abs(&bld->coord_bld, rxyz);

   if (coord_bld->type.length > 4) {
      /*
       * Wide-vector (e.g. AVX, 8-wide) path: handle all quads branchlessly
       * with integer bit tricks and selects instead of scalar control flow.
       */
      struct lp_build_context *cint_bld = &bld->int_coord_bld;
      struct lp_type intctype = cint_bld->type;
      LLVMValueRef signrxs, signrys, signrzs, signrxyz, sign;
      LLVMValueRef arxs, arys, arzs;
      LLVMValueRef arx_ge_ary, maxarxsarys, arz_ge_arx_ary;
      LLVMValueRef snewx, tnewx, snewy, tnewy, snewz, tnewz;
      LLVMValueRef ryneg, rzneg;
      LLVMValueRef ma, ima;
      LLVMValueRef posHalf = lp_build_const_vec(gallivm, coord_bld->type, 0.5);
      /* mask selecting only the IEEE float sign bit of each lane */
      LLVMValueRef signmask = lp_build_const_int_vec(gallivm, intctype,
                                                     1 << (intctype.width - 1));
      /* shift count moving the sign bit down to bit 0 */
      LLVMValueRef signshift = lp_build_const_int_vec(gallivm, intctype,
                                                      intctype.width - 1);
      LLVMValueRef facex = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_X);
      LLVMValueRef facey = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Y);
      LLVMValueRef facez = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Z);

      /* the "+1 for neg face" trick below relies on this enum layout */
      assert(PIPE_TEX_FACE_NEG_X == PIPE_TEX_FACE_POS_X + 1);
      assert(PIPE_TEX_FACE_NEG_Y == PIPE_TEX_FACE_POS_Y + 1);
      assert(PIPE_TEX_FACE_NEG_Z == PIPE_TEX_FACE_POS_Z + 1);

      /* reinterpret the float coords as ints so sign manipulation is cheap */
      rx = LLVMBuildBitCast(builder, s,
                            lp_build_vec_type(gallivm, intctype), "");
      ry = LLVMBuildBitCast(builder, t, lp_build_vec_type(gallivm, intctype), "");
      rz = LLVMBuildBitCast(builder, r, lp_build_vec_type(gallivm, intctype), "");
      /* negation of a float == flipping its sign bit */
      ryneg = LLVMBuildXor(builder, ry, signmask, "");
      rzneg = LLVMBuildXor(builder, rz, signmask, "");

      /* the sign bit comes from the averaged vector (per quad),
       * as does the decision which face to use */
      signrxyz = LLVMBuildBitCast(builder, rxyz, lp_build_vec_type(gallivm, intctype), "");
      signrxyz = LLVMBuildAnd(builder, signrxyz, signmask, "");

      /* broadcast each per-quad |x|, |y|, |z| across its quad's lanes */
      arxs = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 0);
      arys = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 1);
      arzs = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 2);

      /*
       * select x if x >= y else select y
       * select previous result if y >= max(x,y) else select z
       */
      arx_ge_ary = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, arxs, arys);
      maxarxsarys = lp_build_max(coord_bld, arxs, arys);
      arz_ge_arx_ary = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, maxarxsarys, arzs);

      /*
       * compute all possible new s/t coords, implemented as sign-bit xors:
       * snewx = signrx * -rz;
       * tnewx = -ry;
       * snewy = rx;
       * tnewy = signry * rz;
       * snewz = signrz * rx;
       * tnewz = -ry;
       */
      signrxs = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 0);
      snewx = LLVMBuildXor(builder, signrxs, rzneg, "");
      tnewx = ryneg;

      signrys = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 1);
      snewy = rx;
      tnewy = LLVMBuildXor(builder, signrys, rz, "");

      signrzs = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 2);
      snewz = LLVMBuildXor(builder, signrzs, rx, "");
      tnewz = ryneg;

      /* XXX on x86 unclear if we should cast the values back to float
       * or not - on some cpus (nehalem) pblendvb has twice the throughput
       * of blendvps though on others there just might be domain
       * transition penalties when using it (this depends on what llvm
       * will chose for the bit ops above so there appears no "right way",
       * but given the boatload of selects let's just use the int type).
       *
       * Unfortunately we also need the sign bit of the summed coords.
       */
      /* first stage: x face vs. y face */
      *face_s = lp_build_select(cint_bld, arx_ge_ary, snewx, snewy);
      *face_t = lp_build_select(cint_bld, arx_ge_ary, tnewx, tnewy);
      ma = lp_build_select(coord_bld, arx_ge_ary, s, t);
      *face = lp_build_select(cint_bld, arx_ge_ary, facex, facey);
      sign = lp_build_select(cint_bld, arx_ge_ary, signrxs, signrys);

      /* second stage: winner of x/y vs. z face */
      *face_s = lp_build_select(cint_bld, arz_ge_arx_ary, *face_s, snewz);
      *face_t = lp_build_select(cint_bld, arz_ge_arx_ary, *face_t, tnewz);
      ma = lp_build_select(coord_bld, arz_ge_arx_ary, ma, r);
      *face = lp_build_select(cint_bld, arz_ge_arx_ary, *face, facez);
      sign = lp_build_select(cint_bld, arz_ge_arx_ary, sign, signrzs);

      /* back to float for the final scale/bias arithmetic */
      *face_s = LLVMBuildBitCast(builder, *face_s,
                                 lp_build_vec_type(gallivm, coord_bld->type), "");
      *face_t = LLVMBuildBitCast(builder, *face_t,
                                 lp_build_vec_type(gallivm, coord_bld->type), "");

      /* add +1 for neg face */
      /* XXX with AVX probably want to use another select here -
       * as long as we ensure vblendvps gets used we can actually
       * skip the comparison and just use sign as a "mask" directly.
       */
      sign = LLVMBuildLShr(builder, sign, signshift, "");
      *face = LLVMBuildOr(builder, *face, sign, "face");

      /* ima = 0.5 / max(|x|,|y|,|z|); then face coords = coord*ima + 0.5 */
      ima = lp_build_cube_imapos(coord_bld, ma);

      *face_s = lp_build_mul(coord_bld, *face_s, ima);
      *face_s = lp_build_add(coord_bld, *face_s, posHalf);
      *face_t = lp_build_mul(coord_bld, *face_t, ima);
      *face_t = lp_build_add(coord_bld, *face_t, posHalf);
   }

   else {
      /*
       * Narrow-vector (single quad, 4-wide) path: pick the major axis with
       * real branches (lp_build_if) and store the per-face results through
       * allocas so the results merge at the endif.
       */
      struct lp_build_if_state if_ctx;
      LLVMValueRef face_s_var;
      LLVMValueRef face_t_var;
      LLVMValueRef face_var;
      LLVMValueRef arx_ge_ary_arz, ary_ge_arx_arz;
      LLVMValueRef shuffles[4];
      LLVMValueRef arxy_ge_aryx, arxy_ge_arzz, arxy_ge_arxy_arzz;
      LLVMValueRef arxyxy, aryxzz, arxyxy_ge_aryxzz;
      struct lp_build_context *float_bld = &bld->float_bld;

      assert(bld->coord_bld.type.length == 4);

      /* compare (|x|,|y|,|x|,|y|) >= (|y|,|x|,|z|,|z|) in one vector cmp */
      shuffles[0] = lp_build_const_int32(gallivm, 0);
      shuffles[1] = lp_build_const_int32(gallivm, 1);
      shuffles[2] = lp_build_const_int32(gallivm, 0);
      shuffles[3] = lp_build_const_int32(gallivm, 1);
      arxyxy = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
      shuffles[0] = lp_build_const_int32(gallivm, 1);
      shuffles[1] = lp_build_const_int32(gallivm, 0);
      shuffles[2] = lp_build_const_int32(gallivm, 2);
      shuffles[3] = lp_build_const_int32(gallivm, 2);
      aryxzz = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
      arxyxy_ge_aryxzz = lp_build_cmp(&bld->coord_bld, PIPE_FUNC_GEQUAL, arxyxy, aryxzz);

      /* combine: elem0 = |x|>=|y| && |x|>=|z|, elem1 = |y|>=|x| && |y|>=|z| */
      shuffles[0] = lp_build_const_int32(gallivm, 0);
      shuffles[1] = lp_build_const_int32(gallivm, 1);
      arxy_ge_aryx = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
                                            LLVMConstVector(shuffles, 2), "");
      shuffles[0] = lp_build_const_int32(gallivm, 2);
      shuffles[1] = lp_build_const_int32(gallivm, 3);
      arxy_ge_arzz = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
                                            LLVMConstVector(shuffles, 2), "");
      arxy_ge_arxy_arzz = LLVMBuildAnd(builder, arxy_ge_aryx, arxy_ge_arzz, "");

      /* extract the two scalar conditions driving the branch tree below */
      arx_ge_ary_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
                                               lp_build_const_int32(gallivm, 0), "");
      arx_ge_ary_arz = LLVMBuildICmp(builder, LLVMIntNE, arx_ge_ary_arz,
                                     lp_build_const_int32(gallivm, 0), "");
      ary_ge_arx_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
                                               lp_build_const_int32(gallivm, 1), "");
      ary_ge_arx_arz = LLVMBuildICmp(builder, LLVMIntNE, ary_ge_arx_arz,
                                     lp_build_const_int32(gallivm, 0), "");
      face_s_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_s_var");
      face_t_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_t_var");
      face_var = lp_build_alloca(gallivm, bld->int_bld.vec_type, "face_var");

      lp_build_if(&if_ctx, gallivm, arx_ge_ary_arz);
      {
         /* +/- X face */
         LLVMValueRef sign, ima;
         rx = LLVMBuildExtractElement(builder, rxyz,
                                      lp_build_const_int32(gallivm, 0), "");
         sign = lp_build_sgn(float_bld, rx);
         ima = lp_build_cube_imaneg(coord_bld, s);
         *face_s = lp_build_cube_coord(coord_bld, sign, +1, r, ima);
         *face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
         *face = lp_build_cube_face(bld, rx,
                                    PIPE_TEX_FACE_POS_X,
                                    PIPE_TEX_FACE_NEG_X);
         LLVMBuildStore(builder, *face_s, face_s_var);
         LLVMBuildStore(builder, *face_t, face_t_var);
         LLVMBuildStore(builder, *face, face_var);
      }
      lp_build_else(&if_ctx);
      {
         struct lp_build_if_state if_ctx2;

         lp_build_if(&if_ctx2, gallivm, ary_ge_arx_arz);
         {
            LLVMValueRef sign, ima;
            /* +/- Y face */
            ry = LLVMBuildExtractElement(builder, rxyz,
                                         lp_build_const_int32(gallivm, 1), "");
            sign = lp_build_sgn(float_bld, ry);
            ima = lp_build_cube_imaneg(coord_bld, t);
            *face_s = lp_build_cube_coord(coord_bld, NULL, -1, s, ima);
            *face_t = lp_build_cube_coord(coord_bld, sign, -1, r, ima);
            *face = lp_build_cube_face(bld, ry,
                                       PIPE_TEX_FACE_POS_Y,
                                       PIPE_TEX_FACE_NEG_Y);
            LLVMBuildStore(builder, *face_s, face_s_var);
            LLVMBuildStore(builder, *face_t, face_t_var);
            LLVMBuildStore(builder, *face, face_var);
         }
         lp_build_else(&if_ctx2);
         {
            /* +/- Z face */
            LLVMValueRef sign, ima;
            rz = LLVMBuildExtractElement(builder, rxyz,
                                         lp_build_const_int32(gallivm, 2), "");
            sign = lp_build_sgn(float_bld, rz);
            ima = lp_build_cube_imaneg(coord_bld, r);
            *face_s = lp_build_cube_coord(coord_bld, sign, -1, s, ima);
            *face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
            *face = lp_build_cube_face(bld, rz,
                                       PIPE_TEX_FACE_POS_Z,
                                       PIPE_TEX_FACE_NEG_Z);
            LLVMBuildStore(builder, *face_s, face_s_var);
            LLVMBuildStore(builder, *face_t, face_t_var);
            LLVMBuildStore(builder, *face, face_var);
         }
         lp_build_endif(&if_ctx2);
      }

      lp_build_endif(&if_ctx);

      /* reload the merged per-face results and broadcast the scalar face id */
      *face_s = LLVMBuildLoad(builder, face_s_var, "face_s");
      *face_t = LLVMBuildLoad(builder, face_t_var, "face_t");
      *face = LLVMBuildLoad(builder, face_var, "face");
      *face = lp_build_broadcast_scalar(&bld->int_coord_bld, *face);
   }
}


/**
 * Compute the partial offset of a pixel block along an arbitrary axis.
1240 * 1241 * @param coord coordinate in pixels 1242 * @param stride number of bytes between rows of successive pixel blocks 1243 * @param block_length number of pixels in a pixels block along the coordinate 1244 * axis 1245 * @param out_offset resulting relative offset of the pixel block in bytes 1246 * @param out_subcoord resulting sub-block pixel coordinate 1247 */ 1248void 1249lp_build_sample_partial_offset(struct lp_build_context *bld, 1250 unsigned block_length, 1251 LLVMValueRef coord, 1252 LLVMValueRef stride, 1253 LLVMValueRef *out_offset, 1254 LLVMValueRef *out_subcoord) 1255{ 1256 LLVMBuilderRef builder = bld->gallivm->builder; 1257 LLVMValueRef offset; 1258 LLVMValueRef subcoord; 1259 1260 if (block_length == 1) { 1261 subcoord = bld->zero; 1262 } 1263 else { 1264 /* 1265 * Pixel blocks have power of two dimensions. LLVM should convert the 1266 * rem/div to bit arithmetic. 1267 * TODO: Verify this. 1268 * It does indeed BUT it does transform it to scalar (and back) when doing so 1269 * (using roughly extract, shift/and, mov, unpack) (llvm 2.7). 1270 * The generated code looks seriously unfunny and is quite expensive. 
1271 */ 1272#if 0 1273 LLVMValueRef block_width = lp_build_const_int_vec(bld->type, block_length); 1274 subcoord = LLVMBuildURem(builder, coord, block_width, ""); 1275 coord = LLVMBuildUDiv(builder, coord, block_width, ""); 1276#else 1277 unsigned logbase2 = util_logbase2(block_length); 1278 LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2); 1279 LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1); 1280 subcoord = LLVMBuildAnd(builder, coord, block_mask, ""); 1281 coord = LLVMBuildLShr(builder, coord, block_shift, ""); 1282#endif 1283 } 1284 1285 offset = lp_build_mul(bld, coord, stride); 1286 1287 assert(out_offset); 1288 assert(out_subcoord); 1289 1290 *out_offset = offset; 1291 *out_subcoord = subcoord; 1292} 1293 1294 1295/** 1296 * Compute the offset of a pixel block. 1297 * 1298 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels. 1299 * 1300 * Returns the relative offset and i,j sub-block coordinates 1301 */ 1302void 1303lp_build_sample_offset(struct lp_build_context *bld, 1304 const struct util_format_description *format_desc, 1305 LLVMValueRef x, 1306 LLVMValueRef y, 1307 LLVMValueRef z, 1308 LLVMValueRef y_stride, 1309 LLVMValueRef z_stride, 1310 LLVMValueRef *out_offset, 1311 LLVMValueRef *out_i, 1312 LLVMValueRef *out_j) 1313{ 1314 LLVMValueRef x_stride; 1315 LLVMValueRef offset; 1316 1317 x_stride = lp_build_const_vec(bld->gallivm, bld->type, 1318 format_desc->block.bits/8); 1319 1320 lp_build_sample_partial_offset(bld, 1321 format_desc->block.width, 1322 x, x_stride, 1323 &offset, out_i); 1324 1325 if (y && y_stride) { 1326 LLVMValueRef y_offset; 1327 lp_build_sample_partial_offset(bld, 1328 format_desc->block.height, 1329 y, y_stride, 1330 &y_offset, out_j); 1331 offset = lp_build_add(bld, offset, y_offset); 1332 } 1333 else { 1334 *out_j = bld->zero; 1335 } 1336 1337 if (z && z_stride) { 1338 LLVMValueRef z_offset; 1339 LLVMValueRef k; 1340 
lp_build_sample_partial_offset(bld, 1341 1, /* pixel blocks are always 2D */ 1342 z, z_stride, 1343 &z_offset, &k); 1344 offset = lp_build_add(bld, offset, z_offset); 1345 } 1346 1347 *out_offset = offset; 1348} 1349