lp_bld_sample.c revision dfbb18bdb58fa04ddd4cbd495299a704599ee09e
1/************************************************************************** 2 * 3 * Copyright 2009 VMware, Inc. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28/** 29 * @file 30 * Texture sampling -- common code. 31 * 32 * @author Jose Fonseca <jfonseca@vmware.com> 33 */ 34 35#include "pipe/p_defines.h" 36#include "pipe/p_state.h" 37#include "util/u_format.h" 38#include "util/u_math.h" 39#include "lp_bld_arit.h" 40#include "lp_bld_const.h" 41#include "lp_bld_debug.h" 42#include "lp_bld_printf.h" 43#include "lp_bld_flow.h" 44#include "lp_bld_sample.h" 45#include "lp_bld_swizzle.h" 46#include "lp_bld_type.h" 47 48 49/* 50 * Bri-linear factor. Should be greater than one. 
51 */ 52#define BRILINEAR_FACTOR 2 53 54/** 55 * Does the given texture wrap mode allow sampling the texture border color? 56 * XXX maybe move this into gallium util code. 57 */ 58boolean 59lp_sampler_wrap_mode_uses_border_color(unsigned mode, 60 unsigned min_img_filter, 61 unsigned mag_img_filter) 62{ 63 switch (mode) { 64 case PIPE_TEX_WRAP_REPEAT: 65 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: 66 case PIPE_TEX_WRAP_MIRROR_REPEAT: 67 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: 68 return FALSE; 69 case PIPE_TEX_WRAP_CLAMP: 70 case PIPE_TEX_WRAP_MIRROR_CLAMP: 71 if (min_img_filter == PIPE_TEX_FILTER_NEAREST && 72 mag_img_filter == PIPE_TEX_FILTER_NEAREST) { 73 return FALSE; 74 } else { 75 return TRUE; 76 } 77 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: 78 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: 79 return TRUE; 80 default: 81 assert(0 && "unexpected wrap mode"); 82 return FALSE; 83 } 84} 85 86 87/** 88 * Initialize lp_sampler_static_state object with the gallium sampler 89 * and texture state. 90 * The former is considered to be static and the later dynamic. 91 */ 92void 93lp_sampler_static_state(struct lp_sampler_static_state *state, 94 const struct pipe_sampler_view *view, 95 const struct pipe_sampler_state *sampler) 96{ 97 const struct pipe_resource *texture = view->texture; 98 99 memset(state, 0, sizeof *state); 100 101 if(!texture) 102 return; 103 104 if(!sampler) 105 return; 106 107 /* 108 * We don't copy sampler state over unless it is actually enabled, to avoid 109 * spurious recompiles, as the sampler static state is part of the shader 110 * key. 111 * 112 * Ideally the state tracker or cso_cache module would make all state 113 * canonical, but until that happens it's better to be safe than sorry here. 114 * 115 * XXX: Actually there's much more than can be done here, especially 116 * regarding 1D/2D/3D/CUBE textures, wrap modes, etc. 
117 */ 118 119 state->format = view->format; 120 state->swizzle_r = view->swizzle_r; 121 state->swizzle_g = view->swizzle_g; 122 state->swizzle_b = view->swizzle_b; 123 state->swizzle_a = view->swizzle_a; 124 125 state->target = texture->target; 126 state->pot_width = util_is_power_of_two(texture->width0); 127 state->pot_height = util_is_power_of_two(texture->height0); 128 state->pot_depth = util_is_power_of_two(texture->depth0); 129 130 state->wrap_s = sampler->wrap_s; 131 state->wrap_t = sampler->wrap_t; 132 state->wrap_r = sampler->wrap_r; 133 state->min_img_filter = sampler->min_img_filter; 134 state->mag_img_filter = sampler->mag_img_filter; 135 136 if (view->u.tex.last_level && sampler->max_lod > 0.0f) { 137 state->min_mip_filter = sampler->min_mip_filter; 138 } else { 139 state->min_mip_filter = PIPE_TEX_MIPFILTER_NONE; 140 } 141 142 if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) { 143 if (sampler->lod_bias != 0.0f) { 144 state->lod_bias_non_zero = 1; 145 } 146 147 /* If min_lod == max_lod we can greatly simplify mipmap selection. 148 * This is a case that occurs during automatic mipmap generation. 149 */ 150 if (sampler->min_lod == sampler->max_lod) { 151 state->min_max_lod_equal = 1; 152 } else { 153 if (sampler->min_lod > 0.0f) { 154 state->apply_min_lod = 1; 155 } 156 157 if (sampler->max_lod < (float)view->u.tex.last_level) { 158 state->apply_max_lod = 1; 159 } 160 } 161 } 162 163 state->compare_mode = sampler->compare_mode; 164 if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) { 165 state->compare_func = sampler->compare_func; 166 } 167 168 state->normalized_coords = sampler->normalized_coords; 169 170 /* 171 * FIXME: Handle the remainder of pipe_sampler_view. 172 */ 173} 174 175 176/** 177 * Generate code to compute coordinate gradient (rho). 
 * \param ddx  partial derivatives of (s, t, r, q) with respect to X
 * \param ddy  partial derivatives of (s, t, r, q) with respect to Y
 *
 * XXX: The resulting rho is scalar, so we ignore all but the first element of
 * derivatives that are passed by the shader.
 */
static LLVMValueRef
lp_build_rho(struct lp_build_sample_context *bld,
             unsigned unit,
             const LLVMValueRef ddx[4],
             const LLVMValueRef ddy[4])
{
   struct lp_build_context *int_size_bld = &bld->int_size_bld;
   struct lp_build_context *float_size_bld = &bld->float_size_bld;
   struct lp_build_context *float_bld = &bld->float_bld;
   const unsigned dims = bld->dims;
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
   LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
   LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0);
   LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0);
   LLVMValueRef dsdx, dsdy, dtdx, dtdy, drdx, drdy;
   LLVMValueRef rho_x, rho_y;
   LLVMValueRef rho_vec;
   LLVMValueRef int_size, float_size;
   LLVMValueRef rho;
   LLVMValueRef first_level, first_level_vec;

   dsdx = ddx[0];
   dsdy = ddy[0];

   if (dims <= 1) {
      /* 1D: the derivative of s alone is enough; no vector needed. */
      rho_x = dsdx;
      rho_y = dsdy;
   }
   else {
      /* Pack the per-coordinate derivatives into small vectors so the
       * abs/max/mul below operate on all dims at once.
       */
      rho_x = float_size_bld->undef;
      rho_y = float_size_bld->undef;

      rho_x = LLVMBuildInsertElement(builder, rho_x, dsdx, index0, "");
      rho_y = LLVMBuildInsertElement(builder, rho_y, dsdy, index0, "");

      dtdx = ddx[1];
      dtdy = ddy[1];

      rho_x = LLVMBuildInsertElement(builder, rho_x, dtdx, index1, "");
      rho_y = LLVMBuildInsertElement(builder, rho_y, dtdy, index1, "");

      if (dims >= 3) {
         drdx = ddx[2];
         drdy = ddy[2];

         rho_x = LLVMBuildInsertElement(builder, rho_x, drdx, index2, "");
         rho_y = LLVMBuildInsertElement(builder, rho_y, drdy, index2, "");
      }
   }

   /* Per-coordinate rho: max(|d/dx|, |d/dy|). */
   rho_x = lp_build_abs(float_size_bld, rho_x);
   rho_y = lp_build_abs(float_size_bld, rho_y);

   rho_vec = lp_build_max(float_size_bld, rho_x, rho_y);

   /* Scale the normalized derivatives by the size of mip level
    * 'first_level', converting them to texel units.
    */
   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, unit);
   first_level_vec = lp_build_broadcast_scalar(&bld->int_size_bld, first_level);
   int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec);
   float_size = lp_build_int_to_float(float_size_bld, int_size);

   rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size);

   if (dims <= 1) {
      rho = rho_vec;
   }
   else {
      /* Reduce the per-coordinate rho vector to a single scalar maximum. */
      if (dims >= 2) {
         LLVMValueRef rho_s, rho_t, rho_r;

         rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, "");
         rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, "");

         rho = lp_build_max(float_bld, rho_s, rho_t);
         if (dims >= 3) {
            rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, "");
            rho = lp_build_max(float_bld, rho, rho_r);
         }
      }
   }

   return rho;
}


/*
 * Bri-linear lod computation
 *
 * Use a piece-wise linear approximation of log2 such that:
 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc.
 * - linear approximation for values in the neighborhood of 0.5, 1.5., etc,
 *   with the steepness specified in 'factor'
 * - exact result for 0.5, 1.5, etc.
 *
 *
 *   1.0 -              /----*
 *                     /
 *                    /
 *                   /
 *   0.5 -          *
 *                 /
 *                /
 *               /
 *   0.0 - *----/
 *
 *         |                 |
 *        2^0               2^1
 *
 * This is a technique also commonly used in hardware:
 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html
 *
 * TODO: For correctness, this should only be applied when texture is known to
 * have regular mipmaps, i.e., mipmaps derived from the base level.
 *
 * TODO: This could be done in fixed point, where applicable.
300 */ 301static void 302lp_build_brilinear_lod(struct lp_build_context *bld, 303 LLVMValueRef lod, 304 double factor, 305 LLVMValueRef *out_lod_ipart, 306 LLVMValueRef *out_lod_fpart) 307{ 308 LLVMValueRef lod_fpart; 309 double pre_offset = (factor - 0.5)/factor - 0.5; 310 double post_offset = 1 - factor; 311 312 if (0) { 313 lp_build_printf(bld->gallivm, "lod = %f\n", lod); 314 } 315 316 lod = lp_build_add(bld, lod, 317 lp_build_const_vec(bld->gallivm, bld->type, pre_offset)); 318 319 lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart); 320 321 lod_fpart = lp_build_mul(bld, lod_fpart, 322 lp_build_const_vec(bld->gallivm, bld->type, factor)); 323 324 lod_fpart = lp_build_add(bld, lod_fpart, 325 lp_build_const_vec(bld->gallivm, bld->type, post_offset)); 326 327 /* 328 * It's not necessary to clamp lod_fpart since: 329 * - the above expression will never produce numbers greater than one. 330 * - the mip filtering branch is only taken if lod_fpart is positive 331 */ 332 333 *out_lod_fpart = lod_fpart; 334 335 if (0) { 336 lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart); 337 lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart); 338 } 339} 340 341 342/* 343 * Combined log2 and brilinear lod computation. 344 * 345 * It's in all identical to calling lp_build_fast_log2() and 346 * lp_build_brilinear_lod() above, but by combining we can compute the integer 347 * and fractional part independently. 
348 */ 349static void 350lp_build_brilinear_rho(struct lp_build_context *bld, 351 LLVMValueRef rho, 352 double factor, 353 LLVMValueRef *out_lod_ipart, 354 LLVMValueRef *out_lod_fpart) 355{ 356 LLVMValueRef lod_ipart; 357 LLVMValueRef lod_fpart; 358 359 const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor); 360 const double post_offset = 1 - 2*factor; 361 362 assert(bld->type.floating); 363 364 assert(lp_check_value(bld->type, rho)); 365 366 /* 367 * The pre factor will make the intersections with the exact powers of two 368 * happen precisely where we want then to be, which means that the integer 369 * part will not need any post adjustments. 370 */ 371 rho = lp_build_mul(bld, rho, 372 lp_build_const_vec(bld->gallivm, bld->type, pre_factor)); 373 374 /* ipart = ifloor(log2(rho)) */ 375 lod_ipart = lp_build_extract_exponent(bld, rho, 0); 376 377 /* fpart = rho / 2**ipart */ 378 lod_fpart = lp_build_extract_mantissa(bld, rho); 379 380 lod_fpart = lp_build_mul(bld, lod_fpart, 381 lp_build_const_vec(bld->gallivm, bld->type, factor)); 382 383 lod_fpart = lp_build_add(bld, lod_fpart, 384 lp_build_const_vec(bld->gallivm, bld->type, post_offset)); 385 386 /* 387 * Like lp_build_brilinear_lod, it's not necessary to clamp lod_fpart since: 388 * - the above expression will never produce numbers greater than one. 389 * - the mip filtering branch is only taken if lod_fpart is positive 390 */ 391 392 *out_lod_ipart = lod_ipart; 393 *out_lod_fpart = lod_fpart; 394} 395 396 397/** 398 * Generate code to compute texture level of detail (lambda). 
399 * \param ddx partial derivatives of (s, t, r, q) with respect to X 400 * \param ddy partial derivatives of (s, t, r, q) with respect to Y 401 * \param lod_bias optional float vector with the shader lod bias 402 * \param explicit_lod optional float vector with the explicit lod 403 * \param width scalar int texture width 404 * \param height scalar int texture height 405 * \param depth scalar int texture depth 406 * 407 * XXX: The resulting lod is scalar, so ignore all but the first element of 408 * derivatives, lod_bias, etc that are passed by the shader. 409 */ 410void 411lp_build_lod_selector(struct lp_build_sample_context *bld, 412 unsigned unit, 413 const LLVMValueRef ddx[4], 414 const LLVMValueRef ddy[4], 415 LLVMValueRef lod_bias, /* optional */ 416 LLVMValueRef explicit_lod, /* optional */ 417 unsigned mip_filter, 418 LLVMValueRef *out_lod_ipart, 419 LLVMValueRef *out_lod_fpart) 420 421{ 422 LLVMBuilderRef builder = bld->gallivm->builder; 423 struct lp_build_context *float_bld = &bld->float_bld; 424 LLVMValueRef lod; 425 426 *out_lod_ipart = bld->int_bld.zero; 427 *out_lod_fpart = bld->float_bld.zero; 428 429 if (bld->static_state->min_max_lod_equal) { 430 /* User is forcing sampling from a particular mipmap level. 431 * This is hit during mipmap generation. 
432 */ 433 LLVMValueRef min_lod = 434 bld->dynamic_state->min_lod(bld->dynamic_state, bld->gallivm, unit); 435 436 lod = min_lod; 437 } 438 else { 439 LLVMValueRef sampler_lod_bias = 440 bld->dynamic_state->lod_bias(bld->dynamic_state, bld->gallivm, unit); 441 LLVMValueRef index0 = lp_build_const_int32(bld->gallivm, 0); 442 443 if (explicit_lod) { 444 lod = LLVMBuildExtractElement(builder, explicit_lod, 445 index0, ""); 446 } 447 else { 448 LLVMValueRef rho; 449 450 rho = lp_build_rho(bld, unit, ddx, ddy); 451 452 /* 453 * Compute lod = log2(rho) 454 */ 455 456 if (!lod_bias && 457 !bld->static_state->lod_bias_non_zero && 458 !bld->static_state->apply_max_lod && 459 !bld->static_state->apply_min_lod) { 460 /* 461 * Special case when there are no post-log2 adjustments, which 462 * saves instructions but keeping the integer and fractional lod 463 * computations separate from the start. 464 */ 465 466 if (mip_filter == PIPE_TEX_MIPFILTER_NONE || 467 mip_filter == PIPE_TEX_MIPFILTER_NEAREST) { 468 *out_lod_ipart = lp_build_ilog2(float_bld, rho); 469 *out_lod_fpart = bld->float_bld.zero; 470 return; 471 } 472 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR && 473 !(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) { 474 lp_build_brilinear_rho(float_bld, rho, BRILINEAR_FACTOR, 475 out_lod_ipart, out_lod_fpart); 476 return; 477 } 478 } 479 480 if (0) { 481 lod = lp_build_log2(float_bld, rho); 482 } 483 else { 484 lod = lp_build_fast_log2(float_bld, rho); 485 } 486 487 /* add shader lod bias */ 488 if (lod_bias) { 489 lod_bias = LLVMBuildExtractElement(builder, lod_bias, 490 index0, ""); 491 lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias"); 492 } 493 } 494 495 /* add sampler lod bias */ 496 if (bld->static_state->lod_bias_non_zero) 497 lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias"); 498 499 500 /* clamp lod */ 501 if (bld->static_state->apply_max_lod) { 502 LLVMValueRef max_lod = 503 bld->dynamic_state->max_lod(bld->dynamic_state, 
bld->gallivm, unit); 504 505 lod = lp_build_min(float_bld, lod, max_lod); 506 } 507 if (bld->static_state->apply_min_lod) { 508 LLVMValueRef min_lod = 509 bld->dynamic_state->min_lod(bld->dynamic_state, bld->gallivm, unit); 510 511 lod = lp_build_max(float_bld, lod, min_lod); 512 } 513 } 514 515 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) { 516 if (!(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) { 517 lp_build_brilinear_lod(float_bld, lod, BRILINEAR_FACTOR, 518 out_lod_ipart, out_lod_fpart); 519 } 520 else { 521 lp_build_ifloor_fract(float_bld, lod, out_lod_ipart, out_lod_fpart); 522 } 523 524 lp_build_name(*out_lod_fpart, "lod_fpart"); 525 } 526 else { 527 *out_lod_ipart = lp_build_iround(float_bld, lod); 528 } 529 530 lp_build_name(*out_lod_ipart, "lod_ipart"); 531 532 return; 533} 534 535 536/** 537 * For PIPE_TEX_MIPFILTER_NEAREST, convert float LOD to integer 538 * mipmap level index. 539 * Note: this is all scalar code. 540 * \param lod scalar float texture level of detail 541 * \param level_out returns integer 542 */ 543void 544lp_build_nearest_mip_level(struct lp_build_sample_context *bld, 545 unsigned unit, 546 LLVMValueRef lod_ipart, 547 LLVMValueRef *level_out) 548{ 549 struct lp_build_context *int_bld = &bld->int_bld; 550 LLVMValueRef first_level, last_level, level; 551 552 first_level = bld->dynamic_state->first_level(bld->dynamic_state, 553 bld->gallivm, unit); 554 last_level = bld->dynamic_state->last_level(bld->dynamic_state, 555 bld->gallivm, unit); 556 557 /* convert float lod to integer */ 558 level = lp_build_add(int_bld, lod_ipart, first_level); 559 560 /* clamp level to legal range of levels */ 561 *level_out = lp_build_clamp(int_bld, level, first_level, last_level); 562} 563 564 565/** 566 * For PIPE_TEX_MIPFILTER_LINEAR, convert float LOD to integer to 567 * two (adjacent) mipmap level indexes. Later, we'll sample from those 568 * two mipmap levels and interpolate between them. 
 */
void
lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
                           unsigned unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *lod_fpart_inout,
                           LLVMValueRef *level0_out,
                           LLVMValueRef *level1_out)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_build_context *int_bld = &bld->int_bld;
   struct lp_build_context *float_bld = &bld->float_bld;
   LLVMValueRef first_level, last_level;
   LLVMValueRef clamp_min;
   LLVMValueRef clamp_max;

   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, unit);

   *level0_out = lp_build_add(int_bld, lod_ipart, first_level);
   *level1_out = lp_build_add(int_bld, *level0_out, int_bld->one);

   last_level = bld->dynamic_state->last_level(bld->dynamic_state,
                                               bld->gallivm, unit);

   /*
    * Clamp both *level0_out and *level1_out to [first_level, last_level], with
    * the minimum number of comparisons, and zeroing lod_fpart in the extreme
    * ends in the process.
    */

   /* *level0_out < first_level */
   clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
                             *level0_out, first_level,
                             "clamp_lod_to_first");

   *level0_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level1_out, "");

   /* both levels pinned to first_level -> no interpolation weight */
   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
                                      float_bld->zero, *lod_fpart_inout, "");

   /* *level0_out >= last_level */
   clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
                             *level0_out, last_level,
                             "clamp_lod_to_last");

   *level0_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level1_out, "");

   /* both levels pinned to last_level -> no interpolation weight */
   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
                                      float_bld->zero, *lod_fpart_inout, "");

   lp_build_name(*level0_out, "sampler%u_miplevel0", unit);
   lp_build_name(*level1_out, "sampler%u_miplevel1", unit);
   lp_build_name(*lod_fpart_inout, "sampler%u_mipweight", unit);
}


/**
 * Return pointer to a single mipmap level.
 * \param data_array  array of pointers to mipmap levels
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
                          LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], data_ptr;

   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   indexes[1] = level;
   data_ptr = LLVMBuildGEP(builder, bld->data_array, indexes, 2, "");
   data_ptr = LLVMBuildLoad(builder, data_ptr, "");
   return data_ptr;
}


/**
 * Convenience wrapper around lp_build_get_mipmap_level() for a
 * compile-time-constant level index.
 */
LLVMValueRef
lp_build_get_const_mipmap_level(struct lp_build_sample_context *bld,
                                int level)
{
   LLVMValueRef lvl = lp_build_const_int32(bld->gallivm, level);
   return lp_build_get_mipmap_level(bld, lvl);
}


/**
 * Codegen equivalent for u_minify().
665 * Return max(1, base_size >> level); 666 */ 667LLVMValueRef 668lp_build_minify(struct lp_build_context *bld, 669 LLVMValueRef base_size, 670 LLVMValueRef level) 671{ 672 LLVMBuilderRef builder = bld->gallivm->builder; 673 assert(lp_check_value(bld->type, base_size)); 674 assert(lp_check_value(bld->type, level)); 675 676 if (level == bld->zero) { 677 /* if we're using mipmap level zero, no minification is needed */ 678 return base_size; 679 } 680 else { 681 LLVMValueRef size = 682 LLVMBuildLShr(builder, base_size, level, "minify"); 683 assert(bld->type.sign); 684 size = lp_build_max(bld, size, bld->one); 685 return size; 686 } 687} 688 689 690/** 691 * Dereference stride_array[mipmap_level] array to get a stride. 692 * Return stride as a vector. 693 */ 694static LLVMValueRef 695lp_build_get_level_stride_vec(struct lp_build_sample_context *bld, 696 LLVMValueRef stride_array, LLVMValueRef level) 697{ 698 LLVMBuilderRef builder = bld->gallivm->builder; 699 LLVMValueRef indexes[2], stride; 700 indexes[0] = lp_build_const_int32(bld->gallivm, 0); 701 indexes[1] = level; 702 stride = LLVMBuildGEP(builder, stride_array, indexes, 2, ""); 703 stride = LLVMBuildLoad(builder, stride, ""); 704 stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride); 705 return stride; 706} 707 708 709/** 710 * When sampling a mipmap, we need to compute the width, height, depth 711 * of the source levels from the level indexes. This helper function 712 * does that. 
 */
void
lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld,
                            LLVMValueRef ilevel,
                            LLVMValueRef *out_size,
                            LLVMValueRef *row_stride_vec,
                            LLVMValueRef *img_stride_vec)
{
   const unsigned dims = bld->dims;
   LLVMValueRef ilevel_vec;

   ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel);

   /*
    * Compute width, height, depth at mipmap level 'ilevel'
    */
   *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec);

   if (dims >= 2) {
      *row_stride_vec = lp_build_get_level_stride_vec(bld,
                                                      bld->row_stride_array,
                                                      ilevel);
      /* cube maps also need an image stride to step between faces */
      if (dims == 3 || bld->static_state->target == PIPE_TEXTURE_CUBE) {
         *img_stride_vec = lp_build_get_level_stride_vec(bld,
                                                         bld->img_stride_array,
                                                         ilevel);
      }
   }
}


/**
 * Extract and broadcast texture size.
 *
 * @param size_type   type of the texture size vector (either
 *                    bld->int_size_type or bld->float_size_type)
 * @param coord_type  type of the texture size vector (either
 *                    bld->int_coord_type or bld->coord_type)
 * @param size        vector with the texture size (width, height, depth)
 */
void
lp_build_extract_image_sizes(struct lp_build_sample_context *bld,
                             struct lp_type size_type,
                             struct lp_type coord_type,
                             LLVMValueRef size,
                             LLVMValueRef *out_width,
                             LLVMValueRef *out_height,
                             LLVMValueRef *out_depth)
{
   const unsigned dims = bld->dims;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);

   *out_width = lp_build_extract_broadcast(bld->gallivm,
                                           size_type,
                                           coord_type,
                                           size,
                                           LLVMConstInt(i32t, 0, 0));
   if (dims >= 2) {
      *out_height = lp_build_extract_broadcast(bld->gallivm,
                                               size_type,
                                               coord_type,
                                               size,
                                               LLVMConstInt(i32t, 1, 0));
      if (dims == 3) {
         *out_depth = lp_build_extract_broadcast(bld->gallivm,
                                                 size_type,
                                                 coord_type,
                                                 size,
                                                 LLVMConstInt(i32t, 2, 0));
      }
   }
}


/**
 * Unnormalize coords.
 *
 * @param flt_size  vector with the float texture size (width, height, depth)
 */
void
lp_build_unnormalized_coords(struct lp_build_sample_context *bld,
                             LLVMValueRef flt_size,
                             LLVMValueRef *s,
                             LLVMValueRef *t,
                             LLVMValueRef *r)
{
   const unsigned dims = bld->dims;
   LLVMValueRef width;
   LLVMValueRef height;
   LLVMValueRef depth;

   lp_build_extract_image_sizes(bld,
                                bld->float_size_type,
                                bld->coord_type,
                                flt_size,
                                &width,
                                &height,
                                &depth);

   /* s = s * width, t = t * height */
   *s = lp_build_mul(&bld->coord_bld, *s, width);
   if (dims >= 2) {
      *t = lp_build_mul(&bld->coord_bld, *t, height);
      if (dims >= 3) {
         *r = lp_build_mul(&bld->coord_bld, *r, depth);
      }
   }
}


/** Helper used by lp_build_cube_lookup() */
static LLVMValueRef
lp_build_cube_ima(struct lp_build_context *coord_bld, LLVMValueRef coord)
{
   /* ima = -0.5 / abs(coord); */
   LLVMValueRef negHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, -0.5);
   LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
   LLVMValueRef ima = lp_build_div(coord_bld, negHalf, absCoord);
   return ima;
}


/**
 * Helper used by lp_build_cube_lookup()
 * \param sign  scalar +1 or -1, or NULL for no sign flip
 * \param coord  float vector
 * \param ima  float vector
 */
static LLVMValueRef
lp_build_cube_coord(struct lp_build_context *coord_bld,
                    LLVMValueRef sign, int negate_coord,
                    LLVMValueRef coord, LLVMValueRef ima)
{
   /* return negate(coord) * ima * sign + 0.5; */
   LLVMValueRef half = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
   LLVMValueRef res;

   assert(negate_coord == +1 || negate_coord == -1);

   if (negate_coord == -1) {
      coord = lp_build_negate(coord_bld, coord);
   }

   res = lp_build_mul(coord_bld, coord, ima);
   if (sign) {
      sign = lp_build_broadcast_scalar(coord_bld, sign);
      res = lp_build_mul(coord_bld, res, sign);
   }
   res = lp_build_add(coord_bld, res, half);

   return res;
}


/** Helper used by lp_build_cube_lookup()
 * Return (major_coord >= 0) ? pos_face : neg_face;
 */
static LLVMValueRef
lp_build_cube_face(struct lp_build_sample_context *bld,
                   LLVMValueRef major_coord,
                   unsigned pos_face, unsigned neg_face)
{
   struct gallivm_state *gallivm = bld->gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef cmp = LLVMBuildFCmp(builder, LLVMRealUGE,
                                    major_coord,
                                    bld->float_bld.zero, "");
   LLVMValueRef pos = lp_build_const_int32(gallivm, pos_face);
   LLVMValueRef neg = lp_build_const_int32(gallivm, neg_face);
   LLVMValueRef res = LLVMBuildSelect(builder, cmp, pos, neg, "");
   return res;
}



/**
 * Generate code to do cube face selection and compute per-face texcoords.
 */
void
lp_build_cube_lookup(struct lp_build_sample_context *bld,
                     LLVMValueRef s,
                     LLVMValueRef t,
                     LLVMValueRef r,
                     LLVMValueRef *face,
                     LLVMValueRef *face_s,
                     LLVMValueRef *face_t)
{
   struct lp_build_context *float_bld = &bld->float_bld;
   struct lp_build_context *coord_bld = &bld->coord_bld;
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef rx, ry, rz;
   LLVMValueRef arx, ary, arz;
   LLVMValueRef c25 = lp_build_const_float(bld->gallivm, 0.25);
   LLVMValueRef arx_ge_ary, arx_ge_arz;
   LLVMValueRef ary_ge_arx, ary_ge_arz;
   LLVMValueRef arx_ge_ary_arz, ary_ge_arx_arz;

   assert(bld->coord_bld.type.length == 4);

   /*
    * Use the average of the four pixel's texcoords to choose the face.
    */
   rx = lp_build_mul(float_bld, c25,
                     lp_build_sum_vector(&bld->coord_bld, s));
   ry = lp_build_mul(float_bld, c25,
                     lp_build_sum_vector(&bld->coord_bld, t));
   rz = lp_build_mul(float_bld, c25,
                     lp_build_sum_vector(&bld->coord_bld, r));

   arx = lp_build_abs(float_bld, rx);
   ary = lp_build_abs(float_bld, ry);
   arz = lp_build_abs(float_bld, rz);

   /*
    * Compare sign/magnitude of rx,ry,rz to determine face
    */
   arx_ge_ary = LLVMBuildFCmp(builder, LLVMRealUGE, arx, ary, "");
   arx_ge_arz = LLVMBuildFCmp(builder, LLVMRealUGE, arx, arz, "");
   ary_ge_arx = LLVMBuildFCmp(builder, LLVMRealUGE, ary, arx, "");
   ary_ge_arz = LLVMBuildFCmp(builder, LLVMRealUGE, ary, arz, "");

   arx_ge_ary_arz = LLVMBuildAnd(builder, arx_ge_ary, arx_ge_arz, "");
   ary_ge_arx_arz = LLVMBuildAnd(builder, ary_ge_arx, ary_ge_arz, "");

   {
      struct lp_build_if_state if_ctx;
      LLVMValueRef face_s_var;
      LLVMValueRef face_t_var;
      LLVMValueRef face_var;

      /* allocas so the results can be merged across the emitted branches */
      face_s_var = lp_build_alloca(bld->gallivm, bld->coord_bld.vec_type, "face_s_var");
      face_t_var = lp_build_alloca(bld->gallivm, bld->coord_bld.vec_type, "face_t_var");
      face_var = lp_build_alloca(bld->gallivm, bld->int_bld.vec_type, "face_var");

      lp_build_if(&if_ctx, bld->gallivm, arx_ge_ary_arz);
      {
         /* +/- X face */
         LLVMValueRef sign = lp_build_sgn(float_bld, rx);
         LLVMValueRef ima = lp_build_cube_ima(coord_bld, s);
         *face_s = lp_build_cube_coord(coord_bld, sign, +1, r, ima);
         *face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
         *face = lp_build_cube_face(bld, rx,
                                    PIPE_TEX_FACE_POS_X,
                                    PIPE_TEX_FACE_NEG_X);
         LLVMBuildStore(builder, *face_s, face_s_var);
         LLVMBuildStore(builder, *face_t, face_t_var);
         LLVMBuildStore(builder, *face, face_var);
      }
      lp_build_else(&if_ctx);
      {
         struct lp_build_if_state if_ctx2;

         lp_build_if(&if_ctx2, bld->gallivm, ary_ge_arx_arz);
         {
            /* +/- Y face */
            LLVMValueRef sign = lp_build_sgn(float_bld, ry);
            LLVMValueRef ima = lp_build_cube_ima(coord_bld, t);
            *face_s = lp_build_cube_coord(coord_bld, NULL, -1, s, ima);
            *face_t = lp_build_cube_coord(coord_bld, sign, -1, r, ima);
            *face = lp_build_cube_face(bld, ry,
                                       PIPE_TEX_FACE_POS_Y,
                                       PIPE_TEX_FACE_NEG_Y);
            LLVMBuildStore(builder, *face_s, face_s_var);
            LLVMBuildStore(builder, *face_t, face_t_var);
            LLVMBuildStore(builder, *face, face_var);
         }
         lp_build_else(&if_ctx2);
         {
            /* +/- Z face */
            LLVMValueRef sign = lp_build_sgn(float_bld, rz);
            LLVMValueRef ima = lp_build_cube_ima(coord_bld, r);
            *face_s = lp_build_cube_coord(coord_bld, sign, -1, s, ima);
            *face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
            *face = lp_build_cube_face(bld, rz,
                                       PIPE_TEX_FACE_POS_Z,
                                       PIPE_TEX_FACE_NEG_Z);
            LLVMBuildStore(builder, *face_s, face_s_var);
            LLVMBuildStore(builder, *face_t, face_t_var);
            LLVMBuildStore(builder, *face, face_var);
         }
         lp_build_endif(&if_ctx2);
      }

      lp_build_endif(&if_ctx);

      *face_s = LLVMBuildLoad(builder, face_s_var, "face_s");
      *face_t = LLVMBuildLoad(builder, face_t_var, "face_t");
      *face = LLVMBuildLoad(builder, face_var, "face");
   }
}


/**
 * Compute the partial offset of a pixel block along an arbitrary axis.
 *
 * @param coord   coordinate in pixels
 * @param stride  number of bytes between rows of successive pixel blocks
 * @param block_length  number of pixels in a pixels block along the coordinate
 *                      axis
 * @param out_offset    resulting relative offset of the pixel block in bytes
 * @param out_subcoord  resulting sub-block pixel coordinate
 */
void
lp_build_sample_partial_offset(struct lp_build_context *bld,
                               unsigned block_length,
                               LLVMValueRef coord,
                               LLVMValueRef stride,
                               LLVMValueRef *out_offset,
                               LLVMValueRef *out_subcoord)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef offset;
   LLVMValueRef subcoord;

   if (block_length == 1) {
      /* 1-pixel blocks: coord is already a block index */
      subcoord = bld->zero;
   }
   else {
      /*
       * Pixel blocks have power of two dimensions. LLVM should convert the
       * rem/div to bit arithmetic.
       * TODO: Verify this.
       * It does indeed BUT it does transform it to scalar (and back) when doing so
       * (using roughly extract, shift/and, mov, unpack) (llvm 2.7).
       * The generated code looks seriously unfunny and is quite expensive.
       */
#if 0
      LLVMValueRef block_width = lp_build_const_int_vec(bld->type, block_length);
      subcoord = LLVMBuildURem(builder, coord, block_width, "");
      coord = LLVMBuildUDiv(builder, coord, block_width, "");
#else
      /* emit the shift/mask directly to keep the code vectorized */
      unsigned logbase2 = util_logbase2(block_length);
      LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2);
      LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1);
      subcoord = LLVMBuildAnd(builder, coord, block_mask, "");
      coord = LLVMBuildLShr(builder, coord, block_shift, "");
#endif
   }

   offset = lp_build_mul(bld, coord, stride);

   assert(out_offset);
   assert(out_subcoord);

   *out_offset = offset;
   *out_subcoord = subcoord;
}


/**
 * Compute the offset of a pixel block.
 *
 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels.
 *
 * Returns the relative offset and i,j sub-block coordinates
 */
void
lp_build_sample_offset(struct lp_build_context *bld,
                       const struct util_format_description *format_desc,
                       LLVMValueRef x,
                       LLVMValueRef y,
                       LLVMValueRef z,
                       LLVMValueRef y_stride,
                       LLVMValueRef z_stride,
                       LLVMValueRef *out_offset,
                       LLVMValueRef *out_i,
                       LLVMValueRef *out_j)
{
   LLVMValueRef x_stride;
   LLVMValueRef offset;

   /* horizontal stride in bytes is the block size of the format */
   x_stride = lp_build_const_vec(bld->gallivm, bld->type,
                                 format_desc->block.bits/8);

   lp_build_sample_partial_offset(bld,
                                  format_desc->block.width,
                                  x, x_stride,
                                  &offset, out_i);

   if (y && y_stride) {
      LLVMValueRef y_offset;
      lp_build_sample_partial_offset(bld,
                                     format_desc->block.height,
                                     y, y_stride,
                                     &y_offset, out_j);
      offset = lp_build_add(bld, offset, y_offset);
   }
   else {
      *out_j = bld->zero;
   }

   if (z && z_stride) {
      LLVMValueRef z_offset;
      LLVMValueRef k;
      lp_build_sample_partial_offset(bld,
                                     1, /* pixel blocks are always 2D */
                                     z, z_stride,
                                     &z_offset, &k);
      offset = lp_build_add(bld, offset, z_offset);
   }

   *out_offset = offset;
}