lp_bld_sample.c revision f7af4beae5e25c060d4f2c53d55b0e87ee9bdaeb
1/**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28/**
29 * @file
30 * Texture sampling -- common code.
31 *
32 * @author Jose Fonseca <jfonseca@vmware.com>
33 */
34
35#include "pipe/p_defines.h"
36#include "pipe/p_state.h"
37#include "util/u_format.h"
38#include "util/u_math.h"
39#include "lp_bld_arit.h"
40#include "lp_bld_const.h"
41#include "lp_bld_debug.h"
42#include "lp_bld_printf.h"
43#include "lp_bld_flow.h"
44#include "lp_bld_sample.h"
45#include "lp_bld_swizzle.h"
46#include "lp_bld_type.h"
47#include "lp_bld_logic.h"
48#include "lp_bld_pack.h"
49
50
51/*
52 * Bri-linear factor. Should be greater than one.
53 */
54#define BRILINEAR_FACTOR 2
55
56/**
57 * Does the given texture wrap mode allow sampling the texture border color?
58 * XXX maybe move this into gallium util code.
59 */
60boolean
61lp_sampler_wrap_mode_uses_border_color(unsigned mode,
62                                       unsigned min_img_filter,
63                                       unsigned mag_img_filter)
64{
65   switch (mode) {
66   case PIPE_TEX_WRAP_REPEAT:
67   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
68   case PIPE_TEX_WRAP_MIRROR_REPEAT:
69   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
70      return FALSE;
71   case PIPE_TEX_WRAP_CLAMP:
72   case PIPE_TEX_WRAP_MIRROR_CLAMP:
73      if (min_img_filter == PIPE_TEX_FILTER_NEAREST &&
74          mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
75         return FALSE;
76      } else {
77         return TRUE;
78      }
79   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
80   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
81      return TRUE;
82   default:
83      assert(0 && "unexpected wrap mode");
84      return FALSE;
85   }
86}
87
88
89/**
90 * Initialize lp_sampler_static_state object with the gallium sampler
91 * and texture state.
92 * The former is considered to be static and the latter dynamic.
93 */
94void
95lp_sampler_static_state(struct lp_sampler_static_state *state,
96                        const struct pipe_sampler_view *view,
97                        const struct pipe_sampler_state *sampler)
98{
99   const struct pipe_resource *texture;
100
101   memset(state, 0, sizeof *state);
102
103   if (!sampler || !view || !view->texture)
104      return;
105
106   texture = view->texture;
107
108   /*
109    * We don't copy sampler state over unless it is actually enabled, to avoid
110    * spurious recompiles, as the sampler static state is part of the shader
111    * key.
112    *
113    * Ideally the state tracker or cso_cache module would make all state
114    * canonical, but until that happens it's better to be safe than sorry here.
115    *
116    * XXX: Actually there's much more that can be done here, especially
117    * regarding 1D/2D/3D/CUBE textures, wrap modes, etc.
118    */
119
120   state->format            = view->format;
121   state->swizzle_r         = view->swizzle_r;
122   state->swizzle_g         = view->swizzle_g;
123   state->swizzle_b         = view->swizzle_b;
124   state->swizzle_a         = view->swizzle_a;
125
126   state->target            = texture->target;
127   state->pot_width         = util_is_power_of_two(texture->width0);
128   state->pot_height        = util_is_power_of_two(texture->height0);
129   state->pot_depth         = util_is_power_of_two(texture->depth0);
130
131   state->wrap_s            = sampler->wrap_s;
132   state->wrap_t            = sampler->wrap_t;
133   state->wrap_r            = sampler->wrap_r;
134   state->min_img_filter    = sampler->min_img_filter;
135   state->mag_img_filter    = sampler->mag_img_filter;
136
137   if (view->u.tex.last_level && sampler->max_lod > 0.0f) {
138      state->min_mip_filter = sampler->min_mip_filter;
139   } else {
140      state->min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
141   }
142
143   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
144      if (sampler->lod_bias != 0.0f) {
145         state->lod_bias_non_zero = 1;
146      }
147
148      /* If min_lod == max_lod we can greatly simplify mipmap selection.
149       * This is a case that occurs during automatic mipmap generation.
150       */
151      if (sampler->min_lod == sampler->max_lod) {
152         state->min_max_lod_equal = 1;
153      } else {
154         if (sampler->min_lod > 0.0f) {
155            state->apply_min_lod = 1;
156         }
157
158         if (sampler->max_lod < (float)view->u.tex.last_level) {
159            state->apply_max_lod = 1;
160         }
161      }
162   }
163
164   state->compare_mode      = sampler->compare_mode;
165   if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) {
166      state->compare_func   = sampler->compare_func;
167   }
168
169   state->normalized_coords = sampler->normalized_coords;
170
171   /*
172    * FIXME: Handle the remainder of pipe_sampler_view.
173    */
174}
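
/*
 * Illustrative sketch only (kept under #if 0, not compiled): a driver
 * typically calls lp_sampler_static_state() once per texture unit while
 * building its shader key, so a change in any of the fields captured above
 * triggers a recompile.  The wrapper name and key layout below are
 * hypothetical.
 */
#if 0
static void
example_fill_sampler_keys(struct lp_sampler_static_state *key_samplers,
                          struct pipe_sampler_view * const *views,
                          const struct pipe_sampler_state * const *samplers,
                          unsigned num_samplers)
{
   unsigned i;
   for (i = 0; i < num_samplers; i++) {
      /* one static state per unit becomes part of the shader key */
      lp_sampler_static_state(&key_samplers[i], views[i], samplers[i]);
   }
}
#endif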
175
176
177/**
178 * Generate code to compute coordinate gradient (rho).
179 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
180 *
181 * The resulting rho is scalar per quad.
182 */
183static LLVMValueRef
184lp_build_rho(struct lp_build_sample_context *bld,
185             unsigned unit,
186             const struct lp_derivatives *derivs)
187{
188   struct gallivm_state *gallivm = bld->gallivm;
189   struct lp_build_context *int_size_bld = &bld->int_size_bld;
190   struct lp_build_context *float_size_bld = &bld->float_size_bld;
191   struct lp_build_context *float_bld = &bld->float_bld;
192   struct lp_build_context *coord_bld = &bld->coord_bld;
193   struct lp_build_context *perquadf_bld = &bld->perquadf_bld;
194   const LLVMValueRef *ddx_ddy = derivs->ddx_ddy;
195   const unsigned dims = bld->dims;
196   LLVMBuilderRef builder = bld->gallivm->builder;
197   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
198   LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
199   LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0);
200   LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0);
201   LLVMValueRef rho_vec;
202   LLVMValueRef int_size, float_size;
203   LLVMValueRef rho;
204   LLVMValueRef first_level, first_level_vec;
205   LLVMValueRef abs_ddx_ddy[2];
206   unsigned length = coord_bld->type.length;
207   unsigned num_quads = length / 4;
208   unsigned i;
209   LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
210   LLVMValueRef rho_xvec, rho_yvec;
211
212   abs_ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
213   if (dims > 2) {
214      abs_ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
215   }
216   else {
217      abs_ddx_ddy[1] = NULL;
218   }
219
220   if (dims == 1) {
221      static const unsigned char swizzle1[] = {
222         0, LP_BLD_SWIZZLE_DONTCARE,
223         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
224      };
225      static const unsigned char swizzle2[] = {
226         1, LP_BLD_SWIZZLE_DONTCARE,
227         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
228      };
229      rho_xvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle1);
230      rho_yvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle2);
231   }
232   else if (dims == 2) {
233      static const unsigned char swizzle1[] = {
234         0, 2,
235         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
236      };
237      static const unsigned char swizzle2[] = {
238         1, 3,
239         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
240      };
241      rho_xvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle1);
242      rho_yvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle2);
243   }
244   else {
245      LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH];
246      LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH];
247      assert(dims == 3);
248      for (i = 0; i < num_quads; i++) {
249         shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i);
250         shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2);
251         shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i);
252         shuffles1[4*i + 3] = i32undef;
253         shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1);
254         shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3);
255         shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 1);
256         shuffles2[4*i + 3] = i32undef;
257      }
258      rho_xvec = LLVMBuildShuffleVector(builder, abs_ddx_ddy[0], abs_ddx_ddy[1],
259                                        LLVMConstVector(shuffles1, length), "");
260      rho_yvec = LLVMBuildShuffleVector(builder, abs_ddx_ddy[0], abs_ddx_ddy[1],
261                                        LLVMConstVector(shuffles2, length), "");
262   }
263
264   rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);
265
266   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
267                                                 bld->gallivm, unit);
268   first_level_vec = lp_build_broadcast_scalar(&bld->int_size_bld, first_level);
269   int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec);
270   float_size = lp_build_int_to_float(float_size_bld, int_size);
271
272   if (bld->coord_type.length > 4) {
273      /* expand size to each quad */
274      if (dims > 1) {
275         /* could use some broadcast_vector helper for this? */
276         int num_quads = bld->coord_type.length / 4;
277         LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4];
278         for (i = 0; i < num_quads; i++) {
279            src[i] = float_size;
280         }
281         float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads);
282      }
283      else {
284         float_size = lp_build_broadcast_scalar(coord_bld, float_size);
285      }
286      rho_vec = lp_build_mul(coord_bld, rho_vec, float_size);
287
288      if (dims <= 1) {
289         rho = rho_vec;
290      }
291      else {
292         if (dims >= 2) {
293            static const unsigned char swizzle1[] = {
294               0, LP_BLD_SWIZZLE_DONTCARE,
295               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
296            };
297            static const unsigned char swizzle2[] = {
298               1, LP_BLD_SWIZZLE_DONTCARE,
299               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
300            };
301            LLVMValueRef rho_s, rho_t, rho_r;
302
303            rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
304            rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2);
305
306            rho = lp_build_max(coord_bld, rho_s, rho_t);
307
308            if (dims >= 3) {
309               static const unsigned char swizzle3[] = {
310                  2, LP_BLD_SWIZZLE_DONTCARE,
311                  LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
312               };
313               rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle3);
314               rho = lp_build_max(coord_bld, rho, rho_r);
315            }
316         }
317      }
318      rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
319                                      perquadf_bld->type, rho);
320   }
321   else {
322      if (dims <= 1) {
323         rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, "");
324      }
325      rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size);
326
327      if (dims <= 1) {
328         rho = rho_vec;
329      }
330      else {
331         if (dims >= 2) {
332            LLVMValueRef rho_s, rho_t, rho_r;
333
334            rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, "");
335            rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, "");
336
337            rho = lp_build_max(float_bld, rho_s, rho_t);
338
339            if (dims >= 3) {
340               rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, "");
341               rho = lp_build_max(float_bld, rho, rho_r);
342            }
343         }
344      }
345   }
346
347   return rho;
348}
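
/*
 * Scalar reference of lp_build_rho() above for the common 2D case (sketch
 * only, kept under #if 0; would need <math.h> for fabsf()): per quad, rho is
 * the largest rate of change of any coordinate, scaled from normalized
 * coordinates to texels of the first mipmap level.
 */
#if 0
static float
example_rho_2d(float dsdx, float dtdx,   /* ddx of (s, t) */
               float dsdy, float dtdy,   /* ddy of (s, t) */
               float width, float height)
{
   float rho_s = MAX2(fabsf(dsdx), fabsf(dsdy)) * width;
   float rho_t = MAX2(fabsf(dtdx), fabsf(dtdy)) * height;
   return MAX2(rho_s, rho_t);
}
#endif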
349
350
351/*
352 * Bri-linear lod computation
353 *
354 * Use a piece-wise linear approximation of log2 such that:
355 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc.
356 * - linear approximation for values in the neighborhood of 0.5, 1.5, etc.,
357 *   with the steepness specified in 'factor'
358 * - exact result for 0.5, 1.5, etc.
359 *
360 *
361 *   1.0 -              /----*
362 *                     /
363 *                    /
364 *                   /
365 *   0.5 -          *
366 *                 /
367 *                /
368 *               /
369 *   0.0 - *----/
370 *
371 *         |                 |
372 *        2^0               2^1
373 *
374 * This is a technique also commonly used in hardware:
375 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html
376 *
377 * TODO: For correctness, this should only be applied when the texture is known
378 * to have regular mipmaps, i.e., mipmaps derived from the base level.
379 *
380 * TODO: This could be done in fixed point, where applicable.
381 */
382static void
383lp_build_brilinear_lod(struct lp_build_context *bld,
384                       LLVMValueRef lod,
385                       double factor,
386                       LLVMValueRef *out_lod_ipart,
387                       LLVMValueRef *out_lod_fpart)
388{
389   LLVMValueRef lod_fpart;
390   double pre_offset = (factor - 0.5)/factor - 0.5;
391   double post_offset = 1 - factor;
392
393   if (0) {
394      lp_build_printf(bld->gallivm, "lod = %f\n", lod);
395   }
396
397   lod = lp_build_add(bld, lod,
398                      lp_build_const_vec(bld->gallivm, bld->type, pre_offset));
399
400   lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart);
401
402   lod_fpart = lp_build_mul(bld, lod_fpart,
403                            lp_build_const_vec(bld->gallivm, bld->type, factor));
404
405   lod_fpart = lp_build_add(bld, lod_fpart,
406                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));
407
408   /*
409    * It's not necessary to clamp lod_fpart since:
410    * - the above expression will never produce numbers greater than one.
411    * - the mip filtering branch is only taken if lod_fpart is positive
412    */
413
414   *out_lod_fpart = lod_fpart;
415
416   if (0) {
417      lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart);
418      lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart);
419   }
420}
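
/*
 * Scalar equivalent of lp_build_brilinear_lod() above (sketch only, kept
 * under #if 0; would need <math.h> for floorf()).  With
 * factor == BRILINEAR_FACTOR == 2, lod values within 0.25 of an integer
 * produce a non-positive fpart, so the linear mip filter effectively snaps
 * to the nearest level there, while the weight is exact at the x.5 points.
 */
#if 0
static void
example_brilinear_lod(float lod, float factor,
                      int *out_lod_ipart, float *out_lod_fpart)
{
   const float pre_offset = (factor - 0.5f) / factor - 0.5f;
   const float post_offset = 1.0f - factor;
   float biased = lod + pre_offset;
   float ipart = floorf(biased);
   float fpart = biased - ipart;

   *out_lod_ipart = (int)ipart;
   /* never greater than one; negative near integer lods */
   *out_lod_fpart = fpart * factor + post_offset;
}
#endif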
421
422
423/*
424 * Combined log2 and brilinear lod computation.
425 *
426 * It is essentially identical to calling lp_build_fast_log2() and
427 * lp_build_brilinear_lod() above, but by combining we can compute the integer
428 * and fractional part independently.
429 */
430static void
431lp_build_brilinear_rho(struct lp_build_context *bld,
432                       LLVMValueRef rho,
433                       double factor,
434                       LLVMValueRef *out_lod_ipart,
435                       LLVMValueRef *out_lod_fpart)
436{
437   LLVMValueRef lod_ipart;
438   LLVMValueRef lod_fpart;
439
440   const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor);
441   const double post_offset = 1 - 2*factor;
442
443   assert(bld->type.floating);
444
445   assert(lp_check_value(bld->type, rho));
446
447   /*
448    * The pre factor will make the intersections with the exact powers of two
449    * happen precisely where we want them to be, which means that the integer
450    * part will not need any post adjustments.
451    */
452   rho = lp_build_mul(bld, rho,
453                      lp_build_const_vec(bld->gallivm, bld->type, pre_factor));
454
455   /* ipart = ifloor(log2(rho)) */
456   lod_ipart = lp_build_extract_exponent(bld, rho, 0);
457
458   /* fpart = rho / 2**ipart */
459   lod_fpart = lp_build_extract_mantissa(bld, rho);
460
461   lod_fpart = lp_build_mul(bld, lod_fpart,
462                            lp_build_const_vec(bld->gallivm, bld->type, factor));
463
464   lod_fpart = lp_build_add(bld, lod_fpart,
465                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));
466
467   /*
468    * Like lp_build_brilinear_lod, it's not necessary to clamp lod_fpart since:
469    * - the above expression will never produce numbers greater than one.
470    * - the mip filtering branch is only taken if lod_fpart is positive
471    */
472
473   *out_lod_ipart = lod_ipart;
474   *out_lod_fpart = lod_fpart;
475}
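
/*
 * Scalar sketch of the combined computation above, using frexpf() in place
 * of lp_build_extract_exponent()/lp_build_extract_mantissa() (kept under
 * #if 0; would need <math.h> for frexpf() and M_SQRT2).
 */
#if 0
static void
example_brilinear_rho(float rho, float factor,
                      int *out_lod_ipart, float *out_lod_fpart)
{
   const float pre_factor = (2.0f * factor - 0.5f) / ((float)M_SQRT2 * factor);
   const float post_offset = 1.0f - 2.0f * factor;
   int exp;
   float mant;

   rho *= pre_factor;
   mant = frexpf(rho, &exp);      /* rho == mant * 2^exp, mant in [0.5, 1) */
   *out_lod_ipart = exp - 1;      /* == ifloor(log2(rho)) */
   *out_lod_fpart = (2.0f * mant) * factor + post_offset;
}
#endif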
476
477
478/**
479 * Generate code to compute texture level of detail (lambda).
480 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
481 * \param lod_bias  optional float vector with the shader lod bias
482 * \param explicit_lod  optional float vector with the explicit lod
483 * \param width  scalar int texture width
484 * \param height  scalar int texture height
485 * \param depth  scalar int texture depth
486 *
487 * The resulting lod is scalar per quad; only the first value per quad
488 * of lod_bias and explicit_lod is used.
489 */
490void
491lp_build_lod_selector(struct lp_build_sample_context *bld,
492                      unsigned unit,
493                      const struct lp_derivatives *derivs,
494                      LLVMValueRef lod_bias, /* optional */
495                      LLVMValueRef explicit_lod, /* optional */
496                      unsigned mip_filter,
497                      LLVMValueRef *out_lod_ipart,
498                      LLVMValueRef *out_lod_fpart)
499
500{
501   LLVMBuilderRef builder = bld->gallivm->builder;
502   struct lp_build_context *perquadf_bld = &bld->perquadf_bld;
503   LLVMValueRef lod;
504
505   *out_lod_ipart = bld->perquadi_bld.zero;
506   *out_lod_fpart = perquadf_bld->zero;
507
508   if (bld->static_state->min_max_lod_equal) {
509      /* User is forcing sampling from a particular mipmap level.
510       * This is hit during mipmap generation.
511       */
512      LLVMValueRef min_lod =
513         bld->dynamic_state->min_lod(bld->dynamic_state, bld->gallivm, unit);
514
515      lod = lp_build_broadcast_scalar(perquadf_bld, min_lod);
516   }
517   else {
518      if (explicit_lod) {
519         lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
520                                         perquadf_bld->type, explicit_lod);
521      }
522      else {
523         LLVMValueRef rho;
524
525         rho = lp_build_rho(bld, unit, derivs);
526
527         /*
528          * Compute lod = log2(rho)
529          */
530
531         if (!lod_bias &&
532             !bld->static_state->lod_bias_non_zero &&
533             !bld->static_state->apply_max_lod &&
534             !bld->static_state->apply_min_lod) {
535            /*
536             * Special case when there are no post-log2 adjustments, which
537             * saves instructions by keeping the integer and fractional lod
538             * computations separate from the start.
539             */
540
541            if (mip_filter == PIPE_TEX_MIPFILTER_NONE ||
542                mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
543               *out_lod_ipart = lp_build_ilog2(perquadf_bld, rho);
544               *out_lod_fpart = perquadf_bld->zero;
545               return;
546            }
547            if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR &&
548                !(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
549               lp_build_brilinear_rho(perquadf_bld, rho, BRILINEAR_FACTOR,
550                                      out_lod_ipart, out_lod_fpart);
551               return;
552            }
553         }
554
555         if (0) {
556            lod = lp_build_log2(perquadf_bld, rho);
557         }
558         else {
559            lod = lp_build_fast_log2(perquadf_bld, rho);
560         }
561
562         /* add shader lod bias */
563         if (lod_bias) {
564            lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
565                  perquadf_bld->type, lod_bias);
566            lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias");
567         }
568      }
569
570      /* add sampler lod bias */
571      if (bld->static_state->lod_bias_non_zero) {
572         LLVMValueRef sampler_lod_bias =
573            bld->dynamic_state->lod_bias(bld->dynamic_state, bld->gallivm, unit);
574         sampler_lod_bias = lp_build_broadcast_scalar(perquadf_bld,
575                                                      sampler_lod_bias);
576         lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias");
577      }
578
579      /* clamp lod */
580      if (bld->static_state->apply_max_lod) {
581         LLVMValueRef max_lod =
582            bld->dynamic_state->max_lod(bld->dynamic_state, bld->gallivm, unit);
583         max_lod = lp_build_broadcast_scalar(perquadf_bld, max_lod);
584
585         lod = lp_build_min(perquadf_bld, lod, max_lod);
586      }
587      if (bld->static_state->apply_min_lod) {
588         LLVMValueRef min_lod =
589            bld->dynamic_state->min_lod(bld->dynamic_state, bld->gallivm, unit);
590         min_lod = lp_build_broadcast_scalar(perquadf_bld, min_lod);
591
592         lod = lp_build_max(perquadf_bld, lod, min_lod);
593      }
594   }
595
596   if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
597      if (!(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
598         lp_build_brilinear_lod(perquadf_bld, lod, BRILINEAR_FACTOR,
599                                out_lod_ipart, out_lod_fpart);
600      }
601      else {
602         lp_build_ifloor_fract(perquadf_bld, lod, out_lod_ipart, out_lod_fpart);
603      }
604
605      lp_build_name(*out_lod_fpart, "lod_fpart");
606   }
607   else {
608      *out_lod_ipart = lp_build_iround(perquadf_bld, lod);
609   }
610
611   lp_build_name(*out_lod_ipart, "lod_ipart");
612
613   return;
614}
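
/*
 * For reference, the per-quad lod computed above is essentially the
 * following (sketch only, kept under #if 0; would need <math.h> for
 * log2f()).
 */
#if 0
static float
example_lod(float rho, float shader_lod_bias, float sampler_lod_bias,
            float min_lod, float max_lod)
{
   float lod = log2f(rho) + shader_lod_bias + sampler_lod_bias;
   /* the real code skips steps that the static state or an explicit lod
    * make unnecessary */
   return CLAMP(lod, min_lod, max_lod);
}
#endif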
615
616
617/**
618 * For PIPE_TEX_MIPFILTER_NEAREST, convert float LOD to integer
619 * mipmap level index.
620 * Note: this is all scalar per quad code.
621 * \param lod_ipart  int texture level of detail
622 * \param level_out  returns the integer mipmap level to sample from
623 */
624void
625lp_build_nearest_mip_level(struct lp_build_sample_context *bld,
626                           unsigned unit,
627                           LLVMValueRef lod_ipart,
628                           LLVMValueRef *level_out)
629{
630   struct lp_build_context *perquadi_bld = &bld->perquadi_bld;
631   LLVMValueRef first_level, last_level, level;
632
633   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
634                                                 bld->gallivm, unit);
635   last_level = bld->dynamic_state->last_level(bld->dynamic_state,
636                                               bld->gallivm, unit);
637   first_level = lp_build_broadcast_scalar(perquadi_bld, first_level);
638   last_level = lp_build_broadcast_scalar(perquadi_bld, last_level);
639
640   level = lp_build_add(perquadi_bld, lod_ipart, first_level);
641
642   /* clamp level to legal range of levels */
643   *level_out = lp_build_clamp(perquadi_bld, level, first_level, last_level);
644}
645
646
647/**
648 * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad int LOD(s) to two (per-quad)
649 * (adjacent) mipmap level indexes, and fix up float lod part accordingly.
650 * Later, we'll sample from those two mipmap levels and interpolate between them.
651 */
652void
653lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
654                           unsigned unit,
655                           LLVMValueRef lod_ipart,
656                           LLVMValueRef *lod_fpart_inout,
657                           LLVMValueRef *level0_out,
658                           LLVMValueRef *level1_out)
659{
660   LLVMBuilderRef builder = bld->gallivm->builder;
661   struct lp_build_context *perquadi_bld = &bld->perquadi_bld;
662   struct lp_build_context *perquadf_bld = &bld->perquadf_bld;
663   LLVMValueRef first_level, last_level;
664   LLVMValueRef clamp_min;
665   LLVMValueRef clamp_max;
666
667   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
668                                                 bld->gallivm, unit);
669   last_level = bld->dynamic_state->last_level(bld->dynamic_state,
670                                               bld->gallivm, unit);
671   first_level = lp_build_broadcast_scalar(perquadi_bld, first_level);
672   last_level = lp_build_broadcast_scalar(perquadi_bld, last_level);
673
674   *level0_out = lp_build_add(perquadi_bld, lod_ipart, first_level);
675   *level1_out = lp_build_add(perquadi_bld, *level0_out, perquadi_bld->one);
676
677   /*
678    * Clamp both *level0_out and *level1_out to [first_level, last_level], with
679    * the minimum number of comparisons, and zeroing lod_fpart in the extreme
680    * ends in the process.
681    */
682
683   /*
684    * This code (the vector select in particular) only works with llvm 3.1 or
685    * later when there's more than one quad (with the x86 backend). Might
686    * consider converting to our lp_bld_logic helpers.
687    */
688#if HAVE_LLVM < 0x0301
689   assert(perquadi_bld->type.length == 1);
690#endif
691
692   /* *level0_out < first_level */
693   clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
694                             *level0_out, first_level,
695                             "clamp_lod_to_first");
696
697   *level0_out = LLVMBuildSelect(builder, clamp_min,
698                                 first_level, *level0_out, "");
699
700   *level1_out = LLVMBuildSelect(builder, clamp_min,
701                                 first_level, *level1_out, "");
702
703   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
704                                      perquadf_bld->zero, *lod_fpart_inout, "");
705
706   /* *level0_out >= last_level */
707   clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
708                             *level0_out, last_level,
709                             "clamp_lod_to_last");
710
711   *level0_out = LLVMBuildSelect(builder, clamp_max,
712                                 last_level, *level0_out, "");
713
714   *level1_out = LLVMBuildSelect(builder, clamp_max,
715                                 last_level, *level1_out, "");
716
717   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
718                                      perquadf_bld->zero, *lod_fpart_inout, "");
719
720   lp_build_name(*level0_out, "sampler%u_miplevel0", unit);
721   lp_build_name(*level1_out, "sampler%u_miplevel1", unit);
722   lp_build_name(*lod_fpart_inout, "sampler%u_mipweight", unit);
723}
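
/*
 * Scalar reference of the clamping above (sketch only, kept under #if 0):
 * both levels are clamped to [first_level, last_level] and the interpolation
 * weight is zeroed at the extremes, so only a single level is fetched there.
 */
#if 0
static void
example_linear_mip_levels(int lod_ipart, float lod_fpart,
                          int first_level, int last_level,
                          int *level0, int *level1, float *weight)
{
   *level0 = lod_ipart + first_level;
   *level1 = *level0 + 1;
   if (*level0 < first_level) {
      *level0 = *level1 = first_level;
      lod_fpart = 0.0f;
   }
   if (*level0 >= last_level) {
      *level0 = *level1 = last_level;
      lod_fpart = 0.0f;
   }
   *weight = lod_fpart;
}
#endif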
724
725
726/**
727 * Return pointer to a single mipmap level.
728 * \param data_array  array of pointers to mipmap levels
729 * \param level  integer mipmap level
730 */
731LLVMValueRef
732lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
733                          LLVMValueRef level)
734{
735   LLVMBuilderRef builder = bld->gallivm->builder;
736   LLVMValueRef indexes[2], data_ptr;
737
738   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
739   indexes[1] = level;
740   data_ptr = LLVMBuildGEP(builder, bld->data_array, indexes, 2, "");
741   data_ptr = LLVMBuildLoad(builder, data_ptr, "");
742   return data_ptr;
743}
744
745
746/**
747 * Codegen equivalent for u_minify().
748 * Return max(1, base_size >> level);
749 */
750LLVMValueRef
751lp_build_minify(struct lp_build_context *bld,
752                LLVMValueRef base_size,
753                LLVMValueRef level)
754{
755   LLVMBuilderRef builder = bld->gallivm->builder;
756   assert(lp_check_value(bld->type, base_size));
757   assert(lp_check_value(bld->type, level));
758
759   if (level == bld->zero) {
760      /* if we're using mipmap level zero, no minification is needed */
761      return base_size;
762   }
763   else {
764      LLVMValueRef size =
765         LLVMBuildLShr(builder, base_size, level, "minify");
766      assert(bld->type.sign);
767      size = lp_build_max(bld, size, bld->one);
768      return size;
769   }
770}
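
/*
 * For reference, the scalar computation this mirrors is simply
 * max(1, base_size >> level); e.g. a 16-texel axis minifies to
 * 16, 8, 4, 2, 1, 1, ... for levels 0, 1, 2, ...
 */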
771
772
773/**
774 * Dereference stride_array[mipmap_level] array to get a stride.
775 * Return stride as a vector.
776 */
777static LLVMValueRef
778lp_build_get_level_stride_vec(struct lp_build_sample_context *bld,
779                              LLVMValueRef stride_array, LLVMValueRef level)
780{
781   LLVMBuilderRef builder = bld->gallivm->builder;
782   LLVMValueRef indexes[2], stride;
783   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
784   indexes[1] = level;
785   stride = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
786   stride = LLVMBuildLoad(builder, stride, "");
787   stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride);
788   return stride;
789}
790
791
792/**
793 * When sampling a mipmap, we need to compute the width, height, depth
794 * of the source levels from the level indexes.  This helper function
795 * does that.
796 */
797void
798lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld,
799                            LLVMValueRef ilevel,
800                            LLVMValueRef *out_size,
801                            LLVMValueRef *row_stride_vec,
802                            LLVMValueRef *img_stride_vec)
803{
804   const unsigned dims = bld->dims;
805   LLVMValueRef ilevel_vec;
806
807   ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel);
808
809   /*
810    * Compute width, height, depth at mipmap level 'ilevel'
811    */
812   *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec);
813
814   if (dims >= 2) {
815      *row_stride_vec = lp_build_get_level_stride_vec(bld,
816                                                      bld->row_stride_array,
817                                                      ilevel);
818      if (dims == 3 || bld->static_state->target == PIPE_TEXTURE_CUBE) {
819         *img_stride_vec = lp_build_get_level_stride_vec(bld,
820                                                         bld->img_stride_array,
821                                                         ilevel);
822      }
823   }
824}
825
826
827/**
828 * Extract and broadcast texture size.
829 *
830 * @param size_type   type of the texture size vector (either
831 *                    bld->int_size_type or bld->float_size_type)
832 * @param coord_type  type of the returned coordinate vectors (either
833 *                    bld->int_coord_type or bld->coord_type)
834 * @param size        vector with the texture size (width, height, depth)
835 */
836void
837lp_build_extract_image_sizes(struct lp_build_sample_context *bld,
838                             struct lp_type size_type,
839                             struct lp_type coord_type,
840                             LLVMValueRef size,
841                             LLVMValueRef *out_width,
842                             LLVMValueRef *out_height,
843                             LLVMValueRef *out_depth)
844{
845   const unsigned dims = bld->dims;
846   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
847
848   *out_width = lp_build_extract_broadcast(bld->gallivm,
849                                           size_type,
850                                           coord_type,
851                                           size,
852                                           LLVMConstInt(i32t, 0, 0));
853   if (dims >= 2) {
854      *out_height = lp_build_extract_broadcast(bld->gallivm,
855                                               size_type,
856                                               coord_type,
857                                               size,
858                                               LLVMConstInt(i32t, 1, 0));
859      if (dims == 3) {
860         *out_depth = lp_build_extract_broadcast(bld->gallivm,
861                                                 size_type,
862                                                 coord_type,
863                                                 size,
864                                                 LLVMConstInt(i32t, 2, 0));
865      }
866   }
867}
868
869
870/**
871 * Unnormalize coords.
872 *
873 * @param flt_size  float vector with the texture size (width, height, depth)
874 */
875void
876lp_build_unnormalized_coords(struct lp_build_sample_context *bld,
877                             LLVMValueRef flt_size,
878                             LLVMValueRef *s,
879                             LLVMValueRef *t,
880                             LLVMValueRef *r)
881{
882   const unsigned dims = bld->dims;
883   LLVMValueRef width;
884   LLVMValueRef height;
885   LLVMValueRef depth;
886
887   lp_build_extract_image_sizes(bld,
888                                bld->float_size_type,
889                                bld->coord_type,
890                                flt_size,
891                                &width,
892                                &height,
893                                &depth);
894
895   /* s = s * width, t = t * height */
896   *s = lp_build_mul(&bld->coord_bld, *s, width);
897   if (dims >= 2) {
898      *t = lp_build_mul(&bld->coord_bld, *t, height);
899      if (dims >= 3) {
900         *r = lp_build_mul(&bld->coord_bld, *r, depth);
901      }
902   }
903}
904
905
906/** Helper used by lp_build_cube_lookup() */
907static LLVMValueRef
908lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord)
909{
910   /* ima = +0.5 / abs(coord); */
911   LLVMValueRef posHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
912   LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
913   LLVMValueRef ima = lp_build_div(coord_bld, posHalf, absCoord);
914   return ima;
915}
916
917/** Helper used by lp_build_cube_lookup() */
918static LLVMValueRef
919lp_build_cube_imaneg(struct lp_build_context *coord_bld, LLVMValueRef coord)
920{
921   /* ima = -0.5 / abs(coord); */
922   LLVMValueRef negHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, -0.5);
923   LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
924   LLVMValueRef ima = lp_build_div(coord_bld, negHalf, absCoord);
925   return ima;
926}
927
928/**
929 * Helper used by lp_build_cube_lookup()
930 * FIXME: the sign here can also be 0.
931 * Arithmetically this could definitely make a difference. Either
932 * fix the comment or use another (simpler) sign function, not sure
933 * which one it should be.
934 * \param sign  scalar +1 or -1
935 * \param coord  float vector
936 * \param ima  float vector
937 */
938static LLVMValueRef
939lp_build_cube_coord(struct lp_build_context *coord_bld,
940                    LLVMValueRef sign, int negate_coord,
941                    LLVMValueRef coord, LLVMValueRef ima)
942{
943   /* return negate(coord) * ima * sign + 0.5; */
944   LLVMValueRef half = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
945   LLVMValueRef res;
946
947   assert(negate_coord == +1 || negate_coord == -1);
948
949   if (negate_coord == -1) {
950      coord = lp_build_negate(coord_bld, coord);
951   }
952
953   res = lp_build_mul(coord_bld, coord, ima);
954   if (sign) {
955      sign = lp_build_broadcast_scalar(coord_bld, sign);
956      res = lp_build_mul(coord_bld, res, sign);
957   }
958   res = lp_build_add(coord_bld, res, half);
959
960   return res;
961}
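
/*
 * Taken together, lp_build_cube_ima*() and lp_build_cube_coord() implement
 * the usual cube face coordinate mapping; e.g. for the +X face:
 *    face_s = -rz / (2*|rx|) + 0.5
 *    face_t = -ry / (2*|rx|) + 0.5
 * with the signs of the numerators flipping as appropriate for other faces.
 */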
962
963
964/** Helper used by lp_build_cube_lookup()
965 * Return (major_coord >= 0) ? pos_face : neg_face;
966 */
967static LLVMValueRef
968lp_build_cube_face(struct lp_build_sample_context *bld,
969                   LLVMValueRef major_coord,
970                   unsigned pos_face, unsigned neg_face)
971{
972   struct gallivm_state *gallivm = bld->gallivm;
973   LLVMBuilderRef builder = gallivm->builder;
974   LLVMValueRef cmp = LLVMBuildFCmp(builder, LLVMRealUGE,
975                                    major_coord,
976                                    bld->float_bld.zero, "");
977   LLVMValueRef pos = lp_build_const_int32(gallivm, pos_face);
978   LLVMValueRef neg = lp_build_const_int32(gallivm, neg_face);
979   LLVMValueRef res = LLVMBuildSelect(builder, cmp, pos, neg, "");
980   return res;
981}
982
983
984
985/**
986 * Generate code to do cube face selection and compute per-face texcoords.
987 */
988void
989lp_build_cube_lookup(struct lp_build_sample_context *bld,
990                     LLVMValueRef s,
991                     LLVMValueRef t,
992                     LLVMValueRef r,
993                     LLVMValueRef *face,
994                     LLVMValueRef *face_s,
995                     LLVMValueRef *face_t)
996{
997   struct lp_build_context *coord_bld = &bld->coord_bld;
998   LLVMBuilderRef builder = bld->gallivm->builder;
999   struct gallivm_state *gallivm = bld->gallivm;
1000   LLVMValueRef rx, ry, rz;
1001   LLVMValueRef tmp[4], rxyz, arxyz;
1002
1003   /*
1004    * Use the average of the four pixels' texcoords to choose the face.
1005    * As a slight simplification, just calculate the sum and skip the scaling.
1006    */
1007   tmp[0] = s;
1008   tmp[1] = t;
1009   tmp[2] = r;
1010   rxyz = lp_build_hadd_partial4(&bld->coord_bld, tmp, 3);
1011   arxyz = lp_build_abs(&bld->coord_bld, rxyz);
1012
1013   if (coord_bld->type.length > 4) {
1014      struct lp_build_context *cint_bld = &bld->int_coord_bld;
1015      struct lp_type intctype = cint_bld->type;
1016      LLVMValueRef signrxs, signrys, signrzs, signrxyz, sign;
1017      LLVMValueRef arxs, arys, arzs;
1018      LLVMValueRef arx_ge_ary, maxarxsarys, arz_ge_arx_ary;
1019      LLVMValueRef snewx, tnewx, snewy, tnewy, snewz, tnewz;
1020      LLVMValueRef ryneg, rzneg;
1021      LLVMValueRef ma, ima;
1022      LLVMValueRef posHalf = lp_build_const_vec(gallivm, coord_bld->type, 0.5);
1023      LLVMValueRef signmask = lp_build_const_int_vec(gallivm, intctype,
1024                                                     1 << (intctype.width - 1));
1025      LLVMValueRef signshift = lp_build_const_int_vec(gallivm, intctype,
1026                                                      intctype.width -1);
1027      LLVMValueRef facex = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_X);
1028      LLVMValueRef facey = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Y);
1029      LLVMValueRef facez = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Z);
1030
1031      assert(PIPE_TEX_FACE_NEG_X == PIPE_TEX_FACE_POS_X + 1);
1032      assert(PIPE_TEX_FACE_NEG_Y == PIPE_TEX_FACE_POS_Y + 1);
1033      assert(PIPE_TEX_FACE_NEG_Z == PIPE_TEX_FACE_POS_Z + 1);
1034
1035      rx = LLVMBuildBitCast(builder, s, lp_build_vec_type(gallivm, intctype), "");
1036      ry = LLVMBuildBitCast(builder, t, lp_build_vec_type(gallivm, intctype), "");
1037      rz = LLVMBuildBitCast(builder, r, lp_build_vec_type(gallivm, intctype), "");
1038      ryneg = LLVMBuildXor(builder, ry, signmask, "");
1039      rzneg = LLVMBuildXor(builder, rz, signmask, "");
1040
1041      /* the sign bit comes from the averaged vector (per quad),
1042       * as does the decision which face to use */
1043      signrxyz = LLVMBuildBitCast(builder, rxyz, lp_build_vec_type(gallivm, intctype), "");
1044      signrxyz = LLVMBuildAnd(builder, signrxyz, signmask, "");
1045
1046      arxs = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 0);
1047      arys = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 1);
1048      arzs = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 2);
1049
1050      /*
1051       * select x if x >= y else select y
1052       * select previous result if max(x,y) >= z else select z
1053       */
1054      arx_ge_ary = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, arxs, arys);
1055      maxarxsarys = lp_build_max(coord_bld, arxs, arys);
1056      arz_ge_arx_ary = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, maxarxsarys, arzs);
1057
1058      /*
1059       * compute all possible new s/t coords
1060       * snewx = signrx * -rz;
1061       * tnewx = -ry;
1062       * snewy = rx;
1063       * tnewy = signry * rz;
1064       * snewz = signrz * rx;
1065       * tnewz = -ry;
1066       */
1067      signrxs = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 0);
1068      snewx = LLVMBuildXor(builder, signrxs, rzneg, "");
1069      tnewx = ryneg;
1070
1071      signrys = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 1);
1072      snewy = rx;
1073      tnewy = LLVMBuildXor(builder, signrys, rz, "");
1074
1075      signrzs = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 2);
1076      snewz = LLVMBuildXor(builder, signrzs, rx, "");
1077      tnewz = ryneg;
1078
1079      /* XXX on x86 unclear if we should cast the values back to float
1080       * or not - on some cpus (nehalem) pblendvb has twice the throughput
1081       * of blendvps though on others there just might be domain
1082       * transition penalties when using it (this depends on what llvm
1083       * will choose for the bit ops above so there appears to be no "right way",
1084       * but given the boatload of selects let's just use the int type).
1085       *
1086       * Unfortunately we also need the sign bit of the summed coords.
1087       */
1088      *face_s = lp_build_select(cint_bld, arx_ge_ary, snewx, snewy);
1089      *face_t = lp_build_select(cint_bld, arx_ge_ary, tnewx, tnewy);
1090      ma = lp_build_select(coord_bld, arx_ge_ary, s, t);
1091      *face = lp_build_select(cint_bld, arx_ge_ary, facex, facey);
1092      sign = lp_build_select(cint_bld, arx_ge_ary, signrxs, signrys);
1093
1094      *face_s = lp_build_select(cint_bld, arz_ge_arx_ary, *face_s, snewz);
1095      *face_t = lp_build_select(cint_bld, arz_ge_arx_ary, *face_t, tnewz);
1096      ma = lp_build_select(coord_bld, arz_ge_arx_ary, ma, r);
1097      *face = lp_build_select(cint_bld, arz_ge_arx_ary, *face, facez);
1098      sign = lp_build_select(cint_bld, arz_ge_arx_ary, sign, signrzs);
1099
1100      *face_s = LLVMBuildBitCast(builder, *face_s,
1101                               lp_build_vec_type(gallivm, coord_bld->type), "");
1102      *face_t = LLVMBuildBitCast(builder, *face_t,
1103                               lp_build_vec_type(gallivm, coord_bld->type), "");
1104
1105      /* add +1 for neg face */
1106      /* XXX with AVX probably want to use another select here -
1107       * as long as we ensure vblendvps gets used we can actually
1108       * skip the comparison and just use sign as a "mask" directly.
1109       */
1110      sign = LLVMBuildLShr(builder, sign, signshift, "");
1111      *face = LLVMBuildOr(builder, *face, sign, "face");
1112
1113      ima = lp_build_cube_imapos(coord_bld, ma);
1114
1115      *face_s = lp_build_mul(coord_bld, *face_s, ima);
1116      *face_s = lp_build_add(coord_bld, *face_s, posHalf);
1117      *face_t = lp_build_mul(coord_bld, *face_t, ima);
1118      *face_t = lp_build_add(coord_bld, *face_t, posHalf);
1119   }
1120
1121   else {
1122      struct lp_build_if_state if_ctx;
1123      LLVMValueRef face_s_var;
1124      LLVMValueRef face_t_var;
1125      LLVMValueRef face_var;
1126      LLVMValueRef arx_ge_ary_arz, ary_ge_arx_arz;
1127      LLVMValueRef shuffles[4];
1128      LLVMValueRef arxy_ge_aryx, arxy_ge_arzz, arxy_ge_arxy_arzz;
1129      LLVMValueRef arxyxy, aryxzz, arxyxy_ge_aryxzz;
1130      struct lp_build_context *float_bld = &bld->float_bld;
1131
1132      assert(bld->coord_bld.type.length == 4);
1133
1134      shuffles[0] = lp_build_const_int32(gallivm, 0);
1135      shuffles[1] = lp_build_const_int32(gallivm, 1);
1136      shuffles[2] = lp_build_const_int32(gallivm, 0);
1137      shuffles[3] = lp_build_const_int32(gallivm, 1);
1138      arxyxy = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
1139      shuffles[0] = lp_build_const_int32(gallivm, 1);
1140      shuffles[1] = lp_build_const_int32(gallivm, 0);
1141      shuffles[2] = lp_build_const_int32(gallivm, 2);
1142      shuffles[3] = lp_build_const_int32(gallivm, 2);
1143      aryxzz = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
1144      arxyxy_ge_aryxzz = lp_build_cmp(&bld->coord_bld, PIPE_FUNC_GEQUAL, arxyxy, aryxzz);
1145
1146      shuffles[0] = lp_build_const_int32(gallivm, 0);
1147      shuffles[1] = lp_build_const_int32(gallivm, 1);
1148      arxy_ge_aryx = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
1149                                            LLVMConstVector(shuffles, 2), "");
1150      shuffles[0] = lp_build_const_int32(gallivm, 2);
1151      shuffles[1] = lp_build_const_int32(gallivm, 3);
1152      arxy_ge_arzz = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
1153                                            LLVMConstVector(shuffles, 2), "");
1154      arxy_ge_arxy_arzz = LLVMBuildAnd(builder, arxy_ge_aryx, arxy_ge_arzz, "");
1155
1156      arx_ge_ary_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
1157                                               lp_build_const_int32(gallivm, 0), "");
1158      arx_ge_ary_arz = LLVMBuildICmp(builder, LLVMIntNE, arx_ge_ary_arz,
1159                                               lp_build_const_int32(gallivm, 0), "");
1160      ary_ge_arx_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
1161                                               lp_build_const_int32(gallivm, 1), "");
1162      ary_ge_arx_arz = LLVMBuildICmp(builder, LLVMIntNE, ary_ge_arx_arz,
1163                                               lp_build_const_int32(gallivm, 0), "");
1164      face_s_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_s_var");
1165      face_t_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_t_var");
1166      face_var = lp_build_alloca(gallivm, bld->int_bld.vec_type, "face_var");
1167
1168      lp_build_if(&if_ctx, gallivm, arx_ge_ary_arz);
1169      {
1170         /* +/- X face */
1171         LLVMValueRef sign, ima;
1172         rx = LLVMBuildExtractElement(builder, rxyz,
1173                                      lp_build_const_int32(gallivm, 0), "");
1174         /* +/- X face */
1175         sign = lp_build_sgn(float_bld, rx);
1176         ima = lp_build_cube_imaneg(coord_bld, s);
1177         *face_s = lp_build_cube_coord(coord_bld, sign, +1, r, ima);
1178         *face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
1179         *face = lp_build_cube_face(bld, rx,
1180                                    PIPE_TEX_FACE_POS_X,
1181                                    PIPE_TEX_FACE_NEG_X);
1182         LLVMBuildStore(builder, *face_s, face_s_var);
1183         LLVMBuildStore(builder, *face_t, face_t_var);
1184         LLVMBuildStore(builder, *face, face_var);
1185      }
1186      lp_build_else(&if_ctx);
1187      {
1188         struct lp_build_if_state if_ctx2;
1189
1190         lp_build_if(&if_ctx2, gallivm, ary_ge_arx_arz);
1191         {
1192            LLVMValueRef sign, ima;
1193            /* +/- Y face */
1194            ry = LLVMBuildExtractElement(builder, rxyz,
1195                                         lp_build_const_int32(gallivm, 1), "");
1196            sign = lp_build_sgn(float_bld, ry);
1197            ima = lp_build_cube_imaneg(coord_bld, t);
1198            *face_s = lp_build_cube_coord(coord_bld, NULL, -1, s, ima);
1199            *face_t = lp_build_cube_coord(coord_bld, sign, -1, r, ima);
1200            *face = lp_build_cube_face(bld, ry,
1201                                       PIPE_TEX_FACE_POS_Y,
1202                                       PIPE_TEX_FACE_NEG_Y);
1203            LLVMBuildStore(builder, *face_s, face_s_var);
1204            LLVMBuildStore(builder, *face_t, face_t_var);
1205            LLVMBuildStore(builder, *face, face_var);
1206         }
1207         lp_build_else(&if_ctx2);
1208         {
1209            /* +/- Z face */
1210            LLVMValueRef sign, ima;
1211            rz = LLVMBuildExtractElement(builder, rxyz,
1212                                         lp_build_const_int32(gallivm, 2), "");
1213            sign = lp_build_sgn(float_bld, rz);
1214            ima = lp_build_cube_imaneg(coord_bld, r);
1215            *face_s = lp_build_cube_coord(coord_bld, sign, -1, s, ima);
1216            *face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
1217            *face = lp_build_cube_face(bld, rz,
1218                                       PIPE_TEX_FACE_POS_Z,
1219                                       PIPE_TEX_FACE_NEG_Z);
1220            LLVMBuildStore(builder, *face_s, face_s_var);
1221            LLVMBuildStore(builder, *face_t, face_t_var);
1222            LLVMBuildStore(builder, *face, face_var);
1223         }
1224         lp_build_endif(&if_ctx2);
1225      }
1226
1227      lp_build_endif(&if_ctx);
1228
1229      *face_s = LLVMBuildLoad(builder, face_s_var, "face_s");
1230      *face_t = LLVMBuildLoad(builder, face_t_var, "face_t");
1231      *face   = LLVMBuildLoad(builder, face_var, "face");
1232      *face   = lp_build_broadcast_scalar(&bld->int_coord_bld, *face);
1233   }
1234}
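
/*
 * Scalar reference of the face selection performed by both paths above
 * (sketch only, kept under #if 0; ties between equal-magnitude axes may
 * resolve slightly differently in the vectorized path).  Would need
 * <math.h> for fabsf().
 */
#if 0
static unsigned
example_cube_face(float rx, float ry, float rz)
{
   float arx = fabsf(rx), ary = fabsf(ry), arz = fabsf(rz);
   if (arx >= ary && arx >= arz)
      return rx >= 0.0f ? PIPE_TEX_FACE_POS_X : PIPE_TEX_FACE_NEG_X;
   if (ary >= arz)
      return ry >= 0.0f ? PIPE_TEX_FACE_POS_Y : PIPE_TEX_FACE_NEG_Y;
   return rz >= 0.0f ? PIPE_TEX_FACE_POS_Z : PIPE_TEX_FACE_NEG_Z;
}
#endif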
1235
1236
1237/**
1238 * Compute the partial offset of a pixel block along an arbitrary axis.
1239 *
1240 * @param coord   coordinate in pixels
1241 * @param stride  number of bytes between rows of successive pixel blocks
1242 * @param block_length  number of pixels in a pixel block along the coordinate
1243 *                      axis
1244 * @param out_offset    resulting relative offset of the pixel block in bytes
1245 * @param out_subcoord  resulting sub-block pixel coordinate
1246 */
1247void
1248lp_build_sample_partial_offset(struct lp_build_context *bld,
1249                               unsigned block_length,
1250                               LLVMValueRef coord,
1251                               LLVMValueRef stride,
1252                               LLVMValueRef *out_offset,
1253                               LLVMValueRef *out_subcoord)
1254{
1255   LLVMBuilderRef builder = bld->gallivm->builder;
1256   LLVMValueRef offset;
1257   LLVMValueRef subcoord;
1258
1259   if (block_length == 1) {
1260      subcoord = bld->zero;
1261   }
1262   else {
1263      /*
1264       * Pixel blocks have power of two dimensions. LLVM should convert the
1265       * rem/div to bit arithmetic.
1266       * TODO: Verify this.
1267       * It does indeed, BUT it transforms the computation to scalar (and back)
1268       * when doing so (using roughly extract, shift/and, mov, unpack) (llvm 2.7).
1269       * The generated code looks seriously unfunny and is quite expensive.
1270       */
1271#if 0
1272      LLVMValueRef block_width = lp_build_const_int_vec(bld->type, block_length);
1273      subcoord = LLVMBuildURem(builder, coord, block_width, "");
1274      coord    = LLVMBuildUDiv(builder, coord, block_width, "");
1275#else
1276      unsigned logbase2 = util_logbase2(block_length);
1277      LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2);
1278      LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1);
1279      subcoord = LLVMBuildAnd(builder, coord, block_mask, "");
1280      coord = LLVMBuildLShr(builder, coord, block_shift, "");
1281#endif
1282   }
1283
1284   offset = lp_build_mul(bld, coord, stride);
1285
1286   assert(out_offset);
1287   assert(out_subcoord);
1288
1289   *out_offset = offset;
1290   *out_subcoord = subcoord;
1291}
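
/*
 * Scalar reference (sketch only, kept under #if 0): the coordinate is split
 * into a block index, which advances the offset by 'stride' bytes per block,
 * and a sub-block coordinate.  E.g. with block_length 4, coord 13 yields
 * block 3 and subcoord 1.
 */
#if 0
static void
example_partial_offset(unsigned block_length, unsigned coord, unsigned stride,
                       unsigned *out_offset, unsigned *out_subcoord)
{
   *out_subcoord = coord % block_length;            /* coord & (block_length - 1) */
   *out_offset = (coord / block_length) * stride;   /* (coord >> log2) * stride */
}
#endif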
1292
1293
1294/**
1295 * Compute the offset of a pixel block.
1296 *
1297 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels.
1298 *
1299 * Returns the relative offset and i,j sub-block coordinates
1300 */
1301void
1302lp_build_sample_offset(struct lp_build_context *bld,
1303                       const struct util_format_description *format_desc,
1304                       LLVMValueRef x,
1305                       LLVMValueRef y,
1306                       LLVMValueRef z,
1307                       LLVMValueRef y_stride,
1308                       LLVMValueRef z_stride,
1309                       LLVMValueRef *out_offset,
1310                       LLVMValueRef *out_i,
1311                       LLVMValueRef *out_j)
1312{
1313   LLVMValueRef x_stride;
1314   LLVMValueRef offset;
1315
1316   x_stride = lp_build_const_vec(bld->gallivm, bld->type,
1317                                 format_desc->block.bits/8);
1318
1319   lp_build_sample_partial_offset(bld,
1320                                  format_desc->block.width,
1321                                  x, x_stride,
1322                                  &offset, out_i);
1323
1324   if (y && y_stride) {
1325      LLVMValueRef y_offset;
1326      lp_build_sample_partial_offset(bld,
1327                                     format_desc->block.height,
1328                                     y, y_stride,
1329                                     &y_offset, out_j);
1330      offset = lp_build_add(bld, offset, y_offset);
1331   }
1332   else {
1333      *out_j = bld->zero;
1334   }
1335
1336   if (z && z_stride) {
1337      LLVMValueRef z_offset;
1338      LLVMValueRef k;
1339      lp_build_sample_partial_offset(bld,
1340                                     1, /* pixel blocks are always 2D */
1341                                     z, z_stride,
1342                                     &z_offset, &k);
1343      offset = lp_build_add(bld, offset, z_offset);
1344   }
1345
1346   *out_offset = offset;
1347}
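
/*
 * Worked example (illustrative only): for a DXT1-like format the block is
 * 4x4 pixels and 64 bits, so format_desc->block.width/height == 4 and
 * block.bits/8 == 8.  A texel at (x, y) == (13, 6) then lands at
 *    offset = (13/4)*8 + (6/4)*y_stride == 24 + y_stride
 * with sub-block coordinates (i, j) == (13%4, 6%4) == (1, 2).
 */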
1348