lp_bld_conv.c revision fc9a49b638c26801951c33a570178bbb2b67ec60
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * @file
 * Helper functions for type conversions.
 *
 * We want to use the fastest type for a given computation whenever feasible.
 * The other side of this is that we need to be able to convert between
 * several types accurately and efficiently.
 *
 * Conversion between types of different bit width is quite complex, since a
 * single operation rarely suffices; several clamping, scaling, and resizing
 * steps are usually needed.
 *
 * There are a few invariants to keep in mind in type conversions:
 *
 * - register width must remain constant:
 *
 *     src_type.width * src_type.length == dst_type.width * dst_type.length
 *
 * - total number of elements must remain constant:
 *
 *     src_type.length * num_srcs == dst_type.length * num_dsts
 *
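 * For example (an illustrative case, typical of 4x4 RGBA processing):
 * converting four vectors of 4 x f32 into one vector of 16 x unorm8 keeps
 * the register width constant (4*32 == 16*8) and the total element count
 * constant (4*4 == 1*16), with num_srcs == 4 and num_dsts == 1.
 *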
 * It is not always possible to do the conversion both accurately and
 * efficiently, usually due to lack of adequate machine instructions. In these
 * cases it is important not to take shortcuts here and sacrifice accuracy, as
 * these functions can be used anywhere. In the future we might have a
 * precision parameter which can gauge the accuracy vs. efficiency compromise,
 * but for now, if the data conversion between two stages happens to be the
 * bottleneck, then most likely one should just avoid converting at all and
 * run both stages with the same type.
 *
 * Make sure to run the lp_test_conv unit test after any change to this file.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */


#include "util/u_debug.h"
#include "util/u_math.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_conv.h"


/**
 * Special case for converting clamped IEEE-754 floats to unsigned norms.
 *
 * The mathematical voodoo below may seem excessive but it is actually
 * paramount we do it this way for several reasons. First, there is no
 * single-precision FP-to-unsigned-integer conversion instruction in Intel
 * SSE. Second, even if there were, since the FP's mantissa takes only a
 * fraction of the register bits, the typical scale-and-cast approach would
 * require double precision for accurate results, and therefore half the
 * throughput.
 *
 * Although the result values can be scaled to an arbitrary bit width specified
 * by dst_width, the actual result type will have the same width as the
 * source type.
 *
 * Ex: src = { float, float, float, float }
 * return { i32, i32, i32, i32 } where each value is in [0, 2^dst_width-1].
 */
LLVMValueRef
lp_build_clamped_float_to_unsigned_norm(LLVMBuilderRef builder,
                                        struct lp_type src_type,
                                        unsigned dst_width,
                                        LLVMValueRef src)
{
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(src_type);
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(src_type.floating);

   mantissa = lp_mantissa(src_type);

   /* We cannot carry more bits than the mantissa */
   n = MIN2(mantissa, dst_width);

   /* These magic coefficients will make the desired result appear in the
    * least significant bits of the mantissa.
    */
   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)mask/ubound;
   bias = (double)((unsigned long long)1 << (mantissa - n));
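
   /* Illustrative worked example (not part of the original comments): for
    * 32-bit floats, mantissa == 23, so with dst_width == 8 we get n == 8,
    * ubound == 256, mask == 255, scale == 255/256 and bias == 2^15 == 32768.
    * An input of 1.0 becomes 1.0*255/256 + 32768 == 32768.99609375, whose
    * IEEE-754 bit pattern carries 255 (== 1.0 * mask) in the lowest 8
    * mantissa bits, where the bitcast below can read it as an integer.
    */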

   res = LLVMBuildFMul(builder, src, lp_build_const_vec(src_type, scale), "");
   res = LLVMBuildFAdd(builder, res, lp_build_const_vec(src_type, bias), "");
   res = LLVMBuildBitCast(builder, res, int_vec_type, "");

   if(dst_width > n) {
      int shift = dst_width - n;
      res = LLVMBuildShl(builder, res, lp_build_const_int_vec(src_type, shift), "");

      /* TODO: Fill in the empty lower bits for additional precision? */
      /* YES: this fixes progs/trivial/tri-z-eq.c.
       * Otherwise vertex Z=1.0 values get converted to something like
       * 0xfffffb00 and the test for equality with 0xffffffff fails.
       */
#if 0
      {
         LLVMValueRef msb;
         msb = LLVMBuildLShr(builder, res, lp_build_const_int_vec(src_type, dst_width - 1), "");
         msb = LLVMBuildShl(builder, msb, lp_build_const_int_vec(src_type, shift), "");
         msb = LLVMBuildSub(builder, msb, lp_build_const_int_vec(src_type, 1), "");
         res = LLVMBuildOr(builder, res, msb, "");
      }
#elif 0
      while(shift > 0) {
         res = LLVMBuildOr(builder, res, LLVMBuildLShr(builder, res, lp_build_const_int_vec(src_type, n), ""), "");
         shift -= n;
         n *= 2;
      }
#endif
   }
   else
      res = LLVMBuildAnd(builder, res, lp_build_const_int_vec(src_type, mask), "");

   return res;
}


/**
 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
 * Ex: src = { i32, i32, i32, i32 } with values in range [0, 2^src_width-1]
 * return { float, float, float, float } with values in range [0, 1].
 */
LLVMValueRef
lp_build_unsigned_norm_to_float(LLVMBuilderRef builder,
                                unsigned src_width,
                                struct lp_type dst_type,
                                LLVMValueRef src)
{
   LLVMTypeRef vec_type = lp_build_vec_type(dst_type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(dst_type);
   LLVMValueRef bias_;
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(dst_type.floating);

   mantissa = lp_mantissa(dst_type);

   n = MIN2(mantissa, src_width);

   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)ubound/mask;
   bias = (double)((unsigned long long)1 << (mantissa - n));
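
   /* Illustrative worked example (not part of the original comments): for
    * 32-bit floats and src_width == 8, n == 8, scale == 256/255 and
    * bias == 2^15 == 32768. OR'ing a source value of 255 into the mantissa
    * of bias and bitcasting gives 32768.99609375; subtracting bias leaves
    * 255/256, and multiplying by 256/255 recovers exactly 1.0.
    */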

   res = src;

   if(src_width > mantissa) {
      int shift = src_width - mantissa;
      res = LLVMBuildLShr(builder, res, lp_build_const_int_vec(dst_type, shift), "");
   }

   bias_ = lp_build_const_vec(dst_type, bias);

   res = LLVMBuildOr(builder,
                     res,
                     LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");

   res = LLVMBuildBitCast(builder, res, vec_type, "");

   res = LLVMBuildFSub(builder, res, bias_, "");
   res = LLVMBuildFMul(builder, res, lp_build_const_vec(dst_type, scale), "");

   return res;
}


/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to the lp_type union.
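 *
 * Example usage (an illustrative sketch, not from the original file; the
 * initializer style is hypothetical): converting four 4 x f32 vectors to
 * one 16 x unorm8 vector could look like:
 *
 *    struct lp_type f32_type = { .floating = TRUE, .sign = TRUE,
 *                                .width = 32, .length = 4 };
 *    struct lp_type u8n_type = { .norm = TRUE, .width = 8, .length = 16 };
 *    LLVMValueRef dst;
 *
 *    lp_build_conv(builder, f32_type, u8n_type, src, 4, &dst, 1);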
 */
void
lp_build_conv(LLVMBuilderRef builder,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
   struct lp_type tmp_type;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned num_tmps;
   unsigned i;

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   tmp_type = src_type;
   for(i = 0; i < num_srcs; ++i) {
      assert(lp_check_value(src_type, src[i]));
      tmp[i] = src[i];
   }
   num_tmps = num_srcs;

   /*
    * Clamp if necessary
    */
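   /* E.g., when converting float to an 8-bit unorm (range [0, 1]), the
    * temporaries are first clamped into [0, 1], so that out-of-range inputs
    * cannot overflow the integer conversion below. (Illustrative note, not
    * from the original file.)
    */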

   if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
      struct lp_build_context bld;
      double src_min = lp_const_min(src_type);
      double dst_min = lp_const_min(dst_type);
      double src_max = lp_const_max(src_type);
      double dst_max = lp_const_max(dst_type);
      LLVMValueRef thres;

      lp_build_context_init(&bld, builder, tmp_type);

      if(src_min < dst_min) {
         if(dst_min == 0.0)
            thres = bld.zero;
         else
            thres = lp_build_const_vec(src_type, dst_min);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_max(&bld, tmp[i], thres);
      }

      if(src_max > dst_max) {
         if(dst_max == 1.0)
            thres = bld.one;
         else
            thres = lp_build_const_vec(src_type, dst_max);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_min(&bld, tmp[i], thres);
      }
   }

   /*
    * Scale to the narrowest range
    */
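   /* E.g., a float input headed for a signed-norm or fixed-point type is
    * multiplied by lp_const_scale(dst_type) here before the FP-to-integer
    * cast; clamped unsigned norms take the faster
    * lp_build_clamped_float_to_unsigned_norm() path instead. (Illustrative
    * note, not from the original file.)
    */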

   if(dst_type.floating) {
      /* Nothing to do */
   }
   else if(tmp_type.floating) {
      if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_clamped_float_to_unsigned_norm(builder,
                                                             tmp_type,
                                                             dst_type.width,
                                                             tmp[i]);
         }
         tmp_type.floating = FALSE;
      }
      else {
         double dst_scale = lp_const_scale(dst_type);
         LLVMTypeRef tmp_vec_type;

         if (dst_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, dst_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = FALSE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
            tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift > dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, src_shift - dst_shift);
         for(i = 0; i < num_tmps; ++i)
            if(src_type.sign)
               tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
            else
               tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
      }
   }

   /*
    * Truncate or expand bit width
    *
    * No data conversion should happen here, although the sign bits are
    * crucial to avoid bad clamping.
    */
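   /* E.g., in the four 4 x f32 -> one 16 x unorm8 case above, this is where
    * the four 4 x i32 temporaries become a single 16 x i8 vector.
    * (Illustrative note, not from the original file.)
    */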

   {
      struct lp_type new_type;

      new_type = tmp_type;
      new_type.sign   = dst_type.sign;
      new_type.width  = dst_type.width;
      new_type.length = dst_type.length;

      lp_build_resize(builder, tmp_type, new_type, tmp, num_srcs, tmp, num_dsts);

      tmp_type = new_type;
      num_tmps = num_dsts;
   }

   /*
    * Scale to the widest range
    */
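   /* E.g., when expanding a signed-norm or fixed-point type to float, the
    * integer temporaries are converted to FP and multiplied by
    * 1.0/lp_const_scale(src_type) to return to the normalized range;
    * unsigned norms take the lp_build_unsigned_norm_to_float() path instead.
    * (Illustrative note, not from the original file.)
    */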

   if(src_type.floating) {
      /* Nothing to do */
   }
   else if(!src_type.floating && dst_type.floating) {
      if(!src_type.fixed && !src_type.sign && src_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_unsigned_norm_to_float(builder,
                                                     src_type.width,
                                                     dst_type,
                                                     tmp[i]);
         }
         tmp_type.floating = TRUE;
      }
      else {
         double src_scale = lp_const_scale(src_type);
         LLVMTypeRef tmp_vec_type;

         /* Use an equally sized floating point type for intermediate
          * computations
          */
         tmp_type.floating = TRUE;
         tmp_type.sign = TRUE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
            tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
         }

         if (src_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, 1.0/src_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift < dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, dst_shift - src_shift);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
      }
   }

   for(i = 0; i < num_dsts; ++i) {
      dst[i] = tmp[i];
      assert(lp_check_value(dst_type, dst[i]));
   }
}


/**
 * Bit mask conversion.
 *
 * This will convert the integer masks that match the given types.
 *
 * The mask values should be 0 or -1, i.e., all bits either set to zero or
 * one. Any other value will likely cause unpredictable results.
 *
 * This is basically a very trimmed down version of lp_build_conv.
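 *
 * Ex (an illustrative case): four 4 x i32 masks, each element 0 or -1,
 * become one 16 x i8 mask via lp_build_pack; one 16 x i8 mask expands to
 * four 4 x i32 masks via lp_build_unpack.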
 */
void
lp_build_conv_mask(LLVMBuilderRef builder,
                   struct lp_type src_type,
                   struct lp_type dst_type,
                   const LLVMValueRef *src, unsigned num_srcs,
                   LLVMValueRef *dst, unsigned num_dsts)
{
   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /*
    * Drop the float/fixed/norm properties and treat everything as plain
    * signed integers; we assume all values are 0 or -1, so only the bit
    * pattern matters.
    */

   src_type.floating = FALSE;
   src_type.fixed = FALSE;
   src_type.sign = TRUE;
   src_type.norm = FALSE;

   dst_type.floating = FALSE;
   dst_type.fixed = FALSE;
   dst_type.sign = TRUE;
   dst_type.norm = FALSE;

   /*
    * Truncate or expand bit width
    */

   if(src_type.width > dst_type.width) {
      assert(num_dsts == 1);
      dst[0] = lp_build_pack(builder, src_type, dst_type, TRUE, src, num_srcs);
   }
   else if(src_type.width < dst_type.width) {
      assert(num_srcs == 1);
      lp_build_unpack(builder, src_type, dst_type, src[0], dst, num_dsts);
   }
   else {
      assert(num_srcs == num_dsts);
      memcpy(dst, src, num_dsts * sizeof *dst);
   }
}