#include <strings.h>
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_dynarray.h"
#include "util/u_debug.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_util.h"
#include "tgsi/tgsi_ureg.h"

#include "draw/draw_context.h"

#include "nv_object.xml.h"
#include "nouveau_debug.h"
#include "nv30/nv30-40_3d.xml.h"
#include "nv30/nv30_state.h"

/* TODO (at least...):
 *  1. Indexed consts + ARL
 *  2. NV_vp11, NV_vp2, NV_vp3 features
 *       - extra arith opcodes
 *       - branching
 *       - texture sampling
 *       - indexed attribs
 *       - indexed results
 *  3. bugs
 */

#include "nv30/nv30_vertprog.h"
#include "nv30/nv40_vertprog.h"

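/* Branch targets, as TGSI instruction indices, for the innermost active
 * loop: where BRK and CONT should jump to.
 */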
struct nvfx_loop_entry {
   unsigned brk_target;
   unsigned cont_target;
};

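/* Translation context: per-compile register assignments and relocation
 * lists built up while converting the TGSI shader into hardware code.
 */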
struct nvfx_vpc {
   struct pipe_shader_state pipe;
   struct nv30_vertprog *vp;
   struct tgsi_shader_info* info;

   struct nv30_vertprog_exec *vpi;

   unsigned r_temps;
   unsigned r_temps_discard;
   struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];
   struct nvfx_reg *r_address;
   struct nvfx_reg *r_temp;
   struct nvfx_reg *r_const;
   struct nvfx_reg r_0_1;

   struct nvfx_reg *imm;
   unsigned nr_imm;

   int hpos_idx;
   int cvtx_idx;

   unsigned is_nv4x;

   struct util_dynarray label_relocs;
   struct util_dynarray loop_stack;
};

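/* Allocate the lowest free temporary register.  nv3x programs are limited
 * to 16 temps; on nv4x any index representable in the 32-bit bitmask is
 * allowed.
 */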
static struct nvfx_reg
temp(struct nvfx_vpc *vpc)
{
   int idx = ffs(~vpc->r_temps) - 1;

   if (idx < 0 || (!vpc->is_nv4x && idx >= 16)) {
      NOUVEAU_ERR("out of temps!!\n");
      return nvfx_reg(NVFXSR_TEMP, 0);
   }

   vpc->r_temps |= (1 << idx);
   vpc->r_temps_discard |= (1 << idx);
   return nvfx_reg(NVFXSR_TEMP, idx);
}

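/* Free the scratch temporaries allocated while emitting the current
 * instruction (everything flagged in r_temps_discard).
 */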
static inline void
release_temps(struct nvfx_vpc *vpc)
{
   vpc->r_temps &= ~vpc->r_temps_discard;
   vpc->r_temps_discard = 0;
}

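/* Get a constant slot.  For pipe >= 0 this maps a constant-buffer element,
 * reusing an existing slot for the same index; pipe == -1 allocates a new
 * immediate with the given x/y/z/w value.
 */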
static struct nvfx_reg
constant(struct nvfx_vpc *vpc, int pipe, float x, float y, float z, float w)
{
   struct nv30_vertprog *vp = vpc->vp;
   struct nv30_vertprog_data *vpd;
   int idx;

   if (pipe >= 0) {
      for (idx = 0; idx < vp->nr_consts; idx++) {
         if (vp->consts[idx].index == pipe)
            return nvfx_reg(NVFXSR_CONST, idx);
      }
   }

   idx = vp->nr_consts++;
   vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
   vpd = &vp->consts[idx];

   vpd->index = pipe;
   vpd->value[0] = x;
   vpd->value[1] = y;
   vpd->value[2] = z;
   vpd->value[3] = w;
   return nvfx_reg(NVFXSR_CONST, idx);
}

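/* Build an nvfx_insn for the vector (VEC) or scalar (SCA) slot:
 * s = saturate, t = slot, o = opcode, d = destination, m = write mask,
 * s0..s2 = sources.
 */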
#define arith(s,t,o,d,m,s0,s1,s2) \
   nvfx_insn((s), (NVFX_VP_INST_SLOT_##t << 7) | NVFX_VP_INST_##t##_OP_##o, -1, (d), (m), (s0), (s1), (s2))

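/* Encode source operand 'pos' (0..2) into the 4-dword hardware opcode:
 * register type, swizzle, negate/abs and relative addressing.  In-range
 * constant sources are recorded in vp->const_relocs to be patched later.
 */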
static void
emit_src(struct nvfx_vpc *vpc, uint32_t *hw,
         int pos, struct nvfx_src src)
{
   struct nv30_vertprog *vp = vpc->vp;
   uint32_t sr = 0;
   struct nvfx_relocation reloc;

   switch (src.reg.type) {
   case NVFXSR_TEMP:
      sr |= (NVFX_VP(SRC_REG_TYPE_TEMP) << NVFX_VP(SRC_REG_TYPE_SHIFT));
      sr |= (src.reg.index << NVFX_VP(SRC_TEMP_SRC_SHIFT));
      break;
   case NVFXSR_INPUT:
      sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
             NVFX_VP(SRC_REG_TYPE_SHIFT));
      vp->ir |= (1 << src.reg.index);
      hw[1] |= (src.reg.index << NVFX_VP(INST_INPUT_SRC_SHIFT));
      break;
   case NVFXSR_CONST:
      sr |= (NVFX_VP(SRC_REG_TYPE_CONST) <<
             NVFX_VP(SRC_REG_TYPE_SHIFT));
      if (src.reg.index < 256 && src.reg.index >= -256) {
         reloc.location = vp->nr_insns - 1;
         reloc.target = src.reg.index;
         util_dynarray_append(&vp->const_relocs, struct nvfx_relocation, reloc);
      } else {
         hw[1] |= (src.reg.index << NVFX_VP(INST_CONST_SRC_SHIFT)) &
               NVFX_VP(INST_CONST_SRC_MASK);
      }
      break;
   case NVFXSR_NONE:
      sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
             NVFX_VP(SRC_REG_TYPE_SHIFT));
      break;
   default:
      assert(0);
   }

   if (src.negate)
      sr |= NVFX_VP(SRC_NEGATE);

   if (src.abs)
      hw[0] |= (1 << (21 + pos));

   sr |= ((src.swz[0] << NVFX_VP(SRC_SWZ_X_SHIFT)) |
          (src.swz[1] << NVFX_VP(SRC_SWZ_Y_SHIFT)) |
          (src.swz[2] << NVFX_VP(SRC_SWZ_Z_SHIFT)) |
          (src.swz[3] << NVFX_VP(SRC_SWZ_W_SHIFT)));

   if(src.indirect) {
      if(src.reg.type == NVFXSR_CONST)
         hw[3] |= NVFX_VP(INST_INDEX_CONST);
      else if(src.reg.type == NVFXSR_INPUT)
         hw[0] |= NVFX_VP(INST_INDEX_INPUT);
      else
         assert(0);

      if(src.indirect_reg)
         hw[0] |= NVFX_VP(INST_ADDR_REG_SELECT_1);
      hw[0] |= src.indirect_swz << NVFX_VP(INST_ADDR_SWZ_SHIFT);
   }

   switch (pos) {
   case 0:
      hw[1] |= ((sr & NVFX_VP(SRC0_HIGH_MASK)) >>
           NVFX_VP(SRC0_HIGH_SHIFT)) << NVFX_VP(INST_SRC0H_SHIFT);
      hw[2] |= (sr & NVFX_VP(SRC0_LOW_MASK)) <<
           NVFX_VP(INST_SRC0L_SHIFT);
      break;
   case 1:
      hw[2] |= sr << NVFX_VP(INST_SRC1_SHIFT);
      break;
   case 2:
      hw[2] |= ((sr & NVFX_VP(SRC2_HIGH_MASK)) >>
           NVFX_VP(SRC2_HIGH_SHIFT)) << NVFX_VP(INST_SRC2H_SHIFT);
      hw[3] |= (sr & NVFX_VP(SRC2_LOW_MASK)) <<
           NVFX_VP(INST_SRC2L_SHIFT);
      break;
   default:
      assert(0);
   }
}

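/* Encode the instruction's destination.  Temporaries use the slot-specific
 * TEMP field; outputs set the result bits and, on nv4x, also update vp->or
 * with the set of hardware outputs written (clip distances are folded into
 * FOGC/PSZ components there).
 */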
static void
emit_dst(struct nvfx_vpc *vpc, uint32_t *hw,
         int slot, struct nvfx_reg dst)
{
   struct nv30_vertprog *vp = vpc->vp;

   switch (dst.type) {
   case NVFXSR_NONE:
      if(!vpc->is_nv4x)
         hw[0] |= NV30_VP_INST_DEST_TEMP_ID_MASK;
      else {
         hw[3] |= NV40_VP_INST_DEST_MASK;
         if (slot == 0)
            hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
         else
            hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
      }
      break;
   case NVFXSR_TEMP:
      if(!vpc->is_nv4x)
         hw[0] |= (dst.index << NV30_VP_INST_DEST_TEMP_ID_SHIFT);
      else {
         hw[3] |= NV40_VP_INST_DEST_MASK;
         if (slot == 0)
            hw[0] |= (dst.index << NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
         else
            hw[3] |= (dst.index << NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
      }
      break;
   case NVFXSR_OUTPUT:
      /* TODO: this may be wrong because on nv30 COL0 and BFC0 are swapped */
      if(vpc->is_nv4x) {
         switch (dst.index) {
         case NV30_VP_INST_DEST_CLP(0):
            dst.index = NVFX_VP(INST_DEST_FOGC);
            vp->or   |= (1 << 6);
            break;
         case NV30_VP_INST_DEST_CLP(1):
            dst.index = NVFX_VP(INST_DEST_FOGC);
            vp->or   |= (1 << 7);
            break;
         case NV30_VP_INST_DEST_CLP(2):
            dst.index = NVFX_VP(INST_DEST_FOGC);
            vp->or   |= (1 << 8);
            break;
         case NV30_VP_INST_DEST_CLP(3):
            dst.index = NVFX_VP(INST_DEST_PSZ);
            vp->or   |= (1 << 9);
            break;
         case NV30_VP_INST_DEST_CLP(4):
            dst.index = NVFX_VP(INST_DEST_PSZ);
            vp->or   |= (1 << 10);
            break;
         case NV30_VP_INST_DEST_CLP(5):
            dst.index = NVFX_VP(INST_DEST_PSZ);
            vp->or   |= (1 << 11);
            break;
         case NV40_VP_INST_DEST_COL0: vp->or |= (1 << 0); break;
         case NV40_VP_INST_DEST_COL1: vp->or |= (1 << 1); break;
         case NV40_VP_INST_DEST_BFC0: vp->or |= (1 << 2); break;
         case NV40_VP_INST_DEST_BFC1: vp->or |= (1 << 3); break;
         case NV40_VP_INST_DEST_FOGC: vp->or |= (1 << 4); break;
         case NV40_VP_INST_DEST_PSZ : vp->or |= (1 << 5); break;
         }
      }

      if(!vpc->is_nv4x) {
         hw[3] |= (dst.index << NV30_VP_INST_DEST_SHIFT);
         hw[0] |= NV30_VP_INST_VEC_DEST_TEMP_MASK;

         /*XXX: no way this is entirely correct, someone needs to
          *     figure out what exactly it is.
          */
         hw[3] |= 0x800;
      } else {
         hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
         if (slot == 0) {
            hw[0] |= NV40_VP_INST_VEC_RESULT;
            hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
         } else {
            hw[3] |= NV40_VP_INST_SCA_RESULT;
            hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
         }
      }
      break;
   default:
      assert(0);
   }
}

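/* Append one hardware instruction: grow vp->insns, encode condition-code
 * test/update, saturation, the opcode in the selected slot and the write
 * mask, then emit the destination and all three sources.
 */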
static void
nvfx_vp_emit(struct nvfx_vpc *vpc, struct nvfx_insn insn)
{
   struct nv30_vertprog *vp = vpc->vp;
   unsigned slot = insn.op >> 7;
   unsigned op = insn.op & 0x7f;
   uint32_t *hw;

   vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
   vpc->vpi = &vp->insns[vp->nr_insns - 1];
   memset(vpc->vpi, 0, sizeof(*vpc->vpi));

   hw = vpc->vpi->data;

   if (insn.cc_test != NVFX_COND_TR)
      hw[0] |= NVFX_VP(INST_COND_TEST_ENABLE);
   hw[0] |= (insn.cc_test << NVFX_VP(INST_COND_SHIFT));
   hw[0] |= ((insn.cc_swz[0] << NVFX_VP(INST_COND_SWZ_X_SHIFT)) |
             (insn.cc_swz[1] << NVFX_VP(INST_COND_SWZ_Y_SHIFT)) |
             (insn.cc_swz[2] << NVFX_VP(INST_COND_SWZ_Z_SHIFT)) |
             (insn.cc_swz[3] << NVFX_VP(INST_COND_SWZ_W_SHIFT)));
   if(insn.cc_update)
      hw[0] |= NVFX_VP(INST_COND_UPDATE_ENABLE);

   if(insn.sat) {
      assert(vpc->is_nv4x);
      if(vpc->is_nv4x)
         hw[0] |= NV40_VP_INST_SATURATE;
   }

   if(!vpc->is_nv4x) {
      if(slot == 0)
         hw[1] |= (op << NV30_VP_INST_VEC_OPCODE_SHIFT);
      else {
         hw[0] |= ((op >> 4) << NV30_VP_INST_SCA_OPCODEH_SHIFT);
         hw[1] |= ((op & 0xf) << NV30_VP_INST_SCA_OPCODEL_SHIFT);
      }
//      hw[3] |= NVFX_VP(INST_SCA_DEST_TEMP_MASK);
//      hw[3] |= (mask << NVFX_VP(INST_VEC_WRITEMASK_SHIFT));

      if (insn.dst.type == NVFXSR_OUTPUT) {
         if (slot)
            hw[3] |= (insn.mask << NV30_VP_INST_SDEST_WRITEMASK_SHIFT);
         else
            hw[3] |= (insn.mask << NV30_VP_INST_VDEST_WRITEMASK_SHIFT);
      } else {
         if (slot)
            hw[3] |= (insn.mask << NV30_VP_INST_STEMP_WRITEMASK_SHIFT);
         else
            hw[3] |= (insn.mask << NV30_VP_INST_VTEMP_WRITEMASK_SHIFT);
      }
   } else {
      if (slot == 0) {
         hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
         hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
         hw[3] |= (insn.mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
      } else {
         hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
         hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
         hw[3] |= (insn.mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
      }
   }

   emit_dst(vpc, hw, slot, insn.dst);
   emit_src(vpc, hw, 0, insn.src[0]);
   emit_src(vpc, hw, 1, insn.src[1]);
   emit_src(vpc, hw, 2, insn.src[2]);

//   if(insn.src[0].indirect || op == NVFX_VP_INST_VEC_OP_ARL)
//      hw[3] |= NV40_VP_INST_SCA_RESULT;
}

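/* Translate a TGSI source register into an nvfx_src: resolve the register
 * file, swizzle, negate/abs flags and (for constants and inputs)
 * address-register indirection.
 */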
static inline struct nvfx_src
tgsi_src(struct nvfx_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
   struct nvfx_src src;

   switch (fsrc->Register.File) {
   case TGSI_FILE_INPUT:
      src.reg = nvfx_reg(NVFXSR_INPUT, fsrc->Register.Index);
      break;
   case TGSI_FILE_CONSTANT:
      if(fsrc->Register.Indirect) {
         src.reg = vpc->r_const[0];
         src.reg.index = fsrc->Register.Index;
      } else {
         src.reg = vpc->r_const[fsrc->Register.Index];
      }
      break;
   case TGSI_FILE_IMMEDIATE:
      src.reg = vpc->imm[fsrc->Register.Index];
      break;
   case TGSI_FILE_TEMPORARY:
      src.reg = vpc->r_temp[fsrc->Register.Index];
      break;
   default:
      NOUVEAU_ERR("bad src file\n");
      src.reg.index = 0;
      src.reg.type = -1;
      break;
   }

   src.abs = fsrc->Register.Absolute;
   src.negate = fsrc->Register.Negate;
   src.swz[0] = fsrc->Register.SwizzleX;
   src.swz[1] = fsrc->Register.SwizzleY;
   src.swz[2] = fsrc->Register.SwizzleZ;
   src.swz[3] = fsrc->Register.SwizzleW;
   src.indirect = 0;
   src.indirect_reg = 0;
   src.indirect_swz = 0;

   if(fsrc->Register.Indirect) {
      if(fsrc->Indirect.File == TGSI_FILE_ADDRESS &&
         (fsrc->Register.File == TGSI_FILE_CONSTANT ||
          fsrc->Register.File == TGSI_FILE_INPUT)) {
         src.indirect = 1;
         src.indirect_reg = fsrc->Indirect.Index;
         src.indirect_swz = fsrc->Indirect.Swizzle;
      } else {
         src.reg.index = 0;
         src.reg.type = -1;
      }
   }

   return src;
}

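/* Translate a TGSI destination register to the nvfx_reg previously
 * assigned to that output, temporary or address register.
 */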
static inline struct nvfx_reg
tgsi_dst(struct nvfx_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
   struct nvfx_reg dst;

   switch (fdst->Register.File) {
   case TGSI_FILE_NULL:
      dst = nvfx_reg(NVFXSR_NONE, 0);
      break;
   case TGSI_FILE_OUTPUT:
      dst = vpc->r_result[fdst->Register.Index];
      break;
   case TGSI_FILE_TEMPORARY:
      dst = vpc->r_temp[fdst->Register.Index];
      break;
   case TGSI_FILE_ADDRESS:
      dst = vpc->r_address[fdst->Register.Index];
      break;
   default:
      NOUVEAU_ERR("bad dst file %i\n", fdst->Register.File);
      dst.index = 0;
      dst.type = 0;
      break;
   }

   return dst;
}

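/* Convert a TGSI write mask into the hardware NVFX_VP_MASK_* bits. */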
static inline int
tgsi_mask(uint tgsi)
{
   int mask = 0;

   if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_VP_MASK_X;
   if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_VP_MASK_Y;
   if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_VP_MASK_Z;
   if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_VP_MASK_W;
   return mask;
}

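/* Translate one TGSI instruction.  Sources are fetched first, copying to
 * temps where needed since an instruction can only reference one distinct
 * input and one distinct constant/immediate; the opcode is then lowered to
 * one or more VEC/SCA hardware instructions.  Branches are recorded as
 * label relocations and resolved after all instructions are emitted.
 */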
static bool
nvfx_vertprog_parse_instruction(struct nvfx_vpc *vpc,
            unsigned idx, const struct tgsi_full_instruction *finst)
{
   struct nvfx_src src[3], tmp;
   struct nvfx_reg dst;
   struct nvfx_reg final_dst;
   struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
   struct nvfx_insn insn;
   struct nvfx_relocation reloc;
   struct nvfx_loop_entry loop;
   bool sat = false;
   int mask;
   int ai = -1, ci = -1, ii = -1;
   int i;
   unsigned sub_depth = 0;

   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      const struct tgsi_full_src_register *fsrc;

      fsrc = &finst->Src[i];
      if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
         src[i] = tgsi_src(vpc, fsrc);
      }
   }

   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      const struct tgsi_full_src_register *fsrc;

      fsrc = &finst->Src[i];

      switch (fsrc->Register.File) {
      case TGSI_FILE_INPUT:
         if (ai == -1 || ai == fsrc->Register.Index) {
            ai = fsrc->Register.Index;
            src[i] = tgsi_src(vpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(vpc));
            nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
                         tgsi_src(vpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_CONSTANT:
         if ((ci == -1 && ii == -1) ||
             ci == fsrc->Register.Index) {
            ci = fsrc->Register.Index;
            src[i] = tgsi_src(vpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(vpc));
            nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
                         tgsi_src(vpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_IMMEDIATE:
         if ((ci == -1 && ii == -1) ||
             ii == fsrc->Register.Index) {
            ii = fsrc->Register.Index;
            src[i] = tgsi_src(vpc, fsrc);
         } else {
            src[i] = nvfx_src(temp(vpc));
            nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL,
                         tgsi_src(vpc, fsrc), none, none));
         }
         break;
      case TGSI_FILE_TEMPORARY:
         /* handled above */
         break;
      default:
         NOUVEAU_ERR("bad src file\n");
         return false;
      }
   }

   for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
      if(src[i].reg.type < 0)
         return false;
   }

   if(finst->Dst[0].Register.File == TGSI_FILE_ADDRESS &&
      finst->Instruction.Opcode != TGSI_OPCODE_ARL)
      return false;

   final_dst = dst = tgsi_dst(vpc, &finst->Dst[0]);
   mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
   if(finst->Instruction.Saturate) {
      assert(finst->Instruction.Opcode != TGSI_OPCODE_ARL);
      if (vpc->is_nv4x)
         sat = true;
      else
      if(dst.type != NVFXSR_TEMP)
         dst = temp(vpc);
   }

   switch (finst->Instruction.Opcode) {
   case TGSI_OPCODE_ADD:
      nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, src[0], none, src[1]));
      break;
   case TGSI_OPCODE_ARL:
      nvfx_vp_emit(vpc, arith(0, VEC, ARL, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_CEIL:
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, VEC, FLR, tmp.reg, mask, neg(src[0]), none, none));
      nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, neg(tmp), none, none));
      break;
   case TGSI_OPCODE_CMP:
      insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_vp_emit(vpc, insn);

      insn = arith(sat, VEC, MOV, dst, mask, src[2], none, none);
      insn.cc_test = NVFX_COND_GE;
      nvfx_vp_emit(vpc, insn);

      insn = arith(sat, VEC, MOV, dst, mask, src[1], none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_COS:
      nvfx_vp_emit(vpc, arith(sat, SCA, COS, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_DP2:
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X | NVFX_VP_MASK_Y, src[0], src[1], none));
      nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, swz(tmp, X, X, X, X), none, swz(tmp, Y, Y, Y, Y)));
      break;
   case TGSI_OPCODE_DP3:
      nvfx_vp_emit(vpc, arith(sat, VEC, DP3, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_DP4:
      nvfx_vp_emit(vpc, arith(sat, VEC, DP4, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_DPH:
      nvfx_vp_emit(vpc, arith(sat, VEC, DPH, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_DST:
      nvfx_vp_emit(vpc, arith(sat, VEC, DST, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_EX2:
      nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_EXP:
      nvfx_vp_emit(vpc, arith(sat, SCA, EXP, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_FLR:
      nvfx_vp_emit(vpc, arith(sat, VEC, FLR, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_FRC:
      nvfx_vp_emit(vpc, arith(sat, VEC, FRC, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_LG2:
      nvfx_vp_emit(vpc, arith(sat, SCA, LG2, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_LIT:
      nvfx_vp_emit(vpc, arith(sat, SCA, LIT, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_LOG:
      nvfx_vp_emit(vpc, arith(sat, SCA, LOG, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_LRP:
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, VEC, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
      nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], tmp));
      break;
   case TGSI_OPCODE_MAD:
      nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], src[2]));
      break;
   case TGSI_OPCODE_MAX:
      nvfx_vp_emit(vpc, arith(sat, VEC, MAX, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_MIN:
      nvfx_vp_emit(vpc, arith(sat, VEC, MIN, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_MOV:
      nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_MUL:
      nvfx_vp_emit(vpc, arith(sat, VEC, MUL, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_NOP:
      break;
   case TGSI_OPCODE_POW:
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, SCA, LG2, tmp.reg, NVFX_VP_MASK_X, none, none, swz(src[0], X, X, X, X)));
      nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
      nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, swz(tmp, X, X, X, X)));
      break;
   case TGSI_OPCODE_RCP:
      nvfx_vp_emit(vpc, arith(sat, SCA, RCP, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_RSQ:
      nvfx_vp_emit(vpc, arith(sat, SCA, RSQ, dst, mask, none, none, abs(src[0])));
      break;
   case TGSI_OPCODE_SEQ:
      nvfx_vp_emit(vpc, arith(sat, VEC, SEQ, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SGE:
      nvfx_vp_emit(vpc, arith(sat, VEC, SGE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SGT:
      nvfx_vp_emit(vpc, arith(sat, VEC, SGT, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SIN:
      nvfx_vp_emit(vpc, arith(sat, SCA, SIN, dst, mask, none, none, src[0]));
      break;
   case TGSI_OPCODE_SLE:
      nvfx_vp_emit(vpc, arith(sat, VEC, SLE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SLT:
      nvfx_vp_emit(vpc, arith(sat, VEC, SLT, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SNE:
      nvfx_vp_emit(vpc, arith(sat, VEC, SNE, dst, mask, src[0], src[1], none));
      break;
   case TGSI_OPCODE_SSG:
      nvfx_vp_emit(vpc, arith(sat, VEC, SSG, dst, mask, src[0], none, none));
      break;
   case TGSI_OPCODE_TRUNC:
      tmp = nvfx_src(temp(vpc));
      insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
      insn.cc_update = 1;
      nvfx_vp_emit(vpc, insn);

      nvfx_vp_emit(vpc, arith(0, VEC, FLR, tmp.reg, mask, abs(src[0]), none, none));
      nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, tmp, none, none));

      insn = arith(sat, VEC, MOV, dst, mask, neg(tmp), none, none);
      insn.cc_test = NVFX_COND_LT;
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_XPD:
      tmp = nvfx_src(temp(vpc));
      nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
      nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, (mask & ~NVFX_VP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
      break;
   case TGSI_OPCODE_IF:
      insn = arith(0, VEC, MOV, none.reg, NVFX_VP_MASK_X, src[0], none, none);
      insn.cc_update = 1;
      nvfx_vp_emit(vpc, insn);

      reloc.location = vpc->vp->nr_insns;
      reloc.target = finst->Label.Label + 1;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
      insn.cc_test = NVFX_COND_EQ;
      insn.cc_swz[0] = insn.cc_swz[1] = insn.cc_swz[2] = insn.cc_swz[3] = 0;
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_ELSE:
   case TGSI_OPCODE_CAL:
      reloc.location = vpc->vp->nr_insns;
      reloc.target = finst->Label.Label;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      if(finst->Instruction.Opcode == TGSI_OPCODE_CAL)
         insn = arith(0, SCA, CAL, none.reg, 0, none, none, none);
      else
         insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
      nvfx_vp_emit(vpc, insn);
      break;
   case TGSI_OPCODE_RET:
      if(sub_depth || !vpc->vp->enabled_ucps) {
         tmp = none;
         tmp.swz[0] = tmp.swz[1] = tmp.swz[2] = tmp.swz[3] = 0;
         nvfx_vp_emit(vpc, arith(0, SCA, RET, none.reg, 0, none, none, tmp));
      } else {
         reloc.location = vpc->vp->nr_insns;
         reloc.target = vpc->info->num_instructions;
         util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
         nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      }
      break;
   case TGSI_OPCODE_BGNSUB:
      ++sub_depth;
      break;
   case TGSI_OPCODE_ENDSUB:
      --sub_depth;
      break;
   case TGSI_OPCODE_ENDIF:
      /* nothing to do here */
      break;
   case TGSI_OPCODE_BGNLOOP:
      loop.cont_target = idx;
      loop.brk_target = finst->Label.Label + 1;
      util_dynarray_append(&vpc->loop_stack, struct nvfx_loop_entry, loop);
      break;
   case TGSI_OPCODE_ENDLOOP:
      loop = util_dynarray_pop(&vpc->loop_stack, struct nvfx_loop_entry);

      reloc.location = vpc->vp->nr_insns;
      reloc.target = loop.cont_target;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      break;
   case TGSI_OPCODE_CONT:
      loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

      reloc.location = vpc->vp->nr_insns;
      reloc.target = loop.cont_target;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      break;
   case TGSI_OPCODE_BRK:
      loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

      reloc.location = vpc->vp->nr_insns;
      reloc.target = loop.brk_target;
      util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

      nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
      break;
   case TGSI_OPCODE_END:
      assert(!sub_depth);
      if(vpc->vp->enabled_ucps) {
         if(idx != (vpc->info->num_instructions - 1)) {
            reloc.location = vpc->vp->nr_insns;
            reloc.target = vpc->info->num_instructions;
            util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
            nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
         }
      } else {
         if(vpc->vp->nr_insns)
            vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
         nvfx_vp_emit(vpc, arith(0, VEC, NOP, none.reg, 0, none, none, none));
         vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
      }
      break;
   default:
      NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
      return false;
   }

   if(finst->Instruction.Saturate && !vpc->is_nv4x) {
      if (!vpc->r_0_1.type)
         vpc->r_0_1 = constant(vpc, -1, 0, 1, 0, 0);
      nvfx_vp_emit(vpc, arith(0, VEC, MAX, dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), X, X, X, X), none));
      nvfx_vp_emit(vpc, arith(0, VEC, MIN, final_dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), Y, Y, Y, Y), none));
   }

   release_temps(vpc);
   return true;
}

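/* Map a TGSI output declaration to a hardware result register based on its
 * semantic.  Unmatched generic/texcoord outputs and edge flags are dropped;
 * CLIPVERTEX is redirected to a temporary so clip-plane code can be
 * appended at the end of the program.
 */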
static bool
nvfx_vertprog_parse_decl_output(struct nvfx_vpc *vpc,
                                const struct tgsi_full_declaration *fdec)
{
   unsigned num_texcoords = vpc->is_nv4x ? 10 : 8;
   unsigned idx = fdec->Range.First;
   unsigned semantic_index = fdec->Semantic.Index;
   int hw = 0, i;

   switch (fdec->Semantic.Name) {
   case TGSI_SEMANTIC_POSITION:
      hw = NVFX_VP(INST_DEST_POS);
      vpc->hpos_idx = idx;
      break;
   case TGSI_SEMANTIC_CLIPVERTEX:
      vpc->r_result[idx] = temp(vpc);
      vpc->r_temps_discard = 0;
      vpc->cvtx_idx = idx;
      return true;
   case TGSI_SEMANTIC_COLOR:
      if (fdec->Semantic.Index == 0) {
         hw = NVFX_VP(INST_DEST_COL0);
      } else
      if (fdec->Semantic.Index == 1) {
         hw = NVFX_VP(INST_DEST_COL1);
      } else {
         NOUVEAU_ERR("bad colour semantic index\n");
         return false;
      }
      break;
   case TGSI_SEMANTIC_BCOLOR:
      if (fdec->Semantic.Index == 0) {
         hw = NVFX_VP(INST_DEST_BFC0);
      } else
      if (fdec->Semantic.Index == 1) {
         hw = NVFX_VP(INST_DEST_BFC1);
      } else {
         NOUVEAU_ERR("bad bcolour semantic index\n");
         return false;
      }
      break;
   case TGSI_SEMANTIC_FOG:
      hw = NVFX_VP(INST_DEST_FOGC);
      break;
   case TGSI_SEMANTIC_PSIZE:
      hw = NVFX_VP(INST_DEST_PSZ);
      break;
   case TGSI_SEMANTIC_GENERIC:
      /* this is really an identifier for VP/FP linkage */
      semantic_index += 8;
      /* fall through */
   case TGSI_SEMANTIC_TEXCOORD:
      for (i = 0; i < num_texcoords; i++) {
         if (vpc->vp->texcoord[i] == semantic_index) {
            hw = NVFX_VP(INST_DEST_TC(i));
            break;
         }
      }

      if (i == num_texcoords) {
         vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0);
         return true;
      }
      break;
   case TGSI_SEMANTIC_EDGEFLAG:
      vpc->r_result[idx] = nvfx_reg(NVFXSR_NONE, 0);
      return true;
   default:
      NOUVEAU_ERR("bad output semantic\n");
      return false;
   }

   vpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
   return true;
}

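/* First pass over the token stream: count immediates and pre-allocate
 * registers for declared temporaries, address registers, constants and
 * outputs before any code is emitted.
 */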
static bool
nvfx_vertprog_prepare(struct nvfx_vpc *vpc)
{
   struct tgsi_parse_context p;
   int high_const = -1, high_temp = -1, high_addr = -1, nr_imm = 0, i;

   tgsi_parse_init(&p, vpc->pipe.tokens);
   while (!tgsi_parse_end_of_tokens(&p)) {
      const union tgsi_full_token *tok = &p.FullToken;

      tgsi_parse_token(&p);
      switch(tok->Token.Type) {
      case TGSI_TOKEN_TYPE_IMMEDIATE:
         nr_imm++;
         break;
      case TGSI_TOKEN_TYPE_DECLARATION:
      {
         const struct tgsi_full_declaration *fdec;

         fdec = &p.FullToken.FullDeclaration;
         switch (fdec->Declaration.File) {
         case TGSI_FILE_TEMPORARY:
            if (fdec->Range.Last > high_temp) {
               high_temp = fdec->Range.Last;
            }
            break;
         case TGSI_FILE_ADDRESS:
            if (fdec->Range.Last > high_addr) {
               high_addr = fdec->Range.Last;
            }
            break;
         case TGSI_FILE_CONSTANT:
            if (fdec->Range.Last > high_const) {
               high_const = fdec->Range.Last;
            }
            break;
         case TGSI_FILE_OUTPUT:
            if (!nvfx_vertprog_parse_decl_output(vpc, fdec))
               return false;
            break;
         default:
            break;
         }
      }
         break;
      default:
         break;
      }
   }
   tgsi_parse_free(&p);

   if (nr_imm) {
      vpc->imm = CALLOC(nr_imm, sizeof(struct nvfx_reg));
      assert(vpc->imm);
   }

   if (++high_temp) {
      vpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
      for (i = 0; i < high_temp; i++)
         vpc->r_temp[i] = temp(vpc);
   }

   if (++high_addr) {
      vpc->r_address = CALLOC(high_addr, sizeof(struct nvfx_reg));
      for (i = 0; i < high_addr; i++)
         vpc->r_address[i] = nvfx_reg(NVFXSR_TEMP, i);
   }

   if(++high_const) {
      vpc->r_const = CALLOC(high_const, sizeof(struct nvfx_reg));
      for (i = 0; i < high_const; i++)
         vpc->r_const[i] = constant(vpc, i, 0, 0, 0, 0);
   }

   vpc->r_temps_discard = 0;
   return true;
}

DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", false)

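/* Translate the TGSI tokens of 'vp' into nv30/nv40 vertex program code:
 * prepare register assignments, translate each instruction, resolve branch
 * targets, then append the HPOS writeback and the user-clip-plane DP4s and
 * mark the final instruction.
 */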
bool
_nvfx_vertprog_translate(uint16_t oclass, struct nv30_vertprog *vp)
{
   struct tgsi_parse_context parse;
   struct nvfx_vpc *vpc = NULL;
   struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
   struct util_dynarray insns;
   int i, ucps;

   vp->translated = false;
   vp->nr_insns = 0;
   vp->nr_consts = 0;

   vpc = CALLOC_STRUCT(nvfx_vpc);
   if (!vpc)
      return false;
   vpc->is_nv4x = (oclass >= NV40_3D_CLASS) ? ~0 : 0;
   vpc->vp   = vp;
   vpc->pipe = vp->pipe;
   vpc->info = &vp->info;
   vpc->cvtx_idx = -1;

   if (!nvfx_vertprog_prepare(vpc)) {
      FREE(vpc);
      return false;
   }

   /* Redirect post-transform vertex position to a temp if user clip
    * planes are enabled.  We need to append code to the vtxprog
    * to handle clip planes later.
    */
   if (vp->enabled_ucps && vpc->cvtx_idx < 0) {
      vpc->r_result[vpc->hpos_idx] = temp(vpc);
      vpc->r_temps_discard = 0;
      vpc->cvtx_idx = vpc->hpos_idx;
   }

   util_dynarray_init(&insns);

   tgsi_parse_init(&parse, vp->pipe.tokens);
   while (!tgsi_parse_end_of_tokens(&parse)) {
      tgsi_parse_token(&parse);

      switch (parse.FullToken.Token.Type) {
      case TGSI_TOKEN_TYPE_IMMEDIATE:
      {
         const struct tgsi_full_immediate *imm;

         imm = &parse.FullToken.FullImmediate;
         assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
         assert(imm->Immediate.NrTokens == 4 + 1);
         vpc->imm[vpc->nr_imm++] =
            constant(vpc, -1,
                imm->u[0].Float,
                imm->u[1].Float,
                imm->u[2].Float,
                imm->u[3].Float);
      }
         break;
      case TGSI_TOKEN_TYPE_INSTRUCTION:
      {
         const struct tgsi_full_instruction *finst;
         unsigned idx = insns.size >> 2;
         util_dynarray_append(&insns, unsigned, vp->nr_insns);
         finst = &parse.FullToken.FullInstruction;
         if (!nvfx_vertprog_parse_instruction(vpc, idx, finst))
            goto out;
      }
         break;
      default:
         break;
      }
   }

   util_dynarray_append(&insns, unsigned, vp->nr_insns);

   for(unsigned i = 0; i < vpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
   {
      struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)vpc->label_relocs.data + i);
      struct nvfx_relocation hw_reloc;

      hw_reloc.location = label_reloc->location;
      hw_reloc.target = ((unsigned*)insns.data)[label_reloc->target];

      //debug_printf("hw %u -> tgsi %u = hw %u\n", hw_reloc.location, label_reloc->target, hw_reloc.target);

      util_dynarray_append(&vp->branch_relocs, struct nvfx_relocation, hw_reloc);
   }
   util_dynarray_fini(&insns);
   util_dynarray_trim(&vp->branch_relocs);

   /* XXX: what if we add a RET before?!  make sure we jump here...*/

   /* Write out HPOS if it was redirected to a temp earlier */
   if (vpc->r_result[vpc->hpos_idx].type != NVFXSR_OUTPUT) {
      struct nvfx_reg hpos = nvfx_reg(NVFXSR_OUTPUT,
                  NVFX_VP(INST_DEST_POS));
      struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);

      nvfx_vp_emit(vpc, arith(0, VEC, MOV, hpos, NVFX_VP_MASK_ALL, htmp, none, none));
   }

   /* Insert code to handle user clip planes */
   ucps = vp->enabled_ucps;
   while (ucps) {
      int i = ffs(ucps) - 1; ucps &= ~(1 << i);
      struct nvfx_reg cdst = nvfx_reg(NVFXSR_OUTPUT, NV30_VP_INST_DEST_CLP(i));
      struct nvfx_src ceqn = nvfx_src(nvfx_reg(NVFXSR_CONST, 512 + i));
      struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->cvtx_idx]);
      unsigned mask;

      if(vpc->is_nv4x)
      {
         switch (i) {
         case 0: case 3: mask = NVFX_VP_MASK_Y; break;
         case 1: case 4: mask = NVFX_VP_MASK_Z; break;
         case 2: case 5: mask = NVFX_VP_MASK_W; break;
         default:
            NOUVEAU_ERR("invalid clip dist #%d\n", i);
            goto out;
         }
      }
      else
         mask = NVFX_VP_MASK_X;

      nvfx_vp_emit(vpc, arith(0, VEC, DP4, cdst, mask, htmp, ceqn, none));
   }

   if (vpc->vp->nr_insns)
      vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;

   if(debug_get_option_nvfx_dump_vp())
   {
      debug_printf("\n");
      tgsi_dump(vpc->pipe.tokens, 0);

      debug_printf("\n%s vertex program:\n", vpc->is_nv4x ? "nv4x" : "nv3x");
      for (i = 0; i < vp->nr_insns; i++)
         debug_printf("%3u: %08x %08x %08x %08x\n", i, vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]);
      debug_printf("\n");
   }

   vp->translated = true;

out:
   tgsi_parse_free(&parse);
   if (vpc) {
      util_dynarray_fini(&vpc->label_relocs);
      util_dynarray_fini(&vpc->loop_stack);
      FREE(vpc->r_temp);
      FREE(vpc->r_address);
      FREE(vpc->r_const);
      FREE(vpc->imm);
      FREE(vpc);
   }

   return vp->translated;
}