brw_vec4_visitor.cpp revision cb18472eca9910e7a4222ebc1b6b1b66869f5b53
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
extern "C" {
#include "main/macros.h"
#include "program/prog_parameter.h"
#include "program/sampler.h"
}

namespace brw {

src_reg::src_reg(dst_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;

   int swizzles[4];
   int next_chan = 0;
   int last = 0;

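   /* Derive a read swizzle from the destination writemask: the enabled
    * channels are packed in order, then the last enabled channel is
    * replicated into the remaining slots.  E.g. a writemask of .xz yields
    * the swizzle .xzzz.
    */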
   for (int i = 0; i < 4; i++) {
      if (!(reg.writemask & (1 << i)))
         continue;

      swizzles[next_chan++] = last = i;
   }

   for (; next_chan < 4; next_chan++) {
      swizzles[next_chan] = last;
   }

   this->swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
                                swizzles[2], swizzles[3]);
}

dst_reg::dst_reg(src_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->writemask = WRITEMASK_XYZW;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;
}

vec4_instruction::vec4_instruction(vec4_visitor *v,
                                   enum opcode opcode, dst_reg dst,
                                   src_reg src0, src_reg src1, src_reg src2)
{
   this->opcode = opcode;
   this->dst = dst;
   this->src[0] = src0;
   this->src[1] = src1;
   this->src[2] = src2;
   this->ir = v->base_ir;
   this->annotation = v->current_annotation;
}

vec4_instruction *
vec4_visitor::emit(vec4_instruction *inst)
{
   this->instructions.push_tail(inst);

   return inst;
}

vec4_instruction *
vec4_visitor::emit_before(vec4_instruction *inst, vec4_instruction *new_inst)
{
   new_inst->ir = inst->ir;
   new_inst->annotation = inst->annotation;

   inst->insert_before(new_inst);

   return inst;
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst,
                   src_reg src0, src_reg src1, src_reg src2)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst,
                                             src0, src1, src2));
}


vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0, src1));
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0));
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst_reg()));
}

#define ALU1(op)							\
   vec4_instruction *							\
   vec4_visitor::op(dst_reg dst, src_reg src0)				\
   {									\
      return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst,	\
					   src0);			\
   }

#define ALU2(op)							\
   vec4_instruction *							\
   vec4_visitor::op(dst_reg dst, src_reg src0, src_reg src1)		\
   {									\
      return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst,	\
					   src0, src1);			\
   }

ALU1(NOT)
ALU1(MOV)
ALU1(FRC)
ALU1(RNDD)
ALU1(RNDE)
ALU1(RNDZ)
ALU2(ADD)
ALU2(MUL)
ALU2(MACH)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(DP3)
ALU2(DP4)

/** Gen4 predicated IF. */
vec4_instruction *
vec4_visitor::IF(uint32_t predicate)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF);
   inst->predicate = predicate;

   return inst;
}

/** Gen6+ IF with embedded comparison. */
vec4_instruction *
vec4_visitor::IF(src_reg src0, src_reg src1, uint32_t condition)
{
   assert(intel->gen >= 6);

   vec4_instruction *inst;

   resolve_ud_negate(&src0);
   resolve_ud_negate(&src1);

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF, dst_null_d(),
                                        src0, src1);
   inst->conditional_mod = condition;

   return inst;
}

/**
 * CMP: Sets the low bit of the destination channels with the result
 * of the comparison, while the upper bits are undefined, and updates
 * the flag register with the packed 16 bits of the result.
 */
vec4_instruction *
vec4_visitor::CMP(dst_reg dst, src_reg src0, src_reg src1, uint32_t condition)
{
   vec4_instruction *inst;

   /* original gen4 does type conversion to the destination type
    * before comparison, producing garbage results for floating
    * point comparisons.
    */
   if (intel->gen == 4) {
      dst.type = src0.type;
      if (dst.file == HW_REG)
         dst.fixed_hw_reg.type = dst.type;
   }

   resolve_ud_negate(&src0);
   resolve_ud_negate(&src1);

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_CMP, dst, src0, src1);
   inst->conditional_mod = condition;

   return inst;
}

vec4_instruction *
vec4_visitor::SCRATCH_READ(dst_reg dst, src_reg index)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_READ,
                                        dst, index);
   inst->base_mrf = 14;
   inst->mlen = 1;

   return inst;
}

vec4_instruction *
vec4_visitor::SCRATCH_WRITE(dst_reg dst, src_reg src, src_reg index)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_WRITE,
                                        dst, src, index);
   inst->base_mrf = 13;
   inst->mlen = 2;

   return inst;
}

void
vec4_visitor::emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements)
{
   static enum opcode dot_opcodes[] = {
      BRW_OPCODE_DP2, BRW_OPCODE_DP3, BRW_OPCODE_DP4
   };

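   /* elements must be in the range [2, 4]; e.g. elements == 3 selects
    * BRW_OPCODE_DP3.
    */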
   emit(dot_opcodes[elements - 2], dst, src0, src1);
}

void
vec4_visitor::emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src)
{
   /* The gen6 math instruction ignores the source modifiers --
    * swizzle, abs, negate, and at least some parts of the register
    * region description.
    *
    * While it would seem that this MOV could be avoided at this point
    * in the case that the swizzle is matched up with the destination
    * writemask, note that uniform packing and register allocation
    * could rearrange our swizzle, so let's leave this matter up to
    * copy propagation later.
    */
   src_reg temp_src = src_reg(this, glsl_type::vec4_type);
   emit(MOV(dst_reg(temp_src), src));

   if (dst.writemask != WRITEMASK_XYZW) {
      /* The gen6 math instruction must be align1, so we can't do
       * writemasks.
       */
      dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type);

      emit(opcode, temp_dst, temp_src);

      emit(MOV(dst, src_reg(temp_dst)));
   } else {
      emit(opcode, dst, temp_src);
   }
}

void
vec4_visitor::emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src)
{
   vec4_instruction *inst = emit(opcode, dst, src);
   inst->base_mrf = 1;
   inst->mlen = 1;
}

void
vec4_visitor::emit_math(opcode opcode, dst_reg dst, src_reg src)
{
   switch (opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      break;
   default:
      assert(!"not reached: bad math opcode");
      return;
   }

   if (intel->gen >= 7) {
      emit(opcode, dst, src);
   } else if (intel->gen == 6) {
      return emit_math1_gen6(opcode, dst, src);
   } else {
      return emit_math1_gen4(opcode, dst, src);
   }
}

void
vec4_visitor::emit_math2_gen6(enum opcode opcode,
                              dst_reg dst, src_reg src0, src_reg src1)
{
   src_reg expanded;

   /* The gen6 math instruction ignores the source modifiers --
    * swizzle, abs, negate, and at least some parts of the register
    * region description.  Move the sources to temporaries to make it
    * generally work.
    */

   expanded = src_reg(this, glsl_type::vec4_type);
   expanded.type = src0.type;
   emit(MOV(dst_reg(expanded), src0));
   src0 = expanded;

   expanded = src_reg(this, glsl_type::vec4_type);
   expanded.type = src1.type;
   emit(MOV(dst_reg(expanded), src1));
   src1 = expanded;

   if (dst.writemask != WRITEMASK_XYZW) {
      /* The gen6 math instruction must be align1, so we can't do
       * writemasks.
       */
      dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type);
      temp_dst.type = dst.type;

      emit(opcode, temp_dst, src0, src1);

      emit(MOV(dst, src_reg(temp_dst)));
   } else {
      emit(opcode, dst, src0, src1);
   }
}

void
vec4_visitor::emit_math2_gen4(enum opcode opcode,
                              dst_reg dst, src_reg src0, src_reg src1)
{
   vec4_instruction *inst = emit(opcode, dst, src0, src1);
   inst->base_mrf = 1;
   inst->mlen = 2;
}

void
vec4_visitor::emit_math(enum opcode opcode,
                        dst_reg dst, src_reg src0, src_reg src1)
{
   switch (opcode) {
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      break;
   default:
      assert(!"not reached: unsupported binary math opcode");
      return;
   }

   if (intel->gen >= 7) {
      emit(opcode, dst, src0, src1);
   } else if (intel->gen == 6) {
      return emit_math2_gen6(opcode, dst, src0, src1);
   } else {
      return emit_math2_gen4(opcode, dst, src0, src1);
   }
}

void
vec4_visitor::visit_instructions(const exec_list *list)
{
   foreach_list(node, list) {
      ir_instruction *ir = (ir_instruction *)node;

      base_ir = ir;
      ir->accept(this);
   }
}


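/* Returns the size of a value of the given type, in vec4 registers: every
 * scalar or vector takes a full vec4, a matrix takes one vec4 per column,
 * and arrays and structs are the sum of their elements.  E.g. a float is 1,
 * a mat3 is 3, and vec2[4] is 4.
 */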
static int
type_size(const struct glsl_type *type)
{
   unsigned int i;
   int size;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      if (type->is_matrix()) {
         return type->matrix_columns;
      } else {
         /* Regardless of size of vector, it gets a vec4. This is bad
          * packing for things like floats, but otherwise arrays become a
          * mess.  Hopefully a later pass over the code can pack scalars
          * down if appropriate.
          */
         return 1;
      }
   case GLSL_TYPE_ARRAY:
      assert(type->length > 0);
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up one slot in UNIFORMS[], but they're baked in
       * at link time.
       */
      return 1;
   default:
      assert(0);
      return 0;
   }
}

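/* Allocates a new virtual GRF of the given size (in vec4 registers) and
 * returns its index.  The size and reg-map arrays grow by doubling (starting
 * at 16 entries), so allocation is amortized O(1).
 */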
int
vec4_visitor::virtual_grf_alloc(int size)
{
   if (virtual_grf_array_size <= virtual_grf_count) {
      if (virtual_grf_array_size == 0)
         virtual_grf_array_size = 16;
      else
         virtual_grf_array_size *= 2;
      virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int,
                                   virtual_grf_array_size);
      virtual_grf_reg_map = reralloc(mem_ctx, virtual_grf_reg_map, int,
                                     virtual_grf_array_size);
   }
   virtual_grf_reg_map[virtual_grf_count] = virtual_grf_reg_count;
   virtual_grf_reg_count += size;
   virtual_grf_sizes[virtual_grf_count] = size;
   return virtual_grf_count++;
}

src_reg::src_reg(class vec4_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));

   if (type->is_array() || type->is_record()) {
      this->swizzle = BRW_SWIZZLE_NOOP;
   } else {
      this->swizzle = swizzle_for_size(type->vector_elements);
   }

   this->type = brw_type_for_base_type(type);
}

dst_reg::dst_reg(class vec4_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));

   if (type->is_array() || type->is_record()) {
      this->writemask = WRITEMASK_XYZW;
   } else {
      this->writemask = (1 << type->vector_elements) - 1;
   }

   this->type = brw_type_for_base_type(type);
}

/* Our support for uniforms is piggy-backed on the struct
 * gl_fragment_program, because that's where the values actually
 * get stored, rather than in some global gl_shader_program uniform
 * store.
 */
int
vec4_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;
   float *values = &this->vp->Base.Parameters->ParameterValues[loc][0].f;

   if (type->is_matrix()) {
      const glsl_type *column = type->column_type();

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
         offset += setup_uniform_values(loc + offset, column);
      }

      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->vector_elements; i++) {
         c->prog_data.param[this->uniforms * 4 + i] = &values[i];
      }

      /* Set up pad elements to get things aligned to a vec4 boundary. */
      for (unsigned int i = type->vector_elements; i < 4; i++) {
         static float zero = 0;

         c->prog_data.param[this->uniforms * 4 + i] = &zero;
      }

      /* Track the size of this uniform vector, for future packing of
       * uniforms.
       */
      this->uniform_vector_size[this->uniforms] = type->vector_elements;
      this->uniforms++;

      return 1;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset,
                                        type->fields.structure[i].type);
      }
      return offset;

   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;

   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;

   default:
      assert(!"not reached");
      return 0;
   }
}

void
vec4_visitor::setup_uniform_clipplane_values()
{
   gl_clip_plane *clip_planes = brw_select_clip_planes(ctx);

   /* Pre-Gen6, we compact clip planes.  For example, if the user
    * enables just clip planes 0, 1, and 3, we will enable clip planes
    * 0, 1, and 2 in the hardware, and we'll move clip plane 3 to clip
    * plane 2.  This simplifies the implementation of the Gen6 clip
    * thread.
    *
    * In Gen6 and later, we don't compact clip planes, because this
    * simplifies the implementation of gl_ClipDistance.
    */
   int compacted_clipplane_index = 0;
   for (int i = 0; i < c->key.nr_userclip_plane_consts; ++i) {
      if (intel->gen < 6 &&
          !(c->key.userclip_planes_enabled_gen_4_5 & (1 << i))) {
         continue;
      }
      this->uniform_vector_size[this->uniforms] = 4;
      this->userplane[compacted_clipplane_index] = dst_reg(UNIFORM, this->uniforms);
      this->userplane[compacted_clipplane_index].type = BRW_REGISTER_TYPE_F;
      for (int j = 0; j < 4; ++j) {
         c->prog_data.param[this->uniforms * 4 + j] = &clip_planes[i][j];
      }
      ++compacted_clipplane_index;
      ++this->uniforms;
   }
}

/* Our support for builtin uniforms is even scarier than non-builtin.
 * It sits on top of the PROG_STATE_VAR parameters that are
 * automatically updated from GL context state.
 */
void
vec4_visitor::setup_builtin_uniform_values(ir_variable *ir)
{
   const ir_state_slot *const slots = ir->state_slots;
   assert(ir->state_slots != NULL);

   for (unsigned int i = 0; i < ir->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa,
       * but we'll get the same index back here.  We can reference
       * ParameterValues directly, since unlike brw_fs.cpp, we never
       * add new state references during compile.
       */
      int index = _mesa_add_state_reference(this->vp->Base.Parameters,
                                            (gl_state_index *)slots[i].tokens);
      float *values = &this->vp->Base.Parameters->ParameterValues[index][0].f;

      this->uniform_vector_size[this->uniforms] = 0;
      /* Add each of the unique swizzled channels of the element.
       * This will end up matching the size of the glsl_type of this field.
       */
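      /* E.g. a scalar built-in like gl_Fog.density typically comes through
       * with an .xxxx swizzle and counts as one channel, while a vec4 slot
       * uses .xyzw and counts as four.
       */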
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);

         c->prog_data.param[this->uniforms * 4 + j] = &values[swiz];
         if (swiz > last_swiz)
            this->uniform_vector_size[this->uniforms]++;
         last_swiz = swiz;
      }
      this->uniforms++;
   }
}

dst_reg *
vec4_visitor::variable_storage(ir_variable *var)
{
   return (dst_reg *)hash_table_find(this->variable_ht, var);
}

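/* Evaluates a boolean condition into the flag register, emitting a CMP or a
 * conditional-mod ALU instruction as appropriate.  *predicate receives the
 * predicate a following instruction should use: BRW_PREDICATE_NORMAL, or an
 * align16 ALL4H/ANY4H predicate for vector comparisons like all_equal.
 */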
void
vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate)
{
   ir_expression *expr = ir->as_expression();

   *predicate = BRW_PREDICATE_NORMAL;

   if (expr) {
      src_reg op[2];
      vec4_instruction *inst;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         expr->operands[i]->accept(this);
         op[i] = this->result;

         resolve_ud_negate(&op[i]);
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(AND(dst_null_d(), op[0], src_reg(1)));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;

      case ir_binop_logic_xor:
         inst = emit(XOR(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_or:
         inst = emit(OR(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_and:
         inst = emit(AND(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_f2b:
         if (intel->gen >= 6) {
            emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
         } else {
            inst = emit(MOV(dst_null_f(), op[0]));
            inst->conditional_mod = BRW_CONDITIONAL_NZ;
         }
         break;

      case ir_unop_i2b:
         if (intel->gen >= 6) {
            emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         } else {
            inst = emit(MOV(dst_null_d(), op[0]));
            inst->conditional_mod = BRW_CONDITIONAL_NZ;
         }
         break;

      case ir_binop_all_equal:
         inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
         break;

      case ir_binop_any_nequal:
         inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
         *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
         break;

      case ir_unop_any:
         inst = emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
         break;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_nequal:
         emit(CMP(dst_null_d(), op[0], op[1],
                  brw_conditional_for_comparison(expr->operation)));
         break;

      default:
         assert(!"not reached");
         break;
      }
      return;
   }

   ir->accept(this);

   resolve_ud_negate(&this->result);

   if (intel->gen >= 6) {
      vec4_instruction *inst = emit(AND(dst_null_d(),
                                        this->result, src_reg(1)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   } else {
      vec4_instruction *inst = emit(MOV(dst_null_d(), this->result));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }
}

/**
 * Emit a gen6 IF statement with the comparison folded into the IF
 * instruction.
 */
void
vec4_visitor::emit_if_gen6(ir_if *ir)
{
   ir_expression *expr = ir->condition->as_expression();

   if (expr) {
      src_reg op[2];
      dst_reg temp;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_Z));
         return;

      case ir_binop_logic_xor:
         emit(IF(op[0], op[1], BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_logic_or:
         temp = dst_reg(this, glsl_type::bool_type);
         emit(OR(temp, op[0], op[1]));
         emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_logic_and:
         temp = dst_reg(this, glsl_type::bool_type);
         emit(AND(temp, op[0], op[1]));
         emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_unop_f2b:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_unop_i2b:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_nequal:
         emit(IF(op[0], op[1],
                 brw_conditional_for_comparison(expr->operation)));
         return;

      case ir_binop_all_equal:
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         emit(IF(BRW_PREDICATE_ALIGN16_ALL4H));
         return;

      case ir_binop_any_nequal:
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
         emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
         return;

      case ir_unop_any:
         emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
         return;

      default:
         assert(!"not reached");
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;
      }
      return;
   }

   ir->condition->accept(this);

   emit(IF(this->result, src_reg(0), BRW_CONDITIONAL_NZ));
}

void
vec4_visitor::visit(ir_variable *ir)
{
   dst_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   switch (ir->mode) {
   case ir_var_in:
      reg = new(mem_ctx) dst_reg(ATTR, ir->location);

      /* Do GL_FIXED rescaling for GLES2.0.  Our GL_FIXED attributes
       * come in as floating point conversions of the integer values.
       */
      for (int i = ir->location; i < ir->location + type_size(ir->type); i++) {
         if (!c->key.gl_fixed_input_size[i])
            continue;

         dst_reg dst = *reg;
         dst.type = brw_type_for_base_type(ir->type);
         dst.writemask = (1 << c->key.gl_fixed_input_size[i]) - 1;
         emit(MUL(dst, src_reg(dst), src_reg(1.0f / 65536.0f)));
      }
      break;

   case ir_var_out:
      reg = new(mem_ctx) dst_reg(this, ir->type);

      for (int i = 0; i < type_size(ir->type); i++) {
         output_reg[ir->location + i] = *reg;
         output_reg[ir->location + i].reg_offset = i;
         output_reg[ir->location + i].type =
            brw_type_for_base_type(ir->type->get_scalar_type());
         output_reg_annotation[ir->location + i] = ir->name;
      }
      break;

   case ir_var_auto:
   case ir_var_temporary:
      reg = new(mem_ctx) dst_reg(this, ir->type);
      break;

   case ir_var_uniform:
      reg = new(this->mem_ctx) dst_reg(UNIFORM, this->uniforms);

      /* Track how big the whole uniform variable is, in case we need to put a
       * copy of its data into pull constants for array access.
       */
      this->uniform_size[this->uniforms] = type_size(ir->type);

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }
      break;

   case ir_var_system_value:
      /* VertexID is stored by the VF as the last vertex element, but
       * we don't represent it with a flag in inputs_read, so we call
       * it VERT_ATTRIB_MAX, which setup_attributes() picks up on.
       */
      reg = new(mem_ctx) dst_reg(ATTR, VERT_ATTRIB_MAX);
      prog_data->uses_vertexid = true;

      switch (ir->location) {
      case SYSTEM_VALUE_VERTEX_ID:
         reg->writemask = WRITEMASK_X;
         break;
      case SYSTEM_VALUE_INSTANCE_ID:
         reg->writemask = WRITEMASK_Y;
         break;
      default:
         assert(!"not reached");
         break;
      }
      break;

   default:
      assert(!"not reached");
   }

   reg->type = brw_type_for_base_type(ir->type);
   hash_table_insert(this->variable_ht, reg, ir);
}

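/* A GLSL IR loop is lowered to the instruction stream roughly as:
 *
 *    [ MOV counter, <from> ]
 *    DO
 *       [ CMP counter, <to>; predicated BREAK ]
 *       <body>
 *       [ ADD counter, counter, <increment> ]
 *    WHILE
 *
 * where the bracketed parts are only emitted when the corresponding
 * fields of the ir_loop are present.
 */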
void
vec4_visitor::visit(ir_loop *ir)
{
   dst_reg counter;

   /* We don't want debugging output to print the whole body of the
    * loop as the annotation.
    */
   this->base_ir = NULL;

   if (ir->counter != NULL) {
      this->base_ir = ir->counter;
      ir->counter->accept(this);
      counter = *(variable_storage(ir->counter));

      if (ir->from != NULL) {
         this->base_ir = ir->from;
         ir->from->accept(this);

         emit(MOV(counter, this->result));
      }
   }

   emit(BRW_OPCODE_DO);

   if (ir->to) {
      this->base_ir = ir->to;
      ir->to->accept(this);

      emit(CMP(dst_null_d(), src_reg(counter), this->result,
               brw_conditional_for_comparison(ir->cmp)));

      vec4_instruction *inst = emit(BRW_OPCODE_BREAK);
      inst->predicate = BRW_PREDICATE_NORMAL;
   }

   visit_instructions(&ir->body_instructions);


   if (ir->increment) {
      this->base_ir = ir->increment;
      ir->increment->accept(this);
      emit(ADD(counter, src_reg(counter), this->result));
   }

   emit(BRW_OPCODE_WHILE);
}

void
vec4_visitor::visit(ir_loop_jump *ir)
{
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      emit(BRW_OPCODE_BREAK);
      break;
   case ir_loop_jump::jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;
   }
}


void
vec4_visitor::visit(ir_function_signature *ir)
{
   assert(0);
   (void)ir;
}

void
vec4_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all be inlined.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      sig = ir->matching_signature(&empty);

      assert(sig);

      visit_instructions(&sig->body);
   }
}

bool
vec4_visitor::try_emit_sat(ir_expression *ir)
{
   ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
   if (!sat_src)
      return false;

   sat_src->accept(this);
   src_reg src = this->result;

   this->result = src_reg(this, ir->type);
   vec4_instruction *inst;
   inst = emit(MOV(dst_reg(this->result), src));
   inst->saturate = true;

   return true;
}

void
vec4_visitor::emit_bool_comparison(unsigned int op,
                                   dst_reg dst, src_reg src0, src_reg src1)
{
   /* original gen4 does destination conversion before comparison. */
   if (intel->gen < 5)
      dst.type = src0.type;

   emit(CMP(dst, src0, src1, brw_conditional_for_comparison(op)));

   dst.type = BRW_REGISTER_TYPE_D;
   emit(AND(dst, src_reg(dst), src_reg(0x1)));
}

void
vec4_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   src_reg op[Elements(ir->operands)];
   src_reg result_src;
   dst_reg result_dst;
   vec4_instruction *inst;

   if (try_emit_sat(ir))
      return;

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      this->result.file = BAD_FILE;
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->print();
         exit(1);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
   }

   int vector_elements = ir->operands[0]->type->vector_elements;
   if (ir->operands[1]) {
      vector_elements = MAX2(vector_elements,
                             ir->operands[1]->type->vector_elements);
   }

   this->result.file = BAD_FILE;

   /* Storage for our result.  Ideally for an assignment we'd be using
    * the actual storage for the result here, instead.
    */
   result_src = src_reg(this, ir->type);
   /* convenience for the emit functions below. */
   result_dst = dst_reg(result_src);
   /* If nothing special happens, this is the result. */
   this->result = result_src;
   /* Limit writes to the channels that will be used by result_src later.
    * This does limit this temp's use as a temporary for multi-instruction
    * sequences.
    */
   result_dst.writemask = (1 << ir->type->vector_elements) - 1;

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it performs
       * a ones' complement of the whole register, not just bit 0.
       */
      emit(XOR(result_dst, op[0], src_reg(1)));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      this->result = op[0];
      break;

   case ir_unop_sign:
      emit(MOV(result_dst, src_reg(0.0f)));

      emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_G));
      inst = emit(MOV(result_dst, src_reg(1.0f)));
      inst->predicate = BRW_PREDICATE_NORMAL;

      emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_L));
      inst = emit(MOV(result_dst, src_reg(-1.0f)));
      inst->predicate = BRW_PREDICATE_NORMAL;

      break;

   case ir_unop_rcp:
      emit_math(SHADER_OPCODE_RCP, result_dst, op[0]);
      break;

   case ir_unop_exp2:
      emit_math(SHADER_OPCODE_EXP2, result_dst, op[0]);
      break;
   case ir_unop_log2:
      emit_math(SHADER_OPCODE_LOG2, result_dst, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(SHADER_OPCODE_SIN, result_dst, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(SHADER_OPCODE_COS, result_dst, op[0]);
      break;

   case ir_unop_dFdx:
   case ir_unop_dFdy:
      assert(!"derivatives not valid in vertex shader");
      break;

   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;

   case ir_binop_add:
      emit(ADD(result_dst, op[0], op[1]));
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;

   case ir_binop_mul:
      if (ir->type->is_integer()) {
         /* For integer multiplication, the MUL uses the low 16 bits
          * of one of the operands (src0 on gen6, src1 on gen7).  The
          * MACH accumulates in the contribution of the upper 16 bits
          * of that operand.
          *
          * FINISHME: Emit just the MUL if we know an operand is small
          * enough.
          */
         struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);

         emit(MUL(acc, op[0], op[1]));
         emit(MACH(dst_null_d(), op[0], op[1]));
         emit(MOV(result_dst, src_reg(acc)));
      } else {
         emit(MUL(result_dst, op[0], op[1]));
      }
      break;
   case ir_binop_div:
      /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_QUOTIENT, result_dst, op[0], op[1]);
      break;
   case ir_binop_mod:
      /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_REMAINDER, result_dst, op[0], op[1]);
      break;

   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_nequal: {
      emit(CMP(result_dst, op[0], op[1],
               brw_conditional_for_comparison(ir->operation)));
      emit(AND(result_dst, result_src, src_reg(0x1)));
      break;
   }

   case ir_binop_all_equal:
      /* "==" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         emit(MOV(result_dst, src_reg(0)));
         inst = emit(MOV(result_dst, src_reg(1)));
         inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_Z));
         emit(AND(result_dst, result_src, src_reg(0x1)));
      }
      break;
   case ir_binop_any_nequal:
      /* "!=" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));

         emit(MOV(result_dst, src_reg(0)));
         inst = emit(MOV(result_dst, src_reg(1)));
         inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_NZ));
         emit(AND(result_dst, result_src, src_reg(0x1)));
      }
      break;

   case ir_unop_any:
      emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
      emit(MOV(result_dst, src_reg(0)));

      inst = emit(MOV(result_dst, src_reg(1)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;

   case ir_binop_logic_xor:
      emit(XOR(result_dst, op[0], op[1]));
      break;

   case ir_binop_logic_or:
      emit(OR(result_dst, op[0], op[1]));
      break;

   case ir_binop_logic_and:
      emit(AND(result_dst, op[0], op[1]));
      break;

   case ir_binop_dot:
      assert(ir->operands[0]->type->is_vector());
      assert(ir->operands[0]->type == ir->operands[1]->type);
      emit_dp(result_dst, op[0], op[1], ir->operands[0]->type->vector_elements);
      break;

   case ir_unop_sqrt:
      emit_math(SHADER_OPCODE_SQRT, result_dst, op[0]);
      break;
   case ir_unop_rsq:
      emit_math(SHADER_OPCODE_RSQ, result_dst, op[0]);
      break;
   case ir_unop_i2f:
   case ir_unop_i2u:
   case ir_unop_u2i:
   case ir_unop_u2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
   case ir_unop_f2i:
      emit(MOV(result_dst, op[0]));
      break;
   case ir_unop_f2b:
   case ir_unop_i2b: {
      emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
      emit(AND(result_dst, result_src, src_reg(1)));
      break;
   }

   case ir_unop_trunc:
      emit(RNDZ(result_dst, op[0]));
      break;
   case ir_unop_ceil:
      op[0].negate = !op[0].negate;
      inst = emit(RNDD(result_dst, op[0]));
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(RNDD(result_dst, op[0]));
      break;
   case ir_unop_fract:
      inst = emit(FRC(result_dst, op[0]));
      break;
   case ir_unop_round_even:
      emit(RNDE(result_dst, op[0]));
      break;

   case ir_binop_min:
      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_L));

         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      break;
   case ir_binop_max:
      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_G;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_G));

         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      break;

   case ir_binop_pow:
      emit_math(SHADER_OPCODE_POW, result_dst, op[0], op[1]);
      break;

   case ir_unop_bit_not:
      inst = emit(NOT(result_dst, op[0]));
      break;
   case ir_binop_bit_and:
      inst = emit(AND(result_dst, op[0], op[1]));
      break;
   case ir_binop_bit_xor:
      inst = emit(XOR(result_dst, op[0], op[1]));
      break;
   case ir_binop_bit_or:
      inst = emit(OR(result_dst, op[0], op[1]));
      break;

   case ir_binop_lshift:
      inst = emit(BRW_OPCODE_SHL, result_dst, op[0], op[1]);
      break;

   case ir_binop_rshift:
      if (ir->type->base_type == GLSL_TYPE_INT)
         inst = emit(BRW_OPCODE_ASR, result_dst, op[0], op[1]);
      else
         inst = emit(BRW_OPCODE_SHR, result_dst, op[0], op[1]);
      break;

   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;
   }
}


void
vec4_visitor::visit(ir_swizzle *ir)
{
   src_reg src;
   int i = 0;
   int swizzle[4];

   /* Note that this is only swizzles in expressions, not those on the left
    * hand side of an assignment, which do write masking.  See ir_assignment
    * for that.
    */

   ir->val->accept(this);
   src = this->result;
   assert(src.file != BAD_FILE);

   for (i = 0; i < ir->type->vector_elements; i++) {
      switch (i) {
      case 0:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.x);
         break;
      case 1:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.y);
         break;
      case 2:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.z);
         break;
      case 3:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.w);
         break;
      }
   }
   for (; i < 4; i++) {
      /* Replicate the last channel out. */
      swizzle[i] = swizzle[ir->type->vector_elements - 1];
   }

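   /* The new swizzle composes on top of whatever swizzle the source already
    * carried: e.g. applying .yz to a source swizzled .wzyx reads channels
    * (z, y), giving .zyyy after replication.
    */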
   src.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);

   this->result = src;
}

void
vec4_visitor::visit(ir_dereference_variable *ir)
{
   const struct glsl_type *type = ir->type;
   dst_reg *reg = variable_storage(ir->var);

   if (!reg) {
      fail("Failed to find variable storage for %s\n", ir->var->name);
      this->result = src_reg(brw_null_reg());
      return;
   }

   this->result = src_reg(*reg);

   /* System values get their swizzle from the dst_reg writemask */
   if (ir->var->mode == ir_var_system_value)
      return;

   if (type->is_scalar() || type->is_vector() || type->is_matrix())
      this->result.swizzle = swizzle_for_size(type->vector_elements);
}

void
vec4_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *constant_index;
   src_reg src;
   int element_size = type_size(ir->type);

   constant_index = ir->array_index->constant_expression_value();

   ir->array->accept(this);
   src = this->result;

   if (constant_index) {
      src.reg_offset += constant_index->value.i[0] * element_size;
   } else {
      /* Variable index array dereference.  It eats the "vec4" of the
       * base of the array and an index that offsets the Mesa register
       * index.
       */
      ir->array_index->accept(this);

      src_reg index_reg;

      if (element_size == 1) {
         index_reg = this->result;
      } else {
         index_reg = src_reg(this, glsl_type::int_type);

         emit(MUL(dst_reg(index_reg), this->result, src_reg(element_size)));
      }

      if (src.reladdr) {
         src_reg temp = src_reg(this, glsl_type::int_type);

         emit(ADD(dst_reg(temp), *src.reladdr, index_reg));

         index_reg = temp;
      }

      src.reladdr = ralloc(mem_ctx, src_reg);
      memcpy(src.reladdr, &index_reg, sizeof(index_reg));
   }

   /* If the type is smaller than a vec4, replicate the last channel out. */
   if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix())
      src.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      src.swizzle = BRW_SWIZZLE_NOOP;
   src.type = brw_type_for_base_type(ir->type);

   this->result = src;
}

void
vec4_visitor::visit(ir_dereference_record *ir)
{
   unsigned int i;
   const glsl_type *struct_type = ir->record->type;
   int offset = 0;

   ir->record->accept(this);

   for (i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }

   /* If the type is smaller than a vec4, replicate the last channel out. */
   if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix())
      this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      this->result.swizzle = BRW_SWIZZLE_NOOP;
   this->result.type = brw_type_for_base_type(ir->type);

   this->result.reg_offset += offset;
}

/**
 * We want to be careful in assignment setup to hit the actual storage
 * instead of potentially using a temporary like we might with the
 * ir_dereference handler.
 */
static dst_reg
get_assignment_lhs(ir_dereference *ir, vec4_visitor *v)
{
   /* The LHS must be a dereference.  If the LHS is a variable indexed array
    * access of a vector, it must be separated into a series of conditional
    * moves before reaching this point (see ir_vec_index_to_cond_assign).
    */
   assert(ir->as_dereference());
   ir_dereference_array *deref_array = ir->as_dereference_array();
   if (deref_array) {
      assert(!deref_array->array->type->is_vector());
   }

   /* Use the rvalue deref handler for the most part.  We'll ignore
    * swizzles in it and write swizzles using writemask, though.
    */
   ir->accept(v);
   return dst_reg(v->result);
}

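/* Copies a value of arbitrary composite type by recursing down to its
 * scalar/vector leaves and emitting one (possibly predicated) MOV per
 * vec4-sized register, advancing *dst and *src as it goes.
 */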
void
vec4_visitor::emit_block_move(dst_reg *dst, src_reg *src,
                              const struct glsl_type *type, uint32_t predicate)
{
   if (type->base_type == GLSL_TYPE_STRUCT) {
      for (unsigned int i = 0; i < type->length; i++) {
         emit_block_move(dst, src, type->fields.structure[i].type, predicate);
      }
      return;
   }

   if (type->is_array()) {
      for (unsigned int i = 0; i < type->length; i++) {
         emit_block_move(dst, src, type->fields.array, predicate);
      }
      return;
   }

   if (type->is_matrix()) {
      const struct glsl_type *vec_type;

      vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                         type->vector_elements, 1);

      for (int i = 0; i < type->matrix_columns; i++) {
         emit_block_move(dst, src, vec_type, predicate);
      }
      return;
   }

   assert(type->is_scalar() || type->is_vector());

   dst->type = brw_type_for_base_type(type);
   src->type = dst->type;

   dst->writemask = (1 << type->vector_elements) - 1;

   src->swizzle = swizzle_for_size(type->vector_elements);

   vec4_instruction *inst = emit(MOV(*dst, *src));
   inst->predicate = predicate;

   dst->reg_offset++;
   src->reg_offset++;
}


/* If the RHS processing resulted in an instruction generating a
 * temporary value, and it would be easy to rewrite the instruction to
 * generate its result right into the LHS instead, do so.  This ends
 * up reliably removing instructions where it can be tricky to do so
 * later without real UD chain information.
 */
bool
vec4_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
                                     dst_reg dst,
                                     src_reg src,
                                     vec4_instruction *pre_rhs_inst,
                                     vec4_instruction *last_rhs_inst)
{
   /* This could be supported, but it would take more smarts. */
   if (ir->condition)
      return false;

   if (pre_rhs_inst == last_rhs_inst)
      return false; /* No instructions generated to work with. */

   /* Make sure the last instruction generated our source reg. */
   if (src.file != GRF ||
       src.file != last_rhs_inst->dst.file ||
       src.reg != last_rhs_inst->dst.reg ||
       src.reg_offset != last_rhs_inst->dst.reg_offset ||
       src.reladdr ||
       src.abs ||
       src.negate ||
       last_rhs_inst->predicate != BRW_PREDICATE_NONE)
      return false;

   /* Check that the last instruction fully initialized the channels
    * we want to use, in the order we want to use them.  We could
    * potentially reswizzle the operands of many instructions so that
    * we could handle out of order channels, but don't yet.
    */

   for (unsigned i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i)) {
         if (!(last_rhs_inst->dst.writemask & (1 << i)))
            return false;

         if (BRW_GET_SWZ(src.swizzle, i) != i)
            return false;
      }
   }

   /* Success!  Rewrite the instruction. */
   last_rhs_inst->dst.file = dst.file;
   last_rhs_inst->dst.reg = dst.reg;
   last_rhs_inst->dst.reg_offset = dst.reg_offset;
   last_rhs_inst->dst.reladdr = dst.reladdr;
   last_rhs_inst->dst.writemask &= dst.writemask;

   return true;
}

void
vec4_visitor::visit(ir_assignment *ir)
{
   dst_reg dst = get_assignment_lhs(ir->lhs, this);
   uint32_t predicate = BRW_PREDICATE_NONE;

   if (!ir->lhs->type->is_scalar() &&
       !ir->lhs->type->is_vector()) {
      ir->rhs->accept(this);
      src_reg src = this->result;

      if (ir->condition) {
         emit_bool_to_cond_code(ir->condition, &predicate);
      }

      /* emit_block_move doesn't account for swizzles in the source register.
       * This should be ok, since the source register is a structure or an
       * array, and those can't be swizzled.  But double-check to be sure.
       */
      assert(src.swizzle ==
             (ir->rhs->type->is_matrix()
              ? swizzle_for_size(ir->rhs->type->vector_elements)
              : BRW_SWIZZLE_NOOP));

      emit_block_move(&dst, &src, ir->rhs->type, predicate);
      return;
   }

   /* Now we're down to just a scalar/vector with writemasks. */
   int i;

   vec4_instruction *pre_rhs_inst, *last_rhs_inst;
   pre_rhs_inst = (vec4_instruction *)this->instructions.get_tail();

   ir->rhs->accept(this);

   last_rhs_inst = (vec4_instruction *)this->instructions.get_tail();

   src_reg src = this->result;

   int swizzles[4];
   int first_enabled_chan = 0;
   int src_chan = 0;

   assert(ir->lhs->type->is_vector() ||
          ir->lhs->type->is_scalar());
   dst.writemask = ir->write_mask;

   for (int i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i)) {
         first_enabled_chan = BRW_GET_SWZ(src.swizzle, i);
         break;
      }
   }

   /* Swizzle a small RHS vector into the channels being written.
    *
    * glsl ir treats write_mask as dictating how many channels are
    * present on the RHS while in our instructions we need to make
    * those channels appear in the slots of the vec4 they're written to.
    */
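   /* E.g. for "v.yw = u.xz" the write mask is YW and the RHS arrives packed
    * in its first two channels, so the swizzle built below routes RHS
    * channel 0 to y and channel 1 to w; the unwritten slots are filled with
    * a replicated don't-care channel.
    */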
   for (int i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i))
         swizzles[i] = BRW_GET_SWZ(src.swizzle, src_chan++);
      else
         swizzles[i] = first_enabled_chan;
   }
   src.swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
                              swizzles[2], swizzles[3]);

   if (try_rewrite_rhs_to_dst(ir, dst, src, pre_rhs_inst, last_rhs_inst)) {
      return;
   }

   if (ir->condition) {
      emit_bool_to_cond_code(ir->condition, &predicate);
   }

   for (i = 0; i < type_size(ir->lhs->type); i++) {
      vec4_instruction *inst = emit(MOV(dst, src));
      inst->predicate = predicate;

      dst.reg_offset++;
      src.reg_offset++;
   }
}

void
vec4_visitor::emit_constant_values(dst_reg *dst, ir_constant *ir)
{
   if (ir->type->base_type == GLSL_TYPE_STRUCT) {
      foreach_list(node, &ir->components) {
         ir_constant *field_value = (ir_constant *)node;

         emit_constant_values(dst, field_value);
      }
      return;
   }

   if (ir->type->is_array()) {
      for (unsigned int i = 0; i < ir->type->length; i++) {
         emit_constant_values(dst, ir->array_elements[i]);
      }
      return;
   }

   if (ir->type->is_matrix()) {
      for (int i = 0; i < ir->type->matrix_columns; i++) {
         float *vec = &ir->value.f[i * ir->type->vector_elements];

         for (int j = 0; j < ir->type->vector_elements; j++) {
            dst->writemask = 1 << j;
            dst->type = BRW_REGISTER_TYPE_F;

            emit(MOV(*dst, src_reg(vec[j])));
         }
         dst->reg_offset++;
      }
      return;
   }

   int remaining_writemask = (1 << ir->type->vector_elements) - 1;

   for (int i = 0; i < ir->type->vector_elements; i++) {
      if (!(remaining_writemask & (1 << i)))
         continue;

      dst->writemask = 1 << i;
      dst->type = brw_type_for_base_type(ir->type);

      /* Find other components that match the one we're about to
       * write.  Emits fewer instructions for things like vec4(0.5,
       * 1.5, 1.5, 1.5).
       */
      for (int j = i + 1; j < ir->type->vector_elements; j++) {
         if (ir->type->base_type == GLSL_TYPE_BOOL) {
            if (ir->value.b[i] == ir->value.b[j])
               dst->writemask |= (1 << j);
         } else {
            /* u, i, and f storage all line up, so no need for a
             * switch case for comparing each type.
             */
            if (ir->value.u[i] == ir->value.u[j])
               dst->writemask |= (1 << j);
         }
      }

      switch (ir->type->base_type) {
      case GLSL_TYPE_FLOAT:
         emit(MOV(*dst, src_reg(ir->value.f[i])));
         break;
      case GLSL_TYPE_INT:
         emit(MOV(*dst, src_reg(ir->value.i[i])));
         break;
      case GLSL_TYPE_UINT:
         emit(MOV(*dst, src_reg(ir->value.u[i])));
         break;
      case GLSL_TYPE_BOOL:
         emit(MOV(*dst, src_reg(ir->value.b[i])));
         break;
      default:
         assert(!"Non-float/uint/int/bool constant");
         break;
      }

      remaining_writemask &= ~dst->writemask;
   }
   dst->reg_offset++;
}

void
vec4_visitor::visit(ir_constant *ir)
{
   dst_reg dst = dst_reg(this, ir->type);
   this->result = src_reg(dst);

   emit_constant_values(&dst, ir);
}

void
vec4_visitor::visit(ir_call *ir)
{
   assert(!"not reached");
}

void
vec4_visitor::visit(ir_texture *ir)
{
   int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &vp->Base);
   sampler = vp->Base.SamplerUnits[sampler];

   /* Should be lowered by do_lower_texture_projection */
   assert(!ir->projector);

   vec4_instruction *inst = NULL;
   switch (ir->op) {
   case ir_tex:
   case ir_txl:
      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXL);
      break;
   case ir_txd:
      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXD);
      break;
   case ir_txf:
      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXF);
      break;
   case ir_txs:
      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXS);
      break;
   case ir_txb:
      assert(!"TXB is not valid for vertex shaders.");
   }

   /* Texel offsets go in the message header; Gen4 also requires headers. */
   inst->header_present = ir->offset || intel->gen < 5;
   inst->base_mrf = 2;
   inst->mlen = inst->header_present + 1; /* always at least one */
   inst->sampler = sampler;
   inst->dst = dst_reg(this, ir->type);
   inst->shadow_compare = ir->shadow_comparitor != NULL;

   if (ir->offset != NULL && ir->op != ir_txf)
      inst->texture_offset = brw_texture_offset(ir->offset->as_constant());

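   /* The sampler message is built as [optional header] [coordinate vec4]
    * [shadow comparator / LOD / gradient parameters], one MRF each;
    * inst->mlen was seeded above and is bumped below as each additional
    * parameter is loaded.
    */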
1844   /* MRF for the first parameter */
1845   int param_base = inst->base_mrf + inst->header_present;
1846
1847   if (ir->op == ir_txs) {
1848      ir->lod_info.lod->accept(this);
1849      int writemask = intel->gen == 4 ? WRITEMASK_W : WRITEMASK_X;
1850      emit(MOV(dst_reg(MRF, param_base, ir->lod_info.lod->type, writemask),
1851	   this->result));
1852   } else {
1853      int i, coord_mask = 0, zero_mask = 0;
1854      /* Load the coordinate */
1855      /* FINISHME: gl_clamp_mask and saturate */
      for (i = 0; i < ir->coordinate->type->vector_elements; i++)
         coord_mask |= (1 << i);
      for (; i < 4; i++)
         zero_mask |= (1 << i);

      ir->coordinate->accept(this);
      if (ir->offset && ir->op == ir_txf) {
         /* It appears that the ld instruction used for txf does its
          * address bounds check before adding in the offset.  To work
          * around this, just add the integer offset to the integer
          * texel coordinate, and don't put the offset in the header.
          */
         ir_constant *offset = ir->offset->as_constant();
         assert(offset);

         for (int j = 0; j < ir->coordinate->type->vector_elements; j++) {
            src_reg src = this->result;
            src.swizzle = BRW_SWIZZLE4(BRW_GET_SWZ(src.swizzle, j),
                                       BRW_GET_SWZ(src.swizzle, j),
                                       BRW_GET_SWZ(src.swizzle, j),
                                       BRW_GET_SWZ(src.swizzle, j));
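            /* Broadcast component j of the coordinate across all four
             * channels, so the single-channel ADD below lands the sum
             * in channel j of the MRF.
             */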
            emit(ADD(dst_reg(MRF, param_base, ir->coordinate->type, 1 << j),
                     src, offset->value.i[j]));
         }
      } else {
         emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, coord_mask),
                  this->result));
      }
      emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, zero_mask),
               src_reg(0)));
      /* Load the shadow comparator */
      if (ir->shadow_comparitor) {
         ir->shadow_comparitor->accept(this);
         emit(MOV(dst_reg(MRF, param_base + 1, ir->shadow_comparitor->type,
                          WRITEMASK_X),
                  this->result));
         inst->mlen++;
      }

      /* Load the LOD info */
      if (ir->op == ir_txl) {
         int mrf, writemask;
         if (intel->gen >= 5) {
            mrf = param_base + 1;
            if (ir->shadow_comparitor) {
               writemask = WRITEMASK_Y;
               /* mlen already incremented */
            } else {
               writemask = WRITEMASK_X;
               inst->mlen++;
            }
         } else /* intel->gen == 4 */ {
            mrf = param_base;
            writemask = WRITEMASK_Z;
         }
         ir->lod_info.lod->accept(this);
         emit(MOV(dst_reg(MRF, mrf, ir->lod_info.lod->type, writemask),
                  this->result));
      } else if (ir->op == ir_txf) {
         ir->lod_info.lod->accept(this);
         emit(MOV(dst_reg(MRF, param_base, ir->lod_info.lod->type, WRITEMASK_W),
                  this->result));
      } else if (ir->op == ir_txd) {
         const glsl_type *type = ir->lod_info.grad.dPdx->type;

         ir->lod_info.grad.dPdx->accept(this);
         src_reg dPdx = this->result;
         ir->lod_info.grad.dPdy->accept(this);
         src_reg dPdy = this->result;

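         /* Gen5+ packs both gradients into shared MRFs: dPdx goes in
          * the .x/.z channels and dPdy in .y/.w, with a second MRF for
          * the third gradient component when one is needed.  Gen4
          * sends each gradient in a full MRF of its own.
          */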
         if (intel->gen >= 5) {
            dPdx.swizzle = BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_X,
                                        SWIZZLE_Y, SWIZZLE_Y);
            dPdy.swizzle = BRW_SWIZZLE4(SWIZZLE_X, SWIZZLE_X,
                                        SWIZZLE_Y, SWIZZLE_Y);
            emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XZ), dPdx));
            emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_YW), dPdy));
            inst->mlen++;

            if (ir->type->vector_elements == 3) {
               dPdx.swizzle = BRW_SWIZZLE_ZZZZ;
               dPdy.swizzle = BRW_SWIZZLE_ZZZZ;
               emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_X), dPdx));
               emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_Y), dPdy));
               inst->mlen++;
            }
         } else /* intel->gen == 4 */ {
            emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XYZ), dPdx));
            emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_XYZ), dPdy));
            inst->mlen += 2;
         }
      }
   }

   emit(inst);

   swizzle_result(ir, src_reg(inst->dst), sampler);
}

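/**
 * Apply the result swizzle from the program key to a sampler result.
 *
 * The key swizzles exist so that texture swizzling (e.g. for
 * GL_EXT_texture_swizzle or legacy depth-texture modes) can be applied
 * when the hardware sampler doesn't do it for us.
 */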
void
vec4_visitor::swizzle_result(ir_texture *ir, src_reg orig_val, int sampler)
{
   this->result = orig_val;

   int s = c->key.tex.swizzles[sampler];

   if (ir->op == ir_txs || ir->type == glsl_type::float_type ||
       s == SWIZZLE_NOOP)
      return;

   int zero_mask = 0, one_mask = 0, copy_mask = 0;
   /* Initialize all four entries: channels routed to SWIZZLE_ZERO or
    * SWIZZLE_ONE never set swizzle[i] below, but the BRW_SWIZZLE4() for
    * the copy_mask MOV still reads every entry.
    */
   int swizzle[4] = { 0, 0, 0, 0 };

   for (int i = 0; i < 4; i++) {
      switch (GET_SWZ(s, i)) {
      case SWIZZLE_ZERO:
         zero_mask |= (1 << i);
         break;
      case SWIZZLE_ONE:
         one_mask |= (1 << i);
         break;
      default:
         copy_mask |= (1 << i);
         swizzle[i] = GET_SWZ(s, i);
         break;
      }
   }

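   /* Gather the channels that come from the sampler result with a
    * single swizzled MOV, then fill any SWIZZLE_ZERO and SWIZZLE_ONE
    * channels with immediates.
    */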
   this->result = src_reg(this, ir->type);
   dst_reg swizzled_result(this->result);

   if (copy_mask) {
      orig_val.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1],
                                      swizzle[2], swizzle[3]);
      swizzled_result.writemask = copy_mask;
      emit(MOV(swizzled_result, orig_val));
   }

   if (zero_mask) {
      swizzled_result.writemask = zero_mask;
      emit(MOV(swizzled_result, src_reg(0.0f)));
   }

   if (one_mask) {
      swizzled_result.writemask = one_mask;
      emit(MOV(swizzled_result, src_reg(1.0f)));
   }
}

void
vec4_visitor::visit(ir_return *ir)
{
   assert(!"not reached");
}

void
vec4_visitor::visit(ir_discard *ir)
{
   assert(!"not reached");
}

void
vec4_visitor::visit(ir_if *ir)
{
   /* Don't point the annotation at the if statement, because then it plus
    * the then and else blocks get printed.
    */
   this->base_ir = ir->condition;

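   /* Gen6 has an IF instruction that can evaluate the comparison
    * itself, which emit_if_gen6() takes advantage of; on the other
    * generations we evaluate the condition into a predicate first and
    * emit a predicated IF.
    */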
   if (intel->gen == 6) {
      emit_if_gen6(ir);
   } else {
      uint32_t predicate;
      emit_bool_to_cond_code(ir->condition, &predicate);
      emit(IF(predicate));
   }

   visit_instructions(&ir->then_instructions);

   if (!ir->else_instructions.is_empty()) {
      this->base_ir = ir->condition;
      emit(BRW_OPCODE_ELSE);

      visit_instructions(&ir->else_instructions);
   }

   this->base_ir = ir->condition;
   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::emit_ndc_computation()
{
   /* Get the position */
   src_reg pos = src_reg(output_reg[VERT_RESULT_HPOS]);

   /* Build ndc coords, which are (x/w, y/w, z/w, 1/w) */
   dst_reg ndc = dst_reg(this, glsl_type::vec4_type);
   output_reg[BRW_VERT_RESULT_NDC] = ndc;

   current_annotation = "NDC";
   dst_reg ndc_w = ndc;
   ndc_w.writemask = WRITEMASK_W;
   src_reg pos_w = pos;
   pos_w.swizzle = BRW_SWIZZLE4(SWIZZLE_W, SWIZZLE_W, SWIZZLE_W, SWIZZLE_W);
   emit_math(SHADER_OPCODE_RCP, ndc_w, pos_w);

   dst_reg ndc_xyz = ndc;
   ndc_xyz.writemask = WRITEMASK_XYZ;

   emit(MUL(ndc_xyz, pos, src_reg(ndc_w)));
}

void
vec4_visitor::emit_psiz_and_flags(struct brw_reg reg)
{
   if (intel->gen < 6 &&
       ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
        c->key.userclip_active || brw->has_negative_rhw_bug)) {
      dst_reg header1 = dst_reg(this, glsl_type::uvec4_type);
      dst_reg header1_w = header1;
      header1_w.writemask = WRITEMASK_W;
      GLuint i;

      emit(MOV(header1, 0u));

      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
         src_reg psiz = src_reg(output_reg[VERT_RESULT_PSIZ]);

         current_annotation = "Point size";
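         /* Scale the float point size into the 11-bit fixed-point
          * point-width field at bits 18:8 of the header word
          * (effectively U8.3), then mask to that field, presumably so
          * an out-of-range size can't spill into the neighboring bits.
          */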
         emit(MUL(header1_w, psiz, src_reg((float)(1 << 11))));
         emit(AND(header1_w, src_reg(header1_w), 0x7ff << 8));
      }

      current_annotation = "Clipping flags";
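      /* One bit per user clip plane: the DP4 computes the signed clip
       * distance of the position from plane i and sets the flag when
       * it is negative, and the predicated OR then records that
       * "outside" state in bit i of the header word.
       */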
      for (i = 0; i < c->key.nr_userclip_plane_consts; i++) {
         vec4_instruction *inst;

         inst = emit(DP4(dst_null_f(), src_reg(output_reg[VERT_RESULT_HPOS]),
                         src_reg(this->userplane[i])));
         inst->conditional_mod = BRW_CONDITIONAL_L;

         inst = emit(OR(header1_w, src_reg(header1_w), 1u << i));
         inst->predicate = BRW_PREDICATE_NORMAL;
      }

      /* i965 clipping workaround:
       * 1) Test for negative RHW.
       * 2) If the test passes:
       *      set ndc = (0,0,0,0)
       *      set ucp[6] = 1
       *
       * Later, clipping will detect ucp[6] and ensure the primitive is
       * clipped against all fixed planes.
       */
      if (brw->has_negative_rhw_bug) {
#if 0
         /* FINISHME */
         brw_CMP(p,
                 vec8(brw_null_reg()),
                 BRW_CONDITIONAL_L,
                 brw_swizzle1(output_reg[BRW_VERT_RESULT_NDC], 3),
                 brw_imm_f(0));

         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
         brw_MOV(p, output_reg[BRW_VERT_RESULT_NDC], brw_imm_f(0));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
#endif
      }

      emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), src_reg(header1)));
   } else if (intel->gen < 6) {
      emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u));
   } else {
      emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0)));
      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
         emit(MOV(brw_writemask(reg, WRITEMASK_W),
                  src_reg(output_reg[VERT_RESULT_PSIZ])));
      }
   }
}

void
vec4_visitor::emit_clip_distances(struct brw_reg reg, int offset)
{
   if (intel->gen < 6) {
      /* Clip distance slots are set aside in gen5, but they are not used.  It
       * is not clear whether we actually need to set aside space for them,
       * but the performance cost is negligible.
       */
      return;
   }

   /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables):
    *
    *     "If a linked set of shaders forming the vertex stage contains no
    *     static write to gl_ClipVertex or gl_ClipDistance, but the
    *     application has requested clipping against user clip planes through
    *     the API, then the coordinate written to gl_Position is used for
    *     comparison against the user clip planes."
    *
    * This function is only called if the shader didn't write to
    * gl_ClipDistance.  Accordingly, we use gl_ClipVertex to perform clipping
    * if the user wrote to it; otherwise we use gl_Position.
    */
   gl_vert_result clip_vertex = VERT_RESULT_CLIP_VERTEX;
   if (!(c->prog_data.outputs_written
         & BITFIELD64_BIT(VERT_RESULT_CLIP_VERTEX))) {
      clip_vertex = VERT_RESULT_HPOS;
   }

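   /* Each DP4 below writes one clip distance: the dot product of the
    * chosen coordinate with user clip plane (i + offset), i.e. its
    * signed clip distance.
    */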
   for (int i = 0; i + offset < c->key.nr_userclip_plane_consts && i < 4;
        ++i) {
      emit(DP4(dst_reg(brw_writemask(reg, 1 << i)),
               src_reg(output_reg[clip_vertex]),
               src_reg(this->userplane[i + offset])));
   }
}

void
vec4_visitor::emit_generic_urb_slot(dst_reg reg, int vert_result)
{
   assert(vert_result < VERT_RESULT_MAX);
   reg.type = output_reg[vert_result].type;
   current_annotation = output_reg_annotation[vert_result];
   /* Copy the register, saturating if necessary */
   vec4_instruction *inst = emit(MOV(reg,
                                     src_reg(output_reg[vert_result])));
   if ((vert_result == VERT_RESULT_COL0 ||
        vert_result == VERT_RESULT_COL1 ||
        vert_result == VERT_RESULT_BFC0 ||
        vert_result == VERT_RESULT_BFC1) &&
       c->key.clamp_vertex_color) {
      inst->saturate = true;
   }
}

void
vec4_visitor::emit_urb_slot(int mrf, int vert_result)
{
   struct brw_reg hw_reg = brw_message_reg(mrf);
   dst_reg reg = dst_reg(MRF, mrf);
   reg.type = BRW_REGISTER_TYPE_F;

   switch (vert_result) {
   case VERT_RESULT_PSIZ:
      /* PSIZ is always in slot 0, and is coupled with other flags. */
      current_annotation = "indices, point width, clip flags";
      emit_psiz_and_flags(hw_reg);
      break;
   case BRW_VERT_RESULT_NDC:
      current_annotation = "NDC";
      emit(MOV(reg, src_reg(output_reg[BRW_VERT_RESULT_NDC])));
      break;
   case BRW_VERT_RESULT_HPOS_DUPLICATE:
   case VERT_RESULT_HPOS:
      current_annotation = "gl_Position";
      emit(MOV(reg, src_reg(output_reg[VERT_RESULT_HPOS])));
      break;
   case VERT_RESULT_CLIP_DIST0:
   case VERT_RESULT_CLIP_DIST1:
      if (this->c->key.uses_clip_distance) {
         emit_generic_urb_slot(reg, vert_result);
      } else {
         current_annotation = "user clip distances";
         emit_clip_distances(hw_reg, (vert_result - VERT_RESULT_CLIP_DIST0) * 4);
      }
      break;
   case BRW_VERT_RESULT_PAD:
      /* No need to write to this slot */
      break;
   default:
      emit_generic_urb_slot(reg, vert_result);
      break;
   }
}

static int
align_interleaved_urb_mlen(struct brw_context *brw, int mlen)
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen >= 6) {
      /* URB data written (does not include the message header reg) must
       * be a multiple of 256 bits, or 2 VS registers.  See vol5c.5,
       * section 5.4.3.2.2: URB_INTERLEAVED.
       *
       * URB entries are allocated on a multiple of 1024 bits, so an
       * extra 128 bits written here to make the end align to 256 is
       * no problem.
       */
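      /* mlen includes the header reg, so it must be odd to leave an
       * even number of data regs: e.g. a header plus 5 data regs
       * (mlen 6) is padded to mlen 7, making the data payload 6 regs,
       * or 3 x 256 bits.
       */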
      if ((mlen % 2) != 1)
         mlen++;
   }

   return mlen;
}

/**
 * Generates the VUE payload plus the 1 or 2 URB write instructions to
 * complete the VS thread.
 *
 * The VUE layout is documented in Volume 2a.
 */
void
vec4_visitor::emit_urb_writes()
{
   /* MRF 0 is reserved for the debugger, so start with message header
    * in MRF 1.
    */
   int base_mrf = 1;
   int mrf = base_mrf;
   /* In the process of generating our URB write message contents, we
    * may need to unspill a register or load from an array.  Those
    * reads would use MRFs 14-15.
    */
   int max_usable_mrf = 13;

   /* The following assertion verifies that max_usable_mrf causes an
    * even-numbered amount of URB write data, which will meet gen6's
    * requirements for length alignment.
    */
   assert((max_usable_mrf - base_mrf) % 2 == 0);

   /* FINISHME: edgeflag */

   /* First mrf is the g0-based message header containing URB handles and such,
    * which is implied in VS_OPCODE_URB_WRITE.
    */
   mrf++;

   if (intel->gen < 6) {
      emit_ndc_computation();
   }

   /* Set up the VUE data for the first URB write */
   int slot;
   for (slot = 0; slot < c->prog_data.vue_map.num_slots; ++slot) {
      emit_urb_slot(mrf++, c->prog_data.vue_map.slot_to_vert_result[slot]);

      /* If this was max_usable_mrf, we can't fit anything more into this URB
       * WRITE.
       */
      if (mrf > max_usable_mrf) {
         slot++;
         break;
      }
   }

   current_annotation = "URB write";
   vec4_instruction *inst = emit(VS_OPCODE_URB_WRITE);
   inst->base_mrf = base_mrf;
   inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
   inst->eot = (slot >= c->prog_data.vue_map.num_slots);

   /* Optional second URB write */
   if (!inst->eot) {
      mrf = base_mrf + 1;

      for (; slot < c->prog_data.vue_map.num_slots; ++slot) {
         assert(mrf < max_usable_mrf);

         emit_urb_slot(mrf++, c->prog_data.vue_map.slot_to_vert_result[slot]);
      }

      current_annotation = "URB write";
      inst = emit(VS_OPCODE_URB_WRITE);
      inst->base_mrf = base_mrf;
      inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
      inst->eot = true;
      /* URB destination offset.  In the previous write, we got MRFs
       * 2-13 minus the one header MRF, so 12 regs.  URB offset is in
       * URB row increments, and each of our MRFs is half of one of
       * those, since we're doing interleaved writes.
       */
      inst->offset = (max_usable_mrf - base_mrf) / 2;
   }
}

src_reg
vec4_visitor::get_scratch_offset(vec4_instruction *inst,
                                 src_reg *reladdr, int reg_offset)
{
   /* Because we store the values to scratch interleaved like our
    * vertex data, we need to scale the vec4 index by 2.
    */
   int message_header_scale = 2;

   /* Pre-gen6, the message header uses byte offsets instead of vec4
    * (16-byte) offset units.
    */
   if (intel->gen < 6)
      message_header_scale *= 16;

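   /* e.g. vec4 element 3 becomes scratch offset 3 * 2 = 6 on Gen6+,
    * or 3 * 2 * 16 = 96 bytes on earlier generations.
    */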
   if (reladdr) {
      src_reg index = src_reg(this, glsl_type::int_type);

      emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset)));
      emit_before(inst, MUL(dst_reg(index),
                            index, src_reg(message_header_scale)));

      return index;
   } else {
      return src_reg(reg_offset * message_header_scale);
   }
}

src_reg
vec4_visitor::get_pull_constant_offset(vec4_instruction *inst,
                                       src_reg *reladdr, int reg_offset)
{
   if (reladdr) {
      src_reg index = src_reg(this, glsl_type::int_type);

      emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset)));

      /* Pre-gen6, the message header uses byte offsets instead of vec4
       * (16-byte) offset units.
       */
      if (intel->gen < 6) {
         emit_before(inst, MUL(dst_reg(index), index, src_reg(16)));
      }

      return index;
   } else {
      int message_header_scale = intel->gen < 6 ? 16 : 1;
      return src_reg(reg_offset * message_header_scale);
   }
}

/**
 * Emits an instruction before @inst to load the value named by @orig_src
 * from scratch space at @base_offset to @temp.
 */
void
vec4_visitor::emit_scratch_read(vec4_instruction *inst,
                                dst_reg temp, src_reg orig_src,
                                int base_offset)
{
   int reg_offset = base_offset + orig_src.reg_offset;
   src_reg index = get_scratch_offset(inst, orig_src.reladdr, reg_offset);

   emit_before(inst, SCRATCH_READ(temp, index));
}

/**
 * Emits an instruction after @inst to store the value to be written
 * to @orig_dst to scratch space at @base_offset, from @temp.
 */
void
vec4_visitor::emit_scratch_write(vec4_instruction *inst,
                                 src_reg temp, dst_reg orig_dst,
                                 int base_offset)
{
   int reg_offset = base_offset + orig_dst.reg_offset;
   src_reg index = get_scratch_offset(inst, orig_dst.reladdr, reg_offset);

   dst_reg dst = dst_reg(brw_writemask(brw_vec8_grf(0, 0),
                                       orig_dst.writemask));
   vec4_instruction *write = SCRATCH_WRITE(dst, temp, index);
   write->predicate = inst->predicate;
   write->ir = inst->ir;
   write->annotation = inst->annotation;
   inst->insert_after(write);
}

/**
 * We can't generally support array access in GRF space, because a
 * single instruction's destination can only span 2 contiguous
 * registers.  So, we send all GRF arrays that get variable index
 * access to scratch space.
 */
void
vec4_visitor::move_grf_array_access_to_scratch()
{
   int scratch_loc[this->virtual_grf_count];

   for (int i = 0; i < this->virtual_grf_count; i++) {
      scratch_loc[i] = -1;
   }

   /* First, calculate the set of virtual GRFs that need to be punted
    * to scratch due to having any array access on them, and where in
    * scratch.
    */
   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      if (inst->dst.file == GRF && inst->dst.reladdr &&
          scratch_loc[inst->dst.reg] == -1) {
         scratch_loc[inst->dst.reg] = c->last_scratch;
         c->last_scratch += this->virtual_grf_sizes[inst->dst.reg] * 8 * 4;
      }

      for (int i = 0; i < 3; i++) {
         src_reg *src = &inst->src[i];

         if (src->file == GRF && src->reladdr &&
             scratch_loc[src->reg] == -1) {
            scratch_loc[src->reg] = c->last_scratch;
            c->last_scratch += this->virtual_grf_sizes[src->reg] * 8 * 4;
         }
      }
   }

   /* Now, for anything that will be accessed through scratch, rewrite
    * it to load/store.  Note that this is a _safe list walk, because
    * we may generate a new scratch_write instruction after the one
    * we're processing.
    */
   foreach_list_safe(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      /* Set up the annotation tracking for newly generated instructions. */
      base_ir = inst->ir;
      current_annotation = inst->annotation;

      if (inst->dst.file == GRF && scratch_loc[inst->dst.reg] != -1) {
         src_reg temp = src_reg(this, glsl_type::vec4_type);

         emit_scratch_write(inst, temp, inst->dst, scratch_loc[inst->dst.reg]);

         inst->dst.file = temp.file;
         inst->dst.reg = temp.reg;
         inst->dst.reg_offset = temp.reg_offset;
         inst->dst.reladdr = NULL;
      }

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != GRF || scratch_loc[inst->src[i].reg] == -1)
            continue;

         dst_reg temp = dst_reg(this, glsl_type::vec4_type);

         emit_scratch_read(inst, temp, inst->src[i],
                           scratch_loc[inst->src[i].reg]);

         inst->src[i].file = temp.file;
         inst->src[i].reg = temp.reg;
         inst->src[i].reg_offset = temp.reg_offset;
         inst->src[i].reladdr = NULL;
      }
   }
}

/**
 * Emits an instruction before @inst to load the value named by @orig_src
 * from the pull constant buffer (surface) at @base_offset to @temp.
 */
void
vec4_visitor::emit_pull_constant_load(vec4_instruction *inst,
                                      dst_reg temp, src_reg orig_src,
                                      int base_offset)
{
   int reg_offset = base_offset + orig_src.reg_offset;
   src_reg index = get_pull_constant_offset(inst, orig_src.reladdr, reg_offset);
   vec4_instruction *load;

   load = new(mem_ctx) vec4_instruction(this, VS_OPCODE_PULL_CONSTANT_LOAD,
                                        temp, index);
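   /* MRFs 14-15 are set aside for unspills and pull constant loads
    * (see the max_usable_mrf reasoning in emit_urb_writes()), which is
    * why this message is built at MRF 14.
    */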
   load->base_mrf = 14;
   load->mlen = 1;
   emit_before(inst, load);
}

/**
 * Implements array access of uniforms by inserting a
 * PULL_CONSTANT_LOAD instruction.
 *
 * Unlike temporary GRF array access (where we don't support it due to
 * the difficulty of doing relative addressing on instruction
 * destinations), we could potentially do array access of uniforms
 * that were loaded in GRF space as push constants.  In real-world
 * usage we've seen, though, the arrays being used are always larger
 * than we could load as push constants, so just always move all
 * uniform array access out to a pull constant buffer.
 */
void
vec4_visitor::move_uniform_array_access_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   for (int i = 0; i < this->uniforms; i++) {
      pull_constant_loc[i] = -1;
   }

   /* Walk through and find array access of uniforms.  Put a copy of that
    * uniform in the pull constant buffer.
    *
    * Note that we don't move constant-indexed accesses to arrays.  No
    * testing has been done of the performance impact of this choice.
    */
   foreach_list_safe(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
            continue;

         int uniform = inst->src[i].reg;

         /* If this array isn't already present in the pull constant buffer,
          * add it.
          */
         if (pull_constant_loc[uniform] == -1) {
            const float **values = &prog_data->param[uniform * 4];

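            /* pull_constant_loc is in vec4 units, while nr_pull_params
             * counts scalar floats, hence the divide by 4.
             */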
            pull_constant_loc[uniform] = prog_data->nr_pull_params / 4;

            for (int j = 0; j < uniform_size[uniform] * 4; j++) {
               prog_data->pull_param[prog_data->nr_pull_params++] = values[j];
            }
         }

         /* Set up the annotation tracking for newly generated instructions. */
         base_ir = inst->ir;
         current_annotation = inst->annotation;

         dst_reg temp = dst_reg(this, glsl_type::vec4_type);

         emit_pull_constant_load(inst, temp, inst->src[i],
                                 pull_constant_loc[uniform]);

         inst->src[i].file = temp.file;
         inst->src[i].reg = temp.reg;
         inst->src[i].reg_offset = temp.reg_offset;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Now there are no accesses of the UNIFORM file with a reladdr, so
    * no need to track them as larger-than-vec4 objects.  This will be
    * relied on in cutting out unused uniform vectors from push
    * constants.
    */
   split_uniform_registers();
}

void
vec4_visitor::resolve_ud_negate(src_reg *reg)
{
   if (reg->type != BRW_REGISTER_TYPE_UD || !reg->negate)
      return;

   src_reg temp = src_reg(this, glsl_type::uvec4_type);
   emit(BRW_OPCODE_MOV, dst_reg(temp), *reg);
   *reg = temp;
}

vec4_visitor::vec4_visitor(struct brw_vs_compile *c,
                           struct gl_shader_program *prog,
                           struct brw_shader *shader)
{
   this->c = c;
   this->p = &c->func;
   this->brw = p->brw;
   this->intel = &brw->intel;
   this->ctx = &intel->ctx;
   this->prog = prog;
   this->shader = shader;

   this->mem_ctx = ralloc_context(NULL);
   this->failed = false;

   this->base_ir = NULL;
   this->current_annotation = NULL;

   this->vp = (struct gl_vertex_program *)
      prog->_LinkedShaders[MESA_SHADER_VERTEX]->Program;
   this->prog_data = &c->prog_data;

   this->variable_ht = hash_table_ctor(0,
                                       hash_table_pointer_hash,
                                       hash_table_pointer_compare);

   this->virtual_grf_def = NULL;
   this->virtual_grf_use = NULL;
   this->virtual_grf_sizes = NULL;
   this->virtual_grf_count = 0;
   this->virtual_grf_reg_map = NULL;
   this->virtual_grf_reg_count = 0;
   this->virtual_grf_array_size = 0;
   this->live_intervals_valid = false;

   this->max_grf = intel->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   this->uniforms = 0;
}

vec4_visitor::~vec4_visitor()
{
   ralloc_free(this->mem_ctx);
   hash_table_dtor(this->variable_ht);
}


void
vec4_visitor::fail(const char *format, ...)
{
   va_list va;
   char *msg;

   if (failed)
      return;

   failed = true;

   va_start(va, format);
   msg = ralloc_vasprintf(mem_ctx, format, va);
   va_end(va);
   msg = ralloc_asprintf(mem_ctx, "VS compile failed: %s\n", msg);

   this->fail_msg = msg;

   if (INTEL_DEBUG & DEBUG_VS) {
      fprintf(stderr, "%s", msg);
   }
}

} /* namespace brw */
