brw_vec4_visitor.cpp revision 9f3d3216cf25d8ffed4d72fbce6feacbc2990e4b
1/*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "brw_vec4.h"
25extern "C" {
26#include "main/macros.h"
27#include "program/prog_parameter.h"
28#include "program/sampler.h"
29}
30
31namespace brw {
32
33src_reg::src_reg(dst_reg reg)
34{
35   init();
36
37   this->file = reg.file;
38   this->reg = reg.reg;
39   this->reg_offset = reg.reg_offset;
40   this->type = reg.type;
41   this->reladdr = reg.reladdr;
42   this->fixed_hw_reg = reg.fixed_hw_reg;
43
44   int swizzles[4];
45   int next_chan = 0;
46   int last = 0;
47
48   for (int i = 0; i < 4; i++) {
49      if (!(reg.writemask & (1 << i)))
50	 continue;
51
52      swizzles[next_chan++] = last = i;
53   }
54
55   for (; next_chan < 4; next_chan++) {
56      swizzles[next_chan] = last;
57   }
58
59   this->swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
60				swizzles[2], swizzles[3]);
61}
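/* A worked example of the conversion above: a dst_reg with writemask
 * WRITEMASK_XZ packs its enabled channels first and then replicates the
 * last one, so the resulting src_reg reads with swizzle
 * BRW_SWIZZLE4(0, 2, 2, 2), i.e. .xzzz.
 */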
62
63dst_reg::dst_reg(src_reg reg)
64{
65   init();
66
67   this->file = reg.file;
68   this->reg = reg.reg;
69   this->reg_offset = reg.reg_offset;
70   this->type = reg.type;
71   this->writemask = WRITEMASK_XYZW;
72   this->reladdr = reg.reladdr;
73   this->fixed_hw_reg = reg.fixed_hw_reg;
74}
75
76vec4_instruction::vec4_instruction(vec4_visitor *v,
77				   enum opcode opcode, dst_reg dst,
78				   src_reg src0, src_reg src1, src_reg src2)
79{
80   this->opcode = opcode;
81   this->dst = dst;
82   this->src[0] = src0;
83   this->src[1] = src1;
84   this->src[2] = src2;
85   this->ir = v->base_ir;
86   this->annotation = v->current_annotation;
87}
88
89vec4_instruction *
90vec4_visitor::emit(vec4_instruction *inst)
91{
92   this->instructions.push_tail(inst);
93
94   return inst;
95}
96
97vec4_instruction *
98vec4_visitor::emit_before(vec4_instruction *inst, vec4_instruction *new_inst)
99{
100   new_inst->ir = inst->ir;
101   new_inst->annotation = inst->annotation;
102
103   inst->insert_before(new_inst);
104
105   return inst;
106}
107
108vec4_instruction *
109vec4_visitor::emit(enum opcode opcode, dst_reg dst,
110		   src_reg src0, src_reg src1, src_reg src2)
111{
112   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst,
113					     src0, src1, src2));
114}
115
116
117vec4_instruction *
118vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1)
119{
120   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0, src1));
121}
122
123vec4_instruction *
124vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0)
125{
126   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0));
127}
128
129vec4_instruction *
130vec4_visitor::emit(enum opcode opcode)
131{
132   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst_reg()));
133}
134
135#define ALU1(op)							\
136   vec4_instruction *							\
137   vec4_visitor::op(dst_reg dst, src_reg src0)				\
138   {									\
139      return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst,	\
140					   src0);			\
141   }
142
143#define ALU2(op)							\
144   vec4_instruction *							\
145   vec4_visitor::op(dst_reg dst, src_reg src0, src_reg src1)		\
146   {									\
147      return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst,	\
148					   src0, src1);			\
149   }
150
151ALU1(NOT)
152ALU1(MOV)
153ALU1(FRC)
154ALU1(RNDD)
155ALU1(RNDE)
156ALU1(RNDZ)
157ALU2(ADD)
158ALU2(MUL)
159ALU2(MACH)
160ALU2(AND)
161ALU2(OR)
162ALU2(XOR)
163ALU2(DP3)
164ALU2(DP4)
165
166/** Gen4 predicated IF. */
167vec4_instruction *
168vec4_visitor::IF(uint32_t predicate)
169{
170   vec4_instruction *inst;
171
172   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF);
173   inst->predicate = predicate;
174
175   return inst;
176}
177
178/** Gen6+ IF with embedded comparison. */
179vec4_instruction *
180vec4_visitor::IF(src_reg src0, src_reg src1, uint32_t condition)
181{
182   assert(intel->gen >= 6);
183
184   vec4_instruction *inst;
185
186   resolve_ud_negate(&src0);
187   resolve_ud_negate(&src1);
188
189   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF, dst_null_d(),
190					src0, src1);
191   inst->conditional_mod = condition;
192
193   return inst;
194}
195
196/**
197 * CMP: Sets the low bit of the destination channels with the result
198 * of the comparison, while the upper bits are undefined, and updates
199 * the flag register with the packed 16 bits of the result.
200 */
201vec4_instruction *
202vec4_visitor::CMP(dst_reg dst, src_reg src0, src_reg src1, uint32_t condition)
203{
204   vec4_instruction *inst;
205
206   /* original gen4 does type conversion to the destination type
207    * before comparison, producing garbage results for floating
208    * point comparisons.
209    */
210   if (intel->gen == 4) {
211      dst.type = src0.type;
212      if (dst.file == HW_REG)
213	 dst.fixed_hw_reg.type = dst.type;
214   }
215
216   resolve_ud_negate(&src0);
217   resolve_ud_negate(&src1);
218
219   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_CMP, dst, src0, src1);
220   inst->conditional_mod = condition;
221
222   return inst;
223}
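/* The usual pattern for consuming this, as in visit(ir_expression) below,
 * is to follow the CMP with an AND that masks the destination down to a
 * clean 0/1 boolean, since only the low bit is defined:
 *
 *    emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_L));
 *    emit(AND(result_dst, result_src, src_reg(0x1)));
 */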
224
225vec4_instruction *
226vec4_visitor::SCRATCH_READ(dst_reg dst, src_reg index)
227{
228   vec4_instruction *inst;
229
230   inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_READ,
231					dst, index);
232   inst->base_mrf = 14;
233   inst->mlen = 1;
234
235   return inst;
236}
237
238vec4_instruction *
239vec4_visitor::SCRATCH_WRITE(dst_reg dst, src_reg src, src_reg index)
240{
241   vec4_instruction *inst;
242
243   inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_WRITE,
244					dst, src, index);
245   inst->base_mrf = 13;
246   inst->mlen = 2;
247
248   return inst;
249}
250
251void
252vec4_visitor::emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements)
253{
254   static enum opcode dot_opcodes[] = {
255      BRW_OPCODE_DP2, BRW_OPCODE_DP3, BRW_OPCODE_DP4
256   };
257
258   emit(dot_opcodes[elements - 2], dst, src0, src1);
259}
260
261void
262vec4_visitor::emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src)
263{
264   /* The gen6 math instruction ignores the source modifiers --
265    * swizzle, abs, negate, and at least some parts of the register
266    * region description.
267    *
268    * While it would seem that this MOV could be avoided at this point
269    * in the case that the swizzle is matched up with the destination
270    * writemask, note that uniform packing and register allocation
271    * could rearrange our swizzle, so let's leave this matter up to
272    * copy propagation later.
273    */
274   src_reg temp_src = src_reg(this, glsl_type::vec4_type);
275   emit(MOV(dst_reg(temp_src), src));
276
277   if (dst.writemask != WRITEMASK_XYZW) {
278      /* The gen6 math instruction must be align1, so we can't do
279       * writemasks.
280       */
281      dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type);
282
283      emit(opcode, temp_dst, temp_src);
284
285      emit(MOV(dst, src_reg(temp_dst)));
286   } else {
287      emit(opcode, dst, temp_src);
288   }
289}
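/* So on gen6 a math instruction into, say, a .x-only destination expands to
 * three instructions: MOV temp_src, src; the math op into temp_dst; and a
 * final MOV dst.x, temp_dst -- leaving copy propagation to clean up whatever
 * turns out to be redundant.
 */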
290
291void
292vec4_visitor::emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src)
293{
294   vec4_instruction *inst = emit(opcode, dst, src);
295   inst->base_mrf = 1;
296   inst->mlen = 1;
297}
298
299void
300vec4_visitor::emit_math(opcode opcode, dst_reg dst, src_reg src)
301{
302   switch (opcode) {
303   case SHADER_OPCODE_RCP:
304   case SHADER_OPCODE_RSQ:
305   case SHADER_OPCODE_SQRT:
306   case SHADER_OPCODE_EXP2:
307   case SHADER_OPCODE_LOG2:
308   case SHADER_OPCODE_SIN:
309   case SHADER_OPCODE_COS:
310      break;
311   default:
312      assert(!"not reached: bad math opcode");
313      return;
314   }
315
316   if (intel->gen >= 7) {
317      emit(opcode, dst, src);
318   } else if (intel->gen == 6) {
319      return emit_math1_gen6(opcode, dst, src);
320   } else {
321      return emit_math1_gen4(opcode, dst, src);
322   }
323}
324
325void
326vec4_visitor::emit_math2_gen6(enum opcode opcode,
327			      dst_reg dst, src_reg src0, src_reg src1)
328{
329   src_reg expanded;
330
331   /* The gen6 math instruction ignores the source modifiers --
332    * swizzle, abs, negate, and at least some parts of the register
333    * region description.  Move the sources to temporaries to make it
334    * generally work.
335    */
336
337   expanded = src_reg(this, glsl_type::vec4_type);
338   expanded.type = src0.type;
339   emit(MOV(dst_reg(expanded), src0));
340   src0 = expanded;
341
342   expanded = src_reg(this, glsl_type::vec4_type);
343   expanded.type = src1.type;
344   emit(MOV(dst_reg(expanded), src1));
345   src1 = expanded;
346
347   if (dst.writemask != WRITEMASK_XYZW) {
348      /* The gen6 math instruction must be align1, so we can't do
349       * writemasks.
350       */
351      dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type);
352      temp_dst.type = dst.type;
353
354      emit(opcode, temp_dst, src0, src1);
355
356      emit(MOV(dst, src_reg(temp_dst)));
357   } else {
358      emit(opcode, dst, src0, src1);
359   }
360}
361
362void
363vec4_visitor::emit_math2_gen4(enum opcode opcode,
364			      dst_reg dst, src_reg src0, src_reg src1)
365{
366   vec4_instruction *inst = emit(opcode, dst, src0, src1);
367   inst->base_mrf = 1;
368   inst->mlen = 2;
369}
370
371void
372vec4_visitor::emit_math(enum opcode opcode,
373			dst_reg dst, src_reg src0, src_reg src1)
374{
375   switch (opcode) {
376   case SHADER_OPCODE_POW:
377   case SHADER_OPCODE_INT_QUOTIENT:
378   case SHADER_OPCODE_INT_REMAINDER:
379      break;
380   default:
381      assert(!"not reached: unsupported binary math opcode");
382      return;
383   }
384
385   if (intel->gen >= 7) {
386      emit(opcode, dst, src0, src1);
387   } else if (intel->gen == 6) {
388      return emit_math2_gen6(opcode, dst, src0, src1);
389   } else {
390      return emit_math2_gen4(opcode, dst, src0, src1);
391   }
392}
393
394void
395vec4_visitor::visit_instructions(const exec_list *list)
396{
397   foreach_list(node, list) {
398      ir_instruction *ir = (ir_instruction *)node;
399
400      base_ir = ir;
401      ir->accept(this);
402   }
403}
404
405
406static int
407type_size(const struct glsl_type *type)
408{
409   unsigned int i;
410   int size;
411
412   switch (type->base_type) {
413   case GLSL_TYPE_UINT:
414   case GLSL_TYPE_INT:
415   case GLSL_TYPE_FLOAT:
416   case GLSL_TYPE_BOOL:
417      if (type->is_matrix()) {
418	 return type->matrix_columns;
419      } else {
420	 /* Regardless of size of vector, it gets a vec4. This is bad
421	  * packing for things like floats, but otherwise arrays become a
422	  * mess.  Hopefully a later pass over the code can pack scalars
423	  * down if appropriate.
424	  */
425	 return 1;
426      }
427   case GLSL_TYPE_ARRAY:
428      assert(type->length > 0);
429      return type_size(type->fields.array) * type->length;
430   case GLSL_TYPE_STRUCT:
431      size = 0;
432      for (i = 0; i < type->length; i++) {
433	 size += type_size(type->fields.structure[i].type);
434      }
435      return size;
436   case GLSL_TYPE_SAMPLER:
437      /* Samplers take up one slot in UNIFORMS[], but they're baked in
438       * at link time.
439       */
440      return 1;
441   default:
442      assert(0);
443      return 0;
444   }
445}
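/* Example sizes returned by type_size(), in units of vec4 slots:
 *
 *    float / vec2 / vec4      -> 1  (every scalar or vector takes a full slot)
 *    mat3                     -> 3  (one slot per column)
 *    vec4[8]                  -> 8
 *    struct { vec3; mat2; }   -> 3  (1 + 2)
 *    sampler2D                -> 1  (slot reserved, value baked in at link)
 */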
446
447int
448vec4_visitor::virtual_grf_alloc(int size)
449{
450   if (virtual_grf_array_size <= virtual_grf_count) {
451      if (virtual_grf_array_size == 0)
452	 virtual_grf_array_size = 16;
453      else
454	 virtual_grf_array_size *= 2;
455      virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int,
456				   virtual_grf_array_size);
457      virtual_grf_reg_map = reralloc(mem_ctx, virtual_grf_reg_map, int,
458				     virtual_grf_array_size);
459   }
460   virtual_grf_reg_map[virtual_grf_count] = virtual_grf_reg_count;
461   virtual_grf_reg_count += size;
462   virtual_grf_sizes[virtual_grf_count] = size;
463   return virtual_grf_count++;
464}
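/* Example allocation sequence: virtual_grf_alloc(1), then (4), then (2)
 * returns virtual GRF numbers 0, 1 and 2, leaves virtual_grf_reg_map as
 * { 0, 1, 5 } and virtual_grf_reg_count at 7 -- the map records each
 * virtual GRF's starting offset in the flattened register space.
 */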
465
466src_reg::src_reg(class vec4_visitor *v, const struct glsl_type *type)
467{
468   init();
469
470   this->file = GRF;
471   this->reg = v->virtual_grf_alloc(type_size(type));
472
473   if (type->is_array() || type->is_record()) {
474      this->swizzle = BRW_SWIZZLE_NOOP;
475   } else {
476      this->swizzle = swizzle_for_size(type->vector_elements);
477   }
478
479   this->type = brw_type_for_base_type(type);
480}
481
482dst_reg::dst_reg(class vec4_visitor *v, const struct glsl_type *type)
483{
484   init();
485
486   this->file = GRF;
487   this->reg = v->virtual_grf_alloc(type_size(type));
488
489   if (type->is_array() || type->is_record()) {
490      this->writemask = WRITEMASK_XYZW;
491   } else {
492      this->writemask = (1 << type->vector_elements) - 1;
493   }
494
495   this->type = brw_type_for_base_type(type);
496}
497
498/* Our support for uniforms is piggy-backed on the struct
499 * gl_vertex_program, because that's where the values actually
500 * get stored, rather than in some global gl_shader_program uniform
501 * store.
502 */
503int
504vec4_visitor::setup_uniform_values(int loc, const glsl_type *type)
505{
506   unsigned int offset = 0;
507   float *values = &this->vp->Base.Parameters->ParameterValues[loc][0].f;
508
509   if (type->is_matrix()) {
510      const glsl_type *column = type->column_type();
511
512      for (unsigned int i = 0; i < type->matrix_columns; i++) {
513	 offset += setup_uniform_values(loc + offset, column);
514      }
515
516      return offset;
517   }
518
519   switch (type->base_type) {
520   case GLSL_TYPE_FLOAT:
521   case GLSL_TYPE_UINT:
522   case GLSL_TYPE_INT:
523   case GLSL_TYPE_BOOL:
524      for (unsigned int i = 0; i < type->vector_elements; i++) {
525	 c->prog_data.param[this->uniforms * 4 + i] = &values[i];
526      }
527
528      /* Set up pad elements to get things aligned to a vec4 boundary. */
529      for (unsigned int i = type->vector_elements; i < 4; i++) {
530	 static float zero = 0;
531
532	 c->prog_data.param[this->uniforms * 4 + i] = &zero;
533      }
534
535      /* Track the size of this uniform vector, for future packing of
536       * uniforms.
537       */
538      this->uniform_vector_size[this->uniforms] = type->vector_elements;
539      this->uniforms++;
540
541      return 1;
542
543   case GLSL_TYPE_STRUCT:
544      for (unsigned int i = 0; i < type->length; i++) {
545	 offset += setup_uniform_values(loc + offset,
546					type->fields.structure[i].type);
547      }
548      return offset;
549
550   case GLSL_TYPE_ARRAY:
551      for (unsigned int i = 0; i < type->length; i++) {
552	 offset += setup_uniform_values(loc + offset, type->fields.array);
553      }
554      return offset;
555
556   case GLSL_TYPE_SAMPLER:
557      /* The sampler takes up a slot, but we don't use any values from it. */
558      return 1;
559
560   default:
561      assert(!"not reached");
562      return 0;
563   }
564}
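/* For example, a lone vec3 uniform fills param[uniforms * 4 + 0..2] with
 * pointers to its three components and param[uniforms * 4 + 3] with the
 * zero pad, records uniform_vector_size[uniforms] = 3, and returns one
 * slot; a mat4 recurses over its four column vectors and returns 4.
 */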
565
566void
567vec4_visitor::setup_uniform_clipplane_values()
568{
569   gl_clip_plane *clip_planes = brw_select_clip_planes(ctx);
570
571   /* Pre-Gen6, we compact clip planes.  For example, if the user
572    * enables just clip planes 0, 1, and 3, we will enable clip planes
573    * 0, 1, and 2 in the hardware, and we'll move clip plane 3 to clip
574    * plane 2.  This simplifies the implementation of the Gen6 clip
575    * thread.
576    *
577    * In Gen6 and later, we don't compact clip planes, because this
578    * simplifies the implementation of gl_ClipDistance.
579    */
580   int compacted_clipplane_index = 0;
581   for (int i = 0; i < c->key.nr_userclip_plane_consts; ++i) {
582      if (intel->gen < 6 &&
583          !(c->key.userclip_planes_enabled_gen_4_5 & (1 << i))) {
584         continue;
585      }
586      this->uniform_vector_size[this->uniforms] = 4;
587      this->userplane[compacted_clipplane_index] = dst_reg(UNIFORM, this->uniforms);
588      this->userplane[compacted_clipplane_index].type = BRW_REGISTER_TYPE_F;
589      for (int j = 0; j < 4; ++j) {
590         c->prog_data.param[this->uniforms * 4 + j] = &clip_planes[i][j];
591      }
592      ++compacted_clipplane_index;
593      ++this->uniforms;
594   }
595}
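/* A concrete example of the compaction described above: on gen4/5 with user
 * clip planes 0, 1 and 3 enabled, the loop maps userplane[0], [1] and [2]
 * to planes 0, 1 and 3, consuming three uniform slots.  On gen6+ every
 * plane index below nr_userclip_plane_consts gets its own slot whether or
 * not it is enabled.
 */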
596
597/* Our support for builtin uniforms is even scarier than non-builtin.
598 * It sits on top of the PROG_STATE_VAR parameters that are
599 * automatically updated from GL context state.
600 */
601void
602vec4_visitor::setup_builtin_uniform_values(ir_variable *ir)
603{
604   const ir_state_slot *const slots = ir->state_slots;
605   assert(ir->state_slots != NULL);
606
607   for (unsigned int i = 0; i < ir->num_state_slots; i++) {
608      /* This state reference has already been set up by ir_to_mesa,
609       * but we'll get the same index back here.  We can reference
610       * ParameterValues directly, since unlike brw_fs.cpp, we never
611       * add new state references during compile.
612       */
613      int index = _mesa_add_state_reference(this->vp->Base.Parameters,
614					    (gl_state_index *)slots[i].tokens);
615      float *values = &this->vp->Base.Parameters->ParameterValues[index][0].f;
616
617      this->uniform_vector_size[this->uniforms] = 0;
618      /* Add each of the unique swizzled channels of the element.
619       * This will end up matching the size of the glsl_type of this field.
620       */
621      int last_swiz = -1;
622      for (unsigned int j = 0; j < 4; j++) {
623	 int swiz = GET_SWZ(slots[i].swizzle, j);
624	 last_swiz = swiz;
625
626	 c->prog_data.param[this->uniforms * 4 + j] = &values[swiz];
627	 if (swiz <= last_swiz)
628	    this->uniform_vector_size[this->uniforms]++;
629      }
630      this->uniforms++;
631   }
632}
633
634dst_reg *
635vec4_visitor::variable_storage(ir_variable *var)
636{
637   return (dst_reg *)hash_table_find(this->variable_ht, var);
638}
639
640void
641vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate)
642{
643   ir_expression *expr = ir->as_expression();
644
645   *predicate = BRW_PREDICATE_NORMAL;
646
647   if (expr) {
648      src_reg op[2];
649      vec4_instruction *inst;
650
651      assert(expr->get_num_operands() <= 2);
652      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
653	 expr->operands[i]->accept(this);
654	 op[i] = this->result;
655
656	 resolve_ud_negate(&op[i]);
657      }
658
659      switch (expr->operation) {
660      case ir_unop_logic_not:
661	 inst = emit(AND(dst_null_d(), op[0], src_reg(1)));
662	 inst->conditional_mod = BRW_CONDITIONAL_Z;
663	 break;
664
665      case ir_binop_logic_xor:
666	 inst = emit(XOR(dst_null_d(), op[0], op[1]));
667	 inst->conditional_mod = BRW_CONDITIONAL_NZ;
668	 break;
669
670      case ir_binop_logic_or:
671	 inst = emit(OR(dst_null_d(), op[0], op[1]));
672	 inst->conditional_mod = BRW_CONDITIONAL_NZ;
673	 break;
674
675      case ir_binop_logic_and:
676	 inst = emit(AND(dst_null_d(), op[0], op[1]));
677	 inst->conditional_mod = BRW_CONDITIONAL_NZ;
678	 break;
679
680      case ir_unop_f2b:
681	 if (intel->gen >= 6) {
682	    emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
683	 } else {
684	    inst = emit(MOV(dst_null_f(), op[0]));
685	    inst->conditional_mod = BRW_CONDITIONAL_NZ;
686	 }
687	 break;
688
689      case ir_unop_i2b:
690	 if (intel->gen >= 6) {
691	    emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
692	 } else {
693	    inst = emit(MOV(dst_null_d(), op[0]));
694	    inst->conditional_mod = BRW_CONDITIONAL_NZ;
695	 }
696	 break;
697
698      case ir_binop_all_equal:
699	 inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
700	 *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
701	 break;
702
703      case ir_binop_any_nequal:
704	 inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
705	 *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
706	 break;
707
708      case ir_unop_any:
709	 inst = emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
710	 *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
711	 break;
712
713      case ir_binop_greater:
714      case ir_binop_gequal:
715      case ir_binop_less:
716      case ir_binop_lequal:
717      case ir_binop_equal:
718      case ir_binop_nequal:
719	 emit(CMP(dst_null_d(), op[0], op[1],
720		  brw_conditional_for_comparison(expr->operation)));
721	 break;
722
723      default:
724	 assert(!"not reached");
725	 break;
726      }
727      return;
728   }
729
730   ir->accept(this);
731
732   resolve_ud_negate(&this->result);
733
734   if (intel->gen >= 6) {
735      vec4_instruction *inst = emit(AND(dst_null_d(),
736					this->result, src_reg(1)));
737      inst->conditional_mod = BRW_CONDITIONAL_NZ;
738   } else {
739      vec4_instruction *inst = emit(MOV(dst_null_d(), this->result));
740      inst->conditional_mod = BRW_CONDITIONAL_NZ;
741   }
742}
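/* For instance, a condition of (a && b) hits the ir_binop_logic_and case
 * and becomes an AND into the null register with conditional mod NZ, with
 * the predicate left at BRW_PREDICATE_NORMAL, while all(equal(v1, v2))
 * becomes a CMP with BRW_CONDITIONAL_Z and switches the predicate to
 * BRW_PREDICATE_ALIGN16_ALL4H so that all four channels must match.
 */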
743
744/**
745 * Emit a gen6 IF statement with the comparison folded into the IF
746 * instruction.
747 */
748void
749vec4_visitor::emit_if_gen6(ir_if *ir)
750{
751   ir_expression *expr = ir->condition->as_expression();
752
753   if (expr) {
754      src_reg op[2];
755      dst_reg temp;
756
757      assert(expr->get_num_operands() <= 2);
758      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
759	 expr->operands[i]->accept(this);
760	 op[i] = this->result;
761      }
762
763      switch (expr->operation) {
764      case ir_unop_logic_not:
765	 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_Z));
766	 return;
767
768      case ir_binop_logic_xor:
769	 emit(IF(op[0], op[1], BRW_CONDITIONAL_NZ));
770	 return;
771
772      case ir_binop_logic_or:
773	 temp = dst_reg(this, glsl_type::bool_type);
774	 emit(OR(temp, op[0], op[1]));
775	 emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
776	 return;
777
778      case ir_binop_logic_and:
779	 temp = dst_reg(this, glsl_type::bool_type);
780	 emit(AND(temp, op[0], op[1]));
781	 emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
782	 return;
783
784      case ir_unop_f2b:
785	 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
786	 return;
787
788      case ir_unop_i2b:
789	 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
790	 return;
791
792      case ir_binop_greater:
793      case ir_binop_gequal:
794      case ir_binop_less:
795      case ir_binop_lequal:
796      case ir_binop_equal:
797      case ir_binop_nequal:
798	 emit(IF(op[0], op[1],
799		 brw_conditional_for_comparison(expr->operation)));
800	 return;
801
802      case ir_binop_all_equal:
803	 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
804	 emit(IF(BRW_PREDICATE_ALIGN16_ALL4H));
805	 return;
806
807      case ir_binop_any_nequal:
808	 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
809	 emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
810	 return;
811
812      case ir_unop_any:
813	 emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
814	 emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
815	 return;
816
817      default:
818	 assert(!"not reached");
819	 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
820	 return;
821      }
822      return;
823   }
824
825   ir->condition->accept(this);
826
827   emit(IF(this->result, src_reg(0), BRW_CONDITIONAL_NZ));
828}
829
830void
831vec4_visitor::visit(ir_variable *ir)
832{
833   dst_reg *reg = NULL;
834
835   if (variable_storage(ir))
836      return;
837
838   switch (ir->mode) {
839   case ir_var_in:
840      reg = new(mem_ctx) dst_reg(ATTR, ir->location);
841
842      /* Do GL_FIXED rescaling for GLES2.0.  Our GL_FIXED attributes
843       * come in as floating point conversions of the integer values.
844       */
845      for (int i = ir->location; i < ir->location + type_size(ir->type); i++) {
846	 if (!c->key.gl_fixed_input_size[i])
847	    continue;
848
849	 dst_reg dst = *reg;
850         dst.type = brw_type_for_base_type(ir->type);
851	 dst.writemask = (1 << c->key.gl_fixed_input_size[i]) - 1;
852	 emit(MUL(dst, src_reg(dst), src_reg(1.0f / 65536.0f)));
853      }
854      break;
855
856   case ir_var_out:
857      reg = new(mem_ctx) dst_reg(this, ir->type);
858
859      for (int i = 0; i < type_size(ir->type); i++) {
860	 output_reg[ir->location + i] = *reg;
861	 output_reg[ir->location + i].reg_offset = i;
862	 output_reg[ir->location + i].type =
863            brw_type_for_base_type(ir->type->get_scalar_type());
864	 output_reg_annotation[ir->location + i] = ir->name;
865      }
866      break;
867
868   case ir_var_auto:
869   case ir_var_temporary:
870      reg = new(mem_ctx) dst_reg(this, ir->type);
871      break;
872
873   case ir_var_uniform:
874      reg = new(this->mem_ctx) dst_reg(UNIFORM, this->uniforms);
875
876      /* Track how big the whole uniform variable is, in case we need to put a
877       * copy of its data into pull constants for array access.
878       */
879      this->uniform_size[this->uniforms] = type_size(ir->type);
880
881      if (!strncmp(ir->name, "gl_", 3)) {
882	 setup_builtin_uniform_values(ir);
883      } else {
884	 setup_uniform_values(ir->location, ir->type);
885      }
886      break;
887
888   case ir_var_system_value:
889      /* VertexID is stored by the VF as the last vertex element, but
890       * we don't represent it with a flag in inputs_read, so we call
891       * it VERT_ATTRIB_MAX, which setup_attributes() picks up on.
892       */
893      reg = new(mem_ctx) dst_reg(ATTR, VERT_ATTRIB_MAX);
894      prog_data->uses_vertexid = true;
895
896      switch (ir->location) {
897      case SYSTEM_VALUE_VERTEX_ID:
898	 reg->writemask = WRITEMASK_X;
899	 break;
900      case SYSTEM_VALUE_INSTANCE_ID:
901	 reg->writemask = WRITEMASK_Y;
902	 break;
903      default:
904	 assert(!"not reached");
905	 break;
906      }
907      break;
908
909   default:
910      assert(!"not reached");
911   }
912
913   reg->type = brw_type_for_base_type(ir->type);
914   hash_table_insert(this->variable_ht, reg, ir);
915}
916
917void
918vec4_visitor::visit(ir_loop *ir)
919{
920   dst_reg counter;
921
922   /* We don't want debugging output to print the whole body of the
923    * loop as the annotation.
924    */
925   this->base_ir = NULL;
926
927   if (ir->counter != NULL) {
928      this->base_ir = ir->counter;
929      ir->counter->accept(this);
930      counter = *(variable_storage(ir->counter));
931
932      if (ir->from != NULL) {
933	 this->base_ir = ir->from;
934	 ir->from->accept(this);
935
936	 emit(MOV(counter, this->result));
937      }
938   }
939
940   emit(BRW_OPCODE_DO);
941
942   if (ir->to) {
943      this->base_ir = ir->to;
944      ir->to->accept(this);
945
946      emit(CMP(dst_null_d(), src_reg(counter), this->result,
947	       brw_conditional_for_comparison(ir->cmp)));
948
949      vec4_instruction *inst = emit(BRW_OPCODE_BREAK);
950      inst->predicate = BRW_PREDICATE_NORMAL;
951   }
952
953   visit_instructions(&ir->body_instructions);
954
955
956   if (ir->increment) {
957      this->base_ir = ir->increment;
958      ir->increment->accept(this);
959      emit(ADD(counter, src_reg(counter), this->result));
960   }
961
962   emit(BRW_OPCODE_WHILE);
963}
964
965void
966vec4_visitor::visit(ir_loop_jump *ir)
967{
968   switch (ir->mode) {
969   case ir_loop_jump::jump_break:
970      emit(BRW_OPCODE_BREAK);
971      break;
972   case ir_loop_jump::jump_continue:
973      emit(BRW_OPCODE_CONTINUE);
974      break;
975   }
976}
977
978
979void
980vec4_visitor::visit(ir_function_signature *ir)
981{
982   assert(0);
983   (void)ir;
984}
985
986void
987vec4_visitor::visit(ir_function *ir)
988{
989   /* Ignore function bodies other than main() -- we shouldn't see calls to
990    * them since they should all be inlined.
991    */
992   if (strcmp(ir->name, "main") == 0) {
993      const ir_function_signature *sig;
994      exec_list empty;
995
996      sig = ir->matching_signature(&empty);
997
998      assert(sig);
999
1000      visit_instructions(&sig->body);
1001   }
1002}
1003
1004bool
1005vec4_visitor::try_emit_sat(ir_expression *ir)
1006{
1007   ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
1008   if (!sat_src)
1009      return false;
1010
1011   sat_src->accept(this);
1012   src_reg src = this->result;
1013
1014   this->result = src_reg(this, ir->type);
1015   vec4_instruction *inst;
1016   inst = emit(MOV(dst_reg(this->result), src));
1017   inst->saturate = true;
1018
1019   return true;
1020}
1021
1022void
1023vec4_visitor::emit_bool_comparison(unsigned int op,
1024				 dst_reg dst, src_reg src0, src_reg src1)
1025{
1026   /* original gen4 does destination conversion before comparison. */
1027   if (intel->gen < 5)
1028      dst.type = src0.type;
1029
1030   emit(CMP(dst, src0, src1, brw_conditional_for_comparison(op)));
1031
1032   dst.type = BRW_REGISTER_TYPE_D;
1033   emit(AND(dst, src_reg(dst), src_reg(0x1)));
1034}
1035
1036void
1037vec4_visitor::visit(ir_expression *ir)
1038{
1039   unsigned int operand;
1040   src_reg op[Elements(ir->operands)];
1041   src_reg result_src;
1042   dst_reg result_dst;
1043   vec4_instruction *inst;
1044
1045   if (try_emit_sat(ir))
1046      return;
1047
1048   for (operand = 0; operand < ir->get_num_operands(); operand++) {
1049      this->result.file = BAD_FILE;
1050      ir->operands[operand]->accept(this);
1051      if (this->result.file == BAD_FILE) {
1052	 printf("Failed to get tree for expression operand:\n");
1053	 ir->operands[operand]->print();
1054	 exit(1);
1055      }
1056      op[operand] = this->result;
1057
1058      /* Matrix expression operands should have been broken down to vector
1059       * operations already.
1060       */
1061      assert(!ir->operands[operand]->type->is_matrix());
1062   }
1063
1064   int vector_elements = ir->operands[0]->type->vector_elements;
1065   if (ir->operands[1]) {
1066      vector_elements = MAX2(vector_elements,
1067			     ir->operands[1]->type->vector_elements);
1068   }
1069
1070   this->result.file = BAD_FILE;
1071
1072   /* Storage for our result.  Ideally for an assignment we'd be using
1073    * the actual storage for the result here, instead.
1074    */
1075   result_src = src_reg(this, ir->type);
1076   /* convenience for the emit functions below. */
1077   result_dst = dst_reg(result_src);
1078   /* If nothing special happens, this is the result. */
1079   this->result = result_src;
1080   /* Limit writes to the channels that will be used by result_src later.
1081    * This does limit this temp's use as a temporary for multi-instruction
1082    * sequences.
1083    */
1084   result_dst.writemask = (1 << ir->type->vector_elements) - 1;
1085
1086   switch (ir->operation) {
1087   case ir_unop_logic_not:
1088      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
1089       * one's complement of the whole register, not just bit 0.
1090       */
1091      emit(XOR(result_dst, op[0], src_reg(1)));
1092      break;
1093   case ir_unop_neg:
1094      op[0].negate = !op[0].negate;
1095      this->result = op[0];
1096      break;
1097   case ir_unop_abs:
1098      op[0].abs = true;
1099      op[0].negate = false;
1100      this->result = op[0];
1101      break;
1102
1103   case ir_unop_sign:
1104      emit(MOV(result_dst, src_reg(0.0f)));
1105
1106      emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_G));
1107      inst = emit(MOV(result_dst, src_reg(1.0f)));
1108      inst->predicate = BRW_PREDICATE_NORMAL;
1109
1110      emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_L));
1111      inst = emit(MOV(result_dst, src_reg(-1.0f)));
1112      inst->predicate = BRW_PREDICATE_NORMAL;
1113
1114      break;
1115
1116   case ir_unop_rcp:
1117      emit_math(SHADER_OPCODE_RCP, result_dst, op[0]);
1118      break;
1119
1120   case ir_unop_exp2:
1121      emit_math(SHADER_OPCODE_EXP2, result_dst, op[0]);
1122      break;
1123   case ir_unop_log2:
1124      emit_math(SHADER_OPCODE_LOG2, result_dst, op[0]);
1125      break;
1126   case ir_unop_exp:
1127   case ir_unop_log:
1128      assert(!"not reached: should be handled by ir_explog_to_explog2");
1129      break;
1130   case ir_unop_sin:
1131   case ir_unop_sin_reduced:
1132      emit_math(SHADER_OPCODE_SIN, result_dst, op[0]);
1133      break;
1134   case ir_unop_cos:
1135   case ir_unop_cos_reduced:
1136      emit_math(SHADER_OPCODE_COS, result_dst, op[0]);
1137      break;
1138
1139   case ir_unop_dFdx:
1140   case ir_unop_dFdy:
1141      assert(!"derivatives not valid in vertex shader");
1142      break;
1143
1144   case ir_unop_noise:
1145      assert(!"not reached: should be handled by lower_noise");
1146      break;
1147
1148   case ir_binop_add:
1149      emit(ADD(result_dst, op[0], op[1]));
1150      break;
1151   case ir_binop_sub:
1152      assert(!"not reached: should be handled by ir_sub_to_add_neg");
1153      break;
1154
1155   case ir_binop_mul:
1156      if (ir->type->is_integer()) {
1157	 /* For integer multiplication, the MUL uses the low 16 bits
1158	  * of one of the operands (src0 on gen6, src1 on gen7).  The
1159	  * MACH accumulates in the contribution of the upper 16 bits
1160	  * of that operand.
1161	  *
1162	  * FINISHME: Emit just the MUL if we know an operand is small
1163	  * enough.
1164	  */
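	 /* Put differently, the full 32-bit product is assembled roughly as
	  *
	  *    a * b == a * (b & 0xffff) + ((a * (b >> 16)) << 16)  (mod 2^32)
	  *
	  * with the MUL producing the first partial product, the MACH
	  * folding in the second, and the low 32 bits of the result landing
	  * in the accumulator.
	  */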
1165	 struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);
1166
1167	 emit(MUL(acc, op[0], op[1]));
1168	 emit(MACH(dst_null_d(), op[0], op[1]));
1169	 emit(MOV(result_dst, src_reg(acc)));
1170      } else {
1171	 emit(MUL(result_dst, op[0], op[1]));
1172      }
1173      break;
1174   case ir_binop_div:
1175      /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
1176      assert(ir->type->is_integer());
1177      emit_math(SHADER_OPCODE_INT_QUOTIENT, result_dst, op[0], op[1]);
1178      break;
1179   case ir_binop_mod:
1180      /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */
1181      assert(ir->type->is_integer());
1182      emit_math(SHADER_OPCODE_INT_REMAINDER, result_dst, op[0], op[1]);
1183      break;
1184
1185   case ir_binop_less:
1186   case ir_binop_greater:
1187   case ir_binop_lequal:
1188   case ir_binop_gequal:
1189   case ir_binop_equal:
1190   case ir_binop_nequal: {
1191      emit(CMP(result_dst, op[0], op[1],
1192	       brw_conditional_for_comparison(ir->operation)));
1193      emit(AND(result_dst, result_src, src_reg(0x1)));
1194      break;
1195   }
1196
1197   case ir_binop_all_equal:
1198      /* "==" operator producing a scalar boolean. */
1199      if (ir->operands[0]->type->is_vector() ||
1200	  ir->operands[1]->type->is_vector()) {
1201	 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
1202	 emit(MOV(result_dst, src_reg(0)));
1203	 inst = emit(MOV(result_dst, src_reg(1)));
1204	 inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
1205      } else {
1206	 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_Z));
1207	 emit(AND(result_dst, result_src, src_reg(0x1)));
1208      }
1209      break;
1210   case ir_binop_any_nequal:
1211      /* "!=" operator producing a scalar boolean. */
1212      if (ir->operands[0]->type->is_vector() ||
1213	  ir->operands[1]->type->is_vector()) {
1214	 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
1215
1216	 emit(MOV(result_dst, src_reg(0)));
1217	 inst = emit(MOV(result_dst, src_reg(1)));
1218	 inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
1219      } else {
1220	 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_NZ));
1221	 emit(AND(result_dst, result_src, src_reg(0x1)));
1222      }
1223      break;
1224
1225   case ir_unop_any:
1226      emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
1227      emit(MOV(result_dst, src_reg(0)));
1228
1229      inst = emit(MOV(result_dst, src_reg(1)));
1230      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
1231      break;
1232
1233   case ir_binop_logic_xor:
1234      emit(XOR(result_dst, op[0], op[1]));
1235      break;
1236
1237   case ir_binop_logic_or:
1238      emit(OR(result_dst, op[0], op[1]));
1239      break;
1240
1241   case ir_binop_logic_and:
1242      emit(AND(result_dst, op[0], op[1]));
1243      break;
1244
1245   case ir_binop_dot:
1246      assert(ir->operands[0]->type->is_vector());
1247      assert(ir->operands[0]->type == ir->operands[1]->type);
1248      emit_dp(result_dst, op[0], op[1], ir->operands[0]->type->vector_elements);
1249      break;
1250
1251   case ir_unop_sqrt:
1252      emit_math(SHADER_OPCODE_SQRT, result_dst, op[0]);
1253      break;
1254   case ir_unop_rsq:
1255      emit_math(SHADER_OPCODE_RSQ, result_dst, op[0]);
1256      break;
1257   case ir_unop_i2f:
1258   case ir_unop_i2u:
1259   case ir_unop_u2i:
1260   case ir_unop_u2f:
1261   case ir_unop_b2f:
1262   case ir_unop_b2i:
1263   case ir_unop_f2i:
1264      emit(MOV(result_dst, op[0]));
1265      break;
1266   case ir_unop_f2b:
1267   case ir_unop_i2b: {
1268      emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
1269      emit(AND(result_dst, result_src, src_reg(1)));
1270      break;
1271   }
1272
1273   case ir_unop_trunc:
1274      emit(RNDZ(result_dst, op[0]));
1275      break;
1276   case ir_unop_ceil:
1277      op[0].negate = !op[0].negate;
1278      inst = emit(RNDD(result_dst, op[0]));
1279      this->result.negate = true;
1280      break;
1281   case ir_unop_floor:
1282      inst = emit(RNDD(result_dst, op[0]));
1283      break;
1284   case ir_unop_fract:
1285      inst = emit(FRC(result_dst, op[0]));
1286      break;
1287   case ir_unop_round_even:
1288      emit(RNDE(result_dst, op[0]));
1289      break;
1290
1291   case ir_binop_min:
1292      if (intel->gen >= 6) {
1293	 inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
1294	 inst->conditional_mod = BRW_CONDITIONAL_L;
1295      } else {
1296	 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_L));
1297
1298	 inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
1299	 inst->predicate = BRW_PREDICATE_NORMAL;
1300      }
1301      break;
1302   case ir_binop_max:
1303      if (intel->gen >= 6) {
1304	 inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
1305	 inst->conditional_mod = BRW_CONDITIONAL_G;
1306      } else {
1307	 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_G));
1308
1309	 inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
1310	 inst->predicate = BRW_PREDICATE_NORMAL;
1311      }
1312      break;
1313
1314   case ir_binop_pow:
1315      emit_math(SHADER_OPCODE_POW, result_dst, op[0], op[1]);
1316      break;
1317
1318   case ir_unop_bit_not:
1319      inst = emit(NOT(result_dst, op[0]));
1320      break;
1321   case ir_binop_bit_and:
1322      inst = emit(AND(result_dst, op[0], op[1]));
1323      break;
1324   case ir_binop_bit_xor:
1325      inst = emit(XOR(result_dst, op[0], op[1]));
1326      break;
1327   case ir_binop_bit_or:
1328      inst = emit(OR(result_dst, op[0], op[1]));
1329      break;
1330
1331   case ir_binop_lshift:
1332      inst = emit(BRW_OPCODE_SHL, result_dst, op[0], op[1]);
1333      break;
1334
1335   case ir_binop_rshift:
1336      if (ir->type->base_type == GLSL_TYPE_INT)
1337	 inst = emit(BRW_OPCODE_ASR, result_dst, op[0], op[1]);
1338      else
1339	 inst = emit(BRW_OPCODE_SHR, result_dst, op[0], op[1]);
1340      break;
1341
1342   case ir_quadop_vector:
1343      assert(!"not reached: should be handled by lower_quadop_vector");
1344      break;
1345   }
1346}
1347
1348
1349void
1350vec4_visitor::visit(ir_swizzle *ir)
1351{
1352   src_reg src;
1353   int i = 0;
1354   int swizzle[4];
1355
1356   /* Note that this is only swizzles in expressions, not those on the left
1357    * hand side of an assignment, which do write masking.  See ir_assignment
1358    * for that.
1359    */
1360
1361   ir->val->accept(this);
1362   src = this->result;
1363   assert(src.file != BAD_FILE);
1364
1365   for (i = 0; i < ir->type->vector_elements; i++) {
1366      switch (i) {
1367      case 0:
1368	 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.x);
1369	 break;
1370      case 1:
1371	 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.y);
1372	 break;
1373      case 2:
1374	 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.z);
1375	 break;
1376      case 3:
1377	 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.w);
1378	 break;
1379      }
1380   }
1381   for (; i < 4; i++) {
1382      /* Replicate the last channel out. */
1383      swizzle[i] = swizzle[ir->type->vector_elements - 1];
1384   }
1385
1386   src.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
1387
1388   this->result = src;
1389}
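/* Note that swizzles compose here rather than stack: if the value being
 * swizzled already carries a non-identity swizzle, say (w, z, y, x) from an
 * earlier dereference, then applying .xy folds through BRW_GET_SWZ to give
 * (w, z, z, z) -- the last channel replicated -- so the register is simply
 * read as .wz.
 */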
1390
1391void
1392vec4_visitor::visit(ir_dereference_variable *ir)
1393{
1394   const struct glsl_type *type = ir->type;
1395   dst_reg *reg = variable_storage(ir->var);
1396
1397   if (!reg) {
1398      fail("Failed to find variable storage for %s\n", ir->var->name);
1399      this->result = src_reg(brw_null_reg());
1400      return;
1401   }
1402
1403   this->result = src_reg(*reg);
1404
1405   if (type->is_scalar() || type->is_vector() || type->is_matrix())
1406      this->result.swizzle = swizzle_for_size(type->vector_elements);
1407}
1408
1409void
1410vec4_visitor::visit(ir_dereference_array *ir)
1411{
1412   ir_constant *constant_index;
1413   src_reg src;
1414   int element_size = type_size(ir->type);
1415
1416   constant_index = ir->array_index->constant_expression_value();
1417
1418   ir->array->accept(this);
1419   src = this->result;
1420
1421   if (constant_index) {
1422      src.reg_offset += constant_index->value.i[0] * element_size;
1423   } else {
1424      /* Variable index array dereference.  It eats the "vec4" of the
1425       * base of the array and an index that offsets the Mesa register
1426       * index.
1427       */
1428      ir->array_index->accept(this);
1429
1430      src_reg index_reg;
1431
1432      if (element_size == 1) {
1433	 index_reg = this->result;
1434      } else {
1435	 index_reg = src_reg(this, glsl_type::int_type);
1436
1437	 emit(MUL(dst_reg(index_reg), this->result, src_reg(element_size)));
1438      }
1439
1440      if (src.reladdr) {
1441	 src_reg temp = src_reg(this, glsl_type::int_type);
1442
1443	 emit(ADD(dst_reg(temp), *src.reladdr, index_reg));
1444
1445	 index_reg = temp;
1446      }
1447
1448      src.reladdr = ralloc(mem_ctx, src_reg);
1449      memcpy(src.reladdr, &index_reg, sizeof(index_reg));
1450   }
1451
1452   /* If the type is smaller than a vec4, replicate the last channel out. */
1453   if (ir->type->is_scalar() || ir->type->is_vector())
1454      src.swizzle = swizzle_for_size(ir->type->vector_elements);
1455   else
1456      src.swizzle = BRW_SWIZZLE_NOOP;
1457   src.type = brw_type_for_base_type(ir->type);
1458
1459   this->result = src;
1460}
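/* Example: for a mat4 array access m[i], element_size is 4, so the index is
 * scaled with MUL(index_reg, i, 4) and parked in src.reladdr; a constant
 * access like m[2] instead just bumps reg_offset by 8 and uses no relative
 * addressing at all.
 */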
1461
1462void
1463vec4_visitor::visit(ir_dereference_record *ir)
1464{
1465   unsigned int i;
1466   const glsl_type *struct_type = ir->record->type;
1467   int offset = 0;
1468
1469   ir->record->accept(this);
1470
1471   for (i = 0; i < struct_type->length; i++) {
1472      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
1473	 break;
1474      offset += type_size(struct_type->fields.structure[i].type);
1475   }
1476
1477   /* If the type is smaller than a vec4, replicate the last channel out. */
1478   if (ir->type->is_scalar() || ir->type->is_vector())
1479      this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
1480   else
1481      this->result.swizzle = BRW_SWIZZLE_NOOP;
1482   this->result.type = brw_type_for_base_type(ir->type);
1483
1484   this->result.reg_offset += offset;
1485}
1486
1487/**
1488 * We want to be careful in assignment setup to hit the actual storage
1489 * instead of potentially using a temporary like we might with the
1490 * ir_dereference handler.
1491 */
1492static dst_reg
1493get_assignment_lhs(ir_dereference *ir, vec4_visitor *v)
1494{
1495   /* The LHS must be a dereference.  If the LHS is a variable indexed array
1496    * access of a vector, it must be separated into a series conditional moves
1497    * access of a vector, it must be separated into a series of conditional moves
1498    */
1499   assert(ir->as_dereference());
1500   ir_dereference_array *deref_array = ir->as_dereference_array();
1501   if (deref_array) {
1502      assert(!deref_array->array->type->is_vector());
1503   }
1504
1505   /* Use the rvalue deref handler for the most part.  We'll ignore
1506    * swizzles in it and write swizzles using writemask, though.
1507    */
1508   ir->accept(v);
1509   return dst_reg(v->result);
1510}
1511
1512void
1513vec4_visitor::emit_block_move(dst_reg *dst, src_reg *src,
1514			      const struct glsl_type *type, uint32_t predicate)
1515{
1516   if (type->base_type == GLSL_TYPE_STRUCT) {
1517      for (unsigned int i = 0; i < type->length; i++) {
1518	 emit_block_move(dst, src, type->fields.structure[i].type, predicate);
1519      }
1520      return;
1521   }
1522
1523   if (type->is_array()) {
1524      for (unsigned int i = 0; i < type->length; i++) {
1525	 emit_block_move(dst, src, type->fields.array, predicate);
1526      }
1527      return;
1528   }
1529
1530   if (type->is_matrix()) {
1531      const struct glsl_type *vec_type;
1532
1533      vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
1534					 type->vector_elements, 1);
1535
1536      for (int i = 0; i < type->matrix_columns; i++) {
1537	 emit_block_move(dst, src, vec_type, predicate);
1538      }
1539      return;
1540   }
1541
1542   assert(type->is_scalar() || type->is_vector());
1543
1544   dst->type = brw_type_for_base_type(type);
1545   src->type = dst->type;
1546
1547   dst->writemask = (1 << type->vector_elements) - 1;
1548
1549   src->swizzle = swizzle_for_size(type->vector_elements);
1550
1551   vec4_instruction *inst = emit(MOV(*dst, *src));
1552   inst->predicate = predicate;
1553
1554   dst->reg_offset++;
1555   src->reg_offset++;
1556}
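/* As an example, a block move of a mat3 recurses into three vec3 moves:
 * each column emits one MOV with writemask XYZ and a size-3 swizzle, and
 * both reg_offsets advance by one vec4 slot per column.
 */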
1557
1558
1559/* If the RHS processing resulted in an instruction generating a
1560 * temporary value, and it would be easy to rewrite the instruction to
1561 * generate its result right into the LHS instead, do so.  This ends
1562 * up reliably removing instructions where it can be tricky to do so
1563 * later without real UD chain information.
1564 */
1565bool
1566vec4_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
1567				     dst_reg dst,
1568				     src_reg src,
1569				     vec4_instruction *pre_rhs_inst,
1570				     vec4_instruction *last_rhs_inst)
1571{
1572   /* This could be supported, but it would take more smarts. */
1573   if (ir->condition)
1574      return false;
1575
1576   if (pre_rhs_inst == last_rhs_inst)
1577      return false; /* No instructions generated to work with. */
1578
1579   /* Make sure the last instruction generated our source reg. */
1580   if (src.file != GRF ||
1581       src.file != last_rhs_inst->dst.file ||
1582       src.reg != last_rhs_inst->dst.reg ||
1583       src.reg_offset != last_rhs_inst->dst.reg_offset ||
1584       src.reladdr ||
1585       src.abs ||
1586       src.negate ||
1587       last_rhs_inst->predicate != BRW_PREDICATE_NONE)
1588      return false;
1589
1590   /* Check that the last instruction fully initialized the channels
1591    * we want to use, in the order we want to use them.  We could
1592    * potentially reswizzle the operands of many instructions so that
1593    * we could handle out of order channels, but don't yet.
1594    */
1595
1596   for (unsigned i = 0; i < 4; i++) {
1597      if (dst.writemask & (1 << i)) {
1598	 if (!(last_rhs_inst->dst.writemask & (1 << i)))
1599	    return false;
1600
1601	 if (BRW_GET_SWZ(src.swizzle, i) != i)
1602	    return false;
1603      }
1604   }
1605
1606   /* Success!  Rewrite the instruction. */
1607   last_rhs_inst->dst.file = dst.file;
1608   last_rhs_inst->dst.reg = dst.reg;
1609   last_rhs_inst->dst.reg_offset = dst.reg_offset;
1610   last_rhs_inst->dst.reladdr = dst.reladdr;
1611   last_rhs_inst->dst.writemask &= dst.writemask;
1612
1613   return true;
1614}
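/* Example: for "v.xyz = a + b;" the RHS leaves "ADD temp, a, b" as the last
 * instruction.  Since temp is a plain GRF written in channel order, the ADD
 * is retargeted at v with its writemask narrowed to XYZ, and the caller
 * (visit(ir_assignment)) then skips emitting the copy MOV entirely.
 */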
1615
1616void
1617vec4_visitor::visit(ir_assignment *ir)
1618{
1619   dst_reg dst = get_assignment_lhs(ir->lhs, this);
1620   uint32_t predicate = BRW_PREDICATE_NONE;
1621
1622   if (!ir->lhs->type->is_scalar() &&
1623       !ir->lhs->type->is_vector()) {
1624      ir->rhs->accept(this);
1625      src_reg src = this->result;
1626
1627      if (ir->condition) {
1628	 emit_bool_to_cond_code(ir->condition, &predicate);
1629      }
1630
1631      /* emit_block_move doesn't account for swizzles in the source register.
1632       * This should be ok, since the source register is a structure or an
1633       * array, and those can't be swizzled.  But double-check to be sure.
1634       */
1635      assert(src.swizzle ==
1636             (ir->rhs->type->is_matrix()
1637              ? swizzle_for_size(ir->rhs->type->vector_elements)
1638              : BRW_SWIZZLE_NOOP));
1639
1640      emit_block_move(&dst, &src, ir->rhs->type, predicate);
1641      return;
1642   }
1643
1644   /* Now we're down to just a scalar/vector with writemasks. */
1645   int i;
1646
1647   vec4_instruction *pre_rhs_inst, *last_rhs_inst;
1648   pre_rhs_inst = (vec4_instruction *)this->instructions.get_tail();
1649
1650   ir->rhs->accept(this);
1651
1652   last_rhs_inst = (vec4_instruction *)this->instructions.get_tail();
1653
1654   src_reg src = this->result;
1655
1656   int swizzles[4];
1657   int first_enabled_chan = 0;
1658   int src_chan = 0;
1659
1660   assert(ir->lhs->type->is_vector() ||
1661	  ir->lhs->type->is_scalar());
1662   dst.writemask = ir->write_mask;
1663
1664   for (int i = 0; i < 4; i++) {
1665      if (dst.writemask & (1 << i)) {
1666	 first_enabled_chan = BRW_GET_SWZ(src.swizzle, i);
1667	 break;
1668      }
1669   }
1670
1671   /* Swizzle a small RHS vector into the channels being written.
1672    *
1673    * glsl ir treats write_mask as dictating how many channels are
1674    * present on the RHS while in our instructions we need to make
1675    * those channels appear in the slots of the vec4 they're written to.
1676    */
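   /* For example, writing a vec2 RHS into v.yw: the RHS arrives with
    * swizzle (x, y, y, y), write_mask is YW, and the remap below produces
    * (y, x, y, y) so that RHS channel 0 lands in .y and RHS channel 1 lands
    * in .w of the destination.
    */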
1677   for (int i = 0; i < 4; i++) {
1678      if (dst.writemask & (1 << i))
1679	 swizzles[i] = BRW_GET_SWZ(src.swizzle, src_chan++);
1680      else
1681	 swizzles[i] = first_enabled_chan;
1682   }
1683   src.swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
1684			      swizzles[2], swizzles[3]);
1685
1686   if (try_rewrite_rhs_to_dst(ir, dst, src, pre_rhs_inst, last_rhs_inst)) {
1687      return;
1688   }
1689
1690   if (ir->condition) {
1691      emit_bool_to_cond_code(ir->condition, &predicate);
1692   }
1693
1694   for (i = 0; i < type_size(ir->lhs->type); i++) {
1695      vec4_instruction *inst = emit(MOV(dst, src));
1696      inst->predicate = predicate;
1697
1698      dst.reg_offset++;
1699      src.reg_offset++;
1700   }
1701}
1702
1703void
1704vec4_visitor::emit_constant_values(dst_reg *dst, ir_constant *ir)
1705{
1706   if (ir->type->base_type == GLSL_TYPE_STRUCT) {
1707      foreach_list(node, &ir->components) {
1708	 ir_constant *field_value = (ir_constant *)node;
1709
1710	 emit_constant_values(dst, field_value);
1711      }
1712      return;
1713   }
1714
1715   if (ir->type->is_array()) {
1716      for (unsigned int i = 0; i < ir->type->length; i++) {
1717	 emit_constant_values(dst, ir->array_elements[i]);
1718      }
1719      return;
1720   }
1721
1722   if (ir->type->is_matrix()) {
1723      for (int i = 0; i < ir->type->matrix_columns; i++) {
1724	 float *vec = &ir->value.f[i * ir->type->vector_elements];
1725
1726	 for (int j = 0; j < ir->type->vector_elements; j++) {
1727	    dst->writemask = 1 << j;
1728	    dst->type = BRW_REGISTER_TYPE_F;
1729
1730	    emit(MOV(*dst, src_reg(vec[j])));
1731	 }
1732	 dst->reg_offset++;
1733      }
1734      return;
1735   }
1736
1737   int remaining_writemask = (1 << ir->type->vector_elements) - 1;
1738
1739   for (int i = 0; i < ir->type->vector_elements; i++) {
1740      if (!(remaining_writemask & (1 << i)))
1741	 continue;
1742
1743      dst->writemask = 1 << i;
1744      dst->type = brw_type_for_base_type(ir->type);
1745
1746      /* Find other components that match the one we're about to
1747       * write.  Emits fewer instructions for things like vec4(0.5,
1748       * 1.5, 1.5, 1.5).
1749       */
1750      for (int j = i + 1; j < ir->type->vector_elements; j++) {
1751	 if (ir->type->base_type == GLSL_TYPE_BOOL) {
1752	    if (ir->value.b[i] == ir->value.b[j])
1753	       dst->writemask |= (1 << j);
1754	 } else {
1755	    /* u, i, and f storage all line up, so no need for a
1756	     * switch case for comparing each type.
1757	     */
1758	    if (ir->value.u[i] == ir->value.u[j])
1759	       dst->writemask |= (1 << j);
1760	 }
1761      }
1762
1763      switch (ir->type->base_type) {
1764      case GLSL_TYPE_FLOAT:
1765	 emit(MOV(*dst, src_reg(ir->value.f[i])));
1766	 break;
1767      case GLSL_TYPE_INT:
1768	 emit(MOV(*dst, src_reg(ir->value.i[i])));
1769	 break;
1770      case GLSL_TYPE_UINT:
1771	 emit(MOV(*dst, src_reg(ir->value.u[i])));
1772	 break;
1773      case GLSL_TYPE_BOOL:
1774	 emit(MOV(*dst, src_reg(ir->value.b[i])));
1775	 break;
1776      default:
1777	 assert(!"Non-float/uint/int/bool constant");
1778	 break;
1779      }
1780
1781      remaining_writemask &= ~dst->writemask;
1782   }
1783   dst->reg_offset++;
1784}
1785
1786void
1787vec4_visitor::visit(ir_constant *ir)
1788{
1789   dst_reg dst = dst_reg(this, ir->type);
1790   this->result = src_reg(dst);
1791
1792   emit_constant_values(&dst, ir);
1793}
1794
1795void
1796vec4_visitor::visit(ir_call *ir)
1797{
1798   assert(!"not reached");
1799}
1800
1801void
1802vec4_visitor::visit(ir_texture *ir)
1803{
1804   int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &vp->Base);
1805   sampler = vp->Base.SamplerUnits[sampler];
1806
1807   /* Should be lowered by do_lower_texture_projection */
1808   assert(!ir->projector);
1809
1810   vec4_instruction *inst = NULL;
1811   switch (ir->op) {
1812   case ir_tex:
1813   case ir_txl:
1814      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXL);
1815      break;
1816   case ir_txd:
1817      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXD);
1818      break;
1819   case ir_txf:
1820      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXF);
1821      break;
1822   case ir_txs:
1823      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXS);
1824      break;
1825   case ir_txb:
1826      assert(!"TXB is not valid for vertex shaders.");
1827   }
1828
1829   /* Texel offsets go in the message header; Gen4 also requires headers. */
1830   inst->header_present = ir->offset || intel->gen < 5;
1831   inst->base_mrf = 2;
1832   inst->mlen = inst->header_present + 1; /* always at least one */
1833   inst->sampler = sampler;
1834   inst->dst = dst_reg(this, ir->type);
1835   inst->shadow_compare = ir->shadow_comparitor != NULL;
1836
1837   if (ir->offset != NULL)
1838      inst->texture_offset = brw_texture_offset(ir->offset->as_constant());
1839
1840   /* MRF for the first parameter */
1841   int param_base = inst->base_mrf + inst->header_present;
1842
1843   if (ir->op == ir_txs) {
1844      ir->lod_info.lod->accept(this);
1845      int writemask = intel->gen == 4 ? WRITEMASK_W : WRITEMASK_X;
1846      emit(MOV(dst_reg(MRF, param_base, ir->lod_info.lod->type, writemask),
1847	   this->result));
1848   } else {
1849      int i, coord_mask = 0, zero_mask = 0;
1850      /* Load the coordinate */
1851      /* FINISHME: gl_clamp_mask and saturate */
1852      for (i = 0; i < ir->coordinate->type->vector_elements; i++)
1853	 coord_mask |= (1 << i);
1854      for (; i < 4; i++)
1855	 zero_mask |= (1 << i);
1856
1857      ir->coordinate->accept(this);
1858      emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, coord_mask),
1859	       this->result));
1860      emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, zero_mask),
1861	       src_reg(0)));
1862      /* Load the shadow comparitor */
1863      if (ir->shadow_comparitor) {
1864	 ir->shadow_comparitor->accept(this);
1865	 emit(MOV(dst_reg(MRF, param_base + 1, ir->shadow_comparitor->type,
1866			  WRITEMASK_X),
1867		  this->result));
1868	 inst->mlen++;
1869      }
1870
1871      /* Load the LOD info */
1872      if (ir->op == ir_txl) {
1873	 int mrf, writemask;
1874	 if (intel->gen >= 5) {
1875	    mrf = param_base + 1;
1876	    if (ir->shadow_comparitor) {
1877	       writemask = WRITEMASK_Y;
1878	       /* mlen already incremented */
1879	    } else {
1880	       writemask = WRITEMASK_X;
1881	       inst->mlen++;
1882	    }
1883	 } else /* intel->gen == 4 */ {
1884	    mrf = param_base;
1885	    writemask = WRITEMASK_Z;
1886	 }
1887	 ir->lod_info.lod->accept(this);
1888	 emit(MOV(dst_reg(MRF, mrf, ir->lod_info.lod->type, writemask),
1889		  this->result));
1890      } else if (ir->op == ir_txf) {
1891	 ir->lod_info.lod->accept(this);
1892	 emit(MOV(dst_reg(MRF, param_base, ir->lod_info.lod->type, WRITEMASK_W),
1893		  this->result));
1894      } else if (ir->op == ir_txd) {
1895	 const glsl_type *type = ir->lod_info.grad.dPdx->type;
1896
1897	 ir->lod_info.grad.dPdx->accept(this);
1898	 src_reg dPdx = this->result;
1899	 ir->lod_info.grad.dPdy->accept(this);
1900	 src_reg dPdy = this->result;
1901
1902	 if (intel->gen >= 5) {
1903	    dPdx.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
1904	    dPdy.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
1905	    emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XZ), dPdx));
1906	    emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_YW), dPdy));
1907	    inst->mlen++;
1908
1909	    if (ir->type->vector_elements == 3) {
1910	       dPdx.swizzle = BRW_SWIZZLE_ZZZZ;
1911	       dPdy.swizzle = BRW_SWIZZLE_ZZZZ;
1912	       emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_X), dPdx));
1913	       emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_Y), dPdy));
1914	       inst->mlen++;
1915	    }
1916	 } else /* intel->gen == 4 */ {
1917	    emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XYZ), dPdx));
1918	    emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_XYZ), dPdy));
1919	    inst->mlen += 2;
1920	 }
1921      }
1922   }
1923
1924   emit(inst);
1925
1926   swizzle_result(ir, src_reg(inst->dst), sampler);
1927}
1928
1929void
1930vec4_visitor::swizzle_result(ir_texture *ir, src_reg orig_val, int sampler)
1931{
1932   this->result = orig_val;
1933
1934   int s = c->key.tex.swizzles[sampler];
1935
1936   if (ir->op == ir_txs || ir->type == glsl_type::float_type
1937			|| s == SWIZZLE_NOOP)
1938      return;
1939
1940   int zero_mask = 0, one_mask = 0, copy_mask = 0;
1941   int swizzle[4] = { 0, 0, 0, 0 }; /* masked channels stay 0 */
1942
1943   for (int i = 0; i < 4; i++) {
1944      switch (GET_SWZ(s, i)) {
1945      case SWIZZLE_ZERO:
1946	 zero_mask |= (1 << i);
1947	 break;
1948      case SWIZZLE_ONE:
1949	 one_mask |= (1 << i);
1950	 break;
1951      default:
1952	 copy_mask |= (1 << i);
1953	 swizzle[i] = GET_SWZ(s, i);
1954	 break;
1955      }
1956   }
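   /* For example, a sampler swizzle of (R, R, R, ONE) -- the classic
    * depth-texture LUMINANCE case -- produces copy_mask = XYZ with a .xxx
    * swizzle and one_mask = W, and the MOVs below assemble the result from
    * the copied, zeroed, and one-filled channels.
    */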
1957
1958   this->result = src_reg(this, ir->type);
1959   dst_reg swizzled_result(this->result);
1960
1961   if (copy_mask) {
1962      orig_val.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
1963      swizzled_result.writemask = copy_mask;
1964      emit(MOV(swizzled_result, orig_val));
1965   }
1966
1967   if (zero_mask) {
1968      swizzled_result.writemask = zero_mask;
1969      emit(MOV(swizzled_result, src_reg(0.0f)));
1970   }
1971
1972   if (one_mask) {
1973      swizzled_result.writemask = one_mask;
1974      emit(MOV(swizzled_result, src_reg(1.0f)));
1975   }
1976}
1977
1978void
1979vec4_visitor::visit(ir_return *ir)
1980{
1981   assert(!"not reached");
1982}
1983
1984void
1985vec4_visitor::visit(ir_discard *ir)
1986{
1987   assert(!"not reached");
1988}
1989
1990void
1991vec4_visitor::visit(ir_if *ir)
1992{
1993   /* Don't point the annotation at the if statement itself; otherwise it,
1994    * plus the then and else blocks, would all get printed.
1995    */
1996   this->base_ir = ir->condition;
1997
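   /* Gen6's IF instruction can evaluate a comparison on its own, which is
    * what emit_if_gen6() relies on; on other generations we first resolve
    * the condition to a predicate and emit a predicated IF.
    */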
1998   if (intel->gen == 6) {
1999      emit_if_gen6(ir);
2000   } else {
2001      uint32_t predicate;
2002      emit_bool_to_cond_code(ir->condition, &predicate);
2003      emit(IF(predicate));
2004   }
2005
2006   visit_instructions(&ir->then_instructions);
2007
2008   if (!ir->else_instructions.is_empty()) {
2009      this->base_ir = ir->condition;
2010      emit(BRW_OPCODE_ELSE);
2011
2012      visit_instructions(&ir->else_instructions);
2013   }
2014
2015   this->base_ir = ir->condition;
2016   emit(BRW_OPCODE_ENDIF);
2017}
2018
2019void
2020vec4_visitor::emit_ndc_computation()
2021{
2022   /* Get the position */
2023   src_reg pos = src_reg(output_reg[VERT_RESULT_HPOS]);
2024
2025   /* Build ndc coords, which are (x/w, y/w, z/w, 1/w) */
2026   dst_reg ndc = dst_reg(this, glsl_type::vec4_type);
2027   output_reg[BRW_VERT_RESULT_NDC] = ndc;
2028
2029   current_annotation = "NDC";
2030   dst_reg ndc_w = ndc;
2031   ndc_w.writemask = WRITEMASK_W;
2032   src_reg pos_w = pos;
2033   pos_w.swizzle = BRW_SWIZZLE4(SWIZZLE_W, SWIZZLE_W, SWIZZLE_W, SWIZZLE_W);
2034   emit_math(SHADER_OPCODE_RCP, ndc_w, pos_w);
2035
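   /* ndc.w now holds 1/pos.w from the RCP above, so the MUL below yields
    * ndc.xyz = pos.xyz * (1/pos.w), completing the (x/w, y/w, z/w, 1/w)
    * layout described above.
    */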
2036   dst_reg ndc_xyz = ndc;
2037   ndc_xyz.writemask = WRITEMASK_XYZ;
2038
2039   emit(MUL(ndc_xyz, pos, src_reg(ndc_w)));
2040}
2041
2042void
2043vec4_visitor::emit_psiz_and_flags(struct brw_reg reg)
2044{
2045   if (intel->gen < 6 &&
2046       ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
2047        c->key.userclip_active || brw->has_negative_rhw_bug)) {
2048      dst_reg header1 = dst_reg(this, glsl_type::uvec4_type);
2049      dst_reg header1_w = header1;
2050      header1_w.writemask = WRITEMASK_W;
2051      GLuint i;
2052
2053      emit(MOV(header1, 0u));
2054
2055      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
2056	 src_reg psiz = src_reg(output_reg[VERT_RESULT_PSIZ]);
2057
2058	 current_annotation = "Point size";
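	 /* Scale the float point size by 2^11 and mask it to an 11-bit field
	  * starting at bit 8: e.g. a point size of 2.5 becomes 2.5 * 2048 =
	  * 5120 = 0x1400, which the 0x7ff << 8 mask preserves, so the header
	  * carries the size as a fixed-point value in bits 8..18 (presumably
	  * matching the pre-gen6 VUE header's point width field).
	  */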
2059	 emit(MUL(header1_w, psiz, src_reg((float)(1 << 11))));
2060	 emit(AND(header1_w, src_reg(header1_w), 0x7ff << 8));
2061      }
2062
2063      current_annotation = "Clipping flags";
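      /* For each enabled user clip plane, DP4(hpos, plane) < 0 means the
       * vertex is outside that plane, and the predicated OR below then sets
       * bit i of the clip-flag field in header1.w.
       */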
2064      for (i = 0; i < c->key.nr_userclip_plane_consts; i++) {
2065	 vec4_instruction *inst;
2066
2067	 inst = emit(DP4(dst_null_f(), src_reg(output_reg[VERT_RESULT_HPOS]),
2068                         src_reg(this->userplane[i])));
2069	 inst->conditional_mod = BRW_CONDITIONAL_L;
2070
2071	 inst = emit(OR(header1_w, src_reg(header1_w), 1u << i));
2072	 inst->predicate = BRW_PREDICATE_NORMAL;
2073      }
2074
2075      /* i965 clipping workaround:
2076       * 1) Test for -ve rhw
2077       * 2) If set,
2078       *      set ndc = (0,0,0,0)
2079       *      set ucp[6] = 1
2080       *
2081       * Later, clipping will detect ucp[6] and ensure the primitive is
2082       * clipped against all fixed planes.
2083       */
2084      if (brw->has_negative_rhw_bug) {
2085#if 0
2086	 /* FINISHME */
2087	 brw_CMP(p,
2088		 vec8(brw_null_reg()),
2089		 BRW_CONDITIONAL_L,
2090		 brw_swizzle1(output_reg[BRW_VERT_RESULT_NDC], 3),
2091		 brw_imm_f(0));
2092
2093	 brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
2094	 brw_MOV(p, output_reg[BRW_VERT_RESULT_NDC], brw_imm_f(0));
2095	 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
2096#endif
2097      }
2098
2099      emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), src_reg(header1)));
2100   } else if (intel->gen < 6) {
2101      emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u));
2102   } else {
2103      emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0)));
2104      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
2105         emit(MOV(brw_writemask(reg, WRITEMASK_W),
2106                  src_reg(output_reg[VERT_RESULT_PSIZ])));
2107      }
2108   }
2109}
2110
2111void
2112vec4_visitor::emit_clip_distances(struct brw_reg reg, int offset)
2113{
2114   if (intel->gen < 6) {
2115      /* Clip distance slots are set aside in gen5, but they are not used.  It
2116       * is not clear whether we actually need to set aside space for them,
2117       * but the performance cost is negligible.
2118       */
2119      return;
2120   }
2121
2122   /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables):
2123    *
2124    *     "If a linked set of shaders forming the vertex stage contains no
2125    *     static write to gl_ClipVertex or gl_ClipDistance, but the
2126    *     application has requested clipping against user clip planes through
2127    *     the API, then the coordinate written to gl_Position is used for
2128    *     comparison against the user clip planes."
2129    *
2130    * This function is only called if the shader didn't write to
2131    * gl_ClipDistance.  Accordingly, we use gl_ClipVertex to perform clipping
2132    * if the user wrote to it; otherwise we use gl_Position.
2133    */
2134   gl_vert_result clip_vertex = VERT_RESULT_CLIP_VERTEX;
2135   if (!(c->prog_data.outputs_written
2136         & BITFIELD64_BIT(VERT_RESULT_CLIP_VERTEX))) {
2137      clip_vertex = VERT_RESULT_HPOS;
2138   }
2139
2140   for (int i = 0; i + offset < c->key.nr_userclip_plane_consts && i < 4;
2141        ++i) {
2142      emit(DP4(dst_reg(brw_writemask(reg, 1 << i)),
2143               src_reg(output_reg[clip_vertex]),
2144               src_reg(this->userplane[i + offset])));
2145   }
2146}
2147
2148void
2149vec4_visitor::emit_generic_urb_slot(dst_reg reg, int vert_result)
2150{
2151   assert (vert_result < VERT_RESULT_MAX);
2152   reg.type = output_reg[vert_result].type;
2153   current_annotation = output_reg_annotation[vert_result];
2154   /* Copy the register, saturating if necessary */
2155   vec4_instruction *inst = emit(MOV(reg,
2156                                     src_reg(output_reg[vert_result])));
2157   if ((vert_result == VERT_RESULT_COL0 ||
2158        vert_result == VERT_RESULT_COL1 ||
2159        vert_result == VERT_RESULT_BFC0 ||
2160        vert_result == VERT_RESULT_BFC1) &&
2161       c->key.clamp_vertex_color) {
2162      inst->saturate = true;
2163   }
2164}
2165
2166void
2167vec4_visitor::emit_urb_slot(int mrf, int vert_result)
2168{
2169   struct brw_reg hw_reg = brw_message_reg(mrf);
2170   dst_reg reg = dst_reg(MRF, mrf);
2171   reg.type = BRW_REGISTER_TYPE_F;
2172
2173   switch (vert_result) {
2174   case VERT_RESULT_PSIZ:
2175      /* PSIZ is always in slot 0, and is coupled with other flags. */
2176      current_annotation = "indices, point width, clip flags";
2177      emit_psiz_and_flags(hw_reg);
2178      break;
2179   case BRW_VERT_RESULT_NDC:
2180      current_annotation = "NDC";
2181      emit(MOV(reg, src_reg(output_reg[BRW_VERT_RESULT_NDC])));
2182      break;
2183   case BRW_VERT_RESULT_HPOS_DUPLICATE:
2184   case VERT_RESULT_HPOS:
2185      current_annotation = "gl_Position";
2186      emit(MOV(reg, src_reg(output_reg[VERT_RESULT_HPOS])));
2187      break;
2188   case VERT_RESULT_CLIP_DIST0:
2189   case VERT_RESULT_CLIP_DIST1:
2190      if (this->c->key.uses_clip_distance) {
2191         emit_generic_urb_slot(reg, vert_result);
2192      } else {
2193         current_annotation = "user clip distances";
2194         emit_clip_distances(hw_reg, (vert_result - VERT_RESULT_CLIP_DIST0) * 4);
2195      }
2196      break;
2197   case BRW_VERT_RESULT_PAD:
2198      /* No need to write to this slot */
2199      break;
2200   default:
2201      emit_generic_urb_slot(reg, vert_result);
2202      break;
2203   }
2204}
2205
2206static int
2207align_interleaved_urb_mlen(struct brw_context *brw, int mlen)
2208{
2209   struct intel_context *intel = &brw->intel;
2210
2211   if (intel->gen >= 6) {
2212      /* URB data written (does not include the message header reg) must
2213       * be a multiple of 256 bits, or 2 VS registers.  See vol5c.5,
2214       * section 5.4.3.2.2: URB_INTERLEAVED.
2215       *
2216       * URB entries are allocated on a multiple of 1024 bits, so an
2217       * extra 128 bits written here to make the end align to 256 is
2218       * no problem.
2219       */
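      /* For example (mlen includes the one header reg): mlen == 8 means 7
       * data regs, which is odd, so it gets padded to mlen == 9 (8 data
       * regs); mlen == 7 (6 data regs) is already aligned.
       */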
2220      if ((mlen % 2) != 1)
2221	 mlen++;
2222   }
2223
2224   return mlen;
2225}
2226
2227/**
2228 * Generates the VUE payload plus the 1 or 2 URB write instructions to
2229 * complete the VS thread.
2230 *
2231 * The VUE layout is documented in Volume 2a.
2232 */
2233void
2234vec4_visitor::emit_urb_writes()
2235{
2236   /* MRF 0 is reserved for the debugger, so start with message header
2237    * in MRF 1.
2238    */
2239   int base_mrf = 1;
2240   int mrf = base_mrf;
2241   /* In the process of generating our URB write message contents, we
2242    * may need to unspill a register or load from an array.  Those
2243    * reads would use MRFs 14-15.
2244    */
2245   int max_usable_mrf = 13;
2246
2247   /* The following assertion verifies that max_usable_mrf yields an even
2248    * number of URB write data registers, which meets gen6's length
2249    * alignment requirement.
2250    */
2251   assert ((max_usable_mrf - base_mrf) % 2 == 0);
2252
2253   /* FINISHME: edgeflag */
2254
2255   brw_compute_vue_map(&c->vue_map, intel, &c->prog_data);
2256
2257   /* First mrf is the g0-based message header containing URB handles and such,
2258    * which is implied in VS_OPCODE_URB_WRITE.
2259    */
2260   mrf++;
2261
2262   if (intel->gen < 6) {
2263      emit_ndc_computation();
2264   }
2265
2266   /* Set up the VUE data for the first URB write */
2267   int slot;
2268   for (slot = 0; slot < c->vue_map.num_slots; ++slot) {
2269      emit_urb_slot(mrf++, c->vue_map.slot_to_vert_result[slot]);
2270
2271      /* If this was max_usable_mrf, we can't fit anything more into this URB
2272       * WRITE.
2273       */
2274      if (mrf > max_usable_mrf) {
2275	 slot++;
2276	 break;
2277      }
2278   }
2279
2280   current_annotation = "URB write";
2281   vec4_instruction *inst = emit(VS_OPCODE_URB_WRITE);
2282   inst->base_mrf = base_mrf;
2283   inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
2284   inst->eot = (slot >= c->vue_map.num_slots);
2285
2286   /* Optional second URB write */
2287   if (!inst->eot) {
2288      mrf = base_mrf + 1;
2289
2290      for (; slot < c->vue_map.num_slots; ++slot) {
2291	 assert(mrf < max_usable_mrf);
2292
2293         emit_urb_slot(mrf++, c->vue_map.slot_to_vert_result[slot]);
2294      }
2295
2296      current_annotation = "URB write";
2297      inst = emit(VS_OPCODE_URB_WRITE);
2298      inst->base_mrf = base_mrf;
2299      inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
2300      inst->eot = true;
2301      /* URB destination offset.  The previous write used MRFs 1-13, which
2302       * minus the one header MRF is 12 data regs.  The URB offset is in
2303       * URB row increments, and each of our MRFs is half of one of those,
2304       * since we're doing interleaved writes.
2305       */
2306      inst->offset = (max_usable_mrf - base_mrf) / 2;
2307   }
2308}
2309
2310src_reg
2311vec4_visitor::get_scratch_offset(vec4_instruction *inst,
2312				 src_reg *reladdr, int reg_offset)
2313{
2314   /* Because we store the values to scratch interleaved like our
2315    * vertex data, we need to scale the vec4 index by 2.
2316    */
2317   int message_header_scale = 2;
2318
2319   /* Pre-gen6, the message header uses byte offsets instead of vec4
2320    * (16-byte) offset units.
2321    */
2322   if (intel->gen < 6)
2323      message_header_scale *= 16;
2324
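   /* For example, with no reladdr and reg_offset == 3 this returns an
    * immediate 6 on gen6+ (two scratch regs per logical vec4 because of the
    * interleaving), or 3 * 2 * 16 = 96 on gen4/5, where the header takes a
    * byte offset.
    */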
2325   if (reladdr) {
2326      src_reg index = src_reg(this, glsl_type::int_type);
2327
2328      emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset)));
2329      emit_before(inst, MUL(dst_reg(index),
2330			    index, src_reg(message_header_scale)));
2331
2332      return index;
2333   } else {
2334      return src_reg(reg_offset * message_header_scale);
2335   }
2336}
2337
2338src_reg
2339vec4_visitor::get_pull_constant_offset(vec4_instruction *inst,
2340				       src_reg *reladdr, int reg_offset)
2341{
2342   if (reladdr) {
2343      src_reg index = src_reg(this, glsl_type::int_type);
2344
2345      emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset)));
2346
2347      /* Pre-gen6, the message header uses byte offsets instead of vec4
2348       * (16-byte) offset units.
2349       */
2350      if (intel->gen < 6) {
2351	 emit_before(inst, MUL(dst_reg(index), index, src_reg(16)));
2352      }
2353
2354      return index;
2355   } else {
2356      int message_header_scale = intel->gen < 6 ? 16 : 1;
2357      return src_reg(reg_offset * message_header_scale);
2358   }
2359}
2360
2361/**
2362 * Emits an instruction before @inst to load the value named by @orig_src
2363 * from scratch space at @base_offset to @temp.
2364 */
2365void
2366vec4_visitor::emit_scratch_read(vec4_instruction *inst,
2367				dst_reg temp, src_reg orig_src,
2368				int base_offset)
2369{
2370   int reg_offset = base_offset + orig_src.reg_offset;
2371   src_reg index = get_scratch_offset(inst, orig_src.reladdr, reg_offset);
2372
2373   emit_before(inst, SCRATCH_READ(temp, index));
2374}
2375
2376/**
2377 * Emits an instruction after @inst to store @temp into @orig_dst's
2378 * scratch-space location at @base_offset.
2379 */
2380void
2381vec4_visitor::emit_scratch_write(vec4_instruction *inst,
2382				 src_reg temp, dst_reg orig_dst,
2383				 int base_offset)
2384{
2385   int reg_offset = base_offset + orig_dst.reg_offset;
2386   src_reg index = get_scratch_offset(inst, orig_dst.reladdr, reg_offset);
2387
2388   dst_reg dst = dst_reg(brw_writemask(brw_vec8_grf(0, 0),
2389				       orig_dst.writemask));
2390   vec4_instruction *write = SCRATCH_WRITE(dst, temp, index);
2391   write->predicate = inst->predicate;
2392   write->ir = inst->ir;
2393   write->annotation = inst->annotation;
2394   inst->insert_after(write);
2395}
2396
2397/**
2398 * We can't generally support array access in GRF space, because a
2399 * single instruction's destination can only span 2 contiguous
2400 * registers.  So, we send all GRF arrays that get variable index
2401 * access to scratch space.
2402 */
2403void
2404vec4_visitor::move_grf_array_access_to_scratch()
2405{
2406   int scratch_loc[this->virtual_grf_count];
2407
2408   for (int i = 0; i < this->virtual_grf_count; i++) {
2409      scratch_loc[i] = -1;
2410   }
2411
2412   /* First, calculate the set of virtual GRFs that need to be punted
2413    * to scratch due to having any array access on them, and where in
2414    * scratch.
2415    */
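   /* Each vec4 of a spilled array appears to be allotted 8 floats * 4 bytes
    * = 32 bytes of scratch below -- two interleaved vertices' worth of one
    * vec4, matching the layout get_scratch_offset() assumes.
    */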
2416   foreach_list(node, &this->instructions) {
2417      vec4_instruction *inst = (vec4_instruction *)node;
2418
2419      if (inst->dst.file == GRF && inst->dst.reladdr &&
2420	  scratch_loc[inst->dst.reg] == -1) {
2421	 scratch_loc[inst->dst.reg] = c->last_scratch;
2422	 c->last_scratch += this->virtual_grf_sizes[inst->dst.reg] * 8 * 4;
2423      }
2424
2425      for (int i = 0 ; i < 3; i++) {
2426	 src_reg *src = &inst->src[i];
2427
2428	 if (src->file == GRF && src->reladdr &&
2429	     scratch_loc[src->reg] == -1) {
2430	    scratch_loc[src->reg] = c->last_scratch;
2431	    c->last_scratch += this->virtual_grf_sizes[src->reg] * 8 * 4;
2432	 }
2433      }
2434   }
2435
2436   /* Now, for anything that will be accessed through scratch, rewrite
2437    * it to load/store.  Note that this is a _safe list walk, because
2438    * we may generate a new scratch_write instruction after the one
2439    * we're processing.
2440    */
2441   foreach_list_safe(node, &this->instructions) {
2442      vec4_instruction *inst = (vec4_instruction *)node;
2443
2444	      /* Set up the annotation tracking for newly generated instructions. */
2445      base_ir = inst->ir;
2446      current_annotation = inst->annotation;
2447
2448      if (inst->dst.file == GRF && scratch_loc[inst->dst.reg] != -1) {
2449	 src_reg temp = src_reg(this, glsl_type::vec4_type);
2450
2451	 emit_scratch_write(inst, temp, inst->dst, scratch_loc[inst->dst.reg]);
2452
2453	 inst->dst.file = temp.file;
2454	 inst->dst.reg = temp.reg;
2455	 inst->dst.reg_offset = temp.reg_offset;
2456	 inst->dst.reladdr = NULL;
2457      }
2458
2459      for (int i = 0 ; i < 3; i++) {
2460	 if (inst->src[i].file != GRF || scratch_loc[inst->src[i].reg] == -1)
2461	    continue;
2462
2463	 dst_reg temp = dst_reg(this, glsl_type::vec4_type);
2464
2465	 emit_scratch_read(inst, temp, inst->src[i],
2466			   scratch_loc[inst->src[i].reg]);
2467
2468	 inst->src[i].file = temp.file;
2469	 inst->src[i].reg = temp.reg;
2470	 inst->src[i].reg_offset = temp.reg_offset;
2471	 inst->src[i].reladdr = NULL;
2472      }
2473   }
2474}
2475
2476/**
2477 * Emits an instruction before @inst to load the value named by @orig_src
2478 * from the pull constant buffer (surface) at @base_offset to @temp.
2479 */
2480void
2481vec4_visitor::emit_pull_constant_load(vec4_instruction *inst,
2482				      dst_reg temp, src_reg orig_src,
2483				      int base_offset)
2484{
2485   int reg_offset = base_offset + orig_src.reg_offset;
2486   src_reg index = get_pull_constant_offset(inst, orig_src.reladdr, reg_offset);
2487   vec4_instruction *load;
2488
2489   load = new(mem_ctx) vec4_instruction(this, VS_OPCODE_PULL_CONSTANT_LOAD,
2490					temp, index);
2491   load->base_mrf = 14;
2492   load->mlen = 1;
2493   emit_before(inst, load);
2494}
2495
2496/**
2497 * Implements array access of uniforms by inserting a
2498 * PULL_CONSTANT_LOAD instruction.
2499 *
2500 * Unlike temporary GRF array access (which we don't support, due to the
2501 * difficulty of doing relative addressing on instruction destinations),
2502 * we could potentially do array access of uniforms
2503 * that were loaded in GRF space as push constants.  In real-world
2504 * usage we've seen, though, the arrays being used are always larger
2505 * than we could load as push constants, so just always move all
2506 * uniform array access out to a pull constant buffer.
2507 */
2508void
2509vec4_visitor::move_uniform_array_access_to_pull_constants()
2510{
2511   int pull_constant_loc[this->uniforms];
2512
2513   for (int i = 0; i < this->uniforms; i++) {
2514      pull_constant_loc[i] = -1;
2515   }
2516
2517   /* Walk through and find array access of uniforms.  Put a copy of that
2518    * uniform in the pull constant buffer.
2519    *
2520    * Note that we don't move constant-indexed accesses to arrays.  No
2521    * testing has been done of the performance impact of this choice.
2522    */
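   /* The components of each such uniform get appended to pull_param[] one
    * float at a time, and pull_constant_loc[] records where it landed in
    * vec4 units (nr_pull_params / 4); that vec4 index is what is later
    * passed to emit_pull_constant_load() as base_offset.
    */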
2523   foreach_list_safe(node, &this->instructions) {
2524      vec4_instruction *inst = (vec4_instruction *)node;
2525
2526      for (int i = 0 ; i < 3; i++) {
2527	 if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
2528	    continue;
2529
2530	 int uniform = inst->src[i].reg;
2531
2532	 /* If this array isn't already present in the pull constant buffer,
2533	  * add it.
2534	  */
2535	 if (pull_constant_loc[uniform] == -1) {
2536	    const float **values = &prog_data->param[uniform * 4];
2537
2538	    pull_constant_loc[uniform] = prog_data->nr_pull_params / 4;
2539
2540	    for (int j = 0; j < uniform_size[uniform] * 4; j++) {
2541	       prog_data->pull_param[prog_data->nr_pull_params++] = values[j];
2542	    }
2543	 }
2544
2545	 /* Set up the annotation tracking for newly generated instructions. */
2546	 base_ir = inst->ir;
2547	 current_annotation = inst->annotation;
2548
2549	 dst_reg temp = dst_reg(this, glsl_type::vec4_type);
2550
2551	 emit_pull_constant_load(inst, temp, inst->src[i],
2552				 pull_constant_loc[uniform]);
2553
2554	 inst->src[i].file = temp.file;
2555	 inst->src[i].reg = temp.reg;
2556	 inst->src[i].reg_offset = temp.reg_offset;
2557	 inst->src[i].reladdr = NULL;
2558      }
2559   }
2560
2561   /* Now there are no accesses of the UNIFORM file with a reladdr, so
2562    * no need to track them as larger-than-vec4 objects.  This will be
2563    * relied on in cutting out unused uniform vectors from push
2564    * constants.
2565    */
2566   split_uniform_registers();
2567}
2568
2569void
2570vec4_visitor::resolve_ud_negate(src_reg *reg)
2571{
2572   if (reg->type != BRW_REGISTER_TYPE_UD ||
2573       !reg->negate)
2574      return;
2575
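   /* The MOV below applies the negate source modifier while copying into a
    * fresh unsigned temporary, so the rest of code generation only ever sees
    * a plain, already-negated UD value.
    */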
2576   src_reg temp = src_reg(this, glsl_type::uvec4_type);
2577   emit(BRW_OPCODE_MOV, dst_reg(temp), *reg);
2578   *reg = temp;
2579}
2580
2581vec4_visitor::vec4_visitor(struct brw_vs_compile *c,
2582			   struct gl_shader_program *prog,
2583			   struct brw_shader *shader)
2584{
2585   this->c = c;
2586   this->p = &c->func;
2587   this->brw = p->brw;
2588   this->intel = &brw->intel;
2589   this->ctx = &intel->ctx;
2590   this->prog = prog;
2591   this->shader = shader;
2592
2593   this->mem_ctx = ralloc_context(NULL);
2594   this->failed = false;
2595
2596   this->base_ir = NULL;
2597   this->current_annotation = NULL;
2598
2600   this->vp = (struct gl_vertex_program *)
2601     prog->_LinkedShaders[MESA_SHADER_VERTEX]->Program;
2602   this->prog_data = &c->prog_data;
2603
2604   this->variable_ht = hash_table_ctor(0,
2605				       hash_table_pointer_hash,
2606				       hash_table_pointer_compare);
2607
2608   this->virtual_grf_def = NULL;
2609   this->virtual_grf_use = NULL;
2610   this->virtual_grf_sizes = NULL;
2611   this->virtual_grf_count = 0;
2612   this->virtual_grf_reg_map = NULL;
2613   this->virtual_grf_reg_count = 0;
2614   this->virtual_grf_array_size = 0;
2615   this->live_intervals_valid = false;
2616
2617   this->max_grf = intel->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
2618
2619   this->uniforms = 0;
2620}
2621
2622vec4_visitor::~vec4_visitor()
2623{
2624   ralloc_free(this->mem_ctx);
2625   hash_table_dtor(this->variable_ht);
2626}
2627
2628
2629void
2630vec4_visitor::fail(const char *format, ...)
2631{
2632   va_list va;
2633   char *msg;
2634
2635   if (failed)
2636      return;
2637
2638   failed = true;
2639
2640   va_start(va, format);
2641   msg = ralloc_vasprintf(mem_ctx, format, va);
2642   va_end(va);
2643   msg = ralloc_asprintf(mem_ctx, "VS compile failed: %s\n", msg);
2644
2645   this->fail_msg = msg;
2646
2647   if (INTEL_DEBUG & DEBUG_VS) {
2648      fprintf(stderr, "%s",  msg);
2649   }
2650}
2651
2652} /* namespace brw */
2653