brw_vec4_visitor.cpp revision 25ca9cc8236845a4be32a6f39b4a6d1664d4b403
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
extern "C" {
#include "main/macros.h"
#include "program/prog_parameter.h"
#include "program/sampler.h"
}

namespace brw {

vec4_instruction::vec4_instruction(vec4_visitor *v,
                                   enum opcode opcode, dst_reg dst,
                                   src_reg src0, src_reg src1, src_reg src2)
{
   this->opcode = opcode;
   this->dst = dst;
   this->src[0] = src0;
   this->src[1] = src1;
   this->src[2] = src2;
   this->ir = v->base_ir;
   this->annotation = v->current_annotation;
}

vec4_instruction *
vec4_visitor::emit(vec4_instruction *inst)
{
   this->instructions.push_tail(inst);

   return inst;
}

vec4_instruction *
vec4_visitor::emit_before(vec4_instruction *inst, vec4_instruction *new_inst)
{
   new_inst->ir = inst->ir;
   new_inst->annotation = inst->annotation;

   inst->insert_before(new_inst);

   return inst;
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst,
                   src_reg src0, src_reg src1, src_reg src2)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst,
                                             src0, src1, src2));
}


vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0, src1));
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0));
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst_reg()));
}

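/* Convenience constructors: each ALU1/ALU2 use below defines a helper
 * (e.g. vec4_visitor::ADD) that builds the corresponding vec4_instruction
 * without emitting it, so callers can adjust fields before emit().
 */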
#define ALU1(op)                                                        \
   vec4_instruction *                                                   \
   vec4_visitor::op(dst_reg dst, src_reg src0)                          \
   {                                                                    \
      return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst,  \
                                           src0);                       \
   }

#define ALU2(op)                                                        \
   vec4_instruction *                                                   \
   vec4_visitor::op(dst_reg dst, src_reg src0, src_reg src1)            \
   {                                                                    \
      return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst,  \
                                           src0, src1);                 \
   }

ALU1(NOT)
ALU1(MOV)
ALU1(FRC)
ALU1(RNDD)
ALU1(RNDE)
ALU1(RNDZ)
ALU2(ADD)
ALU2(MUL)
ALU2(MACH)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(DP3)
ALU2(DP4)

/** Gen4 predicated IF. */
vec4_instruction *
vec4_visitor::IF(uint32_t predicate)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF);
   inst->predicate = predicate;

   return inst;
}

/** Gen6+ IF with embedded comparison. */
vec4_instruction *
vec4_visitor::IF(src_reg src0, src_reg src1, uint32_t condition)
{
   assert(intel->gen >= 6);

   vec4_instruction *inst;

   resolve_ud_negate(&src0);
   resolve_ud_negate(&src1);

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF, dst_null_d(),
                                        src0, src1);
   inst->conditional_mod = condition;

   return inst;
}

/**
 * CMP: Sets the low bit of the destination channels with the result
 * of the comparison, while the upper bits are undefined, and updates
 * the flag register with the packed 16 bits of the result.
 */
vec4_instruction *
vec4_visitor::CMP(dst_reg dst, src_reg src0, src_reg src1, uint32_t condition)
{
   vec4_instruction *inst;

   /* Original gen4 does type conversion to the destination type
    * before comparison, producing garbage results for floating
    * point comparisons.
    */
   if (intel->gen == 4) {
      dst.type = src0.type;
      if (dst.file == HW_REG)
         dst.fixed_hw_reg.type = dst.type;
   }

   resolve_ud_negate(&src0);
   resolve_ud_negate(&src1);

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_CMP, dst, src0, src1);
   inst->conditional_mod = condition;

   return inst;
}

vec4_instruction *
vec4_visitor::SCRATCH_READ(dst_reg dst, src_reg index)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_READ,
                                        dst, index);
   inst->base_mrf = 14;
   inst->mlen = 1;

   return inst;
}

vec4_instruction *
vec4_visitor::SCRATCH_WRITE(dst_reg dst, src_reg src, src_reg index)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_WRITE,
                                        dst, src, index);
   inst->base_mrf = 13;
   inst->mlen = 2;

   return inst;
}

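/* Emit a dot product of the requested width: elements must be 2, 3, or 4,
 * selecting DP2, DP3, or DP4 respectively.
 */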
void
vec4_visitor::emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements)
{
   static enum opcode dot_opcodes[] = {
      BRW_OPCODE_DP2, BRW_OPCODE_DP3, BRW_OPCODE_DP4
   };

   emit(dot_opcodes[elements - 2], dst, src0, src1);
}

void
vec4_visitor::emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src)
{
   /* The gen6 math instruction ignores the source modifiers --
    * swizzle, abs, negate, and at least some parts of the register
    * region description.
    *
    * While it would seem that this MOV could be avoided at this point
    * in the case that the swizzle is matched up with the destination
    * writemask, note that uniform packing and register allocation
    * could rearrange our swizzle, so let's leave this matter up to
    * copy propagation later.
    */
   src_reg temp_src = src_reg(this, glsl_type::vec4_type);
   emit(MOV(dst_reg(temp_src), src));

   if (dst.writemask != WRITEMASK_XYZW) {
      /* The gen6 math instruction must be align1, so we can't do
       * writemasks.
       */
      dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type);

      emit(opcode, temp_dst, temp_src);

      emit(MOV(dst, src_reg(temp_dst)));
   } else {
      emit(opcode, dst, temp_src);
   }
}

void
vec4_visitor::emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src)
{
   vec4_instruction *inst = emit(opcode, dst, src);
   inst->base_mrf = 1;
   inst->mlen = 1;
}

void
vec4_visitor::emit_math(opcode opcode, dst_reg dst, src_reg src)
{
   switch (opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      break;
   default:
      assert(!"not reached: bad math opcode");
      return;
   }

   if (intel->gen >= 7) {
      emit(opcode, dst, src);
   } else if (intel->gen == 6) {
      return emit_math1_gen6(opcode, dst, src);
   } else {
      return emit_math1_gen4(opcode, dst, src);
   }
}

void
vec4_visitor::emit_math2_gen6(enum opcode opcode,
                              dst_reg dst, src_reg src0, src_reg src1)
{
   src_reg expanded;

   /* The gen6 math instruction ignores the source modifiers --
    * swizzle, abs, negate, and at least some parts of the register
    * region description.  Move the sources to temporaries to make it
    * generally work.
    */

   expanded = src_reg(this, glsl_type::vec4_type);
   expanded.type = src0.type;
   emit(MOV(dst_reg(expanded), src0));
   src0 = expanded;

   expanded = src_reg(this, glsl_type::vec4_type);
   expanded.type = src1.type;
   emit(MOV(dst_reg(expanded), src1));
   src1 = expanded;

   if (dst.writemask != WRITEMASK_XYZW) {
      /* The gen6 math instruction must be align1, so we can't do
       * writemasks.
       */
      dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type);
      temp_dst.type = dst.type;

      emit(opcode, temp_dst, src0, src1);

      emit(MOV(dst, src_reg(temp_dst)));
   } else {
      emit(opcode, dst, src0, src1);
   }
}

void
vec4_visitor::emit_math2_gen4(enum opcode opcode,
                              dst_reg dst, src_reg src0, src_reg src1)
{
   vec4_instruction *inst = emit(opcode, dst, src0, src1);
   inst->base_mrf = 1;
   inst->mlen = 2;
}

void
vec4_visitor::emit_math(enum opcode opcode,
                        dst_reg dst, src_reg src0, src_reg src1)
{
   switch (opcode) {
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      break;
   default:
      assert(!"not reached: unsupported binary math opcode");
      return;
   }

   if (intel->gen >= 7) {
      emit(opcode, dst, src0, src1);
   } else if (intel->gen == 6) {
      return emit_math2_gen6(opcode, dst, src0, src1);
   } else {
      return emit_math2_gen4(opcode, dst, src0, src1);
   }
}

void
vec4_visitor::visit_instructions(const exec_list *list)
{
   foreach_list(node, list) {
      ir_instruction *ir = (ir_instruction *)node;

      base_ir = ir;
      ir->accept(this);
   }
}


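/* Returns the size of a GLSL type in vec4 (register) slots: every scalar or
 * vector takes a full vec4, matrices take one slot per column, and aggregates
 * are the sum of their members.
 */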
static int
type_size(const struct glsl_type *type)
{
   unsigned int i;
   int size;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      if (type->is_matrix()) {
         return type->matrix_columns;
      } else {
         /* Regardless of size of vector, it gets a vec4. This is bad
          * packing for things like floats, but otherwise arrays become a
          * mess.  Hopefully a later pass over the code can pack scalars
          * down if appropriate.
          */
         return 1;
      }
   case GLSL_TYPE_ARRAY:
      assert(type->length > 0);
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up one slot in UNIFORMS[], but they're baked in
       * at link time.
       */
      return 1;
   default:
      assert(0);
      return 0;
   }
}

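/* Allocates a new virtual GRF of the given size (in vec4 registers), growing
 * the size/offset tracking arrays as needed, and returns its index.
 */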
int
vec4_visitor::virtual_grf_alloc(int size)
{
   if (virtual_grf_array_size <= virtual_grf_count) {
      if (virtual_grf_array_size == 0)
         virtual_grf_array_size = 16;
      else
         virtual_grf_array_size *= 2;
      virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int,
                                   virtual_grf_array_size);
      virtual_grf_reg_map = reralloc(mem_ctx, virtual_grf_reg_map, int,
                                     virtual_grf_array_size);
   }
   virtual_grf_reg_map[virtual_grf_count] = virtual_grf_reg_count;
   virtual_grf_reg_count += size;
   virtual_grf_sizes[virtual_grf_count] = size;
   return virtual_grf_count++;
}

src_reg::src_reg(class vec4_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));

   if (type->is_array() || type->is_record()) {
      this->swizzle = BRW_SWIZZLE_NOOP;
   } else {
      this->swizzle = swizzle_for_size(type->vector_elements);
   }

   this->type = brw_type_for_base_type(type);
}

dst_reg::dst_reg(class vec4_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));

   if (type->is_array() || type->is_record()) {
      this->writemask = WRITEMASK_XYZW;
   } else {
      this->writemask = (1 << type->vector_elements) - 1;
   }

   this->type = brw_type_for_base_type(type);
}

/* Our support for uniforms is piggy-backed on the struct
 * gl_vertex_program, because that's where the values actually
 * get stored, rather than in some global gl_shader_program uniform
 * store.
 */
int
vec4_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;
   float *values = &this->vp->Base.Parameters->ParameterValues[loc][0].f;

   if (type->is_matrix()) {
      const glsl_type *column = type->column_type();

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
         offset += setup_uniform_values(loc + offset, column);
      }

      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->vector_elements; i++) {
         c->prog_data.param[this->uniforms * 4 + i] = &values[i];
      }

      /* Set up pad elements to get things aligned to a vec4 boundary. */
      for (unsigned int i = type->vector_elements; i < 4; i++) {
         static float zero = 0;

         c->prog_data.param[this->uniforms * 4 + i] = &zero;
      }

      /* Track the size of this uniform vector, for future packing of
       * uniforms.
       */
      this->uniform_vector_size[this->uniforms] = type->vector_elements;
      this->uniforms++;

      return 1;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset,
                                        type->fields.structure[i].type);
      }
      return offset;

   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;

   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;

   default:
      assert(!"not reached");
      return 0;
   }
}

void
vec4_visitor::setup_uniform_clipplane_values()
{
   gl_clip_plane *clip_planes = brw_select_clip_planes(ctx);

   /* Pre-Gen6, we compact clip planes.  For example, if the user
    * enables just clip planes 0, 1, and 3, we will enable clip planes
    * 0, 1, and 2 in the hardware, and we'll move clip plane 3 to clip
    * plane 2.  This simplifies the implementation of the Gen6 clip
    * thread.
    *
    * In Gen6 and later, we don't compact clip planes, because this
    * simplifies the implementation of gl_ClipDistance.
    */
   int compacted_clipplane_index = 0;
   for (int i = 0; i < c->key.nr_userclip_plane_consts; ++i) {
      if (intel->gen < 6 &&
          !(c->key.userclip_planes_enabled_gen_4_5 & (1 << i))) {
         continue;
      }
      this->uniform_vector_size[this->uniforms] = 4;
      this->userplane[compacted_clipplane_index] = dst_reg(UNIFORM, this->uniforms);
      this->userplane[compacted_clipplane_index].type = BRW_REGISTER_TYPE_F;
      for (int j = 0; j < 4; ++j) {
         c->prog_data.param[this->uniforms * 4 + j] = &clip_planes[i][j];
      }
      ++compacted_clipplane_index;
      ++this->uniforms;
   }
}

/* Our support for builtin uniforms is even scarier than non-builtin.
 * It sits on top of the PROG_STATE_VAR parameters that are
 * automatically updated from GL context state.
 */
void
vec4_visitor::setup_builtin_uniform_values(ir_variable *ir)
{
   const ir_state_slot *const slots = ir->state_slots;
   assert(ir->state_slots != NULL);

   for (unsigned int i = 0; i < ir->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa,
       * but we'll get the same index back here.  We can reference
       * ParameterValues directly, since unlike brw_fs.cpp, we never
       * add new state references during compile.
       */
      int index = _mesa_add_state_reference(this->vp->Base.Parameters,
                                            (gl_state_index *)slots[i].tokens);
      float *values = &this->vp->Base.Parameters->ParameterValues[index][0].f;

      this->uniform_vector_size[this->uniforms] = 0;
      /* Add each of the unique swizzled channels of the element.
       * This will end up matching the size of the glsl_type of this field.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         last_swiz = swiz;

         c->prog_data.param[this->uniforms * 4 + j] = &values[swiz];
         if (swiz <= last_swiz)
            this->uniform_vector_size[this->uniforms]++;
      }
      this->uniforms++;
   }
}

dst_reg *
vec4_visitor::variable_storage(ir_variable *var)
{
   return (dst_reg *)hash_table_find(this->variable_ht, var);
}

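/* Emits instructions that leave the flag register holding the result of a
 * boolean condition, and returns (via *predicate) the predicate mode a
 * following instruction should use to be gated on that condition.
 */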
void
vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate)
{
   ir_expression *expr = ir->as_expression();

   *predicate = BRW_PREDICATE_NORMAL;

   if (expr) {
      src_reg op[2];
      vec4_instruction *inst;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         expr->operands[i]->accept(this);
         op[i] = this->result;

         resolve_ud_negate(&op[i]);
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(AND(dst_null_d(), op[0], src_reg(1)));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;

      case ir_binop_logic_xor:
         inst = emit(XOR(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_or:
         inst = emit(OR(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_and:
         inst = emit(AND(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_f2b:
         if (intel->gen >= 6) {
            emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
         } else {
            inst = emit(MOV(dst_null_f(), op[0]));
            inst->conditional_mod = BRW_CONDITIONAL_NZ;
         }
         break;

      case ir_unop_i2b:
         if (intel->gen >= 6) {
            emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         } else {
            inst = emit(MOV(dst_null_d(), op[0]));
            inst->conditional_mod = BRW_CONDITIONAL_NZ;
         }
         break;

      case ir_binop_all_equal:
         inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
         break;

      case ir_binop_any_nequal:
         inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
         *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
         break;

      case ir_unop_any:
         inst = emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
         break;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_nequal:
         emit(CMP(dst_null_d(), op[0], op[1],
                  brw_conditional_for_comparison(expr->operation)));
         break;

      default:
         assert(!"not reached");
         break;
      }
      return;
   }

   ir->accept(this);

   resolve_ud_negate(&this->result);

   if (intel->gen >= 6) {
      vec4_instruction *inst = emit(AND(dst_null_d(),
                                        this->result, src_reg(1)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   } else {
      vec4_instruction *inst = emit(MOV(dst_null_d(), this->result));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }
}

/**
 * Emit a gen6 IF statement with the comparison folded into the IF
 * instruction.
 */
void
vec4_visitor::emit_if_gen6(ir_if *ir)
{
   ir_expression *expr = ir->condition->as_expression();

   if (expr) {
      src_reg op[2];
      dst_reg temp;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_Z));
         return;

      case ir_binop_logic_xor:
         emit(IF(op[0], op[1], BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_logic_or:
         temp = dst_reg(this, glsl_type::bool_type);
         emit(OR(temp, op[0], op[1]));
         emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_logic_and:
         temp = dst_reg(this, glsl_type::bool_type);
         emit(AND(temp, op[0], op[1]));
         emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_unop_f2b:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_unop_i2b:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_nequal:
         emit(IF(op[0], op[1],
                 brw_conditional_for_comparison(expr->operation)));
         return;

      case ir_binop_all_equal:
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         emit(IF(BRW_PREDICATE_ALIGN16_ALL4H));
         return;

      case ir_binop_any_nequal:
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
         emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
         return;

      case ir_unop_any:
         emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
         return;

      default:
         assert(!"not reached");
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;
      }
      return;
   }

   ir->condition->accept(this);

   emit(IF(this->result, src_reg(0), BRW_CONDITIONAL_NZ));
}

void
vec4_visitor::visit(ir_variable *ir)
{
   dst_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   switch (ir->mode) {
   case ir_var_in:
      reg = new(mem_ctx) dst_reg(ATTR, ir->location);

      /* Do GL_FIXED rescaling for GLES2.0.  Our GL_FIXED attributes
       * come in as floating point conversions of the integer values.
       */
      for (int i = ir->location; i < ir->location + type_size(ir->type); i++) {
         if (!c->key.gl_fixed_input_size[i])
            continue;

         dst_reg dst = *reg;
         dst.type = brw_type_for_base_type(ir->type);
         dst.writemask = (1 << c->key.gl_fixed_input_size[i]) - 1;
         emit(MUL(dst, src_reg(dst), src_reg(1.0f / 65536.0f)));
      }
      break;

   case ir_var_out:
      reg = new(mem_ctx) dst_reg(this, ir->type);

      for (int i = 0; i < type_size(ir->type); i++) {
         output_reg[ir->location + i] = *reg;
         output_reg[ir->location + i].reg_offset = i;
         output_reg[ir->location + i].type =
            brw_type_for_base_type(ir->type->get_scalar_type());
         output_reg_annotation[ir->location + i] = ir->name;
      }
      break;

   case ir_var_auto:
   case ir_var_temporary:
      reg = new(mem_ctx) dst_reg(this, ir->type);
      break;

   case ir_var_uniform:
      reg = new(this->mem_ctx) dst_reg(UNIFORM, this->uniforms);

      /* Track how big the whole uniform variable is, in case we need to put a
       * copy of its data into pull constants for array access.
       */
      this->uniform_size[this->uniforms] = type_size(ir->type);

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }
      break;

   case ir_var_system_value:
      /* VertexID is stored by the VF as the last vertex element, but
       * we don't represent it with a flag in inputs_read, so we call
       * it VERT_ATTRIB_MAX, which setup_attributes() picks up on.
       */
      reg = new(mem_ctx) dst_reg(ATTR, VERT_ATTRIB_MAX);
      prog_data->uses_vertexid = true;

      switch (ir->location) {
      case SYSTEM_VALUE_VERTEX_ID:
         reg->writemask = WRITEMASK_X;
         break;
      case SYSTEM_VALUE_INSTANCE_ID:
         reg->writemask = WRITEMASK_Y;
         break;
      default:
         assert(!"not reached");
         break;
      }
      break;

   default:
      assert(!"not reached");
   }

   reg->type = brw_type_for_base_type(ir->type);
   hash_table_insert(this->variable_ht, reg, ir);
}

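/* Lower a GLSL loop to the hardware's structured DO ... WHILE form, emitting
 * the counter initialization, the bounds check as a predicated BREAK, the
 * body, and the increment.
 */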
void
vec4_visitor::visit(ir_loop *ir)
{
   dst_reg counter;

   /* We don't want debugging output to print the whole body of the
    * loop as the annotation.
    */
   this->base_ir = NULL;

   if (ir->counter != NULL) {
      this->base_ir = ir->counter;
      ir->counter->accept(this);
      counter = *(variable_storage(ir->counter));

      if (ir->from != NULL) {
         this->base_ir = ir->from;
         ir->from->accept(this);

         emit(MOV(counter, this->result));
      }
   }

   emit(BRW_OPCODE_DO);

   if (ir->to) {
      this->base_ir = ir->to;
      ir->to->accept(this);

      emit(CMP(dst_null_d(), src_reg(counter), this->result,
               brw_conditional_for_comparison(ir->cmp)));

      vec4_instruction *inst = emit(BRW_OPCODE_BREAK);
      inst->predicate = BRW_PREDICATE_NORMAL;
   }

   visit_instructions(&ir->body_instructions);


   if (ir->increment) {
      this->base_ir = ir->increment;
      ir->increment->accept(this);
      emit(ADD(counter, src_reg(counter), this->result));
   }

   emit(BRW_OPCODE_WHILE);
}

void
vec4_visitor::visit(ir_loop_jump *ir)
{
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      emit(BRW_OPCODE_BREAK);
      break;
   case ir_loop_jump::jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;
   }
}


void
vec4_visitor::visit(ir_function_signature *ir)
{
   assert(0);
   (void)ir;
}

void
vec4_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all be inlined.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      sig = ir->matching_signature(&empty);

      assert(sig);

      visit_instructions(&sig->body);
   }
}

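/* If this expression is just a saturate of another rvalue, emit it as a
 * single saturating MOV and return true; otherwise return false and let the
 * normal expression path handle it.
 */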
bool
vec4_visitor::try_emit_sat(ir_expression *ir)
{
   ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
   if (!sat_src)
      return false;

   sat_src->accept(this);
   src_reg src = this->result;

   this->result = src_reg(this, ir->type);
   vec4_instruction *inst;
   inst = emit(MOV(dst_reg(this->result), src));
   inst->saturate = true;

   return true;
}

void
vec4_visitor::emit_bool_comparison(unsigned int op,
                                   dst_reg dst, src_reg src0, src_reg src1)
{
   /* original gen4 does destination conversion before comparison. */
   if (intel->gen < 5)
      dst.type = src0.type;

   emit(CMP(dst, src0, src1, brw_conditional_for_comparison(op)));

   dst.type = BRW_REGISTER_TYPE_D;
   emit(AND(dst, src_reg(dst), src_reg(0x1)));
}

void
vec4_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   src_reg op[Elements(ir->operands)];
   src_reg result_src;
   dst_reg result_dst;
   vec4_instruction *inst;

   if (try_emit_sat(ir))
      return;

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      this->result.file = BAD_FILE;
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->print();
         exit(1);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
   }

   int vector_elements = ir->operands[0]->type->vector_elements;
   if (ir->operands[1]) {
      vector_elements = MAX2(vector_elements,
                             ir->operands[1]->type->vector_elements);
   }

   this->result.file = BAD_FILE;

   /* Storage for our result.  Ideally for an assignment we'd be using
    * the actual storage for the result here, instead.
    */
   result_src = src_reg(this, ir->type);
   /* convenience for the emit functions below. */
   result_dst = dst_reg(result_src);
   /* If nothing special happens, this is the result. */
   this->result = result_src;
   /* Limit writes to the channels that will be used by result_src later.
    * This does limit this temp's use as a temporary for multi-instruction
    * sequences.
    */
   result_dst.writemask = (1 << ir->type->vector_elements) - 1;

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
       * ones complement of the whole register, not just bit 0.
       */
      emit(XOR(result_dst, op[0], src_reg(1)));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      this->result = op[0];
      break;

   case ir_unop_sign:
      emit(MOV(result_dst, src_reg(0.0f)));

      emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_G));
      inst = emit(MOV(result_dst, src_reg(1.0f)));
      inst->predicate = BRW_PREDICATE_NORMAL;

      emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_L));
      inst = emit(MOV(result_dst, src_reg(-1.0f)));
      inst->predicate = BRW_PREDICATE_NORMAL;

      break;

   case ir_unop_rcp:
      emit_math(SHADER_OPCODE_RCP, result_dst, op[0]);
      break;

   case ir_unop_exp2:
      emit_math(SHADER_OPCODE_EXP2, result_dst, op[0]);
      break;
   case ir_unop_log2:
      emit_math(SHADER_OPCODE_LOG2, result_dst, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(SHADER_OPCODE_SIN, result_dst, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(SHADER_OPCODE_COS, result_dst, op[0]);
      break;

   case ir_unop_dFdx:
   case ir_unop_dFdy:
      assert(!"derivatives not valid in vertex shader");
      break;

   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;

   case ir_binop_add:
      emit(ADD(result_dst, op[0], op[1]));
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;

   case ir_binop_mul:
      if (ir->type->is_integer()) {
         /* For integer multiplication, the MUL uses the low 16 bits
          * of one of the operands (src0 on gen6, src1 on gen7).  The
          * MACH accumulates in the contribution of the upper 16 bits
          * of that operand.
          *
          * FINISHME: Emit just the MUL if we know an operand is small
          * enough.
          */
         struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);

         emit(MUL(acc, op[0], op[1]));
         emit(MACH(dst_null_d(), op[0], op[1]));
         emit(MOV(result_dst, src_reg(acc)));
      } else {
         emit(MUL(result_dst, op[0], op[1]));
      }
      break;
   case ir_binop_div:
      /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_QUOTIENT, result_dst, op[0], op[1]);
      break;
   case ir_binop_mod:
      /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_REMAINDER, result_dst, op[0], op[1]);
      break;

   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_nequal: {
      emit(CMP(result_dst, op[0], op[1],
               brw_conditional_for_comparison(ir->operation)));
      emit(AND(result_dst, result_src, src_reg(0x1)));
      break;
   }

   case ir_binop_all_equal:
      /* "==" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         emit(MOV(result_dst, src_reg(0)));
         inst = emit(MOV(result_dst, src_reg(1)));
         inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_Z));
         emit(AND(result_dst, result_src, src_reg(0x1)));
      }
      break;
   case ir_binop_any_nequal:
      /* "!=" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));

         emit(MOV(result_dst, src_reg(0)));
         inst = emit(MOV(result_dst, src_reg(1)));
         inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_NZ));
         emit(AND(result_dst, result_src, src_reg(0x1)));
      }
      break;

   case ir_unop_any:
      emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
      emit(MOV(result_dst, src_reg(0)));

      inst = emit(MOV(result_dst, src_reg(1)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;

   case ir_binop_logic_xor:
      emit(XOR(result_dst, op[0], op[1]));
      break;

   case ir_binop_logic_or:
      emit(OR(result_dst, op[0], op[1]));
      break;

   case ir_binop_logic_and:
      emit(AND(result_dst, op[0], op[1]));
      break;

   case ir_binop_dot:
      assert(ir->operands[0]->type->is_vector());
      assert(ir->operands[0]->type == ir->operands[1]->type);
      emit_dp(result_dst, op[0], op[1], ir->operands[0]->type->vector_elements);
      break;

   case ir_unop_sqrt:
      emit_math(SHADER_OPCODE_SQRT, result_dst, op[0]);
      break;
   case ir_unop_rsq:
      emit_math(SHADER_OPCODE_RSQ, result_dst, op[0]);
      break;

   case ir_unop_bitcast_i2f:
   case ir_unop_bitcast_u2f:
      this->result = op[0];
      this->result.type = BRW_REGISTER_TYPE_F;
      break;

   case ir_unop_bitcast_f2i:
      this->result = op[0];
      this->result.type = BRW_REGISTER_TYPE_D;
      break;

   case ir_unop_bitcast_f2u:
      this->result = op[0];
      this->result.type = BRW_REGISTER_TYPE_UD;
      break;

   case ir_unop_i2f:
   case ir_unop_i2u:
   case ir_unop_u2i:
   case ir_unop_u2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
   case ir_unop_f2i:
   case ir_unop_f2u:
      emit(MOV(result_dst, op[0]));
      break;
   case ir_unop_f2b:
   case ir_unop_i2b: {
      emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
      emit(AND(result_dst, result_src, src_reg(1)));
      break;
   }

   case ir_unop_trunc:
      emit(RNDZ(result_dst, op[0]));
      break;
   case ir_unop_ceil:
      op[0].negate = !op[0].negate;
      inst = emit(RNDD(result_dst, op[0]));
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(RNDD(result_dst, op[0]));
      break;
   case ir_unop_fract:
      inst = emit(FRC(result_dst, op[0]));
      break;
   case ir_unop_round_even:
      emit(RNDE(result_dst, op[0]));
      break;

   case ir_binop_min:
      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_L));

         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      break;
   case ir_binop_max:
      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_G;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_G));

         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      break;

   case ir_binop_pow:
      emit_math(SHADER_OPCODE_POW, result_dst, op[0], op[1]);
      break;

   case ir_unop_bit_not:
      inst = emit(NOT(result_dst, op[0]));
      break;
   case ir_binop_bit_and:
      inst = emit(AND(result_dst, op[0], op[1]));
      break;
   case ir_binop_bit_xor:
      inst = emit(XOR(result_dst, op[0], op[1]));
      break;
   case ir_binop_bit_or:
      inst = emit(OR(result_dst, op[0], op[1]));
      break;

   case ir_binop_lshift:
      inst = emit(BRW_OPCODE_SHL, result_dst, op[0], op[1]);
      break;

   case ir_binop_rshift:
      if (ir->type->base_type == GLSL_TYPE_INT)
         inst = emit(BRW_OPCODE_ASR, result_dst, op[0], op[1]);
      else
         inst = emit(BRW_OPCODE_SHR, result_dst, op[0], op[1]);
      break;

   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;
   }
}


void
vec4_visitor::visit(ir_swizzle *ir)
{
   src_reg src;
   int i = 0;
   int swizzle[4];

   /* Note that this is only swizzles in expressions, not those on the left
    * hand side of an assignment, which do write masking.  See ir_assignment
    * for that.
    */

   ir->val->accept(this);
   src = this->result;
   assert(src.file != BAD_FILE);

   for (i = 0; i < ir->type->vector_elements; i++) {
      switch (i) {
      case 0:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.x);
         break;
      case 1:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.y);
         break;
      case 2:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.z);
         break;
      case 3:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.w);
         break;
      }
   }
   for (; i < 4; i++) {
      /* Replicate the last channel out. */
      swizzle[i] = swizzle[ir->type->vector_elements - 1];
   }

   src.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);

   this->result = src;
}

void
vec4_visitor::visit(ir_dereference_variable *ir)
{
   const struct glsl_type *type = ir->type;
   dst_reg *reg = variable_storage(ir->var);

   if (!reg) {
      fail("Failed to find variable storage for %s\n", ir->var->name);
      this->result = src_reg(brw_null_reg());
      return;
   }

   this->result = src_reg(*reg);

   /* System values get their swizzle from the dst_reg writemask */
   if (ir->var->mode == ir_var_system_value)
      return;

   if (type->is_scalar() || type->is_vector() || type->is_matrix())
      this->result.swizzle = swizzle_for_size(type->vector_elements);
}

void
vec4_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *constant_index;
   src_reg src;
   int element_size = type_size(ir->type);

   constant_index = ir->array_index->constant_expression_value();

   ir->array->accept(this);
   src = this->result;

   if (constant_index) {
      src.reg_offset += constant_index->value.i[0] * element_size;
   } else {
      /* Variable index array dereference.  It eats the "vec4" of the
       * base of the array and an index that offsets the Mesa register
       * index.
       */
      ir->array_index->accept(this);

      src_reg index_reg;

      if (element_size == 1) {
         index_reg = this->result;
      } else {
         index_reg = src_reg(this, glsl_type::int_type);

         emit(MUL(dst_reg(index_reg), this->result, src_reg(element_size)));
      }

      if (src.reladdr) {
         src_reg temp = src_reg(this, glsl_type::int_type);

         emit(ADD(dst_reg(temp), *src.reladdr, index_reg));

         index_reg = temp;
      }

      src.reladdr = ralloc(mem_ctx, src_reg);
      memcpy(src.reladdr, &index_reg, sizeof(index_reg));
   }

   /* If the type is smaller than a vec4, replicate the last channel out. */
   if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix())
      src.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      src.swizzle = BRW_SWIZZLE_NOOP;
   src.type = brw_type_for_base_type(ir->type);

   this->result = src;
}

void
vec4_visitor::visit(ir_dereference_record *ir)
{
   unsigned int i;
   const glsl_type *struct_type = ir->record->type;
   int offset = 0;

   ir->record->accept(this);

   for (i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }

   /* If the type is smaller than a vec4, replicate the last channel out. */
   if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix())
      this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      this->result.swizzle = BRW_SWIZZLE_NOOP;
   this->result.type = brw_type_for_base_type(ir->type);

   this->result.reg_offset += offset;
}

/**
 * We want to be careful in assignment setup to hit the actual storage
 * instead of potentially using a temporary like we might with the
 * ir_dereference handler.
 */
static dst_reg
get_assignment_lhs(ir_dereference *ir, vec4_visitor *v)
{
   /* The LHS must be a dereference.  If the LHS is a variable indexed array
    * access of a vector, it must be separated into a series of conditional
    * moves before reaching this point (see ir_vec_index_to_cond_assign).
    */
   assert(ir->as_dereference());
   ir_dereference_array *deref_array = ir->as_dereference_array();
   if (deref_array) {
      assert(!deref_array->array->type->is_vector());
   }

   /* Use the rvalue deref handler for the most part.  We'll ignore
    * swizzles in it and write swizzles using writemask, though.
    */
   ir->accept(v);
   return dst_reg(v->result);
}

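/* Copy a whole aggregate (struct, array, or matrix) by recursing down to its
 * scalar/vector members and emitting one predicated MOV per vec4 slot,
 * advancing the dst and src reg_offsets as it goes.
 */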
void
vec4_visitor::emit_block_move(dst_reg *dst, src_reg *src,
                              const struct glsl_type *type, uint32_t predicate)
{
   if (type->base_type == GLSL_TYPE_STRUCT) {
      for (unsigned int i = 0; i < type->length; i++) {
         emit_block_move(dst, src, type->fields.structure[i].type, predicate);
      }
      return;
   }

   if (type->is_array()) {
      for (unsigned int i = 0; i < type->length; i++) {
         emit_block_move(dst, src, type->fields.array, predicate);
      }
      return;
   }

   if (type->is_matrix()) {
      const struct glsl_type *vec_type;

      vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                         type->vector_elements, 1);

      for (int i = 0; i < type->matrix_columns; i++) {
         emit_block_move(dst, src, vec_type, predicate);
      }
      return;
   }

   assert(type->is_scalar() || type->is_vector());

   dst->type = brw_type_for_base_type(type);
   src->type = dst->type;

   dst->writemask = (1 << type->vector_elements) - 1;

   src->swizzle = swizzle_for_size(type->vector_elements);

   vec4_instruction *inst = emit(MOV(*dst, *src));
   inst->predicate = predicate;

   dst->reg_offset++;
   src->reg_offset++;
}


/* If the RHS processing resulted in an instruction generating a
 * temporary value, and it would be easy to rewrite the instruction to
 * generate its result right into the LHS instead, do so.  This ends
 * up reliably removing instructions where it can be tricky to do so
 * later without real UD chain information.
 */
bool
vec4_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
                                     dst_reg dst,
                                     src_reg src,
                                     vec4_instruction *pre_rhs_inst,
                                     vec4_instruction *last_rhs_inst)
{
   /* This could be supported, but it would take more smarts. */
   if (ir->condition)
      return false;

   if (pre_rhs_inst == last_rhs_inst)
      return false; /* No instructions generated to work with. */

   /* Make sure the last instruction generated our source reg. */
   if (src.file != GRF ||
       src.file != last_rhs_inst->dst.file ||
       src.reg != last_rhs_inst->dst.reg ||
       src.reg_offset != last_rhs_inst->dst.reg_offset ||
       src.reladdr ||
       src.abs ||
       src.negate ||
       last_rhs_inst->predicate != BRW_PREDICATE_NONE)
      return false;

1570    * we want to use, in the order we want to use them.  We could
1571    * potentially reswizzle the operands of many instructions so that
1572    * we could handle out of order channels, but don't yet.
1573    */
1574

   for (unsigned i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i)) {
         if (!(last_rhs_inst->dst.writemask & (1 << i)))
            return false;

         if (BRW_GET_SWZ(src.swizzle, i) != i)
            return false;
      }
   }

   /* Success!  Rewrite the instruction. */
   last_rhs_inst->dst.file = dst.file;
   last_rhs_inst->dst.reg = dst.reg;
   last_rhs_inst->dst.reg_offset = dst.reg_offset;
   last_rhs_inst->dst.reladdr = dst.reladdr;
   last_rhs_inst->dst.writemask &= dst.writemask;

   return true;
}

void
vec4_visitor::visit(ir_assignment *ir)
{
   dst_reg dst = get_assignment_lhs(ir->lhs, this);
   uint32_t predicate = BRW_PREDICATE_NONE;

   if (!ir->lhs->type->is_scalar() &&
       !ir->lhs->type->is_vector()) {
      ir->rhs->accept(this);
      src_reg src = this->result;

      if (ir->condition) {
         emit_bool_to_cond_code(ir->condition, &predicate);
      }

      /* emit_block_move doesn't account for swizzles in the source register.
       * This should be ok, since the source register is a structure or an
       * array, and those can't be swizzled.  But double-check to be sure.
       */
      assert(src.swizzle ==
             (ir->rhs->type->is_matrix()
              ? swizzle_for_size(ir->rhs->type->vector_elements)
              : BRW_SWIZZLE_NOOP));

      emit_block_move(&dst, &src, ir->rhs->type, predicate);
      return;
   }

   /* Now we're down to just a scalar/vector with writemasks. */
   int i;

   vec4_instruction *pre_rhs_inst, *last_rhs_inst;
   pre_rhs_inst = (vec4_instruction *)this->instructions.get_tail();

   ir->rhs->accept(this);

   last_rhs_inst = (vec4_instruction *)this->instructions.get_tail();

   src_reg src = this->result;

   int swizzles[4];
   int first_enabled_chan = 0;
   int src_chan = 0;

   assert(ir->lhs->type->is_vector() ||
          ir->lhs->type->is_scalar());
   dst.writemask = ir->write_mask;

   for (int i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i)) {
         first_enabled_chan = BRW_GET_SWZ(src.swizzle, i);
         break;
      }
   }

   /* Swizzle a small RHS vector into the channels being written.
    *
    * glsl ir treats write_mask as dictating how many channels are
    * present on the RHS while in our instructions we need to make
    * those channels appear in the slots of the vec4 they're written to.
    */
   for (int i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i))
         swizzles[i] = BRW_GET_SWZ(src.swizzle, src_chan++);
      else
         swizzles[i] = first_enabled_chan;
   }
   src.swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
                              swizzles[2], swizzles[3]);

   if (try_rewrite_rhs_to_dst(ir, dst, src, pre_rhs_inst, last_rhs_inst)) {
      return;
   }

   if (ir->condition) {
      emit_bool_to_cond_code(ir->condition, &predicate);
   }

   for (i = 0; i < type_size(ir->lhs->type); i++) {
      vec4_instruction *inst = emit(MOV(dst, src));
      inst->predicate = predicate;

      dst.reg_offset++;
      src.reg_offset++;
   }
}

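/* Write an ir_constant into *dst, recursing through aggregate types and
 * coalescing identical components into a single writemasked MOV where
 * possible.
 */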
void
vec4_visitor::emit_constant_values(dst_reg *dst, ir_constant *ir)
{
   if (ir->type->base_type == GLSL_TYPE_STRUCT) {
      foreach_list(node, &ir->components) {
         ir_constant *field_value = (ir_constant *)node;

         emit_constant_values(dst, field_value);
      }
      return;
   }

   if (ir->type->is_array()) {
      for (unsigned int i = 0; i < ir->type->length; i++) {
         emit_constant_values(dst, ir->array_elements[i]);
      }
      return;
   }

   if (ir->type->is_matrix()) {
      for (int i = 0; i < ir->type->matrix_columns; i++) {
         float *vec = &ir->value.f[i * ir->type->vector_elements];

         for (int j = 0; j < ir->type->vector_elements; j++) {
            dst->writemask = 1 << j;
            dst->type = BRW_REGISTER_TYPE_F;

            emit(MOV(*dst, src_reg(vec[j])));
         }
         dst->reg_offset++;
      }
      return;
   }

   int remaining_writemask = (1 << ir->type->vector_elements) - 1;

   for (int i = 0; i < ir->type->vector_elements; i++) {
      if (!(remaining_writemask & (1 << i)))
         continue;

      dst->writemask = 1 << i;
      dst->type = brw_type_for_base_type(ir->type);

      /* Find other components that match the one we're about to
       * write.  Emits fewer instructions for things like vec4(0.5,
       * 1.5, 1.5, 1.5).
       */
      for (int j = i + 1; j < ir->type->vector_elements; j++) {
         if (ir->type->base_type == GLSL_TYPE_BOOL) {
            if (ir->value.b[i] == ir->value.b[j])
               dst->writemask |= (1 << j);
         } else {
            /* u, i, and f storage all line up, so no need for a
             * switch case for comparing each type.
             */
            if (ir->value.u[i] == ir->value.u[j])
               dst->writemask |= (1 << j);
         }
      }

      switch (ir->type->base_type) {
      case GLSL_TYPE_FLOAT:
         emit(MOV(*dst, src_reg(ir->value.f[i])));
         break;
      case GLSL_TYPE_INT:
         emit(MOV(*dst, src_reg(ir->value.i[i])));
         break;
      case GLSL_TYPE_UINT:
         emit(MOV(*dst, src_reg(ir->value.u[i])));
         break;
      case GLSL_TYPE_BOOL:
         emit(MOV(*dst, src_reg(ir->value.b[i])));
         break;
      default:
         assert(!"Non-float/uint/int/bool constant");
         break;
      }

      remaining_writemask &= ~dst->writemask;
   }
   dst->reg_offset++;
}

void
vec4_visitor::visit(ir_constant *ir)
{
   dst_reg dst = dst_reg(this, ir->type);
   this->result = src_reg(dst);

   emit_constant_values(&dst, ir);
}

void
vec4_visitor::visit(ir_call *ir)
{
   assert(!"not reached");
}

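/* Set up the sampler message for a texture operation: pick the opcode, load
 * the coordinate, shadow comparitor, LOD or gradients into MRFs, emit the
 * send, and then apply the API-requested result swizzle.
 */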
1780void
1781vec4_visitor::visit(ir_texture *ir)
1782{
1783   int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &vp->Base);
1784   sampler = vp->Base.SamplerUnits[sampler];
1785
1786   /* Should be lowered by do_lower_texture_projection */
1787   assert(!ir->projector);
1788
1789   vec4_instruction *inst = NULL;
1790   switch (ir->op) {
1791   case ir_tex:
1792   case ir_txl:
1793      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXL);
1794      break;
1795   case ir_txd:
1796      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXD);
1797      break;
1798   case ir_txf:
1799      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXF);
1800      break;
1801   case ir_txs:
1802      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXS);
1803      break;
1804   case ir_txb:
1805      assert(!"TXB is not valid for vertex shaders.");
1806   }
1807
1808   /* Texel offsets go in the message header; Gen4 also requires headers. */
1809   inst->header_present = ir->offset || intel->gen < 5;
1810   inst->base_mrf = 2;
1811   inst->mlen = inst->header_present + 1; /* always at least one */
1812   inst->sampler = sampler;
1813   inst->dst = dst_reg(this, ir->type);
1814   inst->shadow_compare = ir->shadow_comparitor != NULL;
1815
1816   if (ir->offset != NULL && ir->op != ir_txf)
1817      inst->texture_offset = brw_texture_offset(ir->offset->as_constant());
1818
1819   /* MRF for the first parameter */
1820   int param_base = inst->base_mrf + inst->header_present;
1821
1822   if (ir->op == ir_txs) {
1823      ir->lod_info.lod->accept(this);
1824      int writemask = intel->gen == 4 ? WRITEMASK_W : WRITEMASK_X;
1825      emit(MOV(dst_reg(MRF, param_base, ir->lod_info.lod->type, writemask),
1826	   this->result));
1827   } else {
1828      int i, coord_mask = 0, zero_mask = 0;
1829      /* Load the coordinate */
1830      /* FINISHME: gl_clamp_mask and saturate */
1831      for (i = 0; i < ir->coordinate->type->vector_elements; i++)
1832	 coord_mask |= (1 << i);
1833      for (; i < 4; i++)
1834	 zero_mask |= (1 << i);
1835
1836      ir->coordinate->accept(this);
1837      if (ir->offset && ir->op == ir_txf) {
1838	 /* It appears that the ld instruction used for txf does its
1839	  * address bounds check before adding in the offset.  To work
1840	  * around this, just add the integer offset to the integer
1841	  * texel coordinate, and don't put the offset in the header.
1842	  */
1843	 ir_constant *offset = ir->offset->as_constant();
1844	 assert(offset);
1845
1846	 for (int j = 0; j < ir->coordinate->type->vector_elements; j++) {
1847	    src_reg src = this->result;
1848	    src.swizzle = BRW_SWIZZLE4(BRW_GET_SWZ(src.swizzle, j),
1849				       BRW_GET_SWZ(src.swizzle, j),
1850				       BRW_GET_SWZ(src.swizzle, j),
1851				       BRW_GET_SWZ(src.swizzle, j));
1852	    emit(ADD(dst_reg(MRF, param_base, ir->coordinate->type, 1 << j),
1853		     src, offset->value.i[j]));
1854	 }
1855      } else {
1856	 emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, coord_mask),
1857		  this->result));
1858      }
1859      emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, zero_mask),
1860	       src_reg(0)));
1861	      /* Load the shadow comparator */
1862      if (ir->shadow_comparitor) {
1863	 ir->shadow_comparitor->accept(this);
1864	 emit(MOV(dst_reg(MRF, param_base + 1, ir->shadow_comparitor->type,
1865			  WRITEMASK_X),
1866		  this->result));
1867	 inst->mlen++;
1868      }
1869
1870      /* Load the LOD info */
1871      if (ir->op == ir_txl) {
1872	 int mrf, writemask;
1873	 if (intel->gen >= 5) {
1874	    mrf = param_base + 1;
1875	    if (ir->shadow_comparitor) {
1876	       writemask = WRITEMASK_Y;
1877	       /* mlen already incremented */
1878	    } else {
1879	       writemask = WRITEMASK_X;
1880	       inst->mlen++;
1881	    }
1882	 } else /* intel->gen == 4 */ {
1883	    mrf = param_base;
1884	    writemask = WRITEMASK_Z;
1885	 }
1886	 ir->lod_info.lod->accept(this);
1887	 emit(MOV(dst_reg(MRF, mrf, ir->lod_info.lod->type, writemask),
1888		  this->result));
1889      } else if (ir->op == ir_txf) {
1890	 ir->lod_info.lod->accept(this);
1891	 emit(MOV(dst_reg(MRF, param_base, ir->lod_info.lod->type, WRITEMASK_W),
1892		  this->result));
1893      } else if (ir->op == ir_txd) {
1894	 const glsl_type *type = ir->lod_info.grad.dPdx->type;
1895
1896	 ir->lod_info.grad.dPdx->accept(this);
1897	 src_reg dPdx = this->result;
1898	 ir->lod_info.grad.dPdy->accept(this);
1899	 src_reg dPdy = this->result;
1900
1901	 if (intel->gen >= 5) {
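	    /* Pack the gradients interleaved as (dPdx.x, dPdy.x, dPdx.y,
	     * dPdy.y) in a single MRF, which appears to be the layout the
	     * gen5+ sample_d message expects for SIMD4x2.
	     */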
1902	    dPdx.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
1903	    dPdy.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
1904	    emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XZ), dPdx));
1905	    emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_YW), dPdy));
1906	    inst->mlen++;
1907
1908	    if (ir->type->vector_elements == 3) {
1909	       dPdx.swizzle = BRW_SWIZZLE_ZZZZ;
1910	       dPdy.swizzle = BRW_SWIZZLE_ZZZZ;
1911	       emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_X), dPdx));
1912	       emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_Y), dPdy));
1913	       inst->mlen++;
1914	    }
1915	 } else /* intel->gen == 4 */ {
1916	    emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XYZ), dPdx));
1917	    emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_XYZ), dPdy));
1918	    inst->mlen += 2;
1919	 }
1920      }
1921   }
1922
1923   emit(inst);
1924
1925   swizzle_result(ir, src_reg(inst->dst), sampler);
1926}
1927
1928void
1929vec4_visitor::swizzle_result(ir_texture *ir, src_reg orig_val, int sampler)
1930{
1931   this->result = orig_val;
1932
1933   int s = c->key.tex.swizzles[sampler];
1934
1935   if (ir->op == ir_txs || ir->type == glsl_type::float_type
1936			|| s == SWIZZLE_NOOP)
1937      return;
1938
1939   int zero_mask = 0, one_mask = 0, copy_mask = 0;
1940   int swizzle[4];
1941
1942   for (int i = 0; i < 4; i++) {
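   /* Split the GL texture swizzle into channels copied from the sampler
    * result, channels forced to zero, and channels forced to one.
    */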
1943      switch (GET_SWZ(s, i)) {
1944      case SWIZZLE_ZERO:
1945	 zero_mask |= (1 << i);
1946	 break;
1947      case SWIZZLE_ONE:
1948	 one_mask |= (1 << i);
1949	 break;
1950      default:
1951	 copy_mask |= (1 << i);
1952	 swizzle[i] = GET_SWZ(s, i);
1953	 break;
1954      }
1955   }
1956
1957   this->result = src_reg(this, ir->type);
1958   dst_reg swizzled_result(this->result);
1959
1960   if (copy_mask) {
1961      orig_val.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
1962      swizzled_result.writemask = copy_mask;
1963      emit(MOV(swizzled_result, orig_val));
1964   }
1965
1966   if (zero_mask) {
1967      swizzled_result.writemask = zero_mask;
1968      emit(MOV(swizzled_result, src_reg(0.0f)));
1969   }
1970
1971   if (one_mask) {
1972      swizzled_result.writemask = one_mask;
1973      emit(MOV(swizzled_result, src_reg(1.0f)));
1974   }
1975}
1976
1977void
1978vec4_visitor::visit(ir_return *ir)
1979{
1980   assert(!"not reached");
1981}
1982
1983void
1984vec4_visitor::visit(ir_discard *ir)
1985{
1986   assert(!"not reached");
1987}
1988
1989void
1990vec4_visitor::visit(ir_if *ir)
1991{
1992   /* Don't point the annotation at the if statement, because then it plus
1993    * the then and else blocks get printed.
1994    */
1995   this->base_ir = ir->condition;
1996
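   /* gen6's IF instruction can compare two operands itself (emit_if_gen6);
    * other generations evaluate the condition into the flag register first
    * and emit an IF predicated on it.
    */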
1997   if (intel->gen == 6) {
1998      emit_if_gen6(ir);
1999   } else {
2000      uint32_t predicate;
2001      emit_bool_to_cond_code(ir->condition, &predicate);
2002      emit(IF(predicate));
2003   }
2004
2005   visit_instructions(&ir->then_instructions);
2006
2007   if (!ir->else_instructions.is_empty()) {
2008      this->base_ir = ir->condition;
2009      emit(BRW_OPCODE_ELSE);
2010
2011      visit_instructions(&ir->else_instructions);
2012   }
2013
2014   this->base_ir = ir->condition;
2015   emit(BRW_OPCODE_ENDIF);
2016}
2017
2018void
2019vec4_visitor::emit_ndc_computation()
2020{
2021   /* Get the position */
2022   src_reg pos = src_reg(output_reg[VERT_RESULT_HPOS]);
2023
2024   /* Build ndc coords, which are (x/w, y/w, z/w, 1/w) */
2025   dst_reg ndc = dst_reg(this, glsl_type::vec4_type);
2026   output_reg[BRW_VERT_RESULT_NDC] = ndc;
2027
2028   current_annotation = "NDC";
2029   dst_reg ndc_w = ndc;
2030   ndc_w.writemask = WRITEMASK_W;
2031   src_reg pos_w = pos;
2032   pos_w.swizzle = BRW_SWIZZLE4(SWIZZLE_W, SWIZZLE_W, SWIZZLE_W, SWIZZLE_W);
2033   emit_math(SHADER_OPCODE_RCP, ndc_w, pos_w);
2034
2035   dst_reg ndc_xyz = ndc;
2036   ndc_xyz.writemask = WRITEMASK_XYZ;
2037
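   /* Converting ndc_w (writemask W) to a src_reg gives a swizzle of the
    * written channel (.wwww), so this computes ndc.xyz = pos.xyz * (1/pos.w).
    */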
2038   emit(MUL(ndc_xyz, pos, src_reg(ndc_w)));
2039}
2040
2041void
2042vec4_visitor::emit_psiz_and_flags(struct brw_reg reg)
2043{
2044   if (intel->gen < 6 &&
2045       ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
2046        c->key.userclip_active || brw->has_negative_rhw_bug)) {
2047      dst_reg header1 = dst_reg(this, glsl_type::uvec4_type);
2048      dst_reg header1_w = header1;
2049      header1_w.writemask = WRITEMASK_W;
2050      GLuint i;
2051
2052      emit(MOV(header1, 0u));
2053
2054      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
2055	 src_reg psiz = src_reg(output_reg[VERT_RESULT_PSIZ]);
2056
2057	 current_annotation = "Point size";
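	 /* The header's point width field appears to be unsigned fixed point
	  * with 3 fractional bits, stored starting at bit 8: scale by 1 << 11,
	  * then mask off an 11-bit field at bit 8.
	  */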
2058	 emit(MUL(header1_w, psiz, src_reg((float)(1 << 11))));
2059	 emit(AND(header1_w, src_reg(header1_w), 0x7ff << 8));
2060      }
2061
2062      current_annotation = "Clipping flags";
2063      for (i = 0; i < c->key.nr_userclip_plane_consts; i++) {
2064	 vec4_instruction *inst;
2065
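	 /* The DP4 sets the flag register when the vertex is on the negative
	  * side of user clip plane i; the predicated OR then sets clip flag
	  * bit i in the header.
	  */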
2066	 inst = emit(DP4(dst_null_f(), src_reg(output_reg[VERT_RESULT_HPOS]),
2067                         src_reg(this->userplane[i])));
2068	 inst->conditional_mod = BRW_CONDITIONAL_L;
2069
2070	 inst = emit(OR(header1_w, src_reg(header1_w), 1u << i));
2071	 inst->predicate = BRW_PREDICATE_NORMAL;
2072      }
2073
2074      /* i965 clipping workaround:
2075       * 1) Test for negative rhw
2076       * 2) If set,
2077       *      set ndc = (0,0,0,0)
2078       *      set ucp[6] = 1
2079       *
2080       * Later, clipping will detect ucp[6] and ensure the primitive is
2081       * clipped against all fixed planes.
2082       */
2083      if (brw->has_negative_rhw_bug) {
2084#if 0
2085	 /* FINISHME */
2086	 brw_CMP(p,
2087		 vec8(brw_null_reg()),
2088		 BRW_CONDITIONAL_L,
2089		 brw_swizzle1(output_reg[BRW_VERT_RESULT_NDC], 3),
2090		 brw_imm_f(0));
2091
2092	 brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
2093	 brw_MOV(p, output_reg[BRW_VERT_RESULT_NDC], brw_imm_f(0));
2094	 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
2095#endif
2096      }
2097
2098      emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), src_reg(header1)));
2099   } else if (intel->gen < 6) {
2100      emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u));
2101   } else {
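      /* On gen6+ this header slot carries no clip flags or NDC; just clear
       * it and, if written, put the point size in the .w channel.
       */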
2102      emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0)));
2103      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
2104         emit(MOV(brw_writemask(reg, WRITEMASK_W),
2105                  src_reg(output_reg[VERT_RESULT_PSIZ])));
2106      }
2107   }
2108}
2109
2110void
2111vec4_visitor::emit_clip_distances(struct brw_reg reg, int offset)
2112{
2113   if (intel->gen < 6) {
2114      /* Clip distance slots are set aside in gen5, but they are not used.  It
2115       * is not clear whether we actually need to set aside space for them,
2116       * but the performance cost is negligible.
2117       */
2118      return;
2119   }
2120
2121   /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables):
2122    *
2123    *     "If a linked set of shaders forming the vertex stage contains no
2124    *     static write to gl_ClipVertex or gl_ClipDistance, but the
2125    *     application has requested clipping against user clip planes through
2126    *     the API, then the coordinate written to gl_Position is used for
2127    *     comparison against the user clip planes."
2128    *
2129    * This function is only called if the shader didn't write to
2130    * gl_ClipDistance.  Accordingly, we use gl_ClipVertex to perform clipping
2131    * if the user wrote to it; otherwise we use gl_Position.
2132    */
2133   gl_vert_result clip_vertex = VERT_RESULT_CLIP_VERTEX;
2134   if (!(c->prog_data.outputs_written
2135         & BITFIELD64_BIT(VERT_RESULT_CLIP_VERTEX))) {
2136      clip_vertex = VERT_RESULT_HPOS;
2137   }
2138
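   /* Each DP4 writes the signed distance to one enabled user clip plane into
    * one channel of this clip distance slot (up to four planes per slot).
    */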
2139   for (int i = 0; i + offset < c->key.nr_userclip_plane_consts && i < 4;
2140        ++i) {
2141      emit(DP4(dst_reg(brw_writemask(reg, 1 << i)),
2142               src_reg(output_reg[clip_vertex]),
2143               src_reg(this->userplane[i + offset])));
2144   }
2145}
2146
2147void
2148vec4_visitor::emit_generic_urb_slot(dst_reg reg, int vert_result)
2149{
2150   assert(vert_result < VERT_RESULT_MAX);
2151   reg.type = output_reg[vert_result].type;
2152   current_annotation = output_reg_annotation[vert_result];
2153   /* Copy the register, saturating if necessary */
2154   vec4_instruction *inst = emit(MOV(reg,
2155                                     src_reg(output_reg[vert_result])));
2156   if ((vert_result == VERT_RESULT_COL0 ||
2157        vert_result == VERT_RESULT_COL1 ||
2158        vert_result == VERT_RESULT_BFC0 ||
2159        vert_result == VERT_RESULT_BFC1) &&
2160       c->key.clamp_vertex_color) {
2161      inst->saturate = true;
2162   }
2163}
2164
2165void
2166vec4_visitor::emit_urb_slot(int mrf, int vert_result)
2167{
2168   struct brw_reg hw_reg = brw_message_reg(mrf);
2169   dst_reg reg = dst_reg(MRF, mrf);
2170   reg.type = BRW_REGISTER_TYPE_F;
2171
2172   switch (vert_result) {
2173   case VERT_RESULT_PSIZ:
2174      /* PSIZ is always in slot 0, and is coupled with other flags. */
2175      current_annotation = "indices, point width, clip flags";
2176      emit_psiz_and_flags(hw_reg);
2177      break;
2178   case BRW_VERT_RESULT_NDC:
2179      current_annotation = "NDC";
2180      emit(MOV(reg, src_reg(output_reg[BRW_VERT_RESULT_NDC])));
2181      break;
2182   case BRW_VERT_RESULT_HPOS_DUPLICATE:
2183   case VERT_RESULT_HPOS:
2184      current_annotation = "gl_Position";
2185      emit(MOV(reg, src_reg(output_reg[VERT_RESULT_HPOS])));
2186      break;
2187   case VERT_RESULT_CLIP_DIST0:
2188   case VERT_RESULT_CLIP_DIST1:
2189      if (this->c->key.uses_clip_distance) {
2190         emit_generic_urb_slot(reg, vert_result);
2191      } else {
2192         current_annotation = "user clip distances";
2193         emit_clip_distances(hw_reg, (vert_result - VERT_RESULT_CLIP_DIST0) * 4);
2194      }
2195      break;
2196   case BRW_VERT_RESULT_PAD:
2197      /* No need to write to this slot */
2198      break;
2199   default:
2200      emit_generic_urb_slot(reg, vert_result);
2201      break;
2202   }
2203}
2204
2205static int
2206align_interleaved_urb_mlen(struct brw_context *brw, int mlen)
2207{
2208   struct intel_context *intel = &brw->intel;
2209
2210   if (intel->gen >= 6) {
2211      /* URB data written (does not include the message header reg) must
2212       * be a multiple of 256 bits, or 2 VS registers.  See vol5c.5,
2213       * section 5.4.3.2.2: URB_INTERLEAVED.
2214       *
2215       * URB entries are allocated on a multiple of 1024 bits, so an
2216       * extra 128 bits written here to make the end align to 256 is
2217       * no problem.
2218       */
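      /* mlen includes the one message header register, so the data length
       * (mlen - 1) is even exactly when mlen is odd; round even values up.
       */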
2219      if ((mlen % 2) != 1)
2220	 mlen++;
2221   }
2222
2223   return mlen;
2224}
2225
2226/**
2227 * Generates the VUE payload plus the 1 or 2 URB write instructions to
2228 * complete the VS thread.
2229 *
2230 * The VUE layout is documented in Volume 2a.
2231 */
2232void
2233vec4_visitor::emit_urb_writes()
2234{
2235   /* MRF 0 is reserved for the debugger, so start with message header
2236    * in MRF 1.
2237    */
2238   int base_mrf = 1;
2239   int mrf = base_mrf;
2240   /* In the process of generating our URB write message contents, we
2241    * may need to unspill a register or load from an array.  Those
2242    * reads would use MRFs 14-15.
2243    */
2244   int max_usable_mrf = 13;
2245
2246   /* The following assertion verifies that max_usable_mrf yields an even
2247    * number of URB write data registers, which meets gen6's requirement
2248    * that the write length be aligned to a pair of registers.
2249    */
2250   assert ((max_usable_mrf - base_mrf) % 2 == 0);
2251
2252   /* FINISHME: edgeflag */
2253
2254   /* First mrf is the g0-based message header containing URB handles and such,
2255    * which is implied in VS_OPCODE_URB_WRITE.
2256    */
2257   mrf++;
2258
2259   if (intel->gen < 6) {
2260      emit_ndc_computation();
2261   }
2262
2263   /* Set up the VUE data for the first URB write */
2264   int slot;
2265   for (slot = 0; slot < c->prog_data.vue_map.num_slots; ++slot) {
2266      emit_urb_slot(mrf++, c->prog_data.vue_map.slot_to_vert_result[slot]);
2267
2268      /* If this was max_usable_mrf, we can't fit anything more into this URB
2269       * WRITE.
2270       */
2271      if (mrf > max_usable_mrf) {
2272	 slot++;
2273	 break;
2274      }
2275   }
2276
2277   current_annotation = "URB write";
2278   vec4_instruction *inst = emit(VS_OPCODE_URB_WRITE);
2279   inst->base_mrf = base_mrf;
2280   inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
2281   inst->eot = (slot >= c->prog_data.vue_map.num_slots);
2282
2283   /* Optional second URB write */
2284   if (!inst->eot) {
2285      mrf = base_mrf + 1;
2286
2287      for (; slot < c->prog_data.vue_map.num_slots; ++slot) {
2288	 assert(mrf < max_usable_mrf);
2289
2290         emit_urb_slot(mrf++, c->prog_data.vue_map.slot_to_vert_result[slot]);
2291      }
2292
2293      current_annotation = "URB write";
2294      inst = emit(VS_OPCODE_URB_WRITE);
2295      inst->base_mrf = base_mrf;
2296      inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
2297      inst->eot = true;
2298      /* URB destination offset.  The previous write used MRFs 1-13, which
2299       * is 12 regs of data after the one header MRF.  The URB offset is in
2300       * URB row increments, and each of our MRFs is half of one of those,
2301       * since we're doing interleaved writes.
2302       */
2303      inst->offset = (max_usable_mrf - base_mrf) / 2;
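      /* With base_mrf == 1 and max_usable_mrf == 13, that is (13 - 1) / 2 = 6
       * URB rows, so this write starts right after the first write's data.
       */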
2304   }
2305}
2306
2307src_reg
2308vec4_visitor::get_scratch_offset(vec4_instruction *inst,
2309				 src_reg *reladdr, int reg_offset)
2310{
2311   /* Because we store the values to scratch interleaved like our
2312    * vertex data, we need to scale the vec4 index by 2.
2313    */
2314   int message_header_scale = 2;
2315
2316   /* Pre-gen6, the message header uses byte offsets instead of vec4
2317    * (16-byte) offset units.
2318    */
2319   if (intel->gen < 6)
2320      message_header_scale *= 16;
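   /* Pre-gen6 the combined scale is 2 * 16 = 32, i.e. each logical vec4 slot
    * is 32 bytes (two interleaved vec4s) apart in scratch space.
    */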
2321
2322   if (reladdr) {
2323      src_reg index = src_reg(this, glsl_type::int_type);
2324
2325      emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset)));
2326      emit_before(inst, MUL(dst_reg(index),
2327			    index, src_reg(message_header_scale)));
2328
2329      return index;
2330   } else {
2331      return src_reg(reg_offset * message_header_scale);
2332   }
2333}
2334
2335src_reg
2336vec4_visitor::get_pull_constant_offset(vec4_instruction *inst,
2337				       src_reg *reladdr, int reg_offset)
2338{
2339   if (reladdr) {
2340      src_reg index = src_reg(this, glsl_type::int_type);
2341
2342      emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset)));
2343
2344      /* Pre-gen6, the message header uses byte offsets instead of vec4
2345       * (16-byte) offset units.
2346       */
2347      if (intel->gen < 6) {
2348	 emit_before(inst, MUL(dst_reg(index), index, src_reg(16)));
2349      }
2350
2351      return index;
2352   } else {
2353      int message_header_scale = intel->gen < 6 ? 16 : 1;
2354      return src_reg(reg_offset * message_header_scale);
2355   }
2356}
2357
2358/**
2359 * Emits an instruction before @inst to load the value named by @orig_src
2360 * from scratch space at @base_offset to @temp.
2361 */
2362void
2363vec4_visitor::emit_scratch_read(vec4_instruction *inst,
2364				dst_reg temp, src_reg orig_src,
2365				int base_offset)
2366{
2367   int reg_offset = base_offset + orig_src.reg_offset;
2368   src_reg index = get_scratch_offset(inst, orig_src.reladdr, reg_offset);
2369
2370   emit_before(inst, SCRATCH_READ(temp, index));
2371}
2372
2373/**
2374 * Emits an instruction after @inst to store the value to be written
2375 * to @orig_dst to scratch space at @base_offset, from @temp.
2376 */
2377void
2378vec4_visitor::emit_scratch_write(vec4_instruction *inst,
2379				 src_reg temp, dst_reg orig_dst,
2380				 int base_offset)
2381{
2382   int reg_offset = base_offset + orig_dst.reg_offset;
2383   src_reg index = get_scratch_offset(inst, orig_dst.reladdr, reg_offset);
2384
2385   dst_reg dst = dst_reg(brw_writemask(brw_vec8_grf(0, 0),
2386				       orig_dst.writemask));
2387   vec4_instruction *write = SCRATCH_WRITE(dst, temp, index);
2388   write->predicate = inst->predicate;
2389   write->ir = inst->ir;
2390   write->annotation = inst->annotation;
2391   inst->insert_after(write);
2392}
2393
2394/**
2395 * We can't generally support array access in GRF space, because a
2396 * single instruction's destination can only span 2 contiguous
2397 * registers.  So, we send all GRF arrays that get variable index
2398 * access to scratch space.
2399 */
2400void
2401vec4_visitor::move_grf_array_access_to_scratch()
2402{
2403   int scratch_loc[this->virtual_grf_count];
2404
2405   for (int i = 0; i < this->virtual_grf_count; i++) {
2406      scratch_loc[i] = -1;
2407   }
2408
2409   /* First, calculate the set of virtual GRFs that need to be punted
2410    * to scratch due to having any array access on them, and where in
2411    * scratch.
2412    */
2413   foreach_list(node, &this->instructions) {
2414      vec4_instruction *inst = (vec4_instruction *)node;
2415
2416      if (inst->dst.file == GRF && inst->dst.reladdr &&
2417	  scratch_loc[inst->dst.reg] == -1) {
2418	 scratch_loc[inst->dst.reg] = c->last_scratch;
2419	 c->last_scratch += this->virtual_grf_sizes[inst->dst.reg] * 8 * 4;
2420      }
2421
2422      for (int i = 0 ; i < 3; i++) {
2423	 src_reg *src = &inst->src[i];
2424
2425	 if (src->file == GRF && src->reladdr &&
2426	     scratch_loc[src->reg] == -1) {
2427	    scratch_loc[src->reg] = c->last_scratch;
2428	    c->last_scratch += this->virtual_grf_sizes[src->reg] * 8 * 4;
2429	 }
2430      }
2431   }
2432
2433   /* Now, for anything that will be accessed through scratch, rewrite
2434    * it to load/store.  Note that this is a _safe list walk, because
2435    * we may generate a new scratch_write instruction after the one
2436    * we're processing.
2437    */
2438   foreach_list_safe(node, &this->instructions) {
2439      vec4_instruction *inst = (vec4_instruction *)node;
2440
2441      /* Set up the annotation tracking for newly generated instructions. */
2442      base_ir = inst->ir;
2443      current_annotation = inst->annotation;
2444
2445      if (inst->dst.file == GRF && scratch_loc[inst->dst.reg] != -1) {
2446	 src_reg temp = src_reg(this, glsl_type::vec4_type);
2447
2448	 emit_scratch_write(inst, temp, inst->dst, scratch_loc[inst->dst.reg]);
2449
2450	 inst->dst.file = temp.file;
2451	 inst->dst.reg = temp.reg;
2452	 inst->dst.reg_offset = temp.reg_offset;
2453	 inst->dst.reladdr = NULL;
2454      }
2455
2456      for (int i = 0 ; i < 3; i++) {
2457	 if (inst->src[i].file != GRF || scratch_loc[inst->src[i].reg] == -1)
2458	    continue;
2459
2460	 dst_reg temp = dst_reg(this, glsl_type::vec4_type);
2461
2462	 emit_scratch_read(inst, temp, inst->src[i],
2463			   scratch_loc[inst->src[i].reg]);
2464
2465	 inst->src[i].file = temp.file;
2466	 inst->src[i].reg = temp.reg;
2467	 inst->src[i].reg_offset = temp.reg_offset;
2468	 inst->src[i].reladdr = NULL;
2469      }
2470   }
2471}
2472
2473/**
2474 * Emits an instruction before @inst to load the value named by @orig_src
2475 * from the pull constant buffer (surface) at @base_offset to @temp.
2476 */
2477void
2478vec4_visitor::emit_pull_constant_load(vec4_instruction *inst,
2479				      dst_reg temp, src_reg orig_src,
2480				      int base_offset)
2481{
2482   int reg_offset = base_offset + orig_src.reg_offset;
2483   src_reg index = get_pull_constant_offset(inst, orig_src.reladdr, reg_offset);
2484   vec4_instruction *load;
2485
2486   load = new(mem_ctx) vec4_instruction(this, VS_OPCODE_PULL_CONSTANT_LOAD,
2487					temp, index);
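   /* The load's single message register is MRF 14; MRFs 14-15 are kept free
    * of URB data precisely so these generated reads have somewhere to live
    * (see the comment in emit_urb_writes()).
    */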
2488   load->base_mrf = 14;
2489   load->mlen = 1;
2490   emit_before(inst, load);
2491}
2492
2493/**
2494 * Implements array access of uniforms by inserting a
2495 * PULL_CONSTANT_LOAD instruction.
2496 *
2497 * Unlike temporary GRF array access (where we don't support it due to
2498 * the difficulty of doing relative addressing on instruction
2499 * destinations), we could potentially do array access of uniforms
2500 * that were loaded in GRF space as push constants.  In real-world
2501 * usage we've seen, though, the arrays being used are always larger
2502 * than we could load as push constants, so just always move all
2503 * uniform array access out to a pull constant buffer.
2504 */
2505void
2506vec4_visitor::move_uniform_array_access_to_pull_constants()
2507{
2508   int pull_constant_loc[this->uniforms];
2509
2510   for (int i = 0; i < this->uniforms; i++) {
2511      pull_constant_loc[i] = -1;
2512   }
2513
2514   /* Walk through and find array access of uniforms.  Put a copy of that
2515    * uniform in the pull constant buffer.
2516    *
2517    * Note that we don't move constant-indexed accesses to arrays.  No
2518    * testing has been done of the performance impact of this choice.
2519    */
2520   foreach_list_safe(node, &this->instructions) {
2521      vec4_instruction *inst = (vec4_instruction *)node;
2522
2523      for (int i = 0 ; i < 3; i++) {
2524	 if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
2525	    continue;
2526
2527	 int uniform = inst->src[i].reg;
2528
2529	 /* If this array isn't already present in the pull constant buffer,
2530	  * add it.
2531	  */
2532	 if (pull_constant_loc[uniform] == -1) {
2533	    const float **values = &prog_data->param[uniform * 4];
2534
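	    /* prog_data->pull_param[] is tracked per float, while pull
	     * constant locations are addressed per vec4, hence the divide
	     * by 4.
	     */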
2535	    pull_constant_loc[uniform] = prog_data->nr_pull_params / 4;
2536
2537	    for (int j = 0; j < uniform_size[uniform] * 4; j++) {
2538	       prog_data->pull_param[prog_data->nr_pull_params++] = values[j];
2539	    }
2540	 }
2541
2542	 /* Set up the annotation tracking for newly generated instructions. */
2543	 base_ir = inst->ir;
2544	 current_annotation = inst->annotation;
2545
2546	 dst_reg temp = dst_reg(this, glsl_type::vec4_type);
2547
2548	 emit_pull_constant_load(inst, temp, inst->src[i],
2549				 pull_constant_loc[uniform]);
2550
2551	 inst->src[i].file = temp.file;
2552	 inst->src[i].reg = temp.reg;
2553	 inst->src[i].reg_offset = temp.reg_offset;
2554	 inst->src[i].reladdr = NULL;
2555      }
2556   }
2557
2558   /* Now there are no accesses of the UNIFORM file with a reladdr, so
2559    * no need to track them as larger-than-vec4 objects.  This will be
2560    * relied on in cutting out unused uniform vectors from push
2561    * constants.
2562    */
2563   split_uniform_registers();
2564}
2565
2566void
2567vec4_visitor::resolve_ud_negate(src_reg *reg)
2568{
2569   if (reg->type != BRW_REGISTER_TYPE_UD ||
2570       !reg->negate)
2571      return;
2572
2573   src_reg temp = src_reg(this, glsl_type::uvec4_type);
2574   emit(BRW_OPCODE_MOV, dst_reg(temp), *reg);
2575   *reg = temp;
2576}
2577
2578vec4_visitor::vec4_visitor(struct brw_vs_compile *c,
2579			   struct gl_shader_program *prog,
2580			   struct brw_shader *shader)
2581{
2582   this->c = c;
2583   this->p = &c->func;
2584   this->brw = p->brw;
2585   this->intel = &brw->intel;
2586   this->ctx = &intel->ctx;
2587   this->prog = prog;
2588   this->shader = shader;
2589
2590   this->mem_ctx = ralloc_context(NULL);
2591   this->failed = false;
2592
2593   this->base_ir = NULL;
2594   this->current_annotation = NULL;
2595
2597   this->vp = (struct gl_vertex_program *)
2598     prog->_LinkedShaders[MESA_SHADER_VERTEX]->Program;
2599   this->prog_data = &c->prog_data;
2600
2601   this->variable_ht = hash_table_ctor(0,
2602				       hash_table_pointer_hash,
2603				       hash_table_pointer_compare);
2604
2605   this->virtual_grf_def = NULL;
2606   this->virtual_grf_use = NULL;
2607   this->virtual_grf_sizes = NULL;
2608   this->virtual_grf_count = 0;
2609   this->virtual_grf_reg_map = NULL;
2610   this->virtual_grf_reg_count = 0;
2611   this->virtual_grf_array_size = 0;
2612   this->live_intervals_valid = false;
2613
2614   this->max_grf = intel->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
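   /* gen7 has no MRFs; message payloads are assembled in the top GRFs
    * instead, so keep the allocator below GEN7_MRF_HACK_START.
    */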
2615
2616   this->uniforms = 0;
2617}
2618
2619vec4_visitor::~vec4_visitor()
2620{
2621   ralloc_free(this->mem_ctx);
2622   hash_table_dtor(this->variable_ht);
2623}
2624
2625
2626void
2627vec4_visitor::fail(const char *format, ...)
2628{
2629   va_list va;
2630   char *msg;
2631
2632   if (failed)
2633      return;
2634
2635   failed = true;
2636
2637   va_start(va, format);
2638   msg = ralloc_vasprintf(mem_ctx, format, va);
2639   va_end(va);
2640   msg = ralloc_asprintf(mem_ctx, "VS compile failed: %s\n", msg);
2641
2642   this->fail_msg = msg;
2643
2644   if (INTEL_DEBUG & DEBUG_VS) {
2645      fprintf(stderr, "%s",  msg);
2646   }
2647}
2648
2649} /* namespace brw */
2650