brw_vec4_visitor.cpp revision 05790746df077183d6c3caf87ca2d276a60302a8
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
extern "C" {
#include "main/macros.h"
#include "program/prog_parameter.h"
#include "program/sampler.h"
}

namespace brw {

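/* Build a src_reg that reads back what a dst_reg wrote: the enabled
 * writemask channels become the leading swizzle components, with the
 * last enabled channel replicated into any remaining ones.
 */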
src_reg::src_reg(dst_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;

   int swizzles[4];
   int next_chan = 0;
   int last = 0;

   for (int i = 0; i < 4; i++) {
      if (!(reg.writemask & (1 << i)))
         continue;

      swizzles[next_chan++] = last = i;
   }

   for (; next_chan < 4; next_chan++) {
      swizzles[next_chan] = last;
   }

   this->swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
                                swizzles[2], swizzles[3]);
}

dst_reg::dst_reg(src_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->writemask = WRITEMASK_XYZW;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;
}

vec4_instruction::vec4_instruction(vec4_visitor *v,
                                   enum opcode opcode, dst_reg dst,
                                   src_reg src0, src_reg src1, src_reg src2)
{
   this->opcode = opcode;
   this->dst = dst;
   this->src[0] = src0;
   this->src[1] = src1;
   this->src[2] = src2;
   this->ir = v->base_ir;
   this->annotation = v->current_annotation;
}

vec4_instruction *
vec4_visitor::emit(vec4_instruction *inst)
{
   this->instructions.push_tail(inst);

   return inst;
}

vec4_instruction *
vec4_visitor::emit_before(vec4_instruction *inst, vec4_instruction *new_inst)
{
   new_inst->ir = inst->ir;
   new_inst->annotation = inst->annotation;

   inst->insert_before(new_inst);

   return inst;
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst,
                   src_reg src0, src_reg src1, src_reg src2)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst,
                                             src0, src1, src2));
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0, src1));
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0));
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst_reg()));
}

#define ALU1(op)                                                        \
   vec4_instruction *                                                   \
   vec4_visitor::op(dst_reg dst, src_reg src0)                          \
   {                                                                    \
      return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst,  \
                                           src0);                       \
   }

#define ALU2(op)                                                        \
   vec4_instruction *                                                   \
   vec4_visitor::op(dst_reg dst, src_reg src0, src_reg src1)            \
   {                                                                    \
      return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst,  \
                                           src0, src1);                 \
   }

ALU1(NOT)
ALU1(MOV)
ALU1(FRC)
ALU1(RNDD)
ALU1(RNDE)
ALU1(RNDZ)
ALU2(ADD)
ALU2(MUL)
ALU2(MACH)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(DP3)
ALU2(DP4)

/** Gen4 predicated IF. */
vec4_instruction *
vec4_visitor::IF(uint32_t predicate)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF);
   inst->predicate = predicate;

   return inst;
}

/** Gen6+ IF with embedded comparison. */
vec4_instruction *
vec4_visitor::IF(src_reg src0, src_reg src1, uint32_t condition)
{
   assert(intel->gen >= 6);

   vec4_instruction *inst;

   resolve_ud_negate(&src0);
   resolve_ud_negate(&src1);

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF, dst_null_d(),
                                        src0, src1);
   inst->conditional_mod = condition;

   return inst;
}

/**
 * CMP: Sets the low bit of the destination channels with the result
 * of the comparison, while the upper bits are undefined, and updates
 * the flag register with the packed 16 bits of the result.
 */
vec4_instruction *
vec4_visitor::CMP(dst_reg dst, src_reg src0, src_reg src1, uint32_t condition)
{
   vec4_instruction *inst;

   /* Original gen4 does type conversion to the destination type
    * before comparison, producing garbage results for floating
    * point comparisons.
    */
   if (intel->gen == 4) {
      dst.type = src0.type;
      if (dst.file == HW_REG)
         dst.fixed_hw_reg.type = dst.type;
   }

   resolve_ud_negate(&src0);
   resolve_ud_negate(&src1);

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_CMP, dst, src0, src1);
   inst->conditional_mod = condition;

   return inst;
}

vec4_instruction *
vec4_visitor::SCRATCH_READ(dst_reg dst, src_reg index)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_READ,
                                        dst, index);
   inst->base_mrf = 14;
   inst->mlen = 1;

   return inst;
}

vec4_instruction *
vec4_visitor::SCRATCH_WRITE(dst_reg dst, src_reg src, src_reg index)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_WRITE,
                                        dst, src, index);
   inst->base_mrf = 13;
   inst->mlen = 2;

   return inst;
}

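/* Emit a dot product of the low `elements` channels, mapping two, three,
 * and four elements onto DP2, DP3, and DP4 respectively.
 */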
void
vec4_visitor::emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements)
{
   static enum opcode dot_opcodes[] = {
      BRW_OPCODE_DP2, BRW_OPCODE_DP3, BRW_OPCODE_DP4
   };

   emit(dot_opcodes[elements - 2], dst, src0, src1);
}

void
vec4_visitor::emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src)
{
   /* The gen6 math instruction ignores the source modifiers --
    * swizzle, abs, negate, and at least some parts of the register
    * region description.
    *
    * While it would seem that this MOV could be avoided at this point
    * in the case that the swizzle is matched up with the destination
    * writemask, note that uniform packing and register allocation
    * could rearrange our swizzle, so let's leave this matter up to
    * copy propagation later.
    */
   src_reg temp_src = src_reg(this, glsl_type::vec4_type);
   emit(MOV(dst_reg(temp_src), src));

   if (dst.writemask != WRITEMASK_XYZW) {
      /* The gen6 math instruction must be align1, so we can't do
       * writemasks.
       */
      dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type);

      emit(opcode, temp_dst, temp_src);

      emit(MOV(dst, src_reg(temp_dst)));
   } else {
      emit(opcode, dst, temp_src);
   }
}

void
vec4_visitor::emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src)
{
   vec4_instruction *inst = emit(opcode, dst, src);
   inst->base_mrf = 1;
   inst->mlen = 1;
}

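/* Emit a single-source math operation.  Gen7 can use the math
 * instruction directly; gen6 needs the temporary-register workarounds
 * above; gen4/5 math is a message, so base_mrf/mlen get set up for the
 * payload.
 */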
void
vec4_visitor::emit_math(opcode opcode, dst_reg dst, src_reg src)
{
   switch (opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      break;
   default:
      assert(!"not reached: bad math opcode");
      return;
   }

   if (intel->gen >= 7) {
      emit(opcode, dst, src);
   } else if (intel->gen == 6) {
      return emit_math1_gen6(opcode, dst, src);
   } else {
      return emit_math1_gen4(opcode, dst, src);
   }
}

void
vec4_visitor::emit_math2_gen6(enum opcode opcode,
                              dst_reg dst, src_reg src0, src_reg src1)
{
   src_reg expanded;

   /* The gen6 math instruction ignores the source modifiers --
    * swizzle, abs, negate, and at least some parts of the register
    * region description.  Move the sources to temporaries to make it
    * generally work.
    */

   expanded = src_reg(this, glsl_type::vec4_type);
   expanded.type = src0.type;
   emit(MOV(dst_reg(expanded), src0));
   src0 = expanded;

   expanded = src_reg(this, glsl_type::vec4_type);
   expanded.type = src1.type;
   emit(MOV(dst_reg(expanded), src1));
   src1 = expanded;

   if (dst.writemask != WRITEMASK_XYZW) {
      /* The gen6 math instruction must be align1, so we can't do
       * writemasks.
       */
      dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type);
      temp_dst.type = dst.type;

      emit(opcode, temp_dst, src0, src1);

      emit(MOV(dst, src_reg(temp_dst)));
   } else {
      emit(opcode, dst, src0, src1);
   }
}

void
vec4_visitor::emit_math2_gen4(enum opcode opcode,
                              dst_reg dst, src_reg src0, src_reg src1)
{
   vec4_instruction *inst = emit(opcode, dst, src0, src1);
   inst->base_mrf = 1;
   inst->mlen = 2;
}

void
vec4_visitor::emit_math(enum opcode opcode,
                        dst_reg dst, src_reg src0, src_reg src1)
{
   switch (opcode) {
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      break;
   default:
      assert(!"not reached: unsupported binary math opcode");
      return;
   }

   if (intel->gen >= 7) {
      emit(opcode, dst, src0, src1);
   } else if (intel->gen == 6) {
      return emit_math2_gen6(opcode, dst, src0, src1);
   } else {
      return emit_math2_gen4(opcode, dst, src0, src1);
   }
}

void
vec4_visitor::visit_instructions(const exec_list *list)
{
   foreach_list(node, list) {
      ir_instruction *ir = (ir_instruction *)node;

      base_ir = ir;
      ir->accept(this);
   }
}

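/* Measure a type in vec4 slots: scalars and vectors each occupy a full
 * slot, matrices one slot per column, and structs and arrays the sum of
 * their members.
 */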
static int
type_size(const struct glsl_type *type)
{
   unsigned int i;
   int size;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      if (type->is_matrix()) {
         return type->matrix_columns;
      } else {
         /* Regardless of size of vector, it gets a vec4. This is bad
          * packing for things like floats, but otherwise arrays become a
          * mess.  Hopefully a later pass over the code can pack scalars
          * down if appropriate.
          */
         return 1;
      }
   case GLSL_TYPE_ARRAY:
      assert(type->length > 0);
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up one slot in UNIFORMS[], but they're baked in
       * at link time.
       */
      return 1;
   default:
      assert(0);
      return 0;
   }
}

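/* Allocate a virtual GRF of the given size in vec4 registers, growing
 * the size/offset tracking arrays geometrically, and return the new
 * virtual register number.
 */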
int
vec4_visitor::virtual_grf_alloc(int size)
{
   if (virtual_grf_array_size <= virtual_grf_count) {
      if (virtual_grf_array_size == 0)
         virtual_grf_array_size = 16;
      else
         virtual_grf_array_size *= 2;
      virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int,
                                   virtual_grf_array_size);
      virtual_grf_reg_map = reralloc(mem_ctx, virtual_grf_reg_map, int,
                                     virtual_grf_array_size);
   }
   virtual_grf_reg_map[virtual_grf_count] = virtual_grf_reg_count;
   virtual_grf_reg_count += size;
   virtual_grf_sizes[virtual_grf_count] = size;
   return virtual_grf_count++;
}

src_reg::src_reg(class vec4_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));

   if (type->is_array() || type->is_record()) {
      this->swizzle = BRW_SWIZZLE_NOOP;
   } else {
      this->swizzle = swizzle_for_size(type->vector_elements);
   }

   this->type = brw_type_for_base_type(type);
}

dst_reg::dst_reg(class vec4_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));

   if (type->is_array() || type->is_record()) {
      this->writemask = WRITEMASK_XYZW;
   } else {
      this->writemask = (1 << type->vector_elements) - 1;
   }

   this->type = brw_type_for_base_type(type);
}

/* Our support for uniforms is piggy-backed on the struct
 * gl_vertex_program, because that's where the values actually
 * get stored, rather than in some global gl_shader_program uniform
 * store.
 */
int
vec4_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;
   float *values = &this->vp->Base.Parameters->ParameterValues[loc][0].f;

   if (type->is_matrix()) {
      const glsl_type *column = type->column_type();

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
         offset += setup_uniform_values(loc + offset, column);
      }

      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->vector_elements; i++) {
         c->prog_data.param[this->uniforms * 4 + i] = &values[i];
      }

      /* Set up pad elements to get things aligned to a vec4 boundary. */
      for (unsigned int i = type->vector_elements; i < 4; i++) {
         static float zero = 0;

         c->prog_data.param[this->uniforms * 4 + i] = &zero;
      }

      /* Track the size of this uniform vector, for future packing of
       * uniforms.
       */
      this->uniform_vector_size[this->uniforms] = type->vector_elements;
      this->uniforms++;

      return 1;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset,
                                        type->fields.structure[i].type);
      }
      return offset;

   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;

   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;

   default:
      assert(!"not reached");
      return 0;
   }
}

void
vec4_visitor::setup_uniform_clipplane_values()
{
   gl_clip_plane *clip_planes = brw_select_clip_planes(ctx);

   /* Pre-Gen6, we compact clip planes.  For example, if the user
    * enables just clip planes 0, 1, and 3, we will enable clip planes
    * 0, 1, and 2 in the hardware, and we'll move clip plane 3 to clip
    * plane 2.  This simplifies the implementation of the Gen6 clip
    * thread.
    *
    * In Gen6 and later, we don't compact clip planes, because this
    * simplifies the implementation of gl_ClipDistance.
    */
   int compacted_clipplane_index = 0;
   for (int i = 0; i < c->key.nr_userclip_plane_consts; ++i) {
      if (intel->gen < 6 &&
          !(c->key.userclip_planes_enabled_gen_4_5 & (1 << i))) {
         continue;
      }
      this->uniform_vector_size[this->uniforms] = 4;
      this->userplane[compacted_clipplane_index] = dst_reg(UNIFORM, this->uniforms);
      this->userplane[compacted_clipplane_index].type = BRW_REGISTER_TYPE_F;
      for (int j = 0; j < 4; ++j) {
         c->prog_data.param[this->uniforms * 4 + j] = &clip_planes[i][j];
      }
      ++compacted_clipplane_index;
      ++this->uniforms;
   }
}

/* Our support for builtin uniforms is even scarier than non-builtin.
 * It sits on top of the PROG_STATE_VAR parameters that are
 * automatically updated from GL context state.
 */
void
vec4_visitor::setup_builtin_uniform_values(ir_variable *ir)
{
   const ir_state_slot *const slots = ir->state_slots;
   assert(ir->state_slots != NULL);

   for (unsigned int i = 0; i < ir->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa,
       * but we'll get the same index back here.  We can reference
       * ParameterValues directly, since unlike brw_fs.cpp, we never
       * add new state references during compile.
       */
      int index = _mesa_add_state_reference(this->vp->Base.Parameters,
                                            (gl_state_index *)slots[i].tokens);
      float *values = &this->vp->Base.Parameters->ParameterValues[index][0].f;

      this->uniform_vector_size[this->uniforms] = 0;
      /* Add each of the unique swizzled channels of the element.
       * This will end up matching the size of the glsl_type of this field.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         last_swiz = swiz;

         c->prog_data.param[this->uniforms * 4 + j] = &values[swiz];
         if (swiz <= last_swiz)
            this->uniform_vector_size[this->uniforms]++;
      }
      this->uniforms++;
   }
}

dst_reg *
vec4_visitor::variable_storage(ir_variable *var)
{
   return (dst_reg *)hash_table_find(this->variable_ht, var);
}

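/* Evaluate a boolean condition into the flag register, choosing in
 * *predicate how a following instruction should be predicated on it
 * (including the align16 ALL4H/ANY4H modes for vector comparisons).
 */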
void
vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate)
{
   ir_expression *expr = ir->as_expression();

   *predicate = BRW_PREDICATE_NORMAL;

   if (expr) {
      src_reg op[2];
      vec4_instruction *inst;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         expr->operands[i]->accept(this);
         op[i] = this->result;

         resolve_ud_negate(&op[i]);
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(AND(dst_null_d(), op[0], src_reg(1)));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;

      case ir_binop_logic_xor:
         inst = emit(XOR(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_or:
         inst = emit(OR(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_and:
         inst = emit(AND(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_f2b:
         if (intel->gen >= 6) {
            emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
         } else {
            inst = emit(MOV(dst_null_f(), op[0]));
            inst->conditional_mod = BRW_CONDITIONAL_NZ;
         }
         break;

      case ir_unop_i2b:
         if (intel->gen >= 6) {
            emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         } else {
            inst = emit(MOV(dst_null_d(), op[0]));
            inst->conditional_mod = BRW_CONDITIONAL_NZ;
         }
         break;

      case ir_binop_all_equal:
         inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
         break;

      case ir_binop_any_nequal:
         inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
         *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
         break;

      case ir_unop_any:
         inst = emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
         break;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_nequal:
         emit(CMP(dst_null_d(), op[0], op[1],
                  brw_conditional_for_comparison(expr->operation)));
         break;

      default:
         assert(!"not reached");
         break;
      }
      return;
   }

   ir->accept(this);

   resolve_ud_negate(&this->result);

   if (intel->gen >= 6) {
      vec4_instruction *inst = emit(AND(dst_null_d(),
                                        this->result, src_reg(1)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   } else {
      vec4_instruction *inst = emit(MOV(dst_null_d(), this->result));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }
}

/**
 * Emit a gen6 IF statement with the comparison folded into the IF
 * instruction.
 */
void
vec4_visitor::emit_if_gen6(ir_if *ir)
{
   ir_expression *expr = ir->condition->as_expression();

   if (expr) {
      src_reg op[2];
      dst_reg temp;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_Z));
         return;

      case ir_binop_logic_xor:
         emit(IF(op[0], op[1], BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_logic_or:
         temp = dst_reg(this, glsl_type::bool_type);
         emit(OR(temp, op[0], op[1]));
         emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_logic_and:
         temp = dst_reg(this, glsl_type::bool_type);
         emit(AND(temp, op[0], op[1]));
         emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_unop_f2b:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_unop_i2b:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_nequal:
         emit(IF(op[0], op[1],
                 brw_conditional_for_comparison(expr->operation)));
         return;

      case ir_binop_all_equal:
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         emit(IF(BRW_PREDICATE_ALIGN16_ALL4H));
         return;

      case ir_binop_any_nequal:
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
         emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
         return;

      case ir_unop_any:
         emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
         return;

      default:
         assert(!"not reached");
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;
      }
      return;
   }

   ir->condition->accept(this);

   emit(IF(this->result, src_reg(0), BRW_CONDITIONAL_NZ));
}

void
vec4_visitor::visit(ir_variable *ir)
{
   dst_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   switch (ir->mode) {
   case ir_var_in:
      reg = new(mem_ctx) dst_reg(ATTR, ir->location);

      /* Do GL_FIXED rescaling for GLES2.0.  Our GL_FIXED attributes
       * come in as floating point conversions of the integer values.
       */
      for (int i = ir->location; i < ir->location + type_size(ir->type); i++) {
         if (!c->key.gl_fixed_input_size[i])
            continue;

         dst_reg dst = *reg;
         dst.type = brw_type_for_base_type(ir->type);
         dst.writemask = (1 << c->key.gl_fixed_input_size[i]) - 1;
         emit(MUL(dst, src_reg(dst), src_reg(1.0f / 65536.0f)));
      }
      break;

   case ir_var_out:
      reg = new(mem_ctx) dst_reg(this, ir->type);

      for (int i = 0; i < type_size(ir->type); i++) {
         output_reg[ir->location + i] = *reg;
         output_reg[ir->location + i].reg_offset = i;
         output_reg[ir->location + i].type =
            brw_type_for_base_type(ir->type->get_scalar_type());
         output_reg_annotation[ir->location + i] = ir->name;
      }
      break;

   case ir_var_auto:
   case ir_var_temporary:
      reg = new(mem_ctx) dst_reg(this, ir->type);
      break;

   case ir_var_uniform:
      reg = new(this->mem_ctx) dst_reg(UNIFORM, this->uniforms);

      /* Track how big the whole uniform variable is, in case we need to put a
       * copy of its data into pull constants for array access.
       */
      this->uniform_size[this->uniforms] = type_size(ir->type);

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }
      break;

   case ir_var_system_value:
      /* VertexID is stored by the VF as the last vertex element, but
       * we don't represent it with a flag in inputs_read, so we call
       * it VERT_ATTRIB_MAX, which setup_attributes() picks up on.
       */
      reg = new(mem_ctx) dst_reg(ATTR, VERT_ATTRIB_MAX);
      prog_data->uses_vertexid = true;

      switch (ir->location) {
      case SYSTEM_VALUE_VERTEX_ID:
         reg->writemask = WRITEMASK_X;
         break;
      case SYSTEM_VALUE_INSTANCE_ID:
         reg->writemask = WRITEMASK_Y;
         break;
      default:
         assert(!"not reached");
         break;
      }
      break;

   default:
      assert(!"not reached");
   }

   reg->type = brw_type_for_base_type(ir->type);
   hash_table_insert(this->variable_ht, reg, ir);
}

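/* Lower an ir_loop to DO ... WHILE: initialize the counter from ir->from
 * if present, emit a predicated BREAK based on comparing the counter
 * against ir->to with ir->cmp, and apply ir->increment at the bottom of
 * the body.
 */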
void
vec4_visitor::visit(ir_loop *ir)
{
   dst_reg counter;

   /* We don't want debugging output to print the whole body of the
    * loop as the annotation.
    */
   this->base_ir = NULL;

   if (ir->counter != NULL) {
      this->base_ir = ir->counter;
      ir->counter->accept(this);
      counter = *(variable_storage(ir->counter));

      if (ir->from != NULL) {
         this->base_ir = ir->from;
         ir->from->accept(this);

         emit(MOV(counter, this->result));
      }
   }

   emit(BRW_OPCODE_DO);

   if (ir->to) {
      this->base_ir = ir->to;
      ir->to->accept(this);

      emit(CMP(dst_null_d(), src_reg(counter), this->result,
               brw_conditional_for_comparison(ir->cmp)));

      vec4_instruction *inst = emit(BRW_OPCODE_BREAK);
      inst->predicate = BRW_PREDICATE_NORMAL;
   }

   visit_instructions(&ir->body_instructions);

   if (ir->increment) {
      this->base_ir = ir->increment;
      ir->increment->accept(this);
      emit(ADD(counter, src_reg(counter), this->result));
   }

   emit(BRW_OPCODE_WHILE);
}

void
vec4_visitor::visit(ir_loop_jump *ir)
{
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      emit(BRW_OPCODE_BREAK);
      break;
   case ir_loop_jump::jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;
   }
}


void
vec4_visitor::visit(ir_function_signature *ir)
{
   assert(0);
   (void)ir;
}

void
vec4_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all be inlined.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      sig = ir->matching_signature(&empty);

      assert(sig);

      visit_instructions(&sig->body);
   }
}

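/* If this expression is just a saturate of another rvalue, emit that
 * value through a saturating MOV instead and return true.
 */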
bool
vec4_visitor::try_emit_sat(ir_expression *ir)
{
   ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
   if (!sat_src)
      return false;

   sat_src->accept(this);
   src_reg src = this->result;

   this->result = src_reg(this, ir->type);
   vec4_instruction *inst;
   inst = emit(MOV(dst_reg(this->result), src));
   inst->saturate = true;

   return true;
}

void
vec4_visitor::emit_bool_comparison(unsigned int op,
                                   dst_reg dst, src_reg src0, src_reg src1)
{
   /* Original gen4 does destination conversion before comparison. */
   if (intel->gen < 5)
      dst.type = src0.type;

   emit(CMP(dst, src0, src1, brw_conditional_for_comparison(op)));

   dst.type = BRW_REGISTER_TYPE_D;
   emit(AND(dst, src_reg(dst), src_reg(0x1)));
}

void
vec4_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   src_reg op[Elements(ir->operands)];
   src_reg result_src;
   dst_reg result_dst;
   vec4_instruction *inst;

   if (try_emit_sat(ir))
      return;

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      this->result.file = BAD_FILE;
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->print();
         exit(1);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
   }

   int vector_elements = ir->operands[0]->type->vector_elements;
   if (ir->operands[1]) {
      vector_elements = MAX2(vector_elements,
                             ir->operands[1]->type->vector_elements);
   }

   this->result.file = BAD_FILE;

   /* Storage for our result.  Ideally for an assignment we'd be using
    * the actual storage for the result here, instead.
    */
   result_src = src_reg(this, ir->type);
   /* convenience for the emit functions below. */
   result_dst = dst_reg(result_src);
   /* If nothing special happens, this is the result. */
   this->result = result_src;
   /* Limit writes to the channels that will be used by result_src later.
    * This does limit this temp's use as a temporary for multi-instruction
    * sequences.
    */
   result_dst.writemask = (1 << ir->type->vector_elements) - 1;

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
       * the one's complement of the whole register, not just bit 0.
       */
      emit(XOR(result_dst, op[0], src_reg(1)));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      this->result = op[0];
      break;

   case ir_unop_sign:
      emit(MOV(result_dst, src_reg(0.0f)));

      emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_G));
      inst = emit(MOV(result_dst, src_reg(1.0f)));
      inst->predicate = BRW_PREDICATE_NORMAL;

      emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_L));
      inst = emit(MOV(result_dst, src_reg(-1.0f)));
      inst->predicate = BRW_PREDICATE_NORMAL;

      break;

   case ir_unop_rcp:
      emit_math(SHADER_OPCODE_RCP, result_dst, op[0]);
      break;

   case ir_unop_exp2:
      emit_math(SHADER_OPCODE_EXP2, result_dst, op[0]);
      break;
   case ir_unop_log2:
      emit_math(SHADER_OPCODE_LOG2, result_dst, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(SHADER_OPCODE_SIN, result_dst, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(SHADER_OPCODE_COS, result_dst, op[0]);
      break;

   case ir_unop_dFdx:
   case ir_unop_dFdy:
      assert(!"derivatives not valid in vertex shader");
      break;

   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;

   case ir_binop_add:
      emit(ADD(result_dst, op[0], op[1]));
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;

   case ir_binop_mul:
      if (ir->type->is_integer()) {
         /* For integer multiplication, the MUL uses the low 16 bits
          * of one of the operands (src0 on gen6, src1 on gen7).  The
          * MACH accumulates in the contribution of the upper 16 bits
          * of that operand.
          *
          * FINISHME: Emit just the MUL if we know an operand is small
          * enough.
          */
         struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);

         emit(MUL(acc, op[0], op[1]));
         emit(MACH(dst_null_d(), op[0], op[1]));
         emit(MOV(result_dst, src_reg(acc)));
      } else {
         emit(MUL(result_dst, op[0], op[1]));
      }
      break;
   case ir_binop_div:
      /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_QUOTIENT, result_dst, op[0], op[1]);
      break;
   case ir_binop_mod:
      /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_REMAINDER, result_dst, op[0], op[1]);
      break;

   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_nequal: {
      emit(CMP(result_dst, op[0], op[1],
               brw_conditional_for_comparison(ir->operation)));
      emit(AND(result_dst, result_src, src_reg(0x1)));
      break;
   }

   case ir_binop_all_equal:
      /* "==" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         emit(MOV(result_dst, src_reg(0)));
         inst = emit(MOV(result_dst, src_reg(1)));
         inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_Z));
         emit(AND(result_dst, result_src, src_reg(0x1)));
      }
      break;
   case ir_binop_any_nequal:
      /* "!=" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));

         emit(MOV(result_dst, src_reg(0)));
         inst = emit(MOV(result_dst, src_reg(1)));
         inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_NZ));
         emit(AND(result_dst, result_src, src_reg(0x1)));
      }
      break;

   case ir_unop_any:
      emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
      emit(MOV(result_dst, src_reg(0)));

      inst = emit(MOV(result_dst, src_reg(1)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;

   case ir_binop_logic_xor:
      emit(XOR(result_dst, op[0], op[1]));
      break;

   case ir_binop_logic_or:
      emit(OR(result_dst, op[0], op[1]));
      break;

   case ir_binop_logic_and:
      emit(AND(result_dst, op[0], op[1]));
      break;

   case ir_binop_dot:
      assert(ir->operands[0]->type->is_vector());
      assert(ir->operands[0]->type == ir->operands[1]->type);
      emit_dp(result_dst, op[0], op[1], ir->operands[0]->type->vector_elements);
      break;

   case ir_unop_sqrt:
      emit_math(SHADER_OPCODE_SQRT, result_dst, op[0]);
      break;
   case ir_unop_rsq:
      emit_math(SHADER_OPCODE_RSQ, result_dst, op[0]);
      break;

   case ir_unop_bitcast_i2f:
   case ir_unop_bitcast_u2f:
      this->result = op[0];
      this->result.type = BRW_REGISTER_TYPE_F;
      break;

   case ir_unop_bitcast_f2i:
      this->result = op[0];
      this->result.type = BRW_REGISTER_TYPE_D;
      break;

   case ir_unop_bitcast_f2u:
      this->result = op[0];
      this->result.type = BRW_REGISTER_TYPE_UD;
      break;

   case ir_unop_i2f:
   case ir_unop_i2u:
   case ir_unop_u2i:
   case ir_unop_u2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
   case ir_unop_f2i:
      emit(MOV(result_dst, op[0]));
      break;
   case ir_unop_f2b:
   case ir_unop_i2b: {
      emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
      emit(AND(result_dst, result_src, src_reg(1)));
      break;
   }

   case ir_unop_trunc:
      emit(RNDZ(result_dst, op[0]));
      break;
   case ir_unop_ceil:
      op[0].negate = !op[0].negate;
      inst = emit(RNDD(result_dst, op[0]));
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(RNDD(result_dst, op[0]));
      break;
   case ir_unop_fract:
      inst = emit(FRC(result_dst, op[0]));
      break;
   case ir_unop_round_even:
      emit(RNDE(result_dst, op[0]));
      break;

   case ir_binop_min:
      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_L));

         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      break;
   case ir_binop_max:
      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_G;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_G));

         inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      break;

   case ir_binop_pow:
      emit_math(SHADER_OPCODE_POW, result_dst, op[0], op[1]);
      break;

   case ir_unop_bit_not:
      inst = emit(NOT(result_dst, op[0]));
      break;
   case ir_binop_bit_and:
      inst = emit(AND(result_dst, op[0], op[1]));
      break;
   case ir_binop_bit_xor:
      inst = emit(XOR(result_dst, op[0], op[1]));
      break;
   case ir_binop_bit_or:
      inst = emit(OR(result_dst, op[0], op[1]));
      break;

   case ir_binop_lshift:
      inst = emit(BRW_OPCODE_SHL, result_dst, op[0], op[1]);
      break;

   case ir_binop_rshift:
      if (ir->type->base_type == GLSL_TYPE_INT)
         inst = emit(BRW_OPCODE_ASR, result_dst, op[0], op[1]);
      else
         inst = emit(BRW_OPCODE_SHR, result_dst, op[0], op[1]);
      break;

   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;
   }
}

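/* Expression swizzles compose with whatever swizzle the source value
 * already carries; channels beyond the type's width replicate the last
 * one.
 */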
void
vec4_visitor::visit(ir_swizzle *ir)
{
   src_reg src;
   int i = 0;
   int swizzle[4];

   /* Note that this is only swizzles in expressions, not those on the left
    * hand side of an assignment, which do write masking.  See ir_assignment
    * for that.
    */

   ir->val->accept(this);
   src = this->result;
   assert(src.file != BAD_FILE);

   for (i = 0; i < ir->type->vector_elements; i++) {
      switch (i) {
      case 0:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.x);
         break;
      case 1:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.y);
         break;
      case 2:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.z);
         break;
      case 3:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.w);
         break;
      }
   }
   for (; i < 4; i++) {
      /* Replicate the last channel out. */
      swizzle[i] = swizzle[ir->type->vector_elements - 1];
   }

   src.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);

   this->result = src;
}

void
vec4_visitor::visit(ir_dereference_variable *ir)
{
   const struct glsl_type *type = ir->type;
   dst_reg *reg = variable_storage(ir->var);

   if (!reg) {
      fail("Failed to find variable storage for %s\n", ir->var->name);
      this->result = src_reg(brw_null_reg());
      return;
   }

   this->result = src_reg(*reg);

   /* System values get their swizzle from the dst_reg writemask */
   if (ir->var->mode == ir_var_system_value)
      return;

   if (type->is_scalar() || type->is_vector() || type->is_matrix())
      this->result.swizzle = swizzle_for_size(type->vector_elements);
}

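/* Array dereference: a constant index folds directly into reg_offset,
 * while a variable index is scaled by the element size and chained onto
 * any existing reladdr for indirect addressing.
 */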
void
vec4_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *constant_index;
   src_reg src;
   int element_size = type_size(ir->type);

   constant_index = ir->array_index->constant_expression_value();

   ir->array->accept(this);
   src = this->result;

   if (constant_index) {
      src.reg_offset += constant_index->value.i[0] * element_size;
   } else {
      /* Variable index array dereference.  It eats the "vec4" of the
       * base of the array and an index that offsets the Mesa register
       * index.
       */
      ir->array_index->accept(this);

      src_reg index_reg;

      if (element_size == 1) {
         index_reg = this->result;
      } else {
         index_reg = src_reg(this, glsl_type::int_type);

         emit(MUL(dst_reg(index_reg), this->result, src_reg(element_size)));
      }

      if (src.reladdr) {
         src_reg temp = src_reg(this, glsl_type::int_type);

         emit(ADD(dst_reg(temp), *src.reladdr, index_reg));

         index_reg = temp;
      }

      src.reladdr = ralloc(mem_ctx, src_reg);
      memcpy(src.reladdr, &index_reg, sizeof(index_reg));
   }

   /* If the type is smaller than a vec4, replicate the last channel out. */
   if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix())
      src.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      src.swizzle = BRW_SWIZZLE_NOOP;
   src.type = brw_type_for_base_type(ir->type);

   this->result = src;
}

void
vec4_visitor::visit(ir_dereference_record *ir)
{
   unsigned int i;
   const glsl_type *struct_type = ir->record->type;
   int offset = 0;

   ir->record->accept(this);

   for (i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }

   /* If the type is smaller than a vec4, replicate the last channel out. */
   if (ir->type->is_scalar() || ir->type->is_vector() || ir->type->is_matrix())
      this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      this->result.swizzle = BRW_SWIZZLE_NOOP;
   this->result.type = brw_type_for_base_type(ir->type);

   this->result.reg_offset += offset;
}

/**
 * We want to be careful in assignment setup to hit the actual storage
 * instead of potentially using a temporary like we might with the
 * ir_dereference handler.
 */
static dst_reg
get_assignment_lhs(ir_dereference *ir, vec4_visitor *v)
{
   /* The LHS must be a dereference.  If the LHS is a variable indexed array
    * access of a vector, it must be separated into a series of conditional
    * moves before reaching this point (see ir_vec_index_to_cond_assign).
    */
   assert(ir->as_dereference());
   ir_dereference_array *deref_array = ir->as_dereference_array();
   if (deref_array) {
      assert(!deref_array->array->type->is_vector());
   }

   /* Use the rvalue deref handler for the most part.  We'll ignore
    * swizzles in it and write swizzles using writemask, though.
    */
   ir->accept(v);
   return dst_reg(v->result);
}

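/* Copy a structure, array, or matrix by recursing to its scalar/vector
 * members and emitting one (possibly predicated) MOV per vec4 slot,
 * advancing the dst and src reg_offsets along the way.
 */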
void
vec4_visitor::emit_block_move(dst_reg *dst, src_reg *src,
                              const struct glsl_type *type, uint32_t predicate)
{
   if (type->base_type == GLSL_TYPE_STRUCT) {
      for (unsigned int i = 0; i < type->length; i++) {
         emit_block_move(dst, src, type->fields.structure[i].type, predicate);
      }
      return;
   }

   if (type->is_array()) {
      for (unsigned int i = 0; i < type->length; i++) {
         emit_block_move(dst, src, type->fields.array, predicate);
      }
      return;
   }

   if (type->is_matrix()) {
      const struct glsl_type *vec_type;

      vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                         type->vector_elements, 1);

      for (int i = 0; i < type->matrix_columns; i++) {
         emit_block_move(dst, src, vec_type, predicate);
      }
      return;
   }

   assert(type->is_scalar() || type->is_vector());

   dst->type = brw_type_for_base_type(type);
   src->type = dst->type;

   dst->writemask = (1 << type->vector_elements) - 1;

   src->swizzle = swizzle_for_size(type->vector_elements);

   vec4_instruction *inst = emit(MOV(*dst, *src));
   inst->predicate = predicate;

   dst->reg_offset++;
   src->reg_offset++;
}


/* If the RHS processing resulted in an instruction generating a
 * temporary value, and it would be easy to rewrite the instruction to
 * generate its result right into the LHS instead, do so.  This ends
 * up reliably removing instructions where it can be tricky to do so
 * later without real UD chain information.
 */
bool
vec4_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
                                     dst_reg dst,
                                     src_reg src,
                                     vec4_instruction *pre_rhs_inst,
                                     vec4_instruction *last_rhs_inst)
{
   /* This could be supported, but it would take more smarts. */
   if (ir->condition)
      return false;

   if (pre_rhs_inst == last_rhs_inst)
      return false; /* No instructions generated to work with. */

   /* Make sure the last instruction generated our source reg. */
   if (src.file != GRF ||
       src.file != last_rhs_inst->dst.file ||
       src.reg != last_rhs_inst->dst.reg ||
       src.reg_offset != last_rhs_inst->dst.reg_offset ||
       src.reladdr ||
       src.abs ||
       src.negate ||
       last_rhs_inst->predicate != BRW_PREDICATE_NONE)
      return false;

   /* Check that the last instruction fully initialized the channels
    * we want to use, in the order we want to use them.  We could
    * potentially reswizzle the operands of many instructions so that
    * we could handle out of order channels, but don't yet.
    */

   for (unsigned i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i)) {
         if (!(last_rhs_inst->dst.writemask & (1 << i)))
            return false;

         if (BRW_GET_SWZ(src.swizzle, i) != i)
            return false;
      }
   }

   /* Success!  Rewrite the instruction. */
   last_rhs_inst->dst.file = dst.file;
   last_rhs_inst->dst.reg = dst.reg;
   last_rhs_inst->dst.reg_offset = dst.reg_offset;
   last_rhs_inst->dst.reladdr = dst.reladdr;
   last_rhs_inst->dst.writemask &= dst.writemask;

   return true;
}

void
vec4_visitor::visit(ir_assignment *ir)
{
   dst_reg dst = get_assignment_lhs(ir->lhs, this);
   uint32_t predicate = BRW_PREDICATE_NONE;

   if (!ir->lhs->type->is_scalar() &&
       !ir->lhs->type->is_vector()) {
      ir->rhs->accept(this);
      src_reg src = this->result;

      if (ir->condition) {
         emit_bool_to_cond_code(ir->condition, &predicate);
      }

      /* emit_block_move doesn't account for swizzles in the source register.
       * This should be ok, since the source register is a structure or an
       * array, and those can't be swizzled.  But double-check to be sure.
       */
      assert(src.swizzle ==
             (ir->rhs->type->is_matrix()
              ? swizzle_for_size(ir->rhs->type->vector_elements)
              : BRW_SWIZZLE_NOOP));

      emit_block_move(&dst, &src, ir->rhs->type, predicate);
      return;
   }

   /* Now we're down to just a scalar/vector with writemasks. */
   int i;

   vec4_instruction *pre_rhs_inst, *last_rhs_inst;
   pre_rhs_inst = (vec4_instruction *)this->instructions.get_tail();

   ir->rhs->accept(this);

   last_rhs_inst = (vec4_instruction *)this->instructions.get_tail();

   src_reg src = this->result;

   int swizzles[4];
   int first_enabled_chan = 0;
   int src_chan = 0;

   assert(ir->lhs->type->is_vector() ||
          ir->lhs->type->is_scalar());
   dst.writemask = ir->write_mask;

   for (int i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i)) {
         first_enabled_chan = BRW_GET_SWZ(src.swizzle, i);
         break;
      }
   }

   /* Swizzle a small RHS vector into the channels being written.
    *
    * glsl ir treats write_mask as dictating how many channels are
    * present on the RHS while in our instructions we need to make
    * those channels appear in the slots of the vec4 they're written to.
    */
   for (int i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i))
         swizzles[i] = BRW_GET_SWZ(src.swizzle, src_chan++);
      else
         swizzles[i] = first_enabled_chan;
   }
   src.swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
                              swizzles[2], swizzles[3]);

   if (try_rewrite_rhs_to_dst(ir, dst, src, pre_rhs_inst, last_rhs_inst)) {
      return;
   }

   if (ir->condition) {
      emit_bool_to_cond_code(ir->condition, &predicate);
   }

   for (i = 0; i < type_size(ir->lhs->type); i++) {
      vec4_instruction *inst = emit(MOV(dst, src));
      inst->predicate = predicate;

      dst.reg_offset++;
      src.reg_offset++;
   }
}

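/* Emit immediate MOVs for a constant, recursing through aggregate types.
 * For vectors, components holding identical values are coalesced into a
 * single MOV with a wider writemask.
 */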
void
vec4_visitor::emit_constant_values(dst_reg *dst, ir_constant *ir)
{
   if (ir->type->base_type == GLSL_TYPE_STRUCT) {
      foreach_list(node, &ir->components) {
         ir_constant *field_value = (ir_constant *)node;

         emit_constant_values(dst, field_value);
      }
      return;
   }

   if (ir->type->is_array()) {
      for (unsigned int i = 0; i < ir->type->length; i++) {
         emit_constant_values(dst, ir->array_elements[i]);
      }
      return;
   }

   if (ir->type->is_matrix()) {
      for (int i = 0; i < ir->type->matrix_columns; i++) {
         float *vec = &ir->value.f[i * ir->type->vector_elements];

         for (int j = 0; j < ir->type->vector_elements; j++) {
            dst->writemask = 1 << j;
            dst->type = BRW_REGISTER_TYPE_F;

            emit(MOV(*dst, src_reg(vec[j])));
         }
         dst->reg_offset++;
      }
      return;
   }

   int remaining_writemask = (1 << ir->type->vector_elements) - 1;

   for (int i = 0; i < ir->type->vector_elements; i++) {
      if (!(remaining_writemask & (1 << i)))
         continue;

      dst->writemask = 1 << i;
      dst->type = brw_type_for_base_type(ir->type);

      /* Find other components that match the one we're about to
       * write.  Emits fewer instructions for things like vec4(0.5,
       * 1.5, 1.5, 1.5).
       */
      for (int j = i + 1; j < ir->type->vector_elements; j++) {
         if (ir->type->base_type == GLSL_TYPE_BOOL) {
            if (ir->value.b[i] == ir->value.b[j])
               dst->writemask |= (1 << j);
         } else {
            /* u, i, and f storage all line up, so no need for a
             * switch case for comparing each type.
             */
            if (ir->value.u[i] == ir->value.u[j])
               dst->writemask |= (1 << j);
         }
      }

      switch (ir->type->base_type) {
      case GLSL_TYPE_FLOAT:
         emit(MOV(*dst, src_reg(ir->value.f[i])));
         break;
      case GLSL_TYPE_INT:
         emit(MOV(*dst, src_reg(ir->value.i[i])));
         break;
      case GLSL_TYPE_UINT:
         emit(MOV(*dst, src_reg(ir->value.u[i])));
         break;
      case GLSL_TYPE_BOOL:
         emit(MOV(*dst, src_reg(ir->value.b[i])));
         break;
      default:
         assert(!"Non-float/uint/int/bool constant");
         break;
      }

      remaining_writemask &= ~dst->writemask;
   }
   dst->reg_offset++;
}

void
vec4_visitor::visit(ir_constant *ir)
{
   dst_reg dst = dst_reg(this, ir->type);
   this->result = src_reg(dst);

   emit_constant_values(&dst, ir);
}

void
vec4_visitor::visit(ir_call *ir)
{
   assert(!"not reached");
}

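/* Build the texturing send: a message header is present when a texel
 * offset is used (and always on Gen4), followed by the coordinate,
 * shadow comparitor, and LOD or gradient parameters in successive MRFs,
 * bumping mlen for each payload vec4 loaded.
 */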
1822void
1823vec4_visitor::visit(ir_texture *ir)
1824{
1825   int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &vp->Base);
1826   sampler = vp->Base.SamplerUnits[sampler];
1827
1828   /* Should be lowered by do_lower_texture_projection */
1829   assert(!ir->projector);
1830
1831   vec4_instruction *inst = NULL;
1832   switch (ir->op) {
1833   case ir_tex:
1834   case ir_txl:
1835      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXL);
1836      break;
1837   case ir_txd:
1838      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXD);
1839      break;
1840   case ir_txf:
1841      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXF);
1842      break;
1843   case ir_txs:
1844      inst = new(mem_ctx) vec4_instruction(this, SHADER_OPCODE_TXS);
1845      break;
1846   case ir_txb:
1847      assert(!"TXB is not valid for vertex shaders.");
1848   }
1849
1850   /* Texel offsets go in the message header; Gen4 also requires headers. */
1851   inst->header_present = ir->offset || intel->gen < 5;
1852   inst->base_mrf = 2;
1853   inst->mlen = inst->header_present + 1; /* always at least one */
1854   inst->sampler = sampler;
1855   inst->dst = dst_reg(this, ir->type);
1856   inst->shadow_compare = ir->shadow_comparitor != NULL;
1857
1858   if (ir->offset != NULL && ir->op != ir_txf)
1859      inst->texture_offset = brw_texture_offset(ir->offset->as_constant());
1860
1861   /* MRF for the first parameter */
1862   int param_base = inst->base_mrf + inst->header_present;
1863
1864   if (ir->op == ir_txs) {
1865      ir->lod_info.lod->accept(this);
1866      int writemask = intel->gen == 4 ? WRITEMASK_W : WRITEMASK_X;
1867      emit(MOV(dst_reg(MRF, param_base, ir->lod_info.lod->type, writemask),
1868	   this->result));
1869   } else {
1870      int i, coord_mask = 0, zero_mask = 0;
1871      /* Load the coordinate */
1872      /* FINISHME: gl_clamp_mask and saturate */
1873      for (i = 0; i < ir->coordinate->type->vector_elements; i++)
1874	 coord_mask |= (1 << i);
1875      for (; i < 4; i++)
1876	 zero_mask |= (1 << i);
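      /* For example, a vec2 coordinate yields coord_mask = XY and
       * zero_mask = ZW, so unused coordinate channels are written as 0.
       */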
1877
1878      ir->coordinate->accept(this);
1879      if (ir->offset && ir->op == ir_txf) {
1880	 /* It appears that the ld instruction used for txf does its
1881	  * address bounds check before adding in the offset.  To work
1882	  * around this, just add the integer offset to the integer
1883	  * texel coordinate, and don't put the offset in the header.
1884	  */
1885	 ir_constant *offset = ir->offset->as_constant();
1886	 assert(offset);
1887
1888	 for (int j = 0; j < ir->coordinate->type->vector_elements; j++) {
1889	    src_reg src = this->result;
1890	    src.swizzle = BRW_SWIZZLE4(BRW_GET_SWZ(src.swizzle, j),
1891				       BRW_GET_SWZ(src.swizzle, j),
1892				       BRW_GET_SWZ(src.swizzle, j),
1893				       BRW_GET_SWZ(src.swizzle, j));
1894	    emit(ADD(dst_reg(MRF, param_base, ir->coordinate->type, 1 << j),
1895		     src, offset->value.i[j]));
1896	 }
1897      } else {
1898	 emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, coord_mask),
1899		  this->result));
1900      }
1901      emit(MOV(dst_reg(MRF, param_base, ir->coordinate->type, zero_mask),
1902	       src_reg(0)));
1903      /* Load the shadow comparator */
1904      if (ir->shadow_comparitor) {
1905	 ir->shadow_comparitor->accept(this);
1906	 emit(MOV(dst_reg(MRF, param_base + 1, ir->shadow_comparitor->type,
1907			  WRITEMASK_X),
1908		  this->result));
1909	 inst->mlen++;
1910      }
1911
1912      /* Load the LOD info */
1913      if (ir->op == ir_txl) {
1914	 int mrf, writemask;
1915	 if (intel->gen >= 5) {
1916	    mrf = param_base + 1;
1917	    if (ir->shadow_comparitor) {
1918	       writemask = WRITEMASK_Y;
1919	       /* mlen already incremented */
1920	    } else {
1921	       writemask = WRITEMASK_X;
1922	       inst->mlen++;
1923	    }
1924	 } else /* intel->gen == 4 */ {
1925	    mrf = param_base;
1926	    writemask = WRITEMASK_Z;
1927	 }
1928	 ir->lod_info.lod->accept(this);
1929	 emit(MOV(dst_reg(MRF, mrf, ir->lod_info.lod->type, writemask),
1930		  this->result));
1931      } else if (ir->op == ir_txf) {
1932	 ir->lod_info.lod->accept(this);
1933	 emit(MOV(dst_reg(MRF, param_base, ir->lod_info.lod->type, WRITEMASK_W),
1934		  this->result));
1935      } else if (ir->op == ir_txd) {
1936	 const glsl_type *type = ir->lod_info.grad.dPdx->type;
1937
1938	 ir->lod_info.grad.dPdx->accept(this);
1939	 src_reg dPdx = this->result;
1940	 ir->lod_info.grad.dPdy->accept(this);
1941	 src_reg dPdy = this->result;
1942
1943	 if (intel->gen >= 5) {
1944	    dPdx.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
1945	    dPdy.swizzle = BRW_SWIZZLE4(SWIZZLE_X,SWIZZLE_X,SWIZZLE_Y,SWIZZLE_Y);
1946	    emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XZ), dPdx));
1947	    emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_YW), dPdy));
1948	    inst->mlen++;
1949
1950	    if (ir->type->vector_elements == 3) {
1951	       dPdx.swizzle = BRW_SWIZZLE_ZZZZ;
1952	       dPdy.swizzle = BRW_SWIZZLE_ZZZZ;
1953	       emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_X), dPdx));
1954	       emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_Y), dPdy));
1955	       inst->mlen++;
1956	    }
1957	 } else /* intel->gen == 4 */ {
1958	    emit(MOV(dst_reg(MRF, param_base + 1, type, WRITEMASK_XYZ), dPdx));
1959	    emit(MOV(dst_reg(MRF, param_base + 2, type, WRITEMASK_XYZ), dPdy));
1960	    inst->mlen += 2;
1961	 }
1962      }
1963   }
1964
1965   emit(inst);
1966
1967   swizzle_result(ir, src_reg(inst->dst), sampler);
1968}
1969
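/**
 * Applies the sampler's channel swizzle key (e.g. depth texture modes or
 * GL_EXT_texture_swizzle) to a texture result.
 *
 * Channels copied from the original value get a single swizzled MOV;
 * SWIZZLE_ZERO and SWIZZLE_ONE channels are filled with immediate 0.0f
 * and 1.0f MOVs.
 */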
1970void
1971vec4_visitor::swizzle_result(ir_texture *ir, src_reg orig_val, int sampler)
1972{
1973   this->result = orig_val;
1974
1975   int s = c->key.tex.swizzles[sampler];
1976
1977   if (ir->op == ir_txs || ir->type == glsl_type::float_type ||
1978       s == SWIZZLE_NOOP)
1979      return;
1980
1981   int zero_mask = 0, one_mask = 0, copy_mask = 0;
1982   int swizzle[4] = { 0, 0, 0, 0 }; /* ZERO/ONE channels never set an entry */
1983
1984   for (int i = 0; i < 4; i++) {
1985      switch (GET_SWZ(s, i)) {
1986      case SWIZZLE_ZERO:
1987	 zero_mask |= (1 << i);
1988	 break;
1989      case SWIZZLE_ONE:
1990	 one_mask |= (1 << i);
1991	 break;
1992      default:
1993	 copy_mask |= (1 << i);
1994	 swizzle[i] = GET_SWZ(s, i);
1995	 break;
1996      }
1997   }
1998
1999   this->result = src_reg(this, ir->type);
2000   dst_reg swizzled_result(this->result);
2001
2002   if (copy_mask) {
2003      orig_val.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
2004      swizzled_result.writemask = copy_mask;
2005      emit(MOV(swizzled_result, orig_val));
2006   }
2007
2008   if (zero_mask) {
2009      swizzled_result.writemask = zero_mask;
2010      emit(MOV(swizzled_result, src_reg(0.0f)));
2011   }
2012
2013   if (one_mask) {
2014      swizzled_result.writemask = one_mask;
2015      emit(MOV(swizzled_result, src_reg(1.0f)));
2016   }
2017}
2018
2019void
2020vec4_visitor::visit(ir_return *ir)
2021{
2022   assert(!"not reached");
2023}
2024
2025void
2026vec4_visitor::visit(ir_discard *ir)
2027{
2028   assert(!"not reached");
2029}
2030
2031void
2032vec4_visitor::visit(ir_if *ir)
2033{
2034   /* Don't point the annotation at the if statement; the annotation
2035    * would then be printed for the whole if, including both branches.
2036    */
2037   this->base_ir = ir->condition;
2038
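   /* gen6's IF instruction can evaluate the comparison itself, which is
    * what emit_if_gen6() takes advantage of; other generations evaluate
    * the condition into a predicate first and emit a predicated IF.
    */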
2039   if (intel->gen == 6) {
2040      emit_if_gen6(ir);
2041   } else {
2042      uint32_t predicate;
2043      emit_bool_to_cond_code(ir->condition, &predicate);
2044      emit(IF(predicate));
2045   }
2046
2047   visit_instructions(&ir->then_instructions);
2048
2049   if (!ir->else_instructions.is_empty()) {
2050      this->base_ir = ir->condition;
2051      emit(BRW_OPCODE_ELSE);
2052
2053      visit_instructions(&ir->else_instructions);
2054   }
2055
2056   this->base_ir = ir->condition;
2057   emit(BRW_OPCODE_ENDIF);
2058}
2059
2060void
2061vec4_visitor::emit_ndc_computation()
2062{
2063   /* Get the position */
2064   src_reg pos = src_reg(output_reg[VERT_RESULT_HPOS]);
2065
2066   /* Build ndc coords, which are (x/w, y/w, z/w, 1/w) */
2067   dst_reg ndc = dst_reg(this, glsl_type::vec4_type);
2068   output_reg[BRW_VERT_RESULT_NDC] = ndc;
2069
2070   current_annotation = "NDC";
2071   dst_reg ndc_w = ndc;
2072   ndc_w.writemask = WRITEMASK_W;
2073   src_reg pos_w = pos;
2074   pos_w.swizzle = BRW_SWIZZLE4(SWIZZLE_W, SWIZZLE_W, SWIZZLE_W, SWIZZLE_W);
2075   emit_math(SHADER_OPCODE_RCP, ndc_w, pos_w);
2076
2077   dst_reg ndc_xyz = ndc;
2078   ndc_xyz.writemask = WRITEMASK_XYZ;
2079
2080   emit(MUL(ndc_xyz, pos, src_reg(ndc_w)));
2081}
2082
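/**
 * Writes the VUE header word holding the point size and user clip flags.
 *
 * Summarizing the code below: on gen < 6 the float point size is scaled
 * by 2^11 and masked into bits 18:8 of header1's W channel, and each
 * user clip plane sets one flag bit through a DP4 sign test followed by
 * a predicated OR.  On gen6+ only the point size needs to be written.
 */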
2083void
2084vec4_visitor::emit_psiz_and_flags(struct brw_reg reg)
2085{
2086   if (intel->gen < 6 &&
2087       ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
2088        c->key.userclip_active || brw->has_negative_rhw_bug)) {
2089      dst_reg header1 = dst_reg(this, glsl_type::uvec4_type);
2090      dst_reg header1_w = header1;
2091      header1_w.writemask = WRITEMASK_W;
2092      GLuint i;
2093
2094      emit(MOV(header1, 0u));
2095
2096      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
2097	 src_reg psiz = src_reg(output_reg[VERT_RESULT_PSIZ]);
2098
2099	 current_annotation = "Point size";
2100	 emit(MUL(header1_w, psiz, src_reg((float)(1 << 11))));
2101	 emit(AND(header1_w, src_reg(header1_w), 0x7ff << 8));
2102      }
2103
2104      current_annotation = "Clipping flags";
2105      for (i = 0; i < c->key.nr_userclip_plane_consts; i++) {
2106	 vec4_instruction *inst;
2107
2108	 inst = emit(DP4(dst_null_f(), src_reg(output_reg[VERT_RESULT_HPOS]),
2109                         src_reg(this->userplane[i])));
2110	 inst->conditional_mod = BRW_CONDITIONAL_L;
2111
2112	 inst = emit(OR(header1_w, src_reg(header1_w), 1u << i));
2113	 inst->predicate = BRW_PREDICATE_NORMAL;
2114      }
2115
2116      /* i965 clipping workaround:
2117       * 1) Test for negative RHW
2118       * 2) If set,
2119       *      set ndc = (0,0,0,0)
2120       *      set ucp[6] = 1
2121       *
2122       * Later, clipping will detect ucp[6] and ensure the primitive is
2123       * clipped against all fixed planes.
2124       */
2125      if (brw->has_negative_rhw_bug) {
2126#if 0
2127	 /* FINISHME */
2128	 brw_CMP(p,
2129		 vec8(brw_null_reg()),
2130		 BRW_CONDITIONAL_L,
2131		 brw_swizzle1(output_reg[BRW_VERT_RESULT_NDC], 3),
2132		 brw_imm_f(0));
2133
2134	 brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
2135	 brw_MOV(p, output_reg[BRW_VERT_RESULT_NDC], brw_imm_f(0));
2136	 brw_set_predicate_control(p, BRW_PREDICATE_NONE);
2137#endif
2138      }
2139
2140      emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), src_reg(header1)));
2141   } else if (intel->gen < 6) {
2142      emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u));
2143   } else {
2144      emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0)));
2145      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
2146         emit(MOV(brw_writemask(reg, WRITEMASK_W),
2147                  src_reg(output_reg[VERT_RESULT_PSIZ])));
2148      }
2149   }
2150}
2151
2152void
2153vec4_visitor::emit_clip_distances(struct brw_reg reg, int offset)
2154{
2155   if (intel->gen < 6) {
2156      /* Clip distance slots are set aside in gen5, but they are not used.  It
2157       * is not clear whether we actually need to set aside space for them,
2158       * but the performance cost is negligible.
2159       */
2160      return;
2161   }
2162
2163   /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables):
2164    *
2165    *     "If a linked set of shaders forming the vertex stage contains no
2166    *     static write to gl_ClipVertex or gl_ClipDistance, but the
2167    *     application has requested clipping against user clip planes through
2168    *     the API, then the coordinate written to gl_Position is used for
2169    *     comparison against the user clip planes."
2170    *
2171    * This function is only called if the shader didn't write to
2172    * gl_ClipDistance.  Accordingly, we use gl_ClipVertex to perform clipping
2173    * if the user wrote to it; otherwise we use gl_Position.
2174    */
2175   gl_vert_result clip_vertex = VERT_RESULT_CLIP_VERTEX;
2176   if (!(c->prog_data.outputs_written
2177         & BITFIELD64_BIT(VERT_RESULT_CLIP_VERTEX))) {
2178      clip_vertex = VERT_RESULT_HPOS;
2179   }
2180
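   /* One DP4 per plane: plane (i + offset)'s distance lands in channel i
    * of this slot, so each clip-distance slot carries four distances.
    */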
2181   for (int i = 0; i + offset < c->key.nr_userclip_plane_consts && i < 4;
2182        ++i) {
2183      emit(DP4(dst_reg(brw_writemask(reg, 1 << i)),
2184               src_reg(output_reg[clip_vertex]),
2185               src_reg(this->userplane[i + offset])));
2186   }
2187}
2188
2189void
2190vec4_visitor::emit_generic_urb_slot(dst_reg reg, int vert_result)
2191{
2192   assert(vert_result < VERT_RESULT_MAX);
2193   reg.type = output_reg[vert_result].type;
2194   current_annotation = output_reg_annotation[vert_result];
2195   /* Copy the register, saturating if necessary */
2196   vec4_instruction *inst = emit(MOV(reg,
2197                                     src_reg(output_reg[vert_result])));
2198   if ((vert_result == VERT_RESULT_COL0 ||
2199        vert_result == VERT_RESULT_COL1 ||
2200        vert_result == VERT_RESULT_BFC0 ||
2201        vert_result == VERT_RESULT_BFC1) &&
2202       c->key.clamp_vertex_color) {
2203      inst->saturate = true;
2204   }
2205}
2206
2207void
2208vec4_visitor::emit_urb_slot(int mrf, int vert_result)
2209{
2210   struct brw_reg hw_reg = brw_message_reg(mrf);
2211   dst_reg reg = dst_reg(MRF, mrf);
2212   reg.type = BRW_REGISTER_TYPE_F;
2213
2214   switch (vert_result) {
2215   case VERT_RESULT_PSIZ:
2216      /* PSIZ is always in slot 0, and is coupled with other flags. */
2217      current_annotation = "indices, point width, clip flags";
2218      emit_psiz_and_flags(hw_reg);
2219      break;
2220   case BRW_VERT_RESULT_NDC:
2221      current_annotation = "NDC";
2222      emit(MOV(reg, src_reg(output_reg[BRW_VERT_RESULT_NDC])));
2223      break;
2224   case BRW_VERT_RESULT_HPOS_DUPLICATE:
2225   case VERT_RESULT_HPOS:
2226      current_annotation = "gl_Position";
2227      emit(MOV(reg, src_reg(output_reg[VERT_RESULT_HPOS])));
2228      break;
2229   case VERT_RESULT_CLIP_DIST0:
2230   case VERT_RESULT_CLIP_DIST1:
2231      if (this->c->key.uses_clip_distance) {
2232         emit_generic_urb_slot(reg, vert_result);
2233      } else {
2234         current_annotation = "user clip distances";
2235         emit_clip_distances(hw_reg, (vert_result - VERT_RESULT_CLIP_DIST0) * 4);
2236      }
2237      break;
2238   case BRW_VERT_RESULT_PAD:
2239      /* No need to write to this slot */
2240      break;
2241   default:
2242      emit_generic_urb_slot(reg, vert_result);
2243      break;
2244   }
2245}
2246
2247static int
2248align_interleaved_urb_mlen(struct brw_context *brw, int mlen)
2249{
2250   struct intel_context *intel = &brw->intel;
2251
2252   if (intel->gen >= 6) {
2253      /* URB data written (does not include the message header reg) must
2254       * be a multiple of 256 bits, or 2 VS registers.  See vol5c.5,
2255       * section 5.4.3.2.2: URB_INTERLEAVED.
2256       *
2257       * URB entries are allocated on a multiple of 1024 bits, so an
2258       * extra 128 bits written here to make the end align to 256 is
2259       * no problem.
2260       */
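      /* For example, a header plus three data regs (mlen 4) is padded to
       * mlen 5 so the data written stays a multiple of two registers.
       */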
2261      if ((mlen % 2) != 1)
2262	 mlen++;
2263   }
2264
2265   return mlen;
2266}
2267
2268/**
2269 * Generates the VUE payload plus the 1 or 2 URB write instructions to
2270 * complete the VS thread.
2271 *
2272 * The VUE layout is documented in Volume 2a.
2273 */
2274void
2275vec4_visitor::emit_urb_writes()
2276{
2277   /* MRF 0 is reserved for the debugger, so start with message header
2278    * in MRF 1.
2279    */
2280   int base_mrf = 1;
2281   int mrf = base_mrf;
2282   /* In the process of generating our URB write message contents, we
2283    * may need to unspill a register or load from an array.  Those
2284    * reads would use MRFs 14-15.
2285    */
2286   int max_usable_mrf = 13;
2287
2288   /* The following assertion verifies that max_usable_mrf causes an
2289    * even-numbered amount of URB write data, which will meet gen6's
2290    * requirements for length alignment.
2291    */
2292   assert((max_usable_mrf - base_mrf) % 2 == 0);
2293
2294   /* FINISHME: edgeflag */
2295
2296   /* First mrf is the g0-based message header containing URB handles and such,
2297    * which is implied in VS_OPCODE_URB_WRITE.
2298    */
2299   mrf++;
2300
2301   if (intel->gen < 6) {
2302      emit_ndc_computation();
2303   }
2304
2305   /* Set up the VUE data for the first URB write */
2306   int slot;
2307   for (slot = 0; slot < c->prog_data.vue_map.num_slots; ++slot) {
2308      emit_urb_slot(mrf++, c->prog_data.vue_map.slot_to_vert_result[slot]);
2309
2310      /* If this was max_usable_mrf, we can't fit anything more into this URB
2311       * WRITE.
2312       */
2313      if (mrf > max_usable_mrf) {
2314	 slot++;
2315	 break;
2316      }
2317   }
2318
2319   current_annotation = "URB write";
2320   vec4_instruction *inst = emit(VS_OPCODE_URB_WRITE);
2321   inst->base_mrf = base_mrf;
2322   inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
2323   inst->eot = (slot >= c->prog_data.vue_map.num_slots);
2324
2325   /* Optional second URB write */
2326   if (!inst->eot) {
2327      mrf = base_mrf + 1;
2328
2329      for (; slot < c->prog_data.vue_map.num_slots; ++slot) {
2330	 assert(mrf < max_usable_mrf);
2331
2332         emit_urb_slot(mrf++, c->prog_data.vue_map.slot_to_vert_result[slot]);
2333      }
2334
2335      current_annotation = "URB write";
2336      inst = emit(VS_OPCODE_URB_WRITE);
2337      inst->base_mrf = base_mrf;
2338      inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
2339      inst->eot = true;
2340      /* URB destination offset.  In the previous write, we got MRFs
2341       * 2-13 minus the one header MRF, so 12 regs.  URB offset is in
2342       * URB row increments, and each of our MRFs is half of one of
2343       * those, since we're doing interleaved writes.
2344       */
2345      inst->offset = (max_usable_mrf - base_mrf) / 2;
2346   }
2347}
2348
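/**
 * Computes the message header offset for a scratch access of the vec4 at
 * @reg_offset: vec4 indices are scaled by 2 for the interleaved storage,
 * and by a further 16 pre-gen6 where the header takes byte offsets.  For
 * relative addressing, the ADD/MUL address math is emitted before @inst.
 */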
2349src_reg
2350vec4_visitor::get_scratch_offset(vec4_instruction *inst,
2351				 src_reg *reladdr, int reg_offset)
2352{
2353   /* Because we store the values to scratch interleaved like our
2354    * vertex data, we need to scale the vec4 index by 2.
2355    */
2356   int message_header_scale = 2;
2357
2358   /* Pre-gen6, the message header uses byte offsets instead of vec4
2359    * (16-byte) offset units.
2360    */
2361   if (intel->gen < 6)
2362      message_header_scale *= 16;
2363
2364   if (reladdr) {
2365      src_reg index = src_reg(this, glsl_type::int_type);
2366
2367      emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset)));
2368      emit_before(inst, MUL(dst_reg(index),
2369			    index, src_reg(message_header_scale)));
2370
2371      return index;
2372   } else {
2373      return src_reg(reg_offset * message_header_scale);
2374   }
2375}
2376
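/**
 * Like get_scratch_offset(), but for pull constant loads: offsets are in
 * vec4 units on gen6+ and in bytes (x16) before that, with no interleave
 * scaling.
 */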
2377src_reg
2378vec4_visitor::get_pull_constant_offset(vec4_instruction *inst,
2379				       src_reg *reladdr, int reg_offset)
2380{
2381   if (reladdr) {
2382      src_reg index = src_reg(this, glsl_type::int_type);
2383
2384      emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset)));
2385
2386      /* Pre-gen6, the message header uses byte offsets instead of vec4
2387       * (16-byte) offset units.
2388       */
2389      if (intel->gen < 6) {
2390	 emit_before(inst, MUL(dst_reg(index), index, src_reg(16)));
2391      }
2392
2393      return index;
2394   } else {
2395      int message_header_scale = intel->gen < 6 ? 16 : 1;
2396      return src_reg(reg_offset * message_header_scale);
2397   }
2398}
2399
2400/**
2401 * Emits an instruction before @inst to load the value named by @orig_src
2402 * from scratch space at @base_offset to @temp.
2403 */
2404void
2405vec4_visitor::emit_scratch_read(vec4_instruction *inst,
2406				dst_reg temp, src_reg orig_src,
2407				int base_offset)
2408{
2409   int reg_offset = base_offset + orig_src.reg_offset;
2410   src_reg index = get_scratch_offset(inst, orig_src.reladdr, reg_offset);
2411
2412   emit_before(inst, SCRATCH_READ(temp, index));
2413}
2414
2415/**
2416 * Emits an instruction after @inst to store the value to be written
2417 * to @orig_dst to scratch space at @base_offset, from @temp.
2418 */
2419void
2420vec4_visitor::emit_scratch_write(vec4_instruction *inst,
2421				 src_reg temp, dst_reg orig_dst,
2422				 int base_offset)
2423{
2424   int reg_offset = base_offset + orig_dst.reg_offset;
2425   src_reg index = get_scratch_offset(inst, orig_dst.reladdr, reg_offset);
2426
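   /* Build the write's destination from g0 but with the original
    * writemask, so only the channels @inst actually wrote reach scratch
    * (a reading of how the generator consumes this dst, not a spec note).
    */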
2427   dst_reg dst = dst_reg(brw_writemask(brw_vec8_grf(0, 0),
2428				       orig_dst.writemask));
2429   vec4_instruction *write = SCRATCH_WRITE(dst, temp, index);
2430   write->predicate = inst->predicate;
2431   write->ir = inst->ir;
2432   write->annotation = inst->annotation;
2433   inst->insert_after(write);
2434}
2435
2436/**
2437 * We can't generally support array access in GRF space, because a
2438 * single instruction's destination can only span 2 contiguous
2439 * registers.  So, we send all GRF arrays that get variable index
2440 * access to scratch space.
2441 */
2442void
2443vec4_visitor::move_grf_array_access_to_scratch()
2444{
2445   int scratch_loc[this->virtual_grf_count];
2446
2447   for (int i = 0; i < this->virtual_grf_count; i++) {
2448      scratch_loc[i] = -1;
2449   }
2450
2451   /* First, calculate the set of virtual GRFs that need to be punted
2452    * to scratch due to having any array access on them, and where in
2453    * scratch.
2454    */
2455   foreach_list(node, &this->instructions) {
2456      vec4_instruction *inst = (vec4_instruction *)node;
2457
2458      if (inst->dst.file == GRF && inst->dst.reladdr &&
2459	  scratch_loc[inst->dst.reg] == -1) {
2460	 scratch_loc[inst->dst.reg] = c->last_scratch;
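	 /* Each vec4 takes 8 * 4 = 32 bytes of scratch, since values are
	  * stored interleaved like vertex data (cf. the x2 scale in
	  * get_scratch_offset()).
	  */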
2461	 c->last_scratch += this->virtual_grf_sizes[inst->dst.reg] * 8 * 4;
2462      }
2463
2464      for (int i = 0 ; i < 3; i++) {
2465	 src_reg *src = &inst->src[i];
2466
2467	 if (src->file == GRF && src->reladdr &&
2468	     scratch_loc[src->reg] == -1) {
2469	    scratch_loc[src->reg] = c->last_scratch;
2470	    c->last_scratch += this->virtual_grf_sizes[src->reg] * 8 * 4;
2471	 }
2472      }
2473   }
2474
2475   /* Now, for anything that will be accessed through scratch, rewrite
2476    * it to load/store.  Note that this is a _safe list walk, because
2477    * we may generate a new scratch_write instruction after the one
2478    * we're processing.
2479    */
2480   foreach_list_safe(node, &this->instructions) {
2481      vec4_instruction *inst = (vec4_instruction *)node;
2482
2483      /* Set up the annotation tracking for newly generated instructions. */
2484      base_ir = inst->ir;
2485      current_annotation = inst->annotation;
2486
2487      if (inst->dst.file == GRF && scratch_loc[inst->dst.reg] != -1) {
2488	 src_reg temp = src_reg(this, glsl_type::vec4_type);
2489
2490	 emit_scratch_write(inst, temp, inst->dst, scratch_loc[inst->dst.reg]);
2491
2492	 inst->dst.file = temp.file;
2493	 inst->dst.reg = temp.reg;
2494	 inst->dst.reg_offset = temp.reg_offset;
2495	 inst->dst.reladdr = NULL;
2496      }
2497
2498      for (int i = 0 ; i < 3; i++) {
2499	 if (inst->src[i].file != GRF || scratch_loc[inst->src[i].reg] == -1)
2500	    continue;
2501
2502	 dst_reg temp = dst_reg(this, glsl_type::vec4_type);
2503
2504	 emit_scratch_read(inst, temp, inst->src[i],
2505			   scratch_loc[inst->src[i].reg]);
2506
2507	 inst->src[i].file = temp.file;
2508	 inst->src[i].reg = temp.reg;
2509	 inst->src[i].reg_offset = temp.reg_offset;
2510	 inst->src[i].reladdr = NULL;
2511      }
2512   }
2513}
2514
2515/**
2516 * Emits an instruction before @inst to load the value named by @orig_src
2517 * from the pull constant buffer (surface) at @base_offset to @temp.
2518 */
2519void
2520vec4_visitor::emit_pull_constant_load(vec4_instruction *inst,
2521				      dst_reg temp, src_reg orig_src,
2522				      int base_offset)
2523{
2524   int reg_offset = base_offset + orig_src.reg_offset;
2525   src_reg index = get_pull_constant_offset(inst, orig_src.reladdr, reg_offset);
2526   vec4_instruction *load;
2527
2528   load = new(mem_ctx) vec4_instruction(this, VS_OPCODE_PULL_CONSTANT_LOAD,
2529					temp, index);
2530   load->base_mrf = 14;
2531   load->mlen = 1;
2532   emit_before(inst, load);
2533}
2534
2535/**
2536 * Implements array access of uniforms by inserting a
2537 * PULL_CONSTANT_LOAD instruction.
2538 *
2539 * Unlike temporary GRF array access (where we don't support it due to
2540 * the difficulty of doing relative addressing on instruction
2541 * destinations), we could potentially do array access of uniforms
2542 * that were loaded in GRF space as push constants.  In real-world
2543 * usage we've seen, though, the arrays being used are always larger
2544 * than we could load as push constants, so just always move all
2545 * uniform array access out to a pull constant buffer.
2546 */
2547void
2548vec4_visitor::move_uniform_array_access_to_pull_constants()
2549{
2550   int pull_constant_loc[this->uniforms];
2551
2552   for (int i = 0; i < this->uniforms; i++) {
2553      pull_constant_loc[i] = -1;
2554   }
2555
2556   /* Walk through and find array access of uniforms.  Put a copy of that
2557    * uniform in the pull constant buffer.
2558    *
2559    * Note that we don't move constant-indexed accesses to arrays.  No
2560    * testing has been done of the performance impact of this choice.
2561    */
2562   foreach_list_safe(node, &this->instructions) {
2563      vec4_instruction *inst = (vec4_instruction *)node;
2564
2565      for (int i = 0 ; i < 3; i++) {
2566	 if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
2567	    continue;
2568
2569	 int uniform = inst->src[i].reg;
2570
2571	 /* If this array isn't already present in the pull constant buffer,
2572	  * add it.
2573	  */
2574	 if (pull_constant_loc[uniform] == -1) {
2575	    const float **values = &prog_data->param[uniform * 4];
2576
2577	    pull_constant_loc[uniform] = prog_data->nr_pull_params / 4;
2578
2579	    for (int j = 0; j < uniform_size[uniform] * 4; j++) {
2580	       prog_data->pull_param[prog_data->nr_pull_params++] = values[j];
2581	    }
2582	 }
2583
2584	 /* Set up the annotation tracking for newly generated instructions. */
2585	 base_ir = inst->ir;
2586	 current_annotation = inst->annotation;
2587
2588	 dst_reg temp = dst_reg(this, glsl_type::vec4_type);
2589
2590	 emit_pull_constant_load(inst, temp, inst->src[i],
2591				 pull_constant_loc[uniform]);
2592
2593	 inst->src[i].file = temp.file;
2594	 inst->src[i].reg = temp.reg;
2595	 inst->src[i].reg_offset = temp.reg_offset;
2596	 inst->src[i].reladdr = NULL;
2597      }
2598   }
2599
2600   /* Now there are no accesses of the UNIFORM file with a reladdr, so
2601    * no need to track them as larger-than-vec4 objects.  This will be
2602    * relied on in cutting out unused uniform vectors from push
2603    * constants.
2604    */
2605   split_uniform_registers();
2606}
2607
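/**
 * Copies a negated UD source through a MOV so that later instructions
 * see a plain, already-negated value.  (Presumably the source-negate
 * modifier doesn't give the desired result on unsigned operands; this
 * is an inference from the helper's use, not a spec citation.)
 */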
2608void
2609vec4_visitor::resolve_ud_negate(src_reg *reg)
2610{
2611   if (reg->type != BRW_REGISTER_TYPE_UD || !reg->negate)
2613      return;
2614
2615   src_reg temp = src_reg(this, glsl_type::uvec4_type);
2616   emit(BRW_OPCODE_MOV, dst_reg(temp), *reg);
2617   *reg = temp;
2618}
2619
2620vec4_visitor::vec4_visitor(struct brw_vs_compile *c,
2621			   struct gl_shader_program *prog,
2622			   struct brw_shader *shader)
2623{
2624   this->c = c;
2625   this->p = &c->func;
2626   this->brw = p->brw;
2627   this->intel = &brw->intel;
2628   this->ctx = &intel->ctx;
2629   this->prog = prog;
2630   this->shader = shader;
2631
2632   this->mem_ctx = ralloc_context(NULL);
2633   this->failed = false;
2634
2635   this->base_ir = NULL;
2636   this->current_annotation = NULL;
2637
2639   this->vp = (struct gl_vertex_program *)
2640     prog->_LinkedShaders[MESA_SHADER_VERTEX]->Program;
2641   this->prog_data = &c->prog_data;
2642
2643   this->variable_ht = hash_table_ctor(0,
2644				       hash_table_pointer_hash,
2645				       hash_table_pointer_compare);
2646
2647   this->virtual_grf_def = NULL;
2648   this->virtual_grf_use = NULL;
2649   this->virtual_grf_sizes = NULL;
2650   this->virtual_grf_count = 0;
2651   this->virtual_grf_reg_map = NULL;
2652   this->virtual_grf_reg_count = 0;
2653   this->virtual_grf_array_size = 0;
2654   this->live_intervals_valid = false;
2655
2656   this->max_grf = intel->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
2657
2658   this->uniforms = 0;
2659}
2660
2661vec4_visitor::~vec4_visitor()
2662{
2663   ralloc_free(this->mem_ctx);
2664   hash_table_dtor(this->variable_ht);
2665}
2666
2667
2668void
2669vec4_visitor::fail(const char *format, ...)
2670{
2671   va_list va;
2672   char *msg;
2673
2674   if (failed)
2675      return;
2676
2677   failed = true;
2678
2679   va_start(va, format);
2680   msg = ralloc_vasprintf(mem_ctx, format, va);
2681   va_end(va);
2682   msg = ralloc_asprintf(mem_ctx, "VS compile failed: %s\n", msg);
2683
2684   this->fail_msg = msg;
2685
2686   if (INTEL_DEBUG & DEBUG_VS) {
2687      fprintf(stderr, "%s",  msg);
2688   }
2689}
2690
2691} /* namespace brw */
2692