brw_fs_visitor.cpp revision 2f18698220d8b27991fab550c4721590d17278e0
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR.  The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
extern "C" {

#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_wm.h"
}
#include "brw_shader.h"
#include "brw_fs.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
#include "glsl/ir_print_visitor.h"

void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   if (ir->mode == ir_var_in) {
      if (!strcmp(ir->name, "gl_FragCoord")) {
	 reg = emit_fragcoord_interpolation(ir);
      } else if (!strcmp(ir->name, "gl_FrontFacing")) {
	 reg = emit_frontfacing_interpolation(ir);
      } else {
	 reg = emit_general_interpolation(ir);
      }
      assert(reg);
      hash_table_insert(this->variable_ht, reg, ir);
      return;
   } else if (ir->mode == ir_var_out) {
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

      if (ir->index > 0) {
	 assert(ir->location == FRAG_RESULT_DATA0);
	 assert(ir->index == 1);
	 this->dual_src_output = *reg;
      } else if (ir->location == FRAG_RESULT_COLOR) {
	 /* Writing gl_FragColor outputs to all color regions. */
	 for (unsigned int i = 0; i < MAX2(c->key.nr_color_regions, 1); i++) {
	    this->outputs[i] = *reg;
	    this->output_components[i] = 4;
	 }
      } else if (ir->location == FRAG_RESULT_DEPTH) {
	 this->frag_depth = ir;
      } else {
	 /* gl_FragData or a user-defined FS output */
	 assert(ir->location >= FRAG_RESULT_DATA0 &&
		ir->location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

	 int vector_elements =
	    ir->type->is_array() ? ir->type->fields.array->vector_elements
				 : ir->type->vector_elements;

	 /* General color output. */
	 for (unsigned int i = 0; i < MAX2(1, ir->type->length); i++) {
	    int output = ir->location - FRAG_RESULT_DATA0 + i;
	    this->outputs[output] = *reg;
	    this->outputs[output].reg_offset += vector_elements * i;
	    this->output_components[output] = vector_elements;
	 }
      }
   } else if (ir->mode == ir_var_uniform) {
      int param_index = c->prog_data.nr_params;

      if (c->dispatch_width == 16) {
	 if (!variable_storage(ir)) {
	    fail("Failed to find uniform '%s' in 16-wide\n", ir->name);
	 }
	 return;
      }

      if (!strncmp(ir->name, "gl_", 3)) {
	 setup_builtin_uniform_values(ir);
      } else {
	 setup_uniform_values(ir->location, ir->type);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
      reg->type = brw_type_for_base_type(ir->type);
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

   hash_table_insert(this->variable_ht, reg, ir);
}

void
fs_visitor::visit(ir_dereference_variable *ir)
{
   fs_reg *reg = variable_storage(ir->var);
   this->result = *reg;
}

void
fs_visitor::visit(ir_dereference_record *ir)
{
   const glsl_type *struct_type = ir->record->type;

   ir->record->accept(this);

   unsigned int offset = 0;
   for (unsigned int i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
	 break;
      offset += type_size(struct_type->fields.structure[i].type);
   }
   this->result.reg_offset += offset;
   this->result.type = brw_type_for_base_type(ir->type);
}

void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   int element_size;

   ir->array->accept(this);
   index = ir->array_index->as_constant();

   element_size = type_size(ir->type);
   this->result.type = brw_type_for_base_type(ir->type);

   if (index) {
      assert(this->result.file == UNIFORM || this->result.file == GRF);
      this->result.reg_offset += index->value.i[0] * element_size;
   } else {
      assert(!"FINISHME: non-constant array element");
   }
}

/* Instruction selection: Produce a MOV.sat instead of
 * MIN(MAX(val, 0), 1) when possible.
 */
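/* For example (a sketch): the GLSL expression
 *
 *    y = clamp(a * b, 0.0, 1.0);
 *
 * ideally becomes a single "mul.sat y, a, b" rather than a MUL followed
 * by a min/max sequence.
 */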
bool
fs_visitor::try_emit_saturate(ir_expression *ir)
{
   ir_rvalue *sat_val = ir->as_rvalue_to_saturate();

   if (!sat_val)
      return false;

   fs_inst *pre_inst = (fs_inst *) this->instructions.get_tail();

   sat_val->accept(this);
   fs_reg src = this->result;

   fs_inst *last_inst = (fs_inst *) this->instructions.get_tail();

   /* If the last instruction from our accept() didn't generate our
    * src, generate a saturated MOV
    */
   fs_inst *modify = get_instruction_generating_reg(pre_inst, last_inst, src);
   if (!modify || modify->regs_written() != 1) {
      fs_inst *inst = emit(BRW_OPCODE_MOV, this->result, src);
      inst->saturate = true;
   } else {
      modify->saturate = true;
      this->result = src;
   }

   return true;
}

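/* Try to fuse an ADD of a MUL into a single MAD.  A sketch of the
 * intent: for
 *
 *    d = a + b * c;    (mul_arg == 1)
 *
 * we emit "mad d, a, b, c" instead of a separate MUL and ADD.  The
 * helper declines when any operand is a constant, presumably because
 * 3-source instructions can't encode immediates.
 */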
bool
fs_visitor::try_emit_mad(ir_expression *ir, int mul_arg)
{
   /* 3-src instructions were introduced in gen6. */
   if (intel->gen < 6)
      return false;

   /* MAD can only handle floating-point data. */
   if (ir->type != glsl_type::float_type)
      return false;

   ir_rvalue *nonmul = ir->operands[1 - mul_arg];
   ir_expression *mul = ir->operands[mul_arg]->as_expression();

   if (!mul || mul->operation != ir_binop_mul)
      return false;

   if (nonmul->as_constant() ||
       mul->operands[0]->as_constant() ||
       mul->operands[1]->as_constant())
      return false;

   nonmul->accept(this);
   fs_reg src0 = this->result;

   mul->operands[0]->accept(this);
   fs_reg src1 = this->result;

   mul->operands[1]->accept(this);
   fs_reg src2 = this->result;

   this->result = fs_reg(this, ir->type);
   emit(BRW_OPCODE_MAD, this->result, src0, src1, src2);

   return true;
}

void
fs_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   fs_reg op[2], temp;
   fs_inst *inst;

   assert(ir->get_num_operands() <= 2);

   if (try_emit_saturate(ir))
      return;
   if (ir->operation == ir_binop_add) {
      if (try_emit_mad(ir, 0) || try_emit_mad(ir, 1))
	 return;
   }

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
	 ir_print_visitor v;
	 fail("Failed to get tree for expression operand:\n");
	 ir->operands[operand]->accept(&v);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
      /* And then those vector operands should have been broken down to scalar.
       */
      assert(!ir->operands[operand]->type->is_vector());
   }

   /* Storage for our result.  If our result goes into an assignment, it will
    * just get copy-propagated out, so no worries.
    */
   this->result = fs_reg(this, ir->type);

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
       * the one's complement of the whole register, not just bit 0.
       */
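      /* e.g. NOT on 0x00000001 would yield 0xfffffffe, which still
       * reads as "true"; XOR with 1 flips only bit 0.
       */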
      emit(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      this->result = op[0];
      break;
   case ir_unop_sign:
      temp = fs_reg(this, ir->type);

      emit(BRW_OPCODE_MOV, this->result, fs_reg(0.0f));

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(1.0f));
      inst->predicated = true;

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f));
      inst->predicated = true;

      break;
   case ir_unop_rcp:
      emit_math(SHADER_OPCODE_RCP, this->result, op[0]);
      break;

   case ir_unop_exp2:
      emit_math(SHADER_OPCODE_EXP2, this->result, op[0]);
      break;
   case ir_unop_log2:
      emit_math(SHADER_OPCODE_LOG2, this->result, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(SHADER_OPCODE_SIN, this->result, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(SHADER_OPCODE_COS, this->result, op[0]);
      break;

   case ir_unop_dFdx:
      emit(FS_OPCODE_DDX, this->result, op[0]);
      break;
   case ir_unop_dFdy:
      emit(FS_OPCODE_DDY, this->result, op[0]);
      break;

   case ir_binop_add:
      emit(BRW_OPCODE_ADD, this->result, op[0], op[1]);
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;

   case ir_binop_mul:
      if (ir->type->is_integer()) {
	 /* For integer multiplication, the MUL uses the low 16 bits
	  * of one of the operands (src0 on gen6, src1 on gen7).  The
	  * MACH adds in the contribution of the upper 16 bits of that
	  * operand.
	  *
	  * FINISHME: Emit just the MUL if we know an operand is small
	  * enough.
	  */
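	 /* Roughly, with srcN being the operand whose low 16 bits feed
	  * the MUL:
	  *
	  *    acc  = srcN.lo16 * other             (MUL)
	  *    acc += (srcN.hi16 * other) << 16     (MACH; its dst takes
	  *                                          the high 32 bits)
	  *    dst  = acc.lo32                      (MOV)
	  */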
	 if (intel->gen >= 7 && c->dispatch_width == 16)
	    fail("16-wide explicit accumulator operands unsupported\n");

	 struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);

	 emit(BRW_OPCODE_MUL, acc, op[0], op[1]);
	 emit(BRW_OPCODE_MACH, reg_null_d, op[0], op[1]);
	 emit(BRW_OPCODE_MOV, this->result, fs_reg(acc));
      } else {
	 emit(BRW_OPCODE_MUL, this->result, op[0], op[1]);
      }
      break;
   case ir_binop_div:
      if (intel->gen >= 7 && c->dispatch_width == 16)
	 fail("16-wide INTDIV unsupported\n");

      /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_QUOTIENT, this->result, op[0], op[1]);
      break;
   case ir_binop_mod:
      if (intel->gen >= 7 && c->dispatch_width == 16)
	 fail("16-wide INTDIV unsupported\n");

      /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_REMAINDER, this->result, op[0], op[1]);
      break;

   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_all_equal:
   case ir_binop_nequal:
   case ir_binop_any_nequal:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
	 temp.type = op[0].type;

      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      resolve_bool_comparison(ir->operands[0], &op[0]);
      resolve_bool_comparison(ir->operands[1], &op[1]);

      inst = emit(BRW_OPCODE_CMP, temp, op[0], op[1]);
      inst->conditional_mod = brw_conditional_for_comparison(ir->operation);
      break;

   case ir_binop_logic_xor:
      emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
      break;

   case ir_binop_logic_or:
      emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
      break;

   case ir_binop_logic_and:
      emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
      break;

   case ir_binop_dot:
   case ir_unop_any:
      assert(!"not reached: should be handled by brw_fs_channel_expressions");
      break;

   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;

   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;

   case ir_unop_sqrt:
      emit_math(SHADER_OPCODE_SQRT, this->result, op[0]);
      break;

   case ir_unop_rsq:
      emit_math(SHADER_OPCODE_RSQ, this->result, op[0]);
      break;

   case ir_unop_i2u:
      op[0].type = BRW_REGISTER_TYPE_UD;
      this->result = op[0];
      break;
   case ir_unop_u2i:
      op[0].type = BRW_REGISTER_TYPE_D;
      this->result = op[0];
      break;
   case ir_unop_i2f:
   case ir_unop_u2f:
   case ir_unop_f2i:
      emit(BRW_OPCODE_MOV, this->result, op[0]);
      break;

   case ir_unop_b2i:
      inst = emit(BRW_OPCODE_AND, this->result, op[0], fs_reg(1));
      break;
   case ir_unop_b2f:
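      /* Mask the boolean down to 0/1 in an integer temporary, then let
       * the MOV's int-to-float conversion produce 0.0f or 1.0f.
       */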
      temp = fs_reg(this, glsl_type::int_type);
      emit(BRW_OPCODE_AND, temp, op[0], fs_reg(1));
      emit(BRW_OPCODE_MOV, this->result, temp);
      break;

   case ir_unop_f2b:
   case ir_unop_i2b:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
	 temp.type = op[0].type;

      resolve_ud_negate(&op[0]);

      inst = emit(BRW_OPCODE_CMP, temp, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      break;

   case ir_unop_trunc:
      emit(BRW_OPCODE_RNDZ, this->result, op[0]);
      break;
   case ir_unop_ceil:
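      /* ceil(x) = -floor(-x) */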
      op[0].negate = !op[0].negate;
      inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
      break;
   case ir_unop_fract:
      inst = emit(BRW_OPCODE_FRC, this->result, op[0]);
      break;
   case ir_unop_round_even:
      emit(BRW_OPCODE_RNDE, this->result, op[0]);
      break;

   case ir_binop_min:
      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      if (intel->gen >= 6) {
	 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
	 inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
	 /* Unalias the destination */
	 this->result = fs_reg(this, ir->type);

	 inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
	 inst->conditional_mod = BRW_CONDITIONAL_L;

	 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
	 inst->predicated = true;
      }
      break;
   case ir_binop_max:
      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      if (intel->gen >= 6) {
	 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
	 inst->conditional_mod = BRW_CONDITIONAL_GE;
      } else {
	 /* Unalias the destination */
	 this->result = fs_reg(this, ir->type);

	 inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
	 inst->conditional_mod = BRW_CONDITIONAL_G;

	 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
	 inst->predicated = true;
      }
      break;

   case ir_binop_pow:
      emit_math(SHADER_OPCODE_POW, this->result, op[0], op[1]);
      break;

   case ir_unop_bit_not:
      inst = emit(BRW_OPCODE_NOT, this->result, op[0]);
      break;
   case ir_binop_bit_and:
      inst = emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
      break;
   case ir_binop_bit_xor:
      inst = emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
      break;
   case ir_binop_bit_or:
      inst = emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
      break;

   case ir_binop_lshift:
      inst = emit(BRW_OPCODE_SHL, this->result, op[0], op[1]);
      break;

   case ir_binop_rshift:
      if (ir->type->base_type == GLSL_TYPE_INT)
	 inst = emit(BRW_OPCODE_ASR, this->result, op[0], op[1]);
      else
	 inst = emit(BRW_OPCODE_SHR, this->result, op[0], op[1]);
      break;
   }
}

void
fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
				   const glsl_type *type, bool predicated)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->components(); i++) {
	 l.type = brw_type_for_base_type(type);
	 r.type = brw_type_for_base_type(type);

	 if (predicated || !l.equals(r)) {
	    fs_inst *inst = emit(BRW_OPCODE_MOV, l, r);
	    inst->predicated = predicated;
	 }

	 l.reg_offset++;
	 r.reg_offset++;
      }
      break;
   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
	 emit_assignment_writes(l, r, type->fields.array, predicated);
      }
      break;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
	 emit_assignment_writes(l, r, type->fields.structure[i].type,
				predicated);
      }
      break;

   case GLSL_TYPE_SAMPLER:
      break;

   default:
      assert(!"not reached");
      break;
   }
}

/* If the RHS processing resulted in an instruction generating a
 * temporary value, and it would be easy to rewrite the instruction to
 * generate its result right into the LHS instead, do so.  This ends
 * up reliably removing instructions where it can be tricky to do so
 * later without real UD chain information.
 */
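/* For example (a sketch), if the RHS produced
 *
 *    add t, a, b
 *
 * and we're assigning t to dst, rewrite the instruction to
 * "add dst, a, b" and skip emitting the copy entirely.
 */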
bool
fs_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
                                   fs_reg dst,
                                   fs_reg src,
                                   fs_inst *pre_rhs_inst,
                                   fs_inst *last_rhs_inst)
{
   /* Only attempt if we're doing a direct assignment. */
   if (ir->condition ||
       !(ir->lhs->type->is_scalar() ||
        (ir->lhs->type->is_vector() &&
         ir->write_mask == (1 << ir->lhs->type->vector_elements) - 1)))
      return false;

   /* Make sure the last instruction generated our source reg. */
   fs_inst *modify = get_instruction_generating_reg(pre_rhs_inst,
						    last_rhs_inst,
						    src);
   if (!modify)
      return false;

   /* If last_rhs_inst wrote a different number of components than our LHS,
    * we can't safely rewrite it.
    */
   if (ir->lhs->type->vector_elements != modify->regs_written())
      return false;

   /* Success!  Rewrite the instruction. */
   modify->dst = dst;

   return true;
}

void
fs_visitor::visit(ir_assignment *ir)
{
   fs_reg l, r;
   fs_inst *inst;

   /* FINISHME: arrays on the lhs */
   ir->lhs->accept(this);
   l = this->result;

   fs_inst *pre_rhs_inst = (fs_inst *) this->instructions.get_tail();

   ir->rhs->accept(this);
   r = this->result;

   fs_inst *last_rhs_inst = (fs_inst *) this->instructions.get_tail();

   assert(l.file != BAD_FILE);
   assert(r.file != BAD_FILE);

   if (try_rewrite_rhs_to_dst(ir, l, r, pre_rhs_inst, last_rhs_inst))
      return;

   if (ir->condition) {
      emit_bool_to_cond_code(ir->condition);
   }

   if (ir->lhs->type->is_scalar() ||
       ir->lhs->type->is_vector()) {
      for (int i = 0; i < ir->lhs->type->vector_elements; i++) {
	 if (ir->write_mask & (1 << i)) {
	    inst = emit(BRW_OPCODE_MOV, l, r);
	    if (ir->condition)
	       inst->predicated = true;
	    r.reg_offset++;
	 }
	 l.reg_offset++;
      }
   } else {
      emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL);
   }
}

fs_inst *
fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate,
			      int sampler)
{
   int mlen;
   int base_mrf = 1;
   bool simd16 = false;
   fs_reg orig_dst;

   /* g0 header. */
   mlen = 1;

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
	 coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;

      if (ir->op == ir_tex) {
	 /* There's no plain shadow compare message, so we use shadow
	  * compare with a bias of 0.0.
	  */
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), fs_reg(0.0f));
	 mlen++;
      } else if (ir->op == ir_txb) {
	 ir->lod_info.bias->accept(this);
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
	 mlen++;
      } else {
	 assert(ir->op == ir_txl);
	 ir->lod_info.lod->accept(this);
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
	 mlen++;
      }

      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen++;
   } else if (ir->op == ir_tex) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
	 coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;
   } else if (ir->op == ir_txd) {
      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
	 coordinate.reg_offset++;
      }
      /* the slots for u and v are always present, but r is optional */
      mlen += MAX2(ir->coordinate->type->vector_elements, 2);

      /*  P   = u, v, r
       * dPdx = dudx, dvdx, drdx
       * dPdy = dudy, dvdy, drdy
       *
       * 1-arg: Does not exist.
       *
       * 2-arg: dudx   dvdx   dudy   dvdy
       *        dPdx.x dPdx.y dPdy.x dPdy.y
       *        m4     m5     m6     m7
       *
       * 3-arg: dudx   dvdx   drdx   dudy   dvdy   drdy
       *        dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
       *        m5     m6     m7     m8     m9     m10
       */
      for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) {
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
	 dPdx.reg_offset++;
      }
      mlen += MAX2(ir->lod_info.grad.dPdx->type->vector_elements, 2);

      for (int i = 0; i < ir->lod_info.grad.dPdy->type->vector_elements; i++) {
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
	 dPdy.reg_offset++;
      }
      mlen += MAX2(ir->lod_info.grad.dPdy->type->vector_elements, 2);
   } else if (ir->op == ir_txs) {
      /* There's no SIMD8 resinfo message on Gen4.  Use SIMD16 instead. */
      simd16 = true;
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result);
      mlen += 2;
   } else {
      /* Oh joy.  gen4 doesn't have SIMD8 non-shadow-compare bias/lod
       * instructions.  We'll need to do SIMD16 here.
       */
      simd16 = true;
      assert(ir->op == ir_txb || ir->op == ir_txl || ir->op == ir_txf);

      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2, coordinate.type),
	      coordinate);
	 coordinate.reg_offset++;
      }

      /* Initialize the rest of u/v/r with 0.0.  Empirically, this seems to
       * be necessary for TXF (ld), but seems wise to do for all messages.
       */
      for (int i = ir->coordinate->type->vector_elements; i < 3; i++) {
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2), fs_reg(0.0f));
      }

      /* lod/bias appears after u/v/r. */
      mlen += 6;

      if (ir->op == ir_txb) {
	 ir->lod_info.bias->accept(this);
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
	 mlen++;
      } else {
	 ir->lod_info.lod->accept(this);
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, this->result.type),
			      this->result);
	 mlen++;
      }

      /* The unused upper half. */
      mlen++;
   }

   if (simd16) {
      /* Now, since we're doing simd16, the return is 2 interleaved
       * vec4s where the odd-indexed ones are junk. We'll need to move
       * this weirdness around to the expected layout.
       */
      orig_dst = dst;
      const glsl_type *vec_type =
	 glsl_type::get_instance(ir->type->base_type, 4, 1);
      dst = fs_reg(this, glsl_type::get_array_instance(vec_type, 2));
      dst.type = intel->is_g4x ? brw_type_for_base_type(ir->type)
			       : BRW_REGISTER_TYPE_F;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(SHADER_OPCODE_TEX, dst);
      break;
   case ir_txb:
      inst = emit(FS_OPCODE_TXB, dst);
      break;
   case ir_txl:
      inst = emit(SHADER_OPCODE_TXL, dst);
      break;
   case ir_txd:
      inst = emit(SHADER_OPCODE_TXD, dst);
      break;
   case ir_txs:
      inst = emit(SHADER_OPCODE_TXS, dst);
      break;
   case ir_txf:
      inst = emit(SHADER_OPCODE_TXF, dst);
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = true;

   if (simd16) {
      for (int i = 0; i < 4; i++) {
	 emit(BRW_OPCODE_MOV, orig_dst, dst);
	 orig_dst.reg_offset++;
	 dst.reg_offset += 2;
      }
   }

   return inst;
}

/* gen5's sampler has slots for u, v, r, array index, then optional
 * parameters like the shadow comparator or LOD bias.  If the optional
 * parameters aren't present, the trailing slots they would occupy can
 * be omitted from the message.
 *
 * Either way, we never fill in unused slots, which may look surprising
 * in the disassembly.
 */
fs_inst *
fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate,
			      int sampler)
{
   int mlen = 0;
   int base_mrf = 2;
   int reg_width = c->dispatch_width / 8;
   bool header_present = false;
   const int vector_elements =
      ir->coordinate ? ir->coordinate->type->vector_elements : 0;

   if (ir->offset != NULL && ir->op == ir_txf) {
      /* It appears that the ld instruction used for txf does its
       * address bounds check before adding in the offset.  To work
       * around this, just add the integer offset to the integer texel
       * coordinate, and don't put the offset in the header.
       */
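      /* e.g. for a constant offset of <1, 0>, the u coordinate is
       * (roughly) written with "add mN, u, 1" rather than carrying the
       * offset in the header.
       */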
      ir_constant *offset = ir->offset->as_constant();
      for (int i = 0; i < vector_elements; i++) {
	 emit(BRW_OPCODE_ADD,
	      fs_reg(MRF, base_mrf + mlen + i * reg_width, coordinate.type),
	      coordinate,
	      offset->value.i[i]);
	 coordinate.reg_offset++;
      }
   } else {
      if (ir->offset) {
	 /* The offsets set up by the ir_texture visitor are in the
	  * m1 header, so we can't go headerless.
	  */
	 header_present = true;
	 mlen++;
	 base_mrf--;
      }

      for (int i = 0; i < vector_elements; i++) {
	 emit(BRW_OPCODE_MOV,
	      fs_reg(MRF, base_mrf + mlen + i * reg_width, coordinate.type),
	      coordinate);
	 coordinate.reg_offset++;
      }
   }
   mlen += vector_elements * reg_width;

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      mlen = MAX2(mlen, header_present + 4 * reg_width);

      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(SHADER_OPCODE_TEX, dst);
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      mlen = MAX2(mlen, header_present + 4 * reg_width);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;

      inst = emit(FS_OPCODE_TXB, dst);

      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      mlen = MAX2(mlen, header_present + 4 * reg_width);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;

      inst = emit(SHADER_OPCODE_TXL, dst);
      break;
   case ir_txd: {
      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      mlen = MAX2(mlen, header_present + 4 * reg_width); /* skip over 'ai' */

      /**
       *  P   =  u,    v,    r
       * dPdx = dudx, dvdx, drdx
       * dPdy = dudy, dvdy, drdy
       *
       * Load up these values:
       * - dudx   dudy   dvdx   dvdy   drdx   drdy
       * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
       */
      for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) {
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
	 dPdx.reg_offset++;
	 mlen += reg_width;

	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
	 dPdy.reg_offset++;
	 mlen += reg_width;
      }

      inst = emit(SHADER_OPCODE_TXD, dst);
      break;
   }
   case ir_txs:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result);
      mlen += reg_width;
      inst = emit(SHADER_OPCODE_TXS, dst);
      break;
   case ir_txf:
      mlen = header_present + 4 * reg_width;

      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV,
	   fs_reg(MRF, base_mrf + mlen - reg_width, BRW_REGISTER_TYPE_UD),
	   this->result);
      inst = emit(SHADER_OPCODE_TXF, dst);
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = header_present;

   if (mlen > 11) {
      fail("Message length >11 disallowed by hardware\n");
   }

   return inst;
}

fs_inst *
fs_visitor::emit_texture_gen7(ir_texture *ir, fs_reg dst, fs_reg coordinate,
			      int sampler)
{
   int mlen = 0;
   int base_mrf = 2;
   int reg_width = c->dispatch_width / 8;
   bool header_present = false;
   int offsets[3];

   if (ir->offset && ir->op != ir_txf) {
      /* The offsets set up by the ir_texture visitor are in the
       * m1 header, so we can't go headerless.
       */
      header_present = true;
      mlen++;
      base_mrf--;
   }

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
   }

   /* Set up the LOD info */
   switch (ir->op) {
   case ir_tex:
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
      break;
   case ir_txd: {
      if (c->dispatch_width == 16)
	 fail("Gen7 does not support sample_d/sample_d_c in SIMD16 mode.");

      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      /* Load dPdx and the coordinate together:
       * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
       */
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate);
	 coordinate.reg_offset++;
	 mlen += reg_width;

	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
	 dPdx.reg_offset++;
	 mlen += reg_width;

	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
	 dPdy.reg_offset++;
	 mlen += reg_width;
      }
      break;
   }
   case ir_txs:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result);
      mlen += reg_width;
      break;
   case ir_txf:
      /* It appears that the ld instruction used for txf does its
       * address bounds check before adding in the offset.  To work
       * around this, just add the integer offset to the integer texel
       * coordinate, and don't put the offset in the header.
       */
      if (ir->offset) {
	 ir_constant *offset = ir->offset->as_constant();
	 offsets[0] = offset->value.i[0];
	 offsets[1] = offset->value.i[1];
	 offsets[2] = offset->value.i[2];
      } else {
	 memset(offsets, 0, sizeof(offsets));
      }

      /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r. */
      emit(BRW_OPCODE_ADD,
	   fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate, offsets[0]);
      coordinate.reg_offset++;
      mlen += reg_width;

      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), this->result);
      mlen += reg_width;

      for (int i = 1; i < ir->coordinate->type->vector_elements; i++) {
	 emit(BRW_OPCODE_ADD,
	      fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate, offsets[i]);
	 coordinate.reg_offset++;
	 mlen += reg_width;
      }
      break;
   }

   /* Set up the coordinate (except for cases where it was done above) */
   if (ir->op != ir_txd && ir->op != ir_txs && ir->op != ir_txf) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
	 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate);
	 coordinate.reg_offset++;
	 mlen += reg_width;
      }
   }

   /* Generate the SEND */
   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex: inst = emit(SHADER_OPCODE_TEX, dst); break;
   case ir_txb: inst = emit(FS_OPCODE_TXB, dst); break;
   case ir_txl: inst = emit(SHADER_OPCODE_TXL, dst); break;
   case ir_txd: inst = emit(SHADER_OPCODE_TXD, dst); break;
   case ir_txf: inst = emit(SHADER_OPCODE_TXF, dst); break;
   case ir_txs: inst = emit(SHADER_OPCODE_TXS, dst); break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = header_present;

   if (mlen > 11) {
      fail("Message length >11 disallowed by hardware\n");
   }

   return inst;
}

void
fs_visitor::visit(ir_texture *ir)
{
   fs_inst *inst = NULL;

   int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &fp->Base);
   sampler = fp->Base.SamplerUnits[sampler];

   /* Our hardware doesn't have a sample_d_c message, so shadow compares
    * for textureGrad/TXD need to be emulated with instructions.
    */
   bool hw_compare_supported = ir->op != ir_txd;
   if (ir->shadow_comparitor && !hw_compare_supported) {
      assert(c->key.tex.compare_funcs[sampler] != GL_NONE);
      /* No need to even sample for GL_ALWAYS or GL_NEVER...bail early */
      if (c->key.tex.compare_funcs[sampler] == GL_ALWAYS)
	 return swizzle_result(ir, fs_reg(1.0f), sampler);
      else if (c->key.tex.compare_funcs[sampler] == GL_NEVER)
	 return swizzle_result(ir, fs_reg(0.0f), sampler);
   }

   if (ir->coordinate)
      ir->coordinate->accept(this);
   fs_reg coordinate = this->result;

   if (ir->offset != NULL && !(intel->gen == 7 && ir->op == ir_txf)) {
      uint32_t offset_bits = brw_texture_offset(ir->offset->as_constant());

      /* Explicitly set up the message header by copying g0 to msg reg m1. */
      emit(BRW_OPCODE_MOV, fs_reg(MRF, 1, BRW_REGISTER_TYPE_UD),
	   fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)));

      /* Then set the offset bits in DWord 2 of the message header. */
      emit(BRW_OPCODE_MOV,
	   fs_reg(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 1, 2),
			 BRW_REGISTER_TYPE_UD)),
	   fs_reg(brw_imm_uw(offset_bits)));
   }

   /* Should be lowered by do_lower_texture_projection */
   assert(!ir->projector);

   bool needs_gl_clamp = true;

   fs_reg scale_x, scale_y;

   /* The 965 requires the EU to do the normalization of GL rectangle
    * texture coordinates.  We use the program parameter state
    * tracking to get the scaling factor.
    */
   if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT &&
       (intel->gen < 6 ||
	(intel->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) ||
			     c->key.tex.gl_clamp_mask[1] & (1 << sampler))))) {
      struct gl_program_parameter_list *params = c->fp->program.Base.Parameters;
      int tokens[STATE_LENGTH] = {
	 STATE_INTERNAL,
	 STATE_TEXRECT_SCALE,
	 sampler,
	 0,
	 0
      };

      if (c->dispatch_width == 16) {
	 fail("rectangle scale uniform setup not supported on 16-wide\n");
	 this->result = fs_reg(this, ir->type);
	 return;
      }

      c->prog_data.param_convert[c->prog_data.nr_params] =
	 PARAM_NO_CONVERT;
      c->prog_data.param_convert[c->prog_data.nr_params + 1] =
	 PARAM_NO_CONVERT;

      scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
      scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);

      GLuint index = _mesa_add_state_reference(params,
					       (gl_state_index *)tokens);

      this->param_index[c->prog_data.nr_params] = index;
      this->param_offset[c->prog_data.nr_params] = 0;
      c->prog_data.nr_params++;
      this->param_index[c->prog_data.nr_params] = index;
      this->param_offset[c->prog_data.nr_params] = 1;
      c->prog_data.nr_params++;
   }

   /* On pre-gen6 hardware, apply the rectangle scaling factors set up
    * above to normalize the coordinates.
    */
   if (intel->gen < 6 &&
       ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
      fs_reg dst = fs_reg(this, ir->coordinate->type);
      fs_reg src = coordinate;
      coordinate = dst;

      emit(BRW_OPCODE_MUL, dst, src, scale_x);
      dst.reg_offset++;
      src.reg_offset++;
      emit(BRW_OPCODE_MUL, dst, src, scale_y);
   } else if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
      /* On gen6+, the sampler handles the rectangle coordinates
       * natively, without needing rescaling.  But that means we have
       * to do GL_CLAMP clamping at the [0, width], [0, height] scale,
       * not [0, 1] like the default case below.
       */
      needs_gl_clamp = false;

      for (int i = 0; i < 2; i++) {
	 if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) {
	    fs_reg chan = coordinate;
	    chan.reg_offset += i;

	    inst = emit(BRW_OPCODE_SEL, chan, chan, brw_imm_f(0.0));
	    inst->conditional_mod = BRW_CONDITIONAL_G;

	    /* Our parameter comes in as 1.0/width or 1.0/height,
	     * because that's what people normally want for doing
	     * texture rectangle handling.  We need width or height
	     * for clamping, but we don't care enough to make a new
	     * parameter type, so just invert back.
	     */
	    fs_reg limit = fs_reg(this, glsl_type::float_type);
	    emit(BRW_OPCODE_MOV, limit, i == 0 ? scale_x : scale_y);
	    emit(SHADER_OPCODE_RCP, limit, limit);

	    inst = emit(BRW_OPCODE_SEL, chan, chan, limit);
	    inst->conditional_mod = BRW_CONDITIONAL_L;
	 }
      }
   }

   if (ir->coordinate && needs_gl_clamp) {
      for (unsigned int i = 0;
	   i < MIN2(ir->coordinate->type->vector_elements, 3); i++) {
	 if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) {
	    fs_reg chan = coordinate;
	    chan.reg_offset += i;

	    fs_inst *inst = emit(BRW_OPCODE_MOV, chan, chan);
	    inst->saturate = true;
	 }
      }
   }

   /* Writemasking doesn't eliminate channels on SIMD8 texture
    * samples, so don't worry about them.
    */
   fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1));

   if (intel->gen >= 7) {
      inst = emit_texture_gen7(ir, dst, coordinate, sampler);
   } else if (intel->gen >= 5) {
      inst = emit_texture_gen5(ir, dst, coordinate, sampler);
   } else {
      inst = emit_texture_gen4(ir, dst, coordinate, sampler);
   }

   /* If there's an offset, we already set up m1.  To avoid the implied move,
    * use the null register.  Otherwise, we want an implied move from g0.
    */
   if (ir->offset != NULL || !inst->header_present)
      inst->src[0] = reg_undef;
   else
      inst->src[0] = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW));

   inst->sampler = sampler;

   if (ir->shadow_comparitor) {
      if (hw_compare_supported) {
	 inst->shadow_compare = true;
      } else {
	 ir->shadow_comparitor->accept(this);
	 fs_reg ref = this->result;

	 fs_reg value = dst;
	 dst = fs_reg(this, glsl_type::vec4_type);

	 /* FINISHME: This needs to be done pre-filtering. */

	 uint32_t conditional = 0;
	 switch (c->key.tex.compare_funcs[sampler]) {
	 /* GL_ALWAYS and GL_NEVER were handled at the top of the function */
	 case GL_LESS:     conditional = BRW_CONDITIONAL_L;   break;
	 case GL_GREATER:  conditional = BRW_CONDITIONAL_G;   break;
	 case GL_LEQUAL:   conditional = BRW_CONDITIONAL_LE;  break;
	 case GL_GEQUAL:   conditional = BRW_CONDITIONAL_GE;  break;
	 case GL_EQUAL:    conditional = BRW_CONDITIONAL_EQ;  break;
	 case GL_NOTEQUAL: conditional = BRW_CONDITIONAL_NEQ; break;
	 default: assert(!"Should not get here: bad shadow compare function");
	 }

	 /* Use conditional moves to load 0 or 1 as the result */
	 this->current_annotation = "manual shadow comparison";
	 for (int i = 0; i < 4; i++) {
	    inst = emit(BRW_OPCODE_MOV, dst, fs_reg(0.0f));

	    inst = emit(BRW_OPCODE_CMP, reg_null_f, ref, value);
	    inst->conditional_mod = conditional;

	    inst = emit(BRW_OPCODE_MOV, dst, fs_reg(1.0f));
	    inst->predicated = true;

	    dst.reg_offset++;
	    value.reg_offset++;
	 }
	 dst.reg_offset = 0;
      }
   }

   swizzle_result(ir, dst, sampler);
}

/**
 * Swizzle the result of a texture lookup.  This is necessary for
 * EXT_texture_swizzle as well as DEPTH_TEXTURE_MODE for shadow comparisons.
 */
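/* For example, a swizzle of (X, X, X, ONE) becomes three MOVs from the
 * first channel of the sampler result plus a MOV of the immediate 1.0f.
 */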
void
fs_visitor::swizzle_result(ir_texture *ir, fs_reg orig_val, int sampler)
{
   this->result = orig_val;

   if (ir->op == ir_txs)
      return;

   if (ir->type == glsl_type::float_type) {
      /* Ignore DEPTH_TEXTURE_MODE swizzling. */
      assert(ir->sampler->type->sampler_shadow);
   } else if (c->key.tex.swizzles[sampler] != SWIZZLE_NOOP) {
      fs_reg swizzled_result = fs_reg(this, glsl_type::vec4_type);

      for (int i = 0; i < 4; i++) {
	 int swiz = GET_SWZ(c->key.tex.swizzles[sampler], i);
	 fs_reg l = swizzled_result;
	 l.reg_offset += i;

	 if (swiz == SWIZZLE_ZERO) {
	    emit(BRW_OPCODE_MOV, l, fs_reg(0.0f));
	 } else if (swiz == SWIZZLE_ONE) {
	    emit(BRW_OPCODE_MOV, l, fs_reg(1.0f));
	 } else {
	    fs_reg r = orig_val;
	    r.reg_offset += GET_SWZ(c->key.tex.swizzles[sampler], i);
	    emit(BRW_OPCODE_MOV, l, r);
	 }
      }
      this->result = swizzled_result;
   }
}

void
fs_visitor::visit(ir_swizzle *ir)
{
   ir->val->accept(this);
   fs_reg val = this->result;

   if (ir->type->vector_elements == 1) {
      this->result.reg_offset += ir->mask.x;
      return;
   }

   fs_reg result = fs_reg(this, ir->type);
   this->result = result;

   for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
      fs_reg channel = val;
      int swiz = 0;

      switch (i) {
      case 0:
	 swiz = ir->mask.x;
	 break;
      case 1:
	 swiz = ir->mask.y;
	 break;
      case 2:
	 swiz = ir->mask.z;
	 break;
      case 3:
	 swiz = ir->mask.w;
	 break;
      }

      channel.reg_offset += swiz;
      emit(BRW_OPCODE_MOV, result, channel);
      result.reg_offset++;
   }
}

void
fs_visitor::visit(ir_discard *ir)
{
   assert(ir->condition == NULL); /* FINISHME */

   emit(FS_OPCODE_DISCARD);
   kill_emitted = true;
}

void
fs_visitor::visit(ir_constant *ir)
{
   /* Set this->result to reg at the bottom of the function because some code
    * paths will cause this visitor to be applied to other fields.  This will
    * cause the value stored in this->result to be modified.
    *
    * Make reg constant so that it doesn't get accidentally modified along the
    * way.  Yes, I actually had this problem. :(
    */
   const fs_reg reg(this, ir->type);
   fs_reg dst_reg = reg;

   if (ir->type->is_array()) {
      const unsigned size = type_size(ir->type->fields.array);

      for (unsigned i = 0; i < ir->type->length; i++) {
	 ir->array_elements[i]->accept(this);
	 fs_reg src_reg = this->result;

	 dst_reg.type = src_reg.type;
	 for (unsigned j = 0; j < size; j++) {
	    emit(BRW_OPCODE_MOV, dst_reg, src_reg);
	    src_reg.reg_offset++;
	    dst_reg.reg_offset++;
	 }
      }
   } else if (ir->type->is_record()) {
      foreach_list(node, &ir->components) {
	 ir_constant *const field = (ir_constant *) node;
	 const unsigned size = type_size(field->type);

	 field->accept(this);
	 fs_reg src_reg = this->result;

	 dst_reg.type = src_reg.type;
	 for (unsigned j = 0; j < size; j++) {
	    emit(BRW_OPCODE_MOV, dst_reg, src_reg);
	    src_reg.reg_offset++;
	    dst_reg.reg_offset++;
	 }
      }
   } else {
      const unsigned size = type_size(ir->type);

      for (unsigned i = 0; i < size; i++) {
	 switch (ir->type->base_type) {
	 case GLSL_TYPE_FLOAT:
	    emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i]));
	    break;
	 case GLSL_TYPE_UINT:
	    emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i]));
	    break;
	 case GLSL_TYPE_INT:
	    emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i]));
	    break;
	 case GLSL_TYPE_BOOL:
	    emit(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i]));
	    break;
	 default:
	    assert(!"Non-float/uint/int/bool constant");
	 }
	 dst_reg.reg_offset++;
      }
   }

   this->result = reg;
}

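/* Emit instructions that set the flag register according to a boolean
 * rvalue, so that a following predicated instruction executes only
 * where the condition holds.  Simple comparisons are folded directly
 * into the flag-setting instruction where possible.
 */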
1519void
1520fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
1521{
1522   ir_expression *expr = ir->as_expression();
1523
1524   if (expr) {
1525      fs_reg op[2];
1526      fs_inst *inst;
1527
1528      assert(expr->get_num_operands() <= 2);
1529      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
1530	 assert(expr->operands[i]->type->is_scalar());
1531
1532	 expr->operands[i]->accept(this);
1533	 op[i] = this->result;
1534
1535	 resolve_ud_negate(&op[i]);
1536      }
1537
1538      switch (expr->operation) {
1539      case ir_unop_logic_not:
1540	 inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1));
1541	 inst->conditional_mod = BRW_CONDITIONAL_Z;
1542	 break;
1543
1544      case ir_binop_logic_xor:
1545      case ir_binop_logic_or:
1546      case ir_binop_logic_and:
1547	 goto out;
1548
1549      case ir_unop_f2b:
1550	 if (intel->gen >= 6) {
1551	    inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0.0f));
1552	 } else {
1553	    inst = emit(BRW_OPCODE_MOV, reg_null_f, op[0]);
1554	 }
1555	 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1556	 break;
1557
1558      case ir_unop_i2b:
1559	 if (intel->gen >= 6) {
1560	    inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0));
1561	 } else {
1562	    inst = emit(BRW_OPCODE_MOV, reg_null_d, op[0]);
1563	 }
1564	 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1565	 break;
1566
1567      case ir_binop_greater:
1568      case ir_binop_gequal:
1569      case ir_binop_less:
1570      case ir_binop_lequal:
1571      case ir_binop_equal:
1572      case ir_binop_all_equal:
1573      case ir_binop_nequal:
1574      case ir_binop_any_nequal:
1575	 resolve_bool_comparison(expr->operands[0], &op[0]);
1576	 resolve_bool_comparison(expr->operands[1], &op[1]);
1577
1578	 inst = emit(BRW_OPCODE_CMP, reg_null_cmp, op[0], op[1]);
1579	 inst->conditional_mod =
1580	    brw_conditional_for_comparison(expr->operation);
1581	 break;
1582
1583      default:
1584	 assert(!"not reached");
1585	 fail("bad cond code\n");
1586	 break;
1587      }
1588      return;
1589   }
1590
1591out:
1592   ir->accept(this);
1593
1594   fs_inst *inst = emit(BRW_OPCODE_AND, reg_null_d, this->result, fs_reg(1));
1595   inst->conditional_mod = BRW_CONDITIONAL_NZ;
1596}
1597
1598/**
1599 * Emit a gen6 IF statement with the comparison folded into the IF
1600 * instruction.
1601 */
1602void
1603fs_visitor::emit_if_gen6(ir_if *ir)
1604{
1605   ir_expression *expr = ir->condition->as_expression();
1606
1607   if (expr) {
1608      fs_reg op[2];
1609      fs_inst *inst;
1610      fs_reg temp;
1611
1612      assert(expr->get_num_operands() <= 2);
1613      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
1614	 assert(expr->operands[i]->type->is_scalar());
1615
1616	 expr->operands[i]->accept(this);
1617	 op[i] = this->result;
1618      }
1619
1620      switch (expr->operation) {
1621      case ir_unop_logic_not:
1622	 inst = emit(BRW_OPCODE_IF, temp, op[0], fs_reg(0));
1623	 inst->conditional_mod = BRW_CONDITIONAL_Z;
1624	 return;
1625
1626      case ir_binop_logic_xor:
1627	 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
1628	 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1629	 return;
1630
1631      case ir_binop_logic_or:
1632	 temp = fs_reg(this, glsl_type::bool_type);
1633	 emit(BRW_OPCODE_OR, temp, op[0], op[1]);
1634	 inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
1635	 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1636	 return;
1637
1638      case ir_binop_logic_and:
1639	 temp = fs_reg(this, glsl_type::bool_type);
1640	 emit(BRW_OPCODE_AND, temp, op[0], op[1]);
1641	 inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
1642	 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1643	 return;
1644
1645      case ir_unop_f2b:
1646	 inst = emit(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0));
1647	 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1648	 return;
1649
1650      case ir_unop_i2b:
1651	 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
1652	 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1653	 return;
1654
1655      case ir_binop_greater:
1656      case ir_binop_gequal:
1657      case ir_binop_less:
1658      case ir_binop_lequal:
1659      case ir_binop_equal:
1660      case ir_binop_all_equal:
1661      case ir_binop_nequal:
1662      case ir_binop_any_nequal:
1663	 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
1664	 inst->conditional_mod =
1665	    brw_conditional_for_comparison(expr->operation);
1666	 return;
1667      default:
1668	 assert(!"not reached");
1669	 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
1670	 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1671	 fail("bad condition\n");
1672	 return;
1673      }
1674      return;
1675   }
1676
1677   ir->condition->accept(this);
1678
1679   fs_inst *inst = emit(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0));
1680   inst->conditional_mod = BRW_CONDITIONAL_NZ;
1681}
1682
1683void
1684fs_visitor::visit(ir_if *ir)
1685{
1686   fs_inst *inst;
1687
1688   if (intel->gen < 6 && c->dispatch_width == 16) {
1689      fail("Can't support (non-uniform) control flow on 16-wide\n");
1690   }
1691
1692   /* Don't point the annotation at the if statement, because then it plus
1693    * the then and else blocks get printed.
1694    */
1695   this->base_ir = ir->condition;
1696
1697   if (intel->gen == 6) {
1698      emit_if_gen6(ir);
1699   } else {
1700      emit_bool_to_cond_code(ir->condition);
1701
1702      inst = emit(BRW_OPCODE_IF);
1703      inst->predicated = true;
1704   }
1705
1706   foreach_list(node, &ir->then_instructions) {
1707      ir_instruction *ir = (ir_instruction *)node;
1708      this->base_ir = ir;
1709
1710      ir->accept(this);
1711   }
1712
1713   if (!ir->else_instructions.is_empty()) {
1714      emit(BRW_OPCODE_ELSE);
1715
1716      foreach_list(node, &ir->else_instructions) {
1717	 ir_instruction *ir = (ir_instruction *)node;
1718	 this->base_ir = ir;
1719
1720	 ir->accept(this);
1721      }
1722   }
1723
1724   emit(BRW_OPCODE_ENDIF);
1725}
1726
1727void
1728fs_visitor::visit(ir_loop *ir)
1729{
1730   fs_reg counter = reg_undef;
1731
1732   if (intel->gen < 6 && c->dispatch_width == 16) {
1733      fail("Can't support (non-uniform) control flow on 16-wide\n");
1734   }
1735
1736   if (ir->counter) {
1737      this->base_ir = ir->counter;
1738      ir->counter->accept(this);
1739      counter = *(variable_storage(ir->counter));
1740
1741      if (ir->from) {
1742	 this->base_ir = ir->from;
1743	 ir->from->accept(this);
1744
1745	 emit(BRW_OPCODE_MOV, counter, this->result);
1746      }
1747   }
1748
1749   this->base_ir = NULL;
1750   emit(BRW_OPCODE_DO);
1751
1752   if (ir->to) {
1753      this->base_ir = ir->to;
1754      ir->to->accept(this);
1755
1756      fs_inst *inst = emit(BRW_OPCODE_CMP, reg_null_cmp, counter, this->result);
1757      inst->conditional_mod = brw_conditional_for_comparison(ir->cmp);
1758
1759      inst = emit(BRW_OPCODE_BREAK);
1760      inst->predicated = true;
1761   }
1762
1763   foreach_list(node, &ir->body_instructions) {
1764      ir_instruction *ir = (ir_instruction *)node;
1765
1766      this->base_ir = ir;
1767      ir->accept(this);
1768   }
1769
1770   if (ir->increment) {
1771      this->base_ir = ir->increment;
1772      ir->increment->accept(this);
1773      emit(BRW_OPCODE_ADD, counter, counter, this->result);
1774   }
1775
1776   this->base_ir = NULL;
1777   emit(BRW_OPCODE_WHILE);
1778}
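/* The net effect for a counted loop "for (i = from; i cmp to; i += inc)"
 * is, roughly:
 *
 *    MOV       counter, <from>
 *    DO
 *       CMP.cmp null, counter, <to>
 *       (+f0) BREAK
 *       <body>
 *       ADD     counter, counter, <inc>
 *    WHILE
 */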
1779
1780void
1781fs_visitor::visit(ir_loop_jump *ir)
1782{
1783   switch (ir->mode) {
1784   case ir_loop_jump::jump_break:
1785      emit(BRW_OPCODE_BREAK);
1786      break;
1787   case ir_loop_jump::jump_continue:
1788      emit(BRW_OPCODE_CONTINUE);
1789      break;
1790   }
1791}
1792
1793void
1794fs_visitor::visit(ir_call *ir)
1795{
1796   assert(!"FINISHME");
1797}
1798
1799void
1800fs_visitor::visit(ir_return *ir)
1801{
1802   assert(!"FINISHME");
1803}
1804
1805void
1806fs_visitor::visit(ir_function *ir)
1807{
1808   /* Ignore function bodies other than main() -- we shouldn't see calls to
1809    * them, since they should all have been inlined by the time the IR
1810    * reaches this backend.
1811    */
1811   if (strcmp(ir->name, "main") == 0) {
1812      const ir_function_signature *sig;
1813      exec_list empty;
1814
1815      sig = ir->matching_signature(&empty);
1816
1817      assert(sig);
1818
1819      foreach_list(node, &sig->body) {
1820	 ir_instruction *ir = (ir_instruction *)node;
1821	 this->base_ir = ir;
1822
1823	 ir->accept(this);
1824      }
1825   }
1826}
1827
1828void
1829fs_visitor::visit(ir_function_signature *ir)
1830{
1831   assert(!"not reached");
1832   (void)ir;
1833}
1834
1835fs_inst *
1836fs_visitor::emit(fs_inst inst)
1837{
1838   fs_inst *list_inst = new(mem_ctx) fs_inst;
1839   *list_inst = inst;
1840
1841   if (force_uncompressed_stack > 0)
1842      list_inst->force_uncompressed = true;
1843   else if (force_sechalf_stack > 0)
1844      list_inst->force_sechalf = true;
1845
1846   list_inst->annotation = this->current_annotation;
1847   list_inst->ir = this->base_ir;
1848
1849   this->instructions.push_tail(list_inst);
1850
1851   return list_inst;
1852}
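/* Callers mostly go through the convenience overloads, e.g.
 * emit(BRW_OPCODE_MOV, dst, src).  Note that anything emitted inside a
 * push_force_uncompressed()/pop_force_uncompressed() pair is restricted
 * to the first 8 channels; the pre-gen6 SIMD16 path in
 * emit_color_write() below relies on this.
 */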
1853
1854/** Emits a dummy fragment shader that just outputs magenta, for bringup purposes. */
1855void
1856fs_visitor::emit_dummy_fs()
1857{
1858   int reg_width = c->dispatch_width / 8;
1859
1860   /* Everyone's favorite color. */
1861   emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 0 * reg_width), fs_reg(1.0f));
1862   emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 1 * reg_width), fs_reg(0.0f));
1863   emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 2 * reg_width), fs_reg(1.0f));
1864   emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 3 * reg_width), fs_reg(0.0f));
1865
1866   fs_inst *write;
1867   write = emit(FS_OPCODE_FB_WRITE, fs_reg(0), fs_reg(0));
1868   write->base_mrf = 2;
1869   write->mlen = 4 * reg_width;
1870   write->eot = true;
1871}
1872
1873/* The register location here is relative to the start of the URB
1874 * data.  It will get adjusted to be a real location before
1875 * generate_code() time.
1876 */
1877struct brw_reg
1878fs_visitor::interp_reg(int location, int channel)
1879{
1880   int regnr = urb_setup[location] * 2 + channel / 2;
1881   int stride = (channel & 1) * 4;
1882
1883   assert(urb_setup[location] != -1);
1884
1885   return brw_vec1_grf(regnr, stride);
1886}
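/* Worked example (illustrative numbers only): with
 * urb_setup[location] == 3, channel 0 maps to g6 at suboffset 0 and
 * channel 3 to g7 at suboffset 4 -- each attribute's setup data spans
 * two GRFs, two channels per GRF.
 */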
1887
1888/** Emits the interpolation for the varying inputs. */
1889void
1890fs_visitor::emit_interpolation_setup_gen4()
1891{
1892   this->current_annotation = "compute pixel centers";
1893   this->pixel_x = fs_reg(this, glsl_type::uint_type);
1894   this->pixel_y = fs_reg(this, glsl_type::uint_type);
1895   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
1896   this->pixel_y.type = BRW_REGISTER_TYPE_UW;
1897
1898   emit(FS_OPCODE_PIXEL_X, this->pixel_x);
1899   emit(FS_OPCODE_PIXEL_Y, this->pixel_y);
1900
1901   this->current_annotation = "compute pixel deltas from v0";
1902   if (brw->has_pln) {
1903      this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
1904         fs_reg(this, glsl_type::vec2_type);
1905      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
1906         this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC];
1907      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg_offset++;
1908   } else {
1909      this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
1910         fs_reg(this, glsl_type::float_type);
1911      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
1912         fs_reg(this, glsl_type::float_type);
1913   }
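   /* With PLN the two deltas have to live in consecutive registers so
    * the plane instruction can source both at once, which is why the
    * vec2 allocation above places delta_y at delta_x's reg_offset + 1.
    */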
1914   emit(BRW_OPCODE_ADD, this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
1915	this->pixel_x, fs_reg(negate(brw_vec1_grf(1, 0))));
1916   emit(BRW_OPCODE_ADD, this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
1917	this->pixel_y, fs_reg(negate(brw_vec1_grf(1, 1))));
1918
1919   this->current_annotation = "compute pos.w and 1/pos.w";
1920   /* Compute wpos.w.  It's always in our setup, since it's needed to
1921    * interpolate the other attributes.
1922    */
1923   this->wpos_w = fs_reg(this, glsl_type::float_type);
1924   emit(FS_OPCODE_LINTERP, wpos_w,
1925        this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
1926        this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
1927	interp_reg(FRAG_ATTRIB_WPOS, 3));
1928   /* Compute the pixel 1/W value from wpos.w. */
1929   this->pixel_w = fs_reg(this, glsl_type::float_type);
1930   emit_math(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
1931   this->current_annotation = NULL;
1932}
1933
1934/** Emits the interpolation for the varying inputs. */
1935void
1936fs_visitor::emit_interpolation_setup_gen6()
1937{
1938   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);
1939
1940   /* If the pixel centers end up getting used, the setup is the same as for gen4. */
1941   this->current_annotation = "compute pixel centers";
1942   fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type);
1943   fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type);
1944   int_pixel_x.type = BRW_REGISTER_TYPE_UW;
1945   int_pixel_y.type = BRW_REGISTER_TYPE_UW;
1946   emit(BRW_OPCODE_ADD,
1947	int_pixel_x,
1948	fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
1949	fs_reg(brw_imm_v(0x10101010)));
1950   emit(BRW_OPCODE_ADD,
1951	int_pixel_y,
1952	fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
1953	fs_reg(brw_imm_v(0x11001100)));
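   /* brw_imm_v packs eight signed 4-bit values, lowest nibble first, so
    * 0x10101010 decodes to {0,1,0,1,0,1,0,1} and 0x11001100 to
    * {0,0,1,1,0,0,1,1}: the X and Y offsets of the four pixels within
    * each 2x2 subspan, added to the subspan origins delivered in g1.
    */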
1954
1955   /* As of gen6, we can no longer mix float and int sources.  We have
1956    * to turn the integer pixel centers into floats for their actual
1957    * use.
1958    */
1959   this->pixel_x = fs_reg(this, glsl_type::float_type);
1960   this->pixel_y = fs_reg(this, glsl_type::float_type);
1961   emit(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x);
1962   emit(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y);
1963
1964   this->current_annotation = "compute pos.w";
1965   this->pixel_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0));
1966   this->wpos_w = fs_reg(this, glsl_type::float_type);
1967   emit_math(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w);
1968
1969   for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
1970      uint8_t reg = c->barycentric_coord_reg[i];
1971      this->delta_x[i] = fs_reg(brw_vec8_grf(reg, 0));
1972      this->delta_y[i] = fs_reg(brw_vec8_grf(reg + 1, 0));
1973   }
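   /* Unlike the gen4 path, which computed its deltas with ADDs above,
    * gen6 delivers the deltas for each barycentric mode pre-computed in
    * the thread payload; c->barycentric_coord_reg[] records the first
    * GRF of each pair.
    */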
1974
1975   this->current_annotation = NULL;
1976}
1977
1978void
1979fs_visitor::emit_color_write(int target, int index, int first_color_mrf)
1980{
1981   int reg_width = c->dispatch_width / 8;
1982   fs_inst *inst;
1983   fs_reg color = outputs[target];
1984   fs_reg mrf;
1985
1986   /* If there's no color data to be written, skip it. */
1987   if (color.file == BAD_FILE)
1988      return;
1989
1990   color.reg_offset += index;
1991
1992   if (c->dispatch_width == 8 || intel->gen >= 6) {
1993      /* SIMD8 write looks like:
1994       * m + 0: r0
1995       * m + 1: r1
1996       * m + 2: g0
1997       * m + 3: g1
1998       *
1999       * gen6 SIMD16 DP write looks like:
2000       * m + 0: r0
2001       * m + 1: r1
2002       * m + 2: g0
2003       * m + 3: g1
2004       * m + 4: b0
2005       * m + 5: b1
2006       * m + 6: a0
2007       * m + 7: a1
2008       */
2009      inst = emit(BRW_OPCODE_MOV,
2010		  fs_reg(MRF, first_color_mrf + index * reg_width, color.type),
2011		  color);
2012      inst->saturate = c->key.clamp_fragment_color;
2013   } else {
2014      /* pre-gen6 SIMD16 single source DP write looks like:
2015       * m + 0: r0
2016       * m + 1: g0
2017       * m + 2: b0
2018       * m + 3: a0
2019       * m + 4: r1
2020       * m + 5: g1
2021       * m + 6: b1
2022       * m + 7: a1
2023       */
2024      if (brw->has_compr4) {
2025	 /* By setting the high bit of the MRF register number, we
2026	  * indicate that we want COMPR4 mode - instead of doing the
2027	  * usual destination + 1 for the second half we get
2028	  * destination + 4.
2029	  */
2030	 inst = emit(BRW_OPCODE_MOV,
2031		     fs_reg(MRF, BRW_MRF_COMPR4 + first_color_mrf + index,
2032			    color.type),
2033		     color);
2034	 inst->saturate = c->key.clamp_fragment_color;
2035      } else {
2036	 push_force_uncompressed();
2037	 inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index,
2038					    color.type),
2039		     color);
2040	 inst->saturate = c->key.clamp_fragment_color;
2041	 pop_force_uncompressed();
2042
2043	 push_force_sechalf();
2044	 color.sechalf = true;
2045	 inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index + 4,
2046					    color.type),
2047		     color);
2048	 inst->saturate = c->key.clamp_fragment_color;
2049	 pop_force_sechalf();
2050	 color.sechalf = false;
2051      }
2052   }
2053}
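/* Example with made-up numbers: writing component index 1 (green) with
 * first_color_mrf == 3 lands in m5..m6 for a gen6 SIMD16 write
 * (index * reg_width == 2).  On pre-gen6 hardware with COMPR4, the same
 * component goes to m4 for the first half and -- since the COMPR4 bit
 * turns the usual +1 for the second half into +4 -- to m8 for the
 * second.
 */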
2054
2055void
2056fs_visitor::emit_fb_writes()
2057{
2058   this->current_annotation = "FB write header";
2059   bool header_present = true;
2060   /* We can potentially have a message length of up to 15, so we have to set
2061    * base_mrf to either 0 or 1 in order to fit in m0..m15.
2062    */
2063   int base_mrf = 1;
2064   int nr = base_mrf;
2065   int reg_width = c->dispatch_width / 8;
2066   bool do_dual_src = this->dual_src_output.file != BAD_FILE;
2067
2068   if (c->dispatch_width == 16 && do_dual_src) {
2069      fail("GL_ARB_blend_func_extended not yet supported in 16-wide.\n");
2070      do_dual_src = false;
2071   }
2072
2073   /* From the Sandy Bridge PRM, volume 4, page 198:
2074    *
2075    *     "Dispatched Pixel Enables. One bit per pixel indicating
2076    *      which pixels were originally enabled when the thread was
2077    *      dispatched. This field is only required for the end-of-
2078    *      thread message and on all dual-source messages."
2079    */
2080   if (intel->gen >= 6 &&
2081       !this->kill_emitted &&
2082       !do_dual_src &&
2083       c->key.nr_color_regions == 1) {
2084      header_present = false;
2085   }
2086
2087   if (header_present) {
2088      /* m1, m2 header */
2089      nr += 2;
2090   }
2091
2092   if (c->aa_dest_stencil_reg) {
2093      push_force_uncompressed();
2094      emit(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
2095	   fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0)));
2096      pop_force_uncompressed();
2097   }
2098
2099   /* Reserve space for color. It'll be filled in per MRT below. */
2100   int color_mrf = nr;
2101   nr += 4 * reg_width;
2102   if (do_dual_src)
2103      nr += 4;
2104
2105   if (c->source_depth_to_render_target) {
2106      if (intel->gen == 6 && c->dispatch_width == 16) {
2107	 /* For outputting oDepth on gen6, SIMD8 writes have to be
2108	  * used.  This would require 8-wide moves of each half to
2109	  * message regs, kind of like pre-gen5 SIMD16 FB writes.
2110	  * Just bail on doing so for now.
2111	  */
2112	 fail("Missing support for simd16 depth writes on gen6\n");
2113      }
2114
2115      if (c->computes_depth) {
2116	 /* Hand over gl_FragDepth. */
2117	 assert(this->frag_depth);
2118	 fs_reg depth = *(variable_storage(this->frag_depth));
2119
2120	 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), depth);
2121      } else {
2122	 /* Pass through the payload depth. */
2123	 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
2124	      fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
2125      }
2126      nr += reg_width;
2127   }
2128
2129   if (c->dest_depth_reg) {
2130      emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
2131	   fs_reg(brw_vec8_grf(c->dest_depth_reg, 0)));
2132      nr += reg_width;
2133   }
2134
2135   if (do_dual_src) {
2136      fs_reg src0 = this->outputs[0];
2137      fs_reg src1 = this->dual_src_output;
2138
2139      this->current_annotation = ralloc_asprintf(this->mem_ctx,
2140						 "FB write src0");
2141      for (int i = 0; i < 4; i++) {
2142	 fs_inst *inst = emit(BRW_OPCODE_MOV,
2143			      fs_reg(MRF, color_mrf + i, src0.type),
2144			      src0);
2145	 src0.reg_offset++;
2146	 inst->saturate = c->key.clamp_fragment_color;
2147      }
2148
2149      this->current_annotation = ralloc_asprintf(this->mem_ctx,
2150						 "FB write src1");
2151      for (int i = 0; i < 4; i++) {
2152	 fs_inst *inst = emit(BRW_OPCODE_MOV,
2153			      fs_reg(MRF, color_mrf + 4 + i, src1.type),
2154			      src1);
2155	 src1.reg_offset++;
2156	 inst->saturate = c->key.clamp_fragment_color;
2157      }
2158
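      /* At this point the payload carries both colors back to back:
       * src0 in m[color_mrf]..m[color_mrf+3] and src1 in the four MRFs
       * after it.  The header stays present, as the PRM quote above
       * requires for dual-source messages.
       */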
2159      fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
2160      inst->target = 0;
2161      inst->base_mrf = base_mrf;
2162      inst->mlen = nr - base_mrf;
2163      inst->eot = true;
2164      inst->header_present = header_present;
2165
2166      c->prog_data.dual_src_blend = true;
2167      this->current_annotation = NULL;
2168      return;
2169   }
2170
2171   for (int target = 0; target < c->key.nr_color_regions; target++) {
2172      this->current_annotation = ralloc_asprintf(this->mem_ctx,
2173						 "FB write target %d",
2174						 target);
2175      for (unsigned i = 0; i < this->output_components[target]; i++)
2176	 emit_color_write(target, i, color_mrf);
2177
2178      fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
2179      inst->target = target;
2180      inst->base_mrf = base_mrf;
2181      inst->mlen = nr - base_mrf;
2182      if (target == c->key.nr_color_regions - 1)
2183	 inst->eot = true;
2184      inst->header_present = header_present;
2185   }
2186
2187   if (c->key.nr_color_regions == 0) {
2188      if (c->key.alpha_test) {
2189	 /* If the alpha test is enabled but there's no color buffer,
2190	  * we still need to send alpha out the pipeline to our null
2191	  * renderbuffer.
2192	  */
2193	 emit_color_write(0, 3, color_mrf);
2194      }
2195
2196      fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
2197      inst->base_mrf = base_mrf;
2198      inst->mlen = nr - base_mrf;
2199      inst->eot = true;
2200      inst->header_present = header_present;
2201   }
2202
2203   this->current_annotation = NULL;
2204}
2205
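/* A negate source modifier on a UD operand can't be consumed directly
 * everywhere, so resolve it up front: the MOV applies the negation,
 * leaving a plain, already-negated value in a temporary for later uses.
 */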
2206void
2207fs_visitor::resolve_ud_negate(fs_reg *reg)
2208{
2209   if (reg->type != BRW_REGISTER_TYPE_UD ||
2210       !reg->negate)
2211      return;
2212
2213   fs_reg temp = fs_reg(this, glsl_type::uint_type);
2214   emit(BRW_OPCODE_MOV, temp, *reg);
2215   *reg = temp;
2216}
2217
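/* Comparisons on this hardware write 0 or ~0 to their destination, so a
 * bool register may have all of its bits set rather than just bit 0.
 * Mask it down to 0 or 1 with an AND before it is itself compared.
 */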
2218void
2219fs_visitor::resolve_bool_comparison(ir_rvalue *rvalue, fs_reg *reg)
2220{
2221   if (rvalue->type != glsl_type::bool_type)
2222      return;
2223
2224   fs_reg temp = fs_reg(this, glsl_type::bool_type);
2225   emit(BRW_OPCODE_AND, temp, *reg, fs_reg(1));
2226   *reg = temp;
2227}
2228