brw_fs_visitor.cpp revision 32ae8d3b321185a85b73ff703d8fc26bd5f48fa7
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR.  The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
extern "C" {

#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_wm.h"
}
#include "brw_shader.h"
#include "brw_fs.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
#include "glsl/ir_print_visitor.h"

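/* Allocate (or look up) backend storage for a GLSL IR variable and record it
 * in variable_ht.  Inputs get their values from the interpolation emitters,
 * outputs are mapped onto the color/depth output slots, and uniforms are
 * assigned consecutive locations in the UNIFORM file (set up only during the
 * 8-wide compile and reused by the 16-wide one).
 */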
void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   if (ir->mode == ir_var_in) {
      if (!strcmp(ir->name, "gl_FragCoord")) {
         reg = emit_fragcoord_interpolation(ir);
      } else if (!strcmp(ir->name, "gl_FrontFacing")) {
         reg = emit_frontfacing_interpolation(ir);
      } else {
         reg = emit_general_interpolation(ir);
      }
      assert(reg);
      hash_table_insert(this->variable_ht, reg, ir);
      return;
   } else if (ir->mode == ir_var_out) {
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

      if (ir->location == FRAG_RESULT_COLOR) {
         /* Writing gl_FragColor outputs to all color regions. */
         for (int i = 0; i < MAX2(c->key.nr_color_regions, 1); i++) {
            this->outputs[i] = *reg;
         }
      } else if (ir->location == FRAG_RESULT_DEPTH) {
         this->frag_depth = ir;
      } else {
         /* gl_FragData or a user-defined FS output */
         assert(ir->location >= FRAG_RESULT_DATA0 &&
                ir->location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

         /* General color output. */
         for (unsigned int i = 0; i < MAX2(1, ir->type->length); i++) {
            int output = ir->location - FRAG_RESULT_DATA0 + i;
            this->outputs[output] = *reg;
            this->outputs[output].reg_offset += 4 * i;
         }
      }
   } else if (ir->mode == ir_var_uniform) {
      int param_index = c->prog_data.nr_params;

      if (c->dispatch_width == 16) {
         if (!variable_storage(ir)) {
            fail("Failed to find uniform '%s' in 16-wide\n", ir->name);
         }
         return;
      }

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
      reg->type = brw_type_for_base_type(ir->type);
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

   hash_table_insert(this->variable_ht, reg, ir);
}

void
fs_visitor::visit(ir_dereference_variable *ir)
{
   fs_reg *reg = variable_storage(ir->var);
   this->result = *reg;
}

void
fs_visitor::visit(ir_dereference_record *ir)
{
   const glsl_type *struct_type = ir->record->type;

   ir->record->accept(this);

   unsigned int offset = 0;
   for (unsigned int i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }
   this->result.reg_offset += offset;
   this->result.type = brw_type_for_base_type(ir->type);
}

void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   int element_size;

   ir->array->accept(this);
   index = ir->array_index->as_constant();

   element_size = type_size(ir->type);
   this->result.type = brw_type_for_base_type(ir->type);

   if (index) {
      assert(this->result.file == UNIFORM || this->result.file == GRF);
      this->result.reg_offset += index->value.i[0] * element_size;
   } else {
      assert(!"FINISHME: non-constant array element");
   }
}

/* Instruction selection: Produce a MOV.sat instead of
 * MIN(MAX(val, 0), 1) when possible.
 */
bool
fs_visitor::try_emit_saturate(ir_expression *ir)
{
   ir_rvalue *sat_val = ir->as_rvalue_to_saturate();

   if (!sat_val)
      return false;

   fs_inst *pre_inst = (fs_inst *) this->instructions.get_tail();

   sat_val->accept(this);
   fs_reg src = this->result;

   fs_inst *last_inst = (fs_inst *) this->instructions.get_tail();

   /* If the last instruction from our accept() didn't generate our
    * src, generate a saturated MOV
    */
   fs_inst *modify = get_instruction_generating_reg(pre_inst, last_inst, src);
   if (!modify || modify->regs_written() != 1) {
      fs_inst *inst = emit(BRW_OPCODE_MOV, this->result, src);
      inst->saturate = true;
   } else {
      modify->saturate = true;
      this->result = src;
   }

   return true;
}

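/* Instruction selection: fuse a multiply-add into a single MAD.
 *
 * For example, provided no operand is an immediate, the IR tree
 *
 *    (+ (* a b) c)
 *
 * becomes a single
 *
 *    MAD result, c, a, b
 *
 * instead of a MUL followed by an ADD.
 */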
bool
fs_visitor::try_emit_mad(ir_expression *ir, int mul_arg)
{
   /* 3-src instructions were introduced in gen6. */
   if (intel->gen < 6)
      return false;

   /* MAD can only handle floating-point data. */
   if (ir->type != glsl_type::float_type)
      return false;

   ir_rvalue *nonmul = ir->operands[1 - mul_arg];
   ir_expression *mul = ir->operands[mul_arg]->as_expression();

   if (!mul || mul->operation != ir_binop_mul)
      return false;

   if (nonmul->as_constant() ||
       mul->operands[0]->as_constant() ||
       mul->operands[1]->as_constant())
      return false;

   nonmul->accept(this);
   fs_reg src0 = this->result;

   mul->operands[0]->accept(this);
   fs_reg src1 = this->result;

   mul->operands[1]->accept(this);
   fs_reg src2 = this->result;

   this->result = fs_reg(this, ir->type);
   emit(BRW_OPCODE_MAD, this->result, src0, src1, src2);

   return true;
}

void
fs_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   fs_reg op[2], temp;
   fs_inst *inst;

   assert(ir->get_num_operands() <= 2);

   if (try_emit_saturate(ir))
      return;
   if (ir->operation == ir_binop_add) {
      if (try_emit_mad(ir, 0) || try_emit_mad(ir, 1))
         return;
   }

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         ir_print_visitor v;
         fail("Failed to get tree for expression operand:\n");
         ir->operands[operand]->accept(&v);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
      /* And then those vector operands should have been broken down to scalar.
       */
      assert(!ir->operands[operand]->type->is_vector());
   }

   /* Storage for our result.  If our result goes into an assignment, it will
    * just get copy-propagated out, so no worries.
    */
   this->result = fs_reg(this, ir->type);

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it produces
       * the one's complement of the whole register, not just bit 0.
       */
      emit(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      this->result = op[0];
      break;
   case ir_unop_sign:
      temp = fs_reg(this, ir->type);

      emit(BRW_OPCODE_MOV, this->result, fs_reg(0.0f));

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(1.0f));
      inst->predicated = true;

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f));
      inst->predicated = true;

      break;
   case ir_unop_rcp:
      emit_math(SHADER_OPCODE_RCP, this->result, op[0]);
      break;

   case ir_unop_exp2:
      emit_math(SHADER_OPCODE_EXP2, this->result, op[0]);
      break;
   case ir_unop_log2:
      emit_math(SHADER_OPCODE_LOG2, this->result, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(SHADER_OPCODE_SIN, this->result, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(SHADER_OPCODE_COS, this->result, op[0]);
      break;

   case ir_unop_dFdx:
      emit(FS_OPCODE_DDX, this->result, op[0]);
      break;
   case ir_unop_dFdy:
      emit(FS_OPCODE_DDY, this->result, op[0]);
      break;

   case ir_binop_add:
      emit(BRW_OPCODE_ADD, this->result, op[0], op[1]);
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;

   case ir_binop_mul:
      if (ir->type->is_integer()) {
         /* For integer multiplication, the MUL uses the low 16 bits of one
          * of the operands (src0 on gen6, src1 on gen7), and MACH then
          * accumulates the contribution of that operand's upper 16 bits.
          *
          * FINISHME: Emit just the MUL if we know an operand is small
          * enough.
          */
         if (intel->gen >= 7 && c->dispatch_width == 16)
            fail("16-wide explicit accumulator operands unsupported\n");

         struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);

         emit(BRW_OPCODE_MUL, acc, op[0], op[1]);
         emit(BRW_OPCODE_MACH, reg_null_d, op[0], op[1]);
         emit(BRW_OPCODE_MOV, this->result, fs_reg(acc));
      } else {
         emit(BRW_OPCODE_MUL, this->result, op[0], op[1]);
      }
      break;
   case ir_binop_div:
      if (intel->gen >= 7 && c->dispatch_width == 16)
         fail("16-wide INTDIV unsupported\n");

      /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_QUOTIENT, this->result, op[0], op[1]);
      break;
   case ir_binop_mod:
      if (intel->gen >= 7 && c->dispatch_width == 16)
         fail("16-wide INTDIV unsupported\n");

      /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_REMAINDER, this->result, op[0], op[1]);
      break;

   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_all_equal:
   case ir_binop_nequal:
   case ir_binop_any_nequal:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
         temp.type = op[0].type;

      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      inst = emit(BRW_OPCODE_CMP, temp, op[0], op[1]);
      inst->conditional_mod = brw_conditional_for_comparison(ir->operation);
      emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1));
      break;

   case ir_binop_logic_xor:
      emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
      break;

   case ir_binop_logic_or:
      emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
      break;

   case ir_binop_logic_and:
      emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
      break;

   case ir_binop_dot:
   case ir_unop_any:
      assert(!"not reached: should be handled by brw_fs_channel_expressions");
      break;

   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;

   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;

   case ir_unop_sqrt:
      emit_math(SHADER_OPCODE_SQRT, this->result, op[0]);
      break;

   case ir_unop_rsq:
      emit_math(SHADER_OPCODE_RSQ, this->result, op[0]);
      break;

   case ir_unop_i2u:
      op[0].type = BRW_REGISTER_TYPE_UD;
      this->result = op[0];
      break;
   case ir_unop_u2i:
      op[0].type = BRW_REGISTER_TYPE_D;
      this->result = op[0];
      break;
   case ir_unop_i2f:
   case ir_unop_u2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
   case ir_unop_f2i:
      emit(BRW_OPCODE_MOV, this->result, op[0]);
      break;
   case ir_unop_f2b:
   case ir_unop_i2b:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
         temp.type = op[0].type;

      resolve_ud_negate(&op[0]);

      inst = emit(BRW_OPCODE_CMP, temp, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      inst = emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(1));
      break;

   case ir_unop_trunc:
      emit(BRW_OPCODE_RNDZ, this->result, op[0]);
      break;
   case ir_unop_ceil:
      op[0].negate = !op[0].negate;
      inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
      break;
   case ir_unop_fract:
      inst = emit(BRW_OPCODE_FRC, this->result, op[0]);
      break;
   case ir_unop_round_even:
      emit(BRW_OPCODE_RNDE, this->result, op[0]);
      break;

   case ir_binop_min:
      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         /* Unalias the destination */
         this->result = fs_reg(this, ir->type);

         inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;

         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->predicated = true;
      }
      break;
   case ir_binop_max:
      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_GE;
      } else {
         /* Unalias the destination */
         this->result = fs_reg(this, ir->type);

         inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_G;

         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->predicated = true;
      }
      break;

   case ir_binop_pow:
      emit_math(SHADER_OPCODE_POW, this->result, op[0], op[1]);
      break;

   case ir_unop_bit_not:
      inst = emit(BRW_OPCODE_NOT, this->result, op[0]);
      break;
   case ir_binop_bit_and:
      inst = emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
      break;
   case ir_binop_bit_xor:
      inst = emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
      break;
   case ir_binop_bit_or:
      inst = emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
      break;

   case ir_binop_lshift:
      inst = emit(BRW_OPCODE_SHL, this->result, op[0], op[1]);
      break;

   case ir_binop_rshift:
      if (ir->type->base_type == GLSL_TYPE_INT)
         inst = emit(BRW_OPCODE_ASR, this->result, op[0], op[1]);
      else
         inst = emit(BRW_OPCODE_SHR, this->result, op[0], op[1]);
      break;
   }
}

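/* Emit the per-component MOVs for an assignment of a composite (or basic)
 * type, recursing through arrays and structs down to their scalar/vector
 * components.  When predicated, every component is written; otherwise
 * self-assignments are skipped.
 */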
void
fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
                                   const glsl_type *type, bool predicated)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->components(); i++) {
         l.type = brw_type_for_base_type(type);
         r.type = brw_type_for_base_type(type);

         if (predicated || !l.equals(&r)) {
            fs_inst *inst = emit(BRW_OPCODE_MOV, l, r);
            inst->predicated = predicated;
         }

         l.reg_offset++;
         r.reg_offset++;
      }
      break;
   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.array, predicated);
      }
      break;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.structure[i].type,
                                predicated);
      }
      break;

   case GLSL_TYPE_SAMPLER:
      break;

   default:
      assert(!"not reached");
      break;
   }
}

/* If the RHS processing resulted in an instruction generating a
 * temporary value, and it would be easy to rewrite the instruction to
 * generate its result right into the LHS instead, do so.  This ends
 * up reliably removing instructions where it can be tricky to do so
 * later without real UD chain information.
 */
bool
fs_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
                                   fs_reg dst,
                                   fs_reg src,
                                   fs_inst *pre_rhs_inst,
                                   fs_inst *last_rhs_inst)
{
   /* Only attempt if we're doing a direct assignment. */
   if (ir->condition ||
       !(ir->lhs->type->is_scalar() ||
        (ir->lhs->type->is_vector() &&
         ir->write_mask == (1 << ir->lhs->type->vector_elements) - 1)))
      return false;

   /* Make sure the last instruction generated our source reg. */
   fs_inst *modify = get_instruction_generating_reg(pre_rhs_inst,
                                                    last_rhs_inst,
                                                    src);
   if (!modify)
      return false;

   /* If last_rhs_inst wrote a different number of components than our LHS,
    * we can't safely rewrite it.
    */
   if (ir->lhs->type->vector_elements != modify->regs_written())
      return false;

   /* Success!  Rewrite the instruction. */
   modify->dst = dst;

   return true;
}

void
fs_visitor::visit(ir_assignment *ir)
{
   fs_reg l, r;
   fs_inst *inst;

   /* FINISHME: arrays on the lhs */
   ir->lhs->accept(this);
   l = this->result;

   fs_inst *pre_rhs_inst = (fs_inst *) this->instructions.get_tail();

   ir->rhs->accept(this);
   r = this->result;

   fs_inst *last_rhs_inst = (fs_inst *) this->instructions.get_tail();

   assert(l.file != BAD_FILE);
   assert(r.file != BAD_FILE);

   if (try_rewrite_rhs_to_dst(ir, l, r, pre_rhs_inst, last_rhs_inst))
      return;

   if (ir->condition) {
      emit_bool_to_cond_code(ir->condition);
   }

   if (ir->lhs->type->is_scalar() ||
       ir->lhs->type->is_vector()) {
      for (int i = 0; i < ir->lhs->type->vector_elements; i++) {
         if (ir->write_mask & (1 << i)) {
            inst = emit(BRW_OPCODE_MOV, l, r);
            if (ir->condition)
               inst->predicated = true;
            r.reg_offset++;
         }
         l.reg_offset++;
      }
   } else {
      emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL);
   }
}

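/* Assemble the SEND payload for a gen4 sampler message.  The message always
 * starts with the g0 header, followed by the coordinate and whatever shadow
 * comparator, LOD/bias, or gradient arguments the message type requires.
 * Several message types only exist in SIMD16 form on gen4, in which case the
 * payload and return layout are interleaved (see the simd16 handling below).
 */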
fs_inst *
fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate,
                              int sampler)
{
   int mlen;
   int base_mrf = 1;
   bool simd16 = false;
   fs_reg orig_dst;

   /* g0 header. */
   mlen = 1;

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;

      if (ir->op == ir_tex) {
         /* There's no plain shadow compare message, so we use shadow
          * compare with a bias of 0.0.
          */
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), fs_reg(0.0f));
         mlen++;
      } else if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
         mlen++;
      } else {
         assert(ir->op == ir_txl);
         ir->lod_info.lod->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
         mlen++;
      }

      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen++;
   } else if (ir->op == ir_tex) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;
   } else if (ir->op == ir_txd) {
      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
         coordinate.reg_offset++;
      }
      /* the slots for u and v are always present, but r is optional */
      mlen += MAX2(ir->coordinate->type->vector_elements, 2);

      /*  P   = u, v, r
       * dPdx = dudx, dvdx, drdx
       * dPdy = dudy, dvdy, drdy
       *
       * 1-arg: Does not exist.
       *
       * 2-arg: dudx   dvdx   dudy   dvdy
       *        dPdx.x dPdx.y dPdy.x dPdy.y
       *        m4     m5     m6     m7
       *
       * 3-arg: dudx   dvdx   drdx   dudy   dvdy   drdy
       *        dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
       *        m5     m6     m7     m8     m9     m10
       */
      for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
         dPdx.reg_offset++;
      }
      mlen += MAX2(ir->lod_info.grad.dPdx->type->vector_elements, 2);

      for (int i = 0; i < ir->lod_info.grad.dPdy->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
         dPdy.reg_offset++;
      }
      mlen += MAX2(ir->lod_info.grad.dPdy->type->vector_elements, 2);
   } else if (ir->op == ir_txs) {
      /* There's no SIMD8 resinfo message on Gen4.  Use SIMD16 instead. */
      simd16 = true;
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD),
           this->result);
      mlen += 2;
   } else {
      /* Oh joy.  gen4 doesn't have SIMD8 non-shadow-compare bias/lod
       * instructions.  We'll need to do SIMD16 here.
       */
      simd16 = true;
      assert(ir->op == ir_txb || ir->op == ir_txl || ir->op == ir_txf);

      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2, coordinate.type),
              coordinate);
         coordinate.reg_offset++;
      }

      /* Initialize the rest of u/v/r with 0.0.  Empirically, this seems to
       * be necessary for TXF (ld), but seems wise to do for all messages.
       */
      for (int i = ir->coordinate->type->vector_elements; i < 3; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2), fs_reg(0.0f));
      }

      /* lod/bias appears after u/v/r. */
      mlen += 6;

      if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
         mlen++;
      } else {
         ir->lod_info.lod->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, this->result.type),
              this->result);
         mlen++;
      }

      /* The unused upper half. */
      mlen++;
   }

   if (simd16) {
      /* Now, since we're doing simd16, the return is 2 interleaved
       * vec4s where the odd-indexed ones are junk. We'll need to move
       * this weirdness around to the expected layout.
       */
      orig_dst = dst;
      const glsl_type *vec_type =
         glsl_type::get_instance(ir->type->base_type, 4, 1);
      dst = fs_reg(this, glsl_type::get_array_instance(vec_type, 2));
      dst.type = intel->is_g4x ? brw_type_for_base_type(ir->type)
                               : BRW_REGISTER_TYPE_F;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(SHADER_OPCODE_TEX, dst);
      break;
   case ir_txb:
      inst = emit(FS_OPCODE_TXB, dst);
      break;
   case ir_txl:
      inst = emit(SHADER_OPCODE_TXL, dst);
      break;
   case ir_txd:
      inst = emit(SHADER_OPCODE_TXD, dst);
      break;
   case ir_txs:
      inst = emit(SHADER_OPCODE_TXS, dst);
      break;
   case ir_txf:
      inst = emit(SHADER_OPCODE_TXF, dst);
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = true;

   if (simd16) {
      for (int i = 0; i < 4; i++) {
         emit(BRW_OPCODE_MOV, orig_dst, dst);
         orig_dst.reg_offset++;
         dst.reg_offset += 2;
      }
   }

   return inst;
}

/* gen5's sampler has slots for u, v, r, array index, then optional
 * parameters like the shadow comparator or LOD bias.  If the optional
 * parameters aren't present, those base slots can be omitted from the
 * message.
 *
 * Even when later parameters force the base slots to be present, we never
 * fill in the unnecessary ones, which may look surprising in the
 * disassembly.
 */
fs_inst *
fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate,
                              int sampler)
{
   int mlen = 0;
   int base_mrf = 2;
   int reg_width = c->dispatch_width / 8;
   bool header_present = false;
   const int vector_elements =
      ir->coordinate ? ir->coordinate->type->vector_elements : 0;

   if (ir->offset) {
      /* The offsets set up by the ir_texture visitor are in the
       * m1 header, so we can't go headerless.
       */
      header_present = true;
      mlen++;
      base_mrf--;
   }

   for (int i = 0; i < vector_elements; i++) {
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen + i * reg_width, coordinate.type),
           coordinate);
      coordinate.reg_offset++;
   }
   mlen += vector_elements * reg_width;

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      mlen = MAX2(mlen, header_present + 4 * reg_width);

      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(SHADER_OPCODE_TEX, dst);
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      mlen = MAX2(mlen, header_present + 4 * reg_width);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;

      inst = emit(FS_OPCODE_TXB, dst);

      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      mlen = MAX2(mlen, header_present + 4 * reg_width);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;

      inst = emit(SHADER_OPCODE_TXL, dst);
      break;
   case ir_txd: {
      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      mlen = MAX2(mlen, header_present + 4 * reg_width); /* skip over 'ai' */

      /**
       *  P   =  u,    v,    r
       * dPdx = dudx, dvdx, drdx
       * dPdy = dudy, dvdy, drdy
       *
       * Load up these values:
       * - dudx   dudy   dvdx   dvdy   drdx   drdy
       * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
       */
      for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
         dPdx.reg_offset++;
         mlen += reg_width;

         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
         dPdy.reg_offset++;
         mlen += reg_width;
      }

      inst = emit(SHADER_OPCODE_TXD, dst);
      break;
   }
   case ir_txs:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD),
           this->result);
      mlen += reg_width;
      inst = emit(SHADER_OPCODE_TXS, dst);
      break;
   case ir_txf:
      mlen = header_present + 4 * reg_width;

      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen - reg_width, BRW_REGISTER_TYPE_UD),
           this->result);
      inst = emit(SHADER_OPCODE_TXF, dst);
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = header_present;

   if (mlen > 11) {
      fail("Message length >11 disallowed by hardware\n");
   }

   return inst;
}

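/* Assemble the payload for a gen7 sampler message.  Gen7 messages are
 * headerless unless a texel offset is in use, and the LOD/bias/gradient
 * parameters precede (or, for TXF, are interleaved with) the coordinate.
 */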
fs_inst *
fs_visitor::emit_texture_gen7(ir_texture *ir, fs_reg dst, fs_reg coordinate,
                              int sampler)
{
   int mlen = 0;
   int base_mrf = 2;
   int reg_width = c->dispatch_width / 8;
   bool header_present = false;

   if (ir->offset) {
      /* The offsets set up by the ir_texture visitor are in the
       * m1 header, so we can't go headerless.
       */
      header_present = true;
      mlen++;
      base_mrf--;
   }

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
   }

   /* Set up the LOD info */
   switch (ir->op) {
   case ir_tex:
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
      break;
   case ir_txd: {
      if (c->dispatch_width == 16)
         fail("Gen7 does not support sample_d/sample_d_c in SIMD16 mode.");

      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      /* Load dPdx and the coordinate together:
       * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
       */
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate);
         coordinate.reg_offset++;
         mlen += reg_width;

         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
         dPdx.reg_offset++;
         mlen += reg_width;

         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
         dPdy.reg_offset++;
         mlen += reg_width;
      }
      break;
   }
   case ir_txs:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD),
           this->result);
      mlen += reg_width;
      break;
   case ir_txf:
      /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r. */
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate);
      coordinate.reg_offset++;
      mlen += reg_width;

      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D),
           this->result);
      mlen += reg_width;

      for (int i = 1; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV,
              fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate);
         coordinate.reg_offset++;
         mlen += reg_width;
      }
      break;
   }

   /* Set up the coordinate (except for cases where it was done above) */
   if (ir->op != ir_txd && ir->op != ir_txs && ir->op != ir_txf) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate);
         coordinate.reg_offset++;
         mlen += reg_width;
      }
   }

   /* Generate the SEND */
   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex: inst = emit(SHADER_OPCODE_TEX, dst); break;
   case ir_txb: inst = emit(FS_OPCODE_TXB, dst); break;
   case ir_txl: inst = emit(SHADER_OPCODE_TXL, dst); break;
   case ir_txd: inst = emit(SHADER_OPCODE_TXD, dst); break;
   case ir_txf: inst = emit(SHADER_OPCODE_TXF, dst); break;
   case ir_txs: inst = emit(SHADER_OPCODE_TXS, dst); break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = header_present;

   if (mlen > 11) {
      fail("Message length >11 disallowed by hardware\n");
   }

   return inst;
}

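/* Emit a texture lookup: resolve the sampler unit, set up any texel offset
 * header and the rectangle-scaling or GL_CLAMP workarounds, dispatch to the
 * per-generation message emitter, and finally emulate shadow compares and
 * apply swizzles that the hardware can't do for us.
 */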
void
fs_visitor::visit(ir_texture *ir)
{
   fs_inst *inst = NULL;

   int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &fp->Base);
   sampler = fp->Base.SamplerUnits[sampler];

   /* Our hardware doesn't have a sample_d_c message, so shadow compares
    * for textureGrad/TXD need to be emulated with instructions.
    */
   bool hw_compare_supported = ir->op != ir_txd;
   if (ir->shadow_comparitor && !hw_compare_supported) {
      assert(c->key.tex.compare_funcs[sampler] != GL_NONE);
      /* No need to even sample for GL_ALWAYS or GL_NEVER...bail early */
      if (c->key.tex.compare_funcs[sampler] == GL_ALWAYS)
         return swizzle_result(ir, fs_reg(1.0f), sampler);
      else if (c->key.tex.compare_funcs[sampler] == GL_NEVER)
         return swizzle_result(ir, fs_reg(0.0f), sampler);
   }

   if (ir->coordinate)
      ir->coordinate->accept(this);
   fs_reg coordinate = this->result;

   if (ir->offset != NULL) {
      uint32_t offset_bits = brw_texture_offset(ir->offset->as_constant());

      /* Explicitly set up the message header by copying g0 to msg reg m1. */
      emit(BRW_OPCODE_MOV, fs_reg(MRF, 1, BRW_REGISTER_TYPE_UD),
           fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)));

      /* Then set the offset bits in DWord 2 of the message header. */
      emit(BRW_OPCODE_MOV,
           fs_reg(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 1, 2),
                         BRW_REGISTER_TYPE_UD)),
           fs_reg(brw_imm_uw(offset_bits)));
   }

   /* Should be lowered by do_lower_texture_projection */
   assert(!ir->projector);

   bool needs_gl_clamp = true;

   fs_reg scale_x, scale_y;

   /* The 965 requires the EU to do the normalization of GL rectangle
    * texture coordinates.  We use the program parameter state
    * tracking to get the scaling factor.
    */
   if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT &&
       (intel->gen < 6 ||
        (intel->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) ||
                             c->key.tex.gl_clamp_mask[1] & (1 << sampler))))) {
      struct gl_program_parameter_list *params = c->fp->program.Base.Parameters;
      int tokens[STATE_LENGTH] = {
         STATE_INTERNAL,
         STATE_TEXRECT_SCALE,
         sampler,
         0,
         0
      };

      if (c->dispatch_width == 16) {
         fail("rectangle scale uniform setup not supported on 16-wide\n");
         this->result = fs_reg(this, ir->type);
         return;
      }

      c->prog_data.param_convert[c->prog_data.nr_params] =
         PARAM_NO_CONVERT;
      c->prog_data.param_convert[c->prog_data.nr_params + 1] =
         PARAM_NO_CONVERT;

      scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
      scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);

      GLuint index = _mesa_add_state_reference(params,
                                               (gl_state_index *)tokens);

      this->param_index[c->prog_data.nr_params] = index;
      this->param_offset[c->prog_data.nr_params] = 0;
      c->prog_data.nr_params++;
      this->param_index[c->prog_data.nr_params] = index;
      this->param_offset[c->prog_data.nr_params] = 1;
      c->prog_data.nr_params++;
   }

   /* On gen4/5, use the scaling factors set up above to normalize the
    * rectangle coordinates before sampling.
    */
   if (intel->gen < 6 &&
       ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
      fs_reg dst = fs_reg(this, ir->coordinate->type);
      fs_reg src = coordinate;
      coordinate = dst;

      emit(BRW_OPCODE_MUL, dst, src, scale_x);
      dst.reg_offset++;
      src.reg_offset++;
      emit(BRW_OPCODE_MUL, dst, src, scale_y);
   } else if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
      /* On gen6+, the sampler handles the rectangle coordinates
       * natively, without needing rescaling.  But that means we have
       * to do GL_CLAMP clamping at the [0, width], [0, height] scale,
       * not [0, 1] like the default case below.
       */
      needs_gl_clamp = false;

      for (int i = 0; i < 2; i++) {
         if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) {
            fs_reg chan = coordinate;
            chan.reg_offset += i;

            inst = emit(BRW_OPCODE_SEL, chan, chan, brw_imm_f(0.0));
            inst->conditional_mod = BRW_CONDITIONAL_G;

            /* Our parameter comes in as 1.0/width or 1.0/height,
             * because that's what people normally want for doing
             * texture rectangle handling.  We need width or height
             * for clamping, but we don't care enough to make a new
             * parameter type, so just invert back.
             */
            fs_reg limit = fs_reg(this, glsl_type::float_type);
            emit(BRW_OPCODE_MOV, limit, i == 0 ? scale_x : scale_y);
            emit(SHADER_OPCODE_RCP, limit, limit);

            inst = emit(BRW_OPCODE_SEL, chan, chan, limit);
            inst->conditional_mod = BRW_CONDITIONAL_L;
         }
      }
   }

   if (ir->coordinate && needs_gl_clamp) {
      for (int i = 0; i < MIN2(ir->coordinate->type->vector_elements, 3); i++) {
         if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) {
            fs_reg chan = coordinate;
            chan.reg_offset += i;

            fs_inst *inst = emit(BRW_OPCODE_MOV, chan, chan);
            inst->saturate = true;
         }
      }
   }

   /* Writemasking doesn't eliminate channels on SIMD8 texture
    * samples, so don't worry about them.
    */
   fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1));

   if (intel->gen >= 7) {
      inst = emit_texture_gen7(ir, dst, coordinate, sampler);
   } else if (intel->gen >= 5) {
      inst = emit_texture_gen5(ir, dst, coordinate, sampler);
   } else {
      inst = emit_texture_gen4(ir, dst, coordinate, sampler);
   }

   /* If there's an offset, we already set up m1.  To avoid the implied move,
    * use the null register.  Otherwise, we want an implied move from g0.
    */
   if (ir->offset != NULL || !inst->header_present)
      inst->src[0] = reg_undef;
   else
      inst->src[0] = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW));

   inst->sampler = sampler;

   if (ir->shadow_comparitor) {
      if (hw_compare_supported) {
         inst->shadow_compare = true;
      } else {
         ir->shadow_comparitor->accept(this);
         fs_reg ref = this->result;

         fs_reg value = dst;
         dst = fs_reg(this, glsl_type::vec4_type);

         /* FINISHME: This needs to be done pre-filtering. */

         uint32_t conditional = 0;
         switch (c->key.tex.compare_funcs[sampler]) {
         /* GL_ALWAYS and GL_NEVER were handled at the top of the function */
         case GL_LESS:     conditional = BRW_CONDITIONAL_L;   break;
         case GL_GREATER:  conditional = BRW_CONDITIONAL_G;   break;
         case GL_LEQUAL:   conditional = BRW_CONDITIONAL_LE;  break;
         case GL_GEQUAL:   conditional = BRW_CONDITIONAL_GE;  break;
         case GL_EQUAL:    conditional = BRW_CONDITIONAL_EQ;  break;
         case GL_NOTEQUAL: conditional = BRW_CONDITIONAL_NEQ; break;
         default: assert(!"Should not get here: bad shadow compare function");
         }

         /* Use conditional moves to load 0 or 1 as the result */
         this->current_annotation = "manual shadow comparison";
         for (int i = 0; i < 4; i++) {
            inst = emit(BRW_OPCODE_MOV, dst, fs_reg(0.0f));

            inst = emit(BRW_OPCODE_CMP, reg_null_f, ref, value);
            inst->conditional_mod = conditional;

            inst = emit(BRW_OPCODE_MOV, dst, fs_reg(1.0f));
            inst->predicated = true;

            dst.reg_offset++;
            value.reg_offset++;
         }
         dst.reg_offset = 0;
      }
   }

   swizzle_result(ir, dst, sampler);
}

/**
 * Swizzle the result of a texture lookup.  This is necessary for
 * EXT_texture_swizzle as well as DEPTH_TEXTURE_MODE for shadow comparisons.
 */
void
fs_visitor::swizzle_result(ir_texture *ir, fs_reg orig_val, int sampler)
{
   this->result = orig_val;

   if (ir->op == ir_txs)
      return;

   if (ir->type == glsl_type::float_type) {
      /* Ignore DEPTH_TEXTURE_MODE swizzling. */
      assert(ir->sampler->type->sampler_shadow);
   } else if (c->key.tex.swizzles[sampler] != SWIZZLE_NOOP) {
      fs_reg swizzled_result = fs_reg(this, glsl_type::vec4_type);

      for (int i = 0; i < 4; i++) {
         int swiz = GET_SWZ(c->key.tex.swizzles[sampler], i);
         fs_reg l = swizzled_result;
         l.reg_offset += i;

         if (swiz == SWIZZLE_ZERO) {
            emit(BRW_OPCODE_MOV, l, fs_reg(0.0f));
         } else if (swiz == SWIZZLE_ONE) {
            emit(BRW_OPCODE_MOV, l, fs_reg(1.0f));
         } else {
            fs_reg r = orig_val;
            r.reg_offset += GET_SWZ(c->key.tex.swizzles[sampler], i);
            emit(BRW_OPCODE_MOV, l, r);
         }
      }
      this->result = swizzled_result;
   }
}

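/* Components of a vector live in consecutive registers, so a swizzle is just
 * a series of MOVs from offset source channels.  For example, with val as
 * the source:
 *
 *    vec2 v = val.zx;   =>   MOV result+0, val+2
 *                            MOV result+1, val+0
 *
 * A single-component swizzle needs no MOV at all; it's only an offset bump.
 */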
void
fs_visitor::visit(ir_swizzle *ir)
{
   ir->val->accept(this);
   fs_reg val = this->result;

   if (ir->type->vector_elements == 1) {
      this->result.reg_offset += ir->mask.x;
      return;
   }

   fs_reg result = fs_reg(this, ir->type);
   this->result = result;

   for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
      fs_reg channel = val;
      int swiz = 0;

      switch (i) {
      case 0:
         swiz = ir->mask.x;
         break;
      case 1:
         swiz = ir->mask.y;
         break;
      case 2:
         swiz = ir->mask.z;
         break;
      case 3:
         swiz = ir->mask.w;
         break;
      }

      channel.reg_offset += swiz;
      emit(BRW_OPCODE_MOV, result, channel);
      result.reg_offset++;
   }
}

void
fs_visitor::visit(ir_discard *ir)
{
   assert(ir->condition == NULL); /* FINISHME */

   emit(FS_OPCODE_DISCARD);
   kill_emitted = true;
}

void
fs_visitor::visit(ir_constant *ir)
{
   /* Set this->result to reg at the bottom of the function because some code
    * paths will cause this visitor to be applied to other fields.  This will
    * cause the value stored in this->result to be modified.
    *
    * Make reg constant so that it doesn't get accidentally modified along the
    * way.  Yes, I actually had this problem. :(
    */
   const fs_reg reg(this, ir->type);
   fs_reg dst_reg = reg;

   if (ir->type->is_array()) {
      const unsigned size = type_size(ir->type->fields.array);

      for (unsigned i = 0; i < ir->type->length; i++) {
         ir->array_elements[i]->accept(this);
         fs_reg src_reg = this->result;

         dst_reg.type = src_reg.type;
         for (unsigned j = 0; j < size; j++) {
            emit(BRW_OPCODE_MOV, dst_reg, src_reg);
            src_reg.reg_offset++;
            dst_reg.reg_offset++;
         }
      }
   } else if (ir->type->is_record()) {
      foreach_list(node, &ir->components) {
         ir_constant *const field = (ir_constant *) node;
         const unsigned size = type_size(field->type);

         field->accept(this);
         fs_reg src_reg = this->result;

         dst_reg.type = src_reg.type;
         for (unsigned j = 0; j < size; j++) {
            emit(BRW_OPCODE_MOV, dst_reg, src_reg);
            src_reg.reg_offset++;
            dst_reg.reg_offset++;
         }
      }
   } else {
      const unsigned size = type_size(ir->type);

      for (unsigned i = 0; i < size; i++) {
         switch (ir->type->base_type) {
         case GLSL_TYPE_FLOAT:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i]));
            break;
         case GLSL_TYPE_UINT:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i]));
            break;
         case GLSL_TYPE_INT:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i]));
            break;
         case GLSL_TYPE_BOOL:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i]));
            break;
         default:
            assert(!"Non-float/uint/int/bool constant");
         }
         dst_reg.reg_offset++;
      }
   }

   this->result = reg;
}

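/* Evaluate a boolean rvalue and leave the result in the flag register so a
 * following predicated instruction can use it.  Comparisons and logic ops
 * can set the condition code directly; anything else is evaluated to a
 * register and tested against zero.
 */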
void
fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
{
   ir_expression *expr = ir->as_expression();

   if (expr) {
      fs_reg op[2];
      fs_inst *inst;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         assert(expr->operands[i]->type->is_scalar());

         expr->operands[i]->accept(this);
         op[i] = this->result;

         resolve_ud_negate(&op[i]);
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;

      case ir_binop_logic_xor:
         inst = emit(BRW_OPCODE_XOR, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_or:
         inst = emit(BRW_OPCODE_OR, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_and:
         inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_f2b:
         if (intel->gen >= 6) {
            inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0.0f));
         } else {
            inst = emit(BRW_OPCODE_MOV, reg_null_f, op[0]);
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_i2b:
         if (intel->gen >= 6) {
            inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0));
         } else {
            inst = emit(BRW_OPCODE_MOV, reg_null_d, op[0]);
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_all_equal:
      case ir_binop_nequal:
      case ir_binop_any_nequal:
         inst = emit(BRW_OPCODE_CMP, reg_null_cmp, op[0], op[1]);
         inst->conditional_mod =
            brw_conditional_for_comparison(expr->operation);
         break;

      default:
         assert(!"not reached");
         fail("bad cond code\n");
         break;
      }
      return;
   }

   ir->accept(this);

   if (intel->gen >= 6) {
      fs_inst *inst = emit(BRW_OPCODE_AND, reg_null_d, this->result, fs_reg(1));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   } else {
      fs_inst *inst = emit(BRW_OPCODE_MOV, reg_null_d, this->result);
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }
}

/**
 * Emit a gen6 IF statement with the comparison folded into the IF
 * instruction.
 */
void
fs_visitor::emit_if_gen6(ir_if *ir)
{
   ir_expression *expr = ir->condition->as_expression();

   if (expr) {
      fs_reg op[2];
      fs_inst *inst;
      fs_reg temp;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         assert(expr->operands[i]->type->is_scalar());

         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(BRW_OPCODE_IF, temp, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         return;

      case ir_binop_logic_xor:
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_logic_or:
         temp = fs_reg(this, glsl_type::bool_type);
         emit(BRW_OPCODE_OR, temp, op[0], op[1]);
         inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_logic_and:
         temp = fs_reg(this, glsl_type::bool_type);
         emit(BRW_OPCODE_AND, temp, op[0], op[1]);
         inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_unop_f2b:
         inst = emit(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_unop_i2b:
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_all_equal:
      case ir_binop_nequal:
      case ir_binop_any_nequal:
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
         inst->conditional_mod =
            brw_conditional_for_comparison(expr->operation);
         return;
      default:
         assert(!"not reached");
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         fail("bad condition\n");
         return;
      }
      return;
   }

   ir->condition->accept(this);

   fs_inst *inst = emit(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;
}

void
fs_visitor::visit(ir_if *ir)
{
   fs_inst *inst;

   if (intel->gen < 6 && c->dispatch_width == 16) {
      fail("Can't support (non-uniform) control flow on 16-wide\n");
   }

   /* Don't point the annotation at the if statement, because then it plus
    * the then and else blocks get printed.
    */
   this->base_ir = ir->condition;

   if (intel->gen == 6) {
      emit_if_gen6(ir);
   } else {
      emit_bool_to_cond_code(ir->condition);

      inst = emit(BRW_OPCODE_IF);
      inst->predicated = true;
   }

   foreach_list(node, &ir->then_instructions) {
      ir_instruction *ir = (ir_instruction *)node;
      this->base_ir = ir;

      ir->accept(this);
   }

   if (!ir->else_instructions.is_empty()) {
      emit(BRW_OPCODE_ELSE);

      foreach_list(node, &ir->else_instructions) {
         ir_instruction *ir = (ir_instruction *)node;
         this->base_ir = ir;

         ir->accept(this);
      }
   }

   emit(BRW_OPCODE_ENDIF);
}

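/* Lower a GLSL loop to a DO ... WHILE block.  If loop analysis found an
 * induction variable, its initialization happens before the DO, the exit
 * comparison becomes a predicated BREAK at the top of the body, and the
 * increment is emitted just before the WHILE.
 */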
1687void
1688fs_visitor::visit(ir_loop *ir)
1689{
1690   fs_reg counter = reg_undef;
1691
1692   if (intel->gen < 6 && c->dispatch_width == 16) {
1693      fail("Can't support (non-uniform) control flow on 16-wide\n");
1694   }
1695
1696   if (ir->counter) {
1697      this->base_ir = ir->counter;
1698      ir->counter->accept(this);
1699      counter = *(variable_storage(ir->counter));
1700
1701      if (ir->from) {
1702	 this->base_ir = ir->from;
1703	 ir->from->accept(this);
1704
1705	 emit(BRW_OPCODE_MOV, counter, this->result);
1706      }
1707   }
1708
1709   emit(BRW_OPCODE_DO);
1710
1711   if (ir->to) {
1712      this->base_ir = ir->to;
1713      ir->to->accept(this);
1714
1715      fs_inst *inst = emit(BRW_OPCODE_CMP, reg_null_cmp, counter, this->result);
1716      inst->conditional_mod = brw_conditional_for_comparison(ir->cmp);
1717
1718      inst = emit(BRW_OPCODE_BREAK);
1719      inst->predicated = true;
1720   }
1721
1722   foreach_list(node, &ir->body_instructions) {
1723      ir_instruction *ir = (ir_instruction *)node;
1724
1725      this->base_ir = ir;
1726      ir->accept(this);
1727   }
1728
1729   if (ir->increment) {
1730      this->base_ir = ir->increment;
1731      ir->increment->accept(this);
1732      emit(BRW_OPCODE_ADD, counter, counter, this->result);
1733   }
1734
1735   emit(BRW_OPCODE_WHILE);
1736}
1737
1738void
1739fs_visitor::visit(ir_loop_jump *ir)
1740{
1741   switch (ir->mode) {
1742   case ir_loop_jump::jump_break:
1743      emit(BRW_OPCODE_BREAK);
1744      break;
1745   case ir_loop_jump::jump_continue:
1746      emit(BRW_OPCODE_CONTINUE);
1747      break;
1748   }
1749}
1750
1751void
1752fs_visitor::visit(ir_call *ir)
1753{
1754   assert(!"FINISHME");
1755}
1756
1757void
1758fs_visitor::visit(ir_return *ir)
1759{
1760   assert(!"FINISHME");
1761}
1762
1763void
1764fs_visitor::visit(ir_function *ir)
1765{
1766   /* Ignore function bodies other than main() -- we shouldn't see calls to
1767    * them since they should all be inlined before we get to ir_to_mesa.
1768    */
1769   if (strcmp(ir->name, "main") == 0) {
1770      const ir_function_signature *sig;
1771      exec_list empty;
1772
      sig = ir->matching_signature(&empty);

      assert(sig);

      foreach_list(node, &sig->body) {
         ir_instruction *ir = (ir_instruction *)node;
         this->base_ir = ir;

         ir->accept(this);
      }
   }
}

void
fs_visitor::visit(ir_function_signature *ir)
{
   assert(!"not reached");
   (void)ir;
}

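/** Makes a ralloc'd copy of the given instruction, tags it with the
 * current annotation and originating IR, applies any pending
 * force_uncompressed/force_sechalf state, and appends it to the end of
 * the instruction stream.
 */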
fs_inst *
fs_visitor::emit(fs_inst inst)
{
   fs_inst *list_inst = new(mem_ctx) fs_inst;
   *list_inst = inst;

   if (force_uncompressed_stack > 0)
      list_inst->force_uncompressed = true;
   else if (force_sechalf_stack > 0)
      list_inst->force_sechalf = true;

   list_inst->annotation = this->current_annotation;
   list_inst->ir = this->base_ir;

   this->instructions.push_tail(list_inst);

   return list_inst;
}

/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   int reg_width = c->dispatch_width / 8;
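   /* reg_width is 1 for SIMD8 dispatch and 2 for SIMD16, so each color
    * channel below occupies reg_width message registers starting at m2.
    */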

   /* Everyone's favorite color. */
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 0 * reg_width), fs_reg(1.0f));
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 1 * reg_width), fs_reg(0.0f));
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 2 * reg_width), fs_reg(1.0f));
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 3 * reg_width), fs_reg(0.0f));

   fs_inst *write;
   write = emit(FS_OPCODE_FB_WRITE, fs_reg(0), fs_reg(0));
   write->base_mrf = 2;
   write->mlen = 4 * reg_width;
   write->eot = true;
}

/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
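   /* Each varying slot's setup data takes two GRFs, holding four floats
    * of interpolation coefficients per channel, two channels per GRF:
    * channel / 2 picks the GRF and (channel & 1) * 4 the sub-register
    * offset of that channel's coefficients.
    */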
   int regnr = urb_setup[location] * 2 + channel / 2;
   int stride = (channel & 1) * 4;

   assert(urb_setup[location] != -1);

   return brw_vec1_grf(regnr, stride);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   this->current_annotation = "compute pixel centers";
   this->pixel_x = fs_reg(this, glsl_type::uint_type);
   this->pixel_y = fs_reg(this, glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;

   emit(FS_OPCODE_PIXEL_X, this->pixel_x);
   emit(FS_OPCODE_PIXEL_Y, this->pixel_y);

   this->current_annotation = "compute pixel deltas from v0";
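   /* PLN takes its X and Y deltas from one register pair, so when it's
    * available allocate delta_x and delta_y adjacently; the LINE+MAC
    * fallback can use two independently allocated registers.
    */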
   if (brw->has_pln) {
      this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         fs_reg(this, glsl_type::vec2_type);
      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC];
      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg_offset++;
   } else {
      this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         fs_reg(this, glsl_type::float_type);
      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         fs_reg(this, glsl_type::float_type);
   }
   emit(BRW_OPCODE_ADD, this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        this->pixel_x, fs_reg(negate(brw_vec1_grf(1, 0))));
   emit(BRW_OPCODE_ADD, this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        this->pixel_y, fs_reg(negate(brw_vec1_grf(1, 1))));

   this->current_annotation = "compute pos.w and 1/pos.w";
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = fs_reg(this, glsl_type::float_type);
   emit(FS_OPCODE_LINTERP, wpos_w,
        this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        interp_reg(FRAG_ATTRIB_WPOS, 3));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = fs_reg(this, glsl_type::float_type);
   emit_math(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
   this->current_annotation = NULL;
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   /* If the pixel centers end up used, the setup is the same as for gen4. */
   this->current_annotation = "compute pixel centers";
   fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type);
   fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type);
   int_pixel_x.type = BRW_REGISTER_TYPE_UW;
   int_pixel_y.type = BRW_REGISTER_TYPE_UW;
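   /* g1 of the thread payload holds the screen X/Y of each 2x2 subspan's
    * origin.  brw_imm_v packs eight signed 4-bit values, so adding
    * <0,1,0,1,0,1,0,1> (0x10101010) and <0,0,1,1,0,0,1,1> (0x11001100)
    * fans the subspan origins out into per-pixel X and Y coordinates.
    */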
   emit(BRW_OPCODE_ADD,
        int_pixel_x,
        fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
        fs_reg(brw_imm_v(0x10101010)));
   emit(BRW_OPCODE_ADD,
        int_pixel_y,
        fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
        fs_reg(brw_imm_v(0x11001100)));

   /* As of gen6, we can no longer mix float and int sources.  We have
    * to turn the integer pixel centers into floats for their actual
    * use.
    */
   this->pixel_x = fs_reg(this, glsl_type::float_type);
   this->pixel_y = fs_reg(this, glsl_type::float_type);
   emit(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x);
   emit(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y);

   this->current_annotation = "compute pos.w";
   this->pixel_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0));
   this->wpos_w = fs_reg(this, glsl_type::float_type);
   emit_math(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w);

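   /* gen6+ hardware computes the barycentric coordinates we asked for
    * and hands them to us in the payload, so just point delta_x/delta_y
    * at those registers rather than computing deltas ourselves.
    */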
   for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
      uint8_t reg = c->barycentric_coord_reg[i];
      this->delta_x[i] = fs_reg(brw_vec8_grf(reg, 0));
      this->delta_y[i] = fs_reg(brw_vec8_grf(reg + 1, 0));
   }

   this->current_annotation = NULL;
}

void
fs_visitor::emit_color_write(int target, int index, int first_color_mrf)
{
   int reg_width = c->dispatch_width / 8;
   fs_inst *inst;
   fs_reg color = outputs[target];
   fs_reg mrf;

   /* If there's no color data to be written, skip it. */
   if (color.file == BAD_FILE)
      return;

   color.reg_offset += index;

   if (c->dispatch_width == 8 || intel->gen >= 6) {
      /* SIMD8 write looks like:
       * m + 0: r0
       * m + 1: g0
       * m + 2: b0
       * m + 3: a0
       *
       * gen6 SIMD16 DP write looks like:
       * m + 0: r0
       * m + 1: r1
       * m + 2: g0
       * m + 3: g1
       * m + 4: b0
       * m + 5: b1
       * m + 6: a0
       * m + 7: a1
       */
      inst = emit(BRW_OPCODE_MOV,
                  fs_reg(MRF, first_color_mrf + index * reg_width, color.type),
                  color);
      inst->saturate = c->key.clamp_fragment_color;
   } else {
      /* pre-gen6 SIMD16 single source DP write looks like:
       * m + 0: r0
       * m + 1: g0
       * m + 2: b0
       * m + 3: a0
       * m + 4: r1
       * m + 5: g1
       * m + 6: b1
       * m + 7: a1
       */
      if (brw->has_compr4) {
         /* By setting the high bit of the MRF register number, we
          * indicate that we want COMPR4 mode - instead of doing the
          * usual destination + 1 for the second half we get
          * destination + 4.
          */
         inst = emit(BRW_OPCODE_MOV,
                     fs_reg(MRF, BRW_MRF_COMPR4 + first_color_mrf + index,
                            color.type),
                     color);
         inst->saturate = c->key.clamp_fragment_color;
      } else {
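         /* Without COMPR4 the two SIMD8 halves have to be moved
          * separately: the first half to m + index and the second half
          * to m + index + 4, matching the layout above.
          */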
         push_force_uncompressed();
         inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index,
                                            color.type),
                     color);
         inst->saturate = c->key.clamp_fragment_color;
         pop_force_uncompressed();

         push_force_sechalf();
         color.sechalf = true;
         inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index + 4,
                                            color.type),
                     color);
         inst->saturate = c->key.clamp_fragment_color;
         pop_force_sechalf();
         color.sechalf = false;
      }
   }
}

void
fs_visitor::emit_fb_writes()
{
   this->current_annotation = "FB write header";
   bool header_present = true;
   int base_mrf = 2;
   int nr = base_mrf;
   int reg_width = c->dispatch_width / 8;
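   /* The FB write payload is assembled in order: an optional
    * two-register header, the antialiasing/destination-stencil value
    * when present, four color channels of reg_width registers each, and
    * then optional source and destination depth.
    */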

   if (intel->gen >= 6 &&
       !this->kill_emitted &&
       c->key.nr_color_regions == 1) {
      header_present = false;
   }

   if (header_present) {
      /* m2, m3 header */
      nr += 2;
   }

   if (c->aa_dest_stencil_reg) {
      push_force_uncompressed();
      emit(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
           fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0)));
      pop_force_uncompressed();
   }

   /* Reserve space for color. It'll be filled in per MRT below. */
   int color_mrf = nr;
   nr += 4 * reg_width;

   if (c->source_depth_to_render_target) {
      if (intel->gen == 6 && c->dispatch_width == 16) {
         /* For outputting oDepth on gen6, SIMD8 writes have to be
          * used.  This would require 8-wide moves of each half to
          * message regs, kind of like pre-gen5 SIMD16 FB writes.
          * Just bail on doing so for now.
          */
         fail("Missing support for simd16 depth writes on gen6\n");
      }

      if (c->computes_depth) {
         /* Hand over gl_FragDepth. */
         assert(this->frag_depth);
         fs_reg depth = *(variable_storage(this->frag_depth));

         emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), depth);
      } else {
         /* Pass through the payload depth. */
         emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
              fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
      }
      nr += reg_width;
   }

   if (c->dest_depth_reg) {
      emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
           fs_reg(brw_vec8_grf(c->dest_depth_reg, 0)));
      nr += reg_width;
   }

   for (int target = 0; target < c->key.nr_color_regions; target++) {
      this->current_annotation = ralloc_asprintf(this->mem_ctx,
                                                 "FB write target %d",
                                                 target);
      for (int i = 0; i < 4; i++)
         emit_color_write(target, i, color_mrf);

      fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
      inst->target = target;
      inst->base_mrf = base_mrf;
      inst->mlen = nr - base_mrf;
      if (target == c->key.nr_color_regions - 1)
         inst->eot = true;
      inst->header_present = header_present;
   }

   if (c->key.nr_color_regions == 0) {
      if (c->key.alpha_test) {
         /* If the alpha test is enabled but there's no color buffer,
          * we still need to send alpha out the pipeline to our null
          * renderbuffer.
          */
         emit_color_write(0, 3, color_mrf);
      }

      fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
      inst->base_mrf = base_mrf;
      inst->mlen = nr - base_mrf;
      inst->eot = true;
      inst->header_present = header_present;
   }

   this->current_annotation = NULL;
}

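/**
 * Source negation on an unsigned (UD) operand doesn't behave the way the
 * comparisons we emit expect it to, so resolve it here: the MOV applies
 * the negation into a fresh temporary, and the caller gets the plain
 * register back in its place.
 */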
void
fs_visitor::resolve_ud_negate(fs_reg *reg)
{
   if (reg->type != BRW_REGISTER_TYPE_UD ||
       !reg->negate)
      return;

   fs_reg temp = fs_reg(this, glsl_type::uint_type);
   emit(BRW_OPCODE_MOV, temp, *reg);
   *reg = temp;
}