brw_fs_visitor.cpp revision 01044fce6b3de11635ea5078b76ffee1a33b3802
1/* 2 * Copyright © 2010 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24/** @file brw_fs_visitor.cpp 25 * 26 * This file supports generating the FS LIR from the GLSL IR. The LIR 27 * makes it easier to do backend-specific optimizations than doing so 28 * in the GLSL IR or in the native code. 29 */ 30extern "C" { 31 32#include <sys/types.h> 33 34#include "main/macros.h" 35#include "main/shaderobj.h" 36#include "main/uniforms.h" 37#include "program/prog_parameter.h" 38#include "program/prog_print.h" 39#include "program/prog_optimize.h" 40#include "program/register_allocate.h" 41#include "program/sampler.h" 42#include "program/hash_table.h" 43#include "brw_context.h" 44#include "brw_eu.h" 45#include "brw_wm.h" 46} 47#include "brw_shader.h" 48#include "brw_fs.h" 49#include "glsl/glsl_types.h" 50#include "glsl/ir_optimization.h" 51#include "glsl/ir_print_visitor.h" 52 53void 54fs_visitor::visit(ir_variable *ir) 55{ 56 fs_reg *reg = NULL; 57 58 if (variable_storage(ir)) 59 return; 60 61 if (ir->mode == ir_var_in) { 62 if (!strcmp(ir->name, "gl_FragCoord")) { 63 reg = emit_fragcoord_interpolation(ir); 64 } else if (!strcmp(ir->name, "gl_FrontFacing")) { 65 reg = emit_frontfacing_interpolation(ir); 66 } else { 67 reg = emit_general_interpolation(ir); 68 } 69 assert(reg); 70 hash_table_insert(this->variable_ht, reg, ir); 71 return; 72 } else if (ir->mode == ir_var_out) { 73 reg = new(this->mem_ctx) fs_reg(this, ir->type); 74 75 if (ir->location == FRAG_RESULT_COLOR) { 76 /* Writing gl_FragColor outputs to all color regions. */ 77 for (int i = 0; i < MAX2(c->key.nr_color_regions, 1); i++) { 78 this->outputs[i] = *reg; 79 } 80 } else if (ir->location == FRAG_RESULT_DEPTH) { 81 this->frag_depth = ir; 82 } else { 83 /* gl_FragData or a user-defined FS output */ 84 assert(ir->location >= FRAG_RESULT_DATA0 && 85 ir->location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS); 86 87 /* General color output. */ 88 for (unsigned int i = 0; i < MAX2(1, ir->type->length); i++) { 89 int output = ir->location - FRAG_RESULT_DATA0 + i; 90 this->outputs[output] = *reg; 91 this->outputs[output].reg_offset += 4 * i; 92 } 93 } 94 } else if (ir->mode == ir_var_uniform) { 95 int param_index = c->prog_data.nr_params; 96 97 if (c->dispatch_width == 16) { 98 if (!variable_storage(ir)) { 99 fail("Failed to find uniform '%s' in 16-wide\n", ir->name); 100 } 101 return; 102 } 103 104 if (!strncmp(ir->name, "gl_", 3)) { 105 setup_builtin_uniform_values(ir); 106 } else { 107 setup_uniform_values(ir->location, ir->type); 108 } 109 110 reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index); 111 reg->type = brw_type_for_base_type(ir->type); 112 } 113 114 if (!reg) 115 reg = new(this->mem_ctx) fs_reg(this, ir->type); 116 117 hash_table_insert(this->variable_ht, reg, ir); 118} 119 120void 121fs_visitor::visit(ir_dereference_variable *ir) 122{ 123 fs_reg *reg = variable_storage(ir->var); 124 this->result = *reg; 125} 126 127void 128fs_visitor::visit(ir_dereference_record *ir) 129{ 130 const glsl_type *struct_type = ir->record->type; 131 132 ir->record->accept(this); 133 134 unsigned int offset = 0; 135 for (unsigned int i = 0; i < struct_type->length; i++) { 136 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0) 137 break; 138 offset += type_size(struct_type->fields.structure[i].type); 139 } 140 this->result.reg_offset += offset; 141 this->result.type = brw_type_for_base_type(ir->type); 142} 143 144void 145fs_visitor::visit(ir_dereference_array *ir) 146{ 147 ir_constant *index; 148 int element_size; 149 150 ir->array->accept(this); 151 index = ir->array_index->as_constant(); 152 153 element_size = type_size(ir->type); 154 this->result.type = brw_type_for_base_type(ir->type); 155 156 if (index) { 157 assert(this->result.file == UNIFORM || this->result.file == GRF); 158 this->result.reg_offset += index->value.i[0] * element_size; 159 } else { 160 assert(!"FINISHME: non-constant array element"); 161 } 162} 163 164/* Instruction selection: Produce a MOV.sat instead of 165 * MIN(MAX(val, 0), 1) when possible. 166 */ 167bool 168fs_visitor::try_emit_saturate(ir_expression *ir) 169{ 170 ir_rvalue *sat_val = ir->as_rvalue_to_saturate(); 171 172 if (!sat_val) 173 return false; 174 175 sat_val->accept(this); 176 fs_reg src = this->result; 177 178 this->result = fs_reg(this, ir->type); 179 fs_inst *inst = emit(BRW_OPCODE_MOV, this->result, src); 180 inst->saturate = true; 181 182 return true; 183} 184 185bool 186fs_visitor::try_emit_mad(ir_expression *ir, int mul_arg) 187{ 188 /* 3-src instructions were introduced in gen6. */ 189 if (intel->gen < 6) 190 return false; 191 192 /* MAD can only handle floating-point data. */ 193 if (ir->type != glsl_type::float_type) 194 return false; 195 196 ir_rvalue *nonmul = ir->operands[1 - mul_arg]; 197 ir_expression *mul = ir->operands[mul_arg]->as_expression(); 198 199 if (!mul || mul->operation != ir_binop_mul) 200 return false; 201 202 if (nonmul->as_constant() || 203 mul->operands[0]->as_constant() || 204 mul->operands[1]->as_constant()) 205 return false; 206 207 nonmul->accept(this); 208 fs_reg src0 = this->result; 209 210 mul->operands[0]->accept(this); 211 fs_reg src1 = this->result; 212 213 mul->operands[1]->accept(this); 214 fs_reg src2 = this->result; 215 216 this->result = fs_reg(this, ir->type); 217 emit(BRW_OPCODE_MAD, this->result, src0, src1, src2); 218 219 return true; 220} 221 222void 223fs_visitor::visit(ir_expression *ir) 224{ 225 unsigned int operand; 226 fs_reg op[2], temp; 227 fs_inst *inst; 228 229 assert(ir->get_num_operands() <= 2); 230 231 if (try_emit_saturate(ir)) 232 return; 233 if (ir->operation == ir_binop_add) { 234 if (try_emit_mad(ir, 0) || try_emit_mad(ir, 1)) 235 return; 236 } 237 238 for (operand = 0; operand < ir->get_num_operands(); operand++) { 239 ir->operands[operand]->accept(this); 240 if (this->result.file == BAD_FILE) { 241 ir_print_visitor v; 242 fail("Failed to get tree for expression operand:\n"); 243 ir->operands[operand]->accept(&v); 244 } 245 op[operand] = this->result; 246 247 /* Matrix expression operands should have been broken down to vector 248 * operations already. 249 */ 250 assert(!ir->operands[operand]->type->is_matrix()); 251 /* And then those vector operands should have been broken down to scalar. 252 */ 253 assert(!ir->operands[operand]->type->is_vector()); 254 } 255 256 /* Storage for our result. If our result goes into an assignment, it will 257 * just get copy-propagated out, so no worries. 258 */ 259 this->result = fs_reg(this, ir->type); 260 261 switch (ir->operation) { 262 case ir_unop_logic_not: 263 /* Note that BRW_OPCODE_NOT is not appropriate here, since it is 264 * ones complement of the whole register, not just bit 0. 265 */ 266 emit(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1)); 267 break; 268 case ir_unop_neg: 269 op[0].negate = !op[0].negate; 270 this->result = op[0]; 271 break; 272 case ir_unop_abs: 273 op[0].abs = true; 274 op[0].negate = false; 275 this->result = op[0]; 276 break; 277 case ir_unop_sign: 278 temp = fs_reg(this, ir->type); 279 280 emit(BRW_OPCODE_MOV, this->result, fs_reg(0.0f)); 281 282 inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)); 283 inst->conditional_mod = BRW_CONDITIONAL_G; 284 inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(1.0f)); 285 inst->predicated = true; 286 287 inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)); 288 inst->conditional_mod = BRW_CONDITIONAL_L; 289 inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f)); 290 inst->predicated = true; 291 292 break; 293 case ir_unop_rcp: 294 emit_math(SHADER_OPCODE_RCP, this->result, op[0]); 295 break; 296 297 case ir_unop_exp2: 298 emit_math(SHADER_OPCODE_EXP2, this->result, op[0]); 299 break; 300 case ir_unop_log2: 301 emit_math(SHADER_OPCODE_LOG2, this->result, op[0]); 302 break; 303 case ir_unop_exp: 304 case ir_unop_log: 305 assert(!"not reached: should be handled by ir_explog_to_explog2"); 306 break; 307 case ir_unop_sin: 308 case ir_unop_sin_reduced: 309 emit_math(SHADER_OPCODE_SIN, this->result, op[0]); 310 break; 311 case ir_unop_cos: 312 case ir_unop_cos_reduced: 313 emit_math(SHADER_OPCODE_COS, this->result, op[0]); 314 break; 315 316 case ir_unop_dFdx: 317 emit(FS_OPCODE_DDX, this->result, op[0]); 318 break; 319 case ir_unop_dFdy: 320 emit(FS_OPCODE_DDY, this->result, op[0]); 321 break; 322 323 case ir_binop_add: 324 emit(BRW_OPCODE_ADD, this->result, op[0], op[1]); 325 break; 326 case ir_binop_sub: 327 assert(!"not reached: should be handled by ir_sub_to_add_neg"); 328 break; 329 330 case ir_binop_mul: 331 if (ir->type->is_integer()) { 332 /* For integer multiplication, the MUL uses the low 16 bits 333 * of one of the operands (src0 on gen6, src1 on gen7). The 334 * MACH accumulates in the contribution of the upper 16 bits 335 * of that operand. 336 * 337 * FINISHME: Emit just the MUL if we know an operand is small 338 * enough. 339 */ 340 if (intel->gen >= 7 && c->dispatch_width == 16) 341 fail("16-wide explicit accumulator operands unsupported\n"); 342 343 struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D); 344 345 emit(BRW_OPCODE_MUL, acc, op[0], op[1]); 346 emit(BRW_OPCODE_MACH, reg_null_d, op[0], op[1]); 347 emit(BRW_OPCODE_MOV, this->result, fs_reg(acc)); 348 } else { 349 emit(BRW_OPCODE_MUL, this->result, op[0], op[1]); 350 } 351 break; 352 case ir_binop_div: 353 if (intel->gen >= 7 && c->dispatch_width == 16) 354 fail("16-wide INTDIV unsupported\n"); 355 356 /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */ 357 assert(ir->type->is_integer()); 358 emit_math(SHADER_OPCODE_INT_QUOTIENT, this->result, op[0], op[1]); 359 break; 360 case ir_binop_mod: 361 if (intel->gen >= 7 && c->dispatch_width == 16) 362 fail("16-wide INTDIV unsupported\n"); 363 364 /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */ 365 assert(ir->type->is_integer()); 366 emit_math(SHADER_OPCODE_INT_REMAINDER, this->result, op[0], op[1]); 367 break; 368 369 case ir_binop_less: 370 case ir_binop_greater: 371 case ir_binop_lequal: 372 case ir_binop_gequal: 373 case ir_binop_equal: 374 case ir_binop_all_equal: 375 case ir_binop_nequal: 376 case ir_binop_any_nequal: 377 temp = this->result; 378 /* original gen4 does implicit conversion before comparison. */ 379 if (intel->gen < 5) 380 temp.type = op[0].type; 381 382 resolve_ud_negate(&op[0]); 383 resolve_ud_negate(&op[1]); 384 385 inst = emit(BRW_OPCODE_CMP, temp, op[0], op[1]); 386 inst->conditional_mod = brw_conditional_for_comparison(ir->operation); 387 emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1)); 388 break; 389 390 case ir_binop_logic_xor: 391 emit(BRW_OPCODE_XOR, this->result, op[0], op[1]); 392 break; 393 394 case ir_binop_logic_or: 395 emit(BRW_OPCODE_OR, this->result, op[0], op[1]); 396 break; 397 398 case ir_binop_logic_and: 399 emit(BRW_OPCODE_AND, this->result, op[0], op[1]); 400 break; 401 402 case ir_binop_dot: 403 case ir_unop_any: 404 assert(!"not reached: should be handled by brw_fs_channel_expressions"); 405 break; 406 407 case ir_unop_noise: 408 assert(!"not reached: should be handled by lower_noise"); 409 break; 410 411 case ir_quadop_vector: 412 assert(!"not reached: should be handled by lower_quadop_vector"); 413 break; 414 415 case ir_unop_sqrt: 416 emit_math(SHADER_OPCODE_SQRT, this->result, op[0]); 417 break; 418 419 case ir_unop_rsq: 420 emit_math(SHADER_OPCODE_RSQ, this->result, op[0]); 421 break; 422 423 case ir_unop_i2u: 424 op[0].type = BRW_REGISTER_TYPE_UD; 425 this->result = op[0]; 426 break; 427 case ir_unop_u2i: 428 op[0].type = BRW_REGISTER_TYPE_D; 429 this->result = op[0]; 430 break; 431 case ir_unop_i2f: 432 case ir_unop_u2f: 433 case ir_unop_b2f: 434 case ir_unop_b2i: 435 case ir_unop_f2i: 436 emit(BRW_OPCODE_MOV, this->result, op[0]); 437 break; 438 case ir_unop_f2b: 439 case ir_unop_i2b: 440 temp = this->result; 441 /* original gen4 does implicit conversion before comparison. */ 442 if (intel->gen < 5) 443 temp.type = op[0].type; 444 445 resolve_ud_negate(&op[0]); 446 447 inst = emit(BRW_OPCODE_CMP, temp, op[0], fs_reg(0.0f)); 448 inst->conditional_mod = BRW_CONDITIONAL_NZ; 449 inst = emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(1)); 450 break; 451 452 case ir_unop_trunc: 453 emit(BRW_OPCODE_RNDZ, this->result, op[0]); 454 break; 455 case ir_unop_ceil: 456 op[0].negate = !op[0].negate; 457 inst = emit(BRW_OPCODE_RNDD, this->result, op[0]); 458 this->result.negate = true; 459 break; 460 case ir_unop_floor: 461 inst = emit(BRW_OPCODE_RNDD, this->result, op[0]); 462 break; 463 case ir_unop_fract: 464 inst = emit(BRW_OPCODE_FRC, this->result, op[0]); 465 break; 466 case ir_unop_round_even: 467 emit(BRW_OPCODE_RNDE, this->result, op[0]); 468 break; 469 470 case ir_binop_min: 471 resolve_ud_negate(&op[0]); 472 resolve_ud_negate(&op[1]); 473 474 if (intel->gen >= 6) { 475 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 476 inst->conditional_mod = BRW_CONDITIONAL_L; 477 } else { 478 /* Unalias the destination */ 479 this->result = fs_reg(this, ir->type); 480 481 inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]); 482 inst->conditional_mod = BRW_CONDITIONAL_L; 483 484 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 485 inst->predicated = true; 486 } 487 break; 488 case ir_binop_max: 489 resolve_ud_negate(&op[0]); 490 resolve_ud_negate(&op[1]); 491 492 if (intel->gen >= 6) { 493 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 494 inst->conditional_mod = BRW_CONDITIONAL_GE; 495 } else { 496 /* Unalias the destination */ 497 this->result = fs_reg(this, ir->type); 498 499 inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]); 500 inst->conditional_mod = BRW_CONDITIONAL_G; 501 502 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 503 inst->predicated = true; 504 } 505 break; 506 507 case ir_binop_pow: 508 emit_math(SHADER_OPCODE_POW, this->result, op[0], op[1]); 509 break; 510 511 case ir_unop_bit_not: 512 inst = emit(BRW_OPCODE_NOT, this->result, op[0]); 513 break; 514 case ir_binop_bit_and: 515 inst = emit(BRW_OPCODE_AND, this->result, op[0], op[1]); 516 break; 517 case ir_binop_bit_xor: 518 inst = emit(BRW_OPCODE_XOR, this->result, op[0], op[1]); 519 break; 520 case ir_binop_bit_or: 521 inst = emit(BRW_OPCODE_OR, this->result, op[0], op[1]); 522 break; 523 524 case ir_binop_lshift: 525 inst = emit(BRW_OPCODE_SHL, this->result, op[0], op[1]); 526 break; 527 528 case ir_binop_rshift: 529 if (ir->type->base_type == GLSL_TYPE_INT) 530 inst = emit(BRW_OPCODE_ASR, this->result, op[0], op[1]); 531 else 532 inst = emit(BRW_OPCODE_SHR, this->result, op[0], op[1]); 533 break; 534 } 535} 536 537void 538fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r, 539 const glsl_type *type, bool predicated) 540{ 541 switch (type->base_type) { 542 case GLSL_TYPE_FLOAT: 543 case GLSL_TYPE_UINT: 544 case GLSL_TYPE_INT: 545 case GLSL_TYPE_BOOL: 546 for (unsigned int i = 0; i < type->components(); i++) { 547 l.type = brw_type_for_base_type(type); 548 r.type = brw_type_for_base_type(type); 549 550 if (predicated || !l.equals(&r)) { 551 fs_inst *inst = emit(BRW_OPCODE_MOV, l, r); 552 inst->predicated = predicated; 553 } 554 555 l.reg_offset++; 556 r.reg_offset++; 557 } 558 break; 559 case GLSL_TYPE_ARRAY: 560 for (unsigned int i = 0; i < type->length; i++) { 561 emit_assignment_writes(l, r, type->fields.array, predicated); 562 } 563 break; 564 565 case GLSL_TYPE_STRUCT: 566 for (unsigned int i = 0; i < type->length; i++) { 567 emit_assignment_writes(l, r, type->fields.structure[i].type, 568 predicated); 569 } 570 break; 571 572 case GLSL_TYPE_SAMPLER: 573 break; 574 575 default: 576 assert(!"not reached"); 577 break; 578 } 579} 580 581/* If the RHS processing resulted in an instruction generating a 582 * temporary value, and it would be easy to rewrite the instruction to 583 * generate its result right into the LHS instead, do so. This ends 584 * up reliably removing instructions where it can be tricky to do so 585 * later without real UD chain information. 586 */ 587bool 588fs_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir, 589 fs_reg dst, 590 fs_reg src, 591 fs_inst *pre_rhs_inst, 592 fs_inst *last_rhs_inst) 593{ 594 if (pre_rhs_inst == last_rhs_inst) 595 return false; /* No instructions generated to work with. */ 596 597 /* Only attempt if we're doing a direct assignment. */ 598 if (ir->condition || 599 !(ir->lhs->type->is_scalar() || 600 (ir->lhs->type->is_vector() && 601 ir->write_mask == (1 << ir->lhs->type->vector_elements) - 1))) 602 return false; 603 604 /* Make sure the last instruction generated our source reg. */ 605 if (last_rhs_inst->predicated || 606 last_rhs_inst->force_uncompressed || 607 last_rhs_inst->force_sechalf || 608 !src.equals(&last_rhs_inst->dst)) 609 return false; 610 611 /* If last_rhs_inst wrote a different number of components than our LHS, 612 * we can't safely rewrite it. 613 */ 614 if (ir->lhs->type->vector_elements != last_rhs_inst->regs_written()) 615 return false; 616 617 /* Success! Rewrite the instruction. */ 618 last_rhs_inst->dst = dst; 619 620 return true; 621} 622 623void 624fs_visitor::visit(ir_assignment *ir) 625{ 626 fs_reg l, r; 627 fs_inst *inst; 628 629 /* FINISHME: arrays on the lhs */ 630 ir->lhs->accept(this); 631 l = this->result; 632 633 fs_inst *pre_rhs_inst = (fs_inst *) this->instructions.get_tail(); 634 635 ir->rhs->accept(this); 636 r = this->result; 637 638 fs_inst *last_rhs_inst = (fs_inst *) this->instructions.get_tail(); 639 640 assert(l.file != BAD_FILE); 641 assert(r.file != BAD_FILE); 642 643 if (try_rewrite_rhs_to_dst(ir, l, r, pre_rhs_inst, last_rhs_inst)) 644 return; 645 646 if (ir->condition) { 647 emit_bool_to_cond_code(ir->condition); 648 } 649 650 if (ir->lhs->type->is_scalar() || 651 ir->lhs->type->is_vector()) { 652 for (int i = 0; i < ir->lhs->type->vector_elements; i++) { 653 if (ir->write_mask & (1 << i)) { 654 inst = emit(BRW_OPCODE_MOV, l, r); 655 if (ir->condition) 656 inst->predicated = true; 657 r.reg_offset++; 658 } 659 l.reg_offset++; 660 } 661 } else { 662 emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL); 663 } 664} 665 666fs_inst * 667fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate, 668 int sampler) 669{ 670 int mlen; 671 int base_mrf = 1; 672 bool simd16 = false; 673 fs_reg orig_dst; 674 675 /* g0 header. */ 676 mlen = 1; 677 678 if (ir->shadow_comparitor && ir->op != ir_txd) { 679 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 680 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate); 681 coordinate.reg_offset++; 682 } 683 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */ 684 mlen += 3; 685 686 if (ir->op == ir_tex) { 687 /* There's no plain shadow compare message, so we use shadow 688 * compare with a bias of 0.0. 689 */ 690 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), fs_reg(0.0f)); 691 mlen++; 692 } else if (ir->op == ir_txb) { 693 ir->lod_info.bias->accept(this); 694 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 695 mlen++; 696 } else { 697 assert(ir->op == ir_txl); 698 ir->lod_info.lod->accept(this); 699 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 700 mlen++; 701 } 702 703 ir->shadow_comparitor->accept(this); 704 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 705 mlen++; 706 } else if (ir->op == ir_tex) { 707 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 708 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate); 709 coordinate.reg_offset++; 710 } 711 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */ 712 mlen += 3; 713 } else if (ir->op == ir_txd) { 714 ir->lod_info.grad.dPdx->accept(this); 715 fs_reg dPdx = this->result; 716 717 ir->lod_info.grad.dPdy->accept(this); 718 fs_reg dPdy = this->result; 719 720 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 721 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate); 722 coordinate.reg_offset++; 723 } 724 /* the slots for u and v are always present, but r is optional */ 725 mlen += MAX2(ir->coordinate->type->vector_elements, 2); 726 727 /* P = u, v, r 728 * dPdx = dudx, dvdx, drdx 729 * dPdy = dudy, dvdy, drdy 730 * 731 * 1-arg: Does not exist. 732 * 733 * 2-arg: dudx dvdx dudy dvdy 734 * dPdx.x dPdx.y dPdy.x dPdy.y 735 * m4 m5 m6 m7 736 * 737 * 3-arg: dudx dvdx drdx dudy dvdy drdy 738 * dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z 739 * m5 m6 m7 m8 m9 m10 740 */ 741 for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) { 742 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx); 743 dPdx.reg_offset++; 744 } 745 mlen += MAX2(ir->lod_info.grad.dPdx->type->vector_elements, 2); 746 747 for (int i = 0; i < ir->lod_info.grad.dPdy->type->vector_elements; i++) { 748 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy); 749 dPdy.reg_offset++; 750 } 751 mlen += MAX2(ir->lod_info.grad.dPdy->type->vector_elements, 2); 752 } else if (ir->op == ir_txs) { 753 /* There's no SIMD8 resinfo message on Gen4. Use SIMD16 instead. */ 754 simd16 = true; 755 ir->lod_info.lod->accept(this); 756 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result); 757 mlen += 2; 758 } else { 759 /* Oh joy. gen4 doesn't have SIMD8 non-shadow-compare bias/lod 760 * instructions. We'll need to do SIMD16 here. 761 */ 762 simd16 = true; 763 assert(ir->op == ir_txb || ir->op == ir_txl || ir->op == ir_txf); 764 765 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 766 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2, coordinate.type), 767 coordinate); 768 coordinate.reg_offset++; 769 } 770 771 /* Initialize the rest of u/v/r with 0.0. Empirically, this seems to 772 * be necessary for TXF (ld), but seems wise to do for all messages. 773 */ 774 for (int i = ir->coordinate->type->vector_elements; i < 3; i++) { 775 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2), fs_reg(0.0f)); 776 } 777 778 /* lod/bias appears after u/v/r. */ 779 mlen += 6; 780 781 if (ir->op == ir_txb) { 782 ir->lod_info.bias->accept(this); 783 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 784 mlen++; 785 } else { 786 ir->lod_info.lod->accept(this); 787 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, this->result.type), 788 this->result); 789 mlen++; 790 } 791 792 /* The unused upper half. */ 793 mlen++; 794 } 795 796 if (simd16) { 797 /* Now, since we're doing simd16, the return is 2 interleaved 798 * vec4s where the odd-indexed ones are junk. We'll need to move 799 * this weirdness around to the expected layout. 800 */ 801 orig_dst = dst; 802 const glsl_type *vec_type = 803 glsl_type::get_instance(ir->type->base_type, 4, 1); 804 dst = fs_reg(this, glsl_type::get_array_instance(vec_type, 2)); 805 dst.type = intel->is_g4x ? brw_type_for_base_type(ir->type) 806 : BRW_REGISTER_TYPE_F; 807 } 808 809 fs_inst *inst = NULL; 810 switch (ir->op) { 811 case ir_tex: 812 inst = emit(SHADER_OPCODE_TEX, dst); 813 break; 814 case ir_txb: 815 inst = emit(FS_OPCODE_TXB, dst); 816 break; 817 case ir_txl: 818 inst = emit(SHADER_OPCODE_TXL, dst); 819 break; 820 case ir_txd: 821 inst = emit(SHADER_OPCODE_TXD, dst); 822 break; 823 case ir_txs: 824 inst = emit(SHADER_OPCODE_TXS, dst); 825 break; 826 case ir_txf: 827 inst = emit(SHADER_OPCODE_TXF, dst); 828 break; 829 } 830 inst->base_mrf = base_mrf; 831 inst->mlen = mlen; 832 inst->header_present = true; 833 834 if (simd16) { 835 for (int i = 0; i < 4; i++) { 836 emit(BRW_OPCODE_MOV, orig_dst, dst); 837 orig_dst.reg_offset++; 838 dst.reg_offset += 2; 839 } 840 } 841 842 return inst; 843} 844 845/* gen5's sampler has slots for u, v, r, array index, then optional 846 * parameters like shadow comparitor or LOD bias. If optional 847 * parameters aren't present, those base slots are optional and don't 848 * need to be included in the message. 849 * 850 * We don't fill in the unnecessary slots regardless, which may look 851 * surprising in the disassembly. 852 */ 853fs_inst * 854fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate, 855 int sampler) 856{ 857 int mlen = 0; 858 int base_mrf = 2; 859 int reg_width = c->dispatch_width / 8; 860 bool header_present = false; 861 const int vector_elements = 862 ir->coordinate ? ir->coordinate->type->vector_elements : 0; 863 864 if (ir->offset) { 865 /* The offsets set up by the ir_texture visitor are in the 866 * m1 header, so we can't go headerless. 867 */ 868 header_present = true; 869 mlen++; 870 base_mrf--; 871 } 872 873 for (int i = 0; i < vector_elements; i++) { 874 emit(BRW_OPCODE_MOV, 875 fs_reg(MRF, base_mrf + mlen + i * reg_width, coordinate.type), 876 coordinate); 877 coordinate.reg_offset++; 878 } 879 mlen += vector_elements * reg_width; 880 881 if (ir->shadow_comparitor && ir->op != ir_txd) { 882 mlen = MAX2(mlen, header_present + 4 * reg_width); 883 884 ir->shadow_comparitor->accept(this); 885 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 886 mlen += reg_width; 887 } 888 889 fs_inst *inst = NULL; 890 switch (ir->op) { 891 case ir_tex: 892 inst = emit(SHADER_OPCODE_TEX, dst); 893 break; 894 case ir_txb: 895 ir->lod_info.bias->accept(this); 896 mlen = MAX2(mlen, header_present + 4 * reg_width); 897 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 898 mlen += reg_width; 899 900 inst = emit(FS_OPCODE_TXB, dst); 901 902 break; 903 case ir_txl: 904 ir->lod_info.lod->accept(this); 905 mlen = MAX2(mlen, header_present + 4 * reg_width); 906 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 907 mlen += reg_width; 908 909 inst = emit(SHADER_OPCODE_TXL, dst); 910 break; 911 case ir_txd: { 912 ir->lod_info.grad.dPdx->accept(this); 913 fs_reg dPdx = this->result; 914 915 ir->lod_info.grad.dPdy->accept(this); 916 fs_reg dPdy = this->result; 917 918 mlen = MAX2(mlen, header_present + 4 * reg_width); /* skip over 'ai' */ 919 920 /** 921 * P = u, v, r 922 * dPdx = dudx, dvdx, drdx 923 * dPdy = dudy, dvdy, drdy 924 * 925 * Load up these values: 926 * - dudx dudy dvdx dvdy drdx drdy 927 * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z 928 */ 929 for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) { 930 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx); 931 dPdx.reg_offset++; 932 mlen += reg_width; 933 934 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy); 935 dPdy.reg_offset++; 936 mlen += reg_width; 937 } 938 939 inst = emit(SHADER_OPCODE_TXD, dst); 940 break; 941 } 942 case ir_txs: 943 ir->lod_info.lod->accept(this); 944 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result); 945 mlen += reg_width; 946 inst = emit(SHADER_OPCODE_TXS, dst); 947 break; 948 case ir_txf: 949 mlen = header_present + 4 * reg_width; 950 951 ir->lod_info.lod->accept(this); 952 emit(BRW_OPCODE_MOV, 953 fs_reg(MRF, base_mrf + mlen - reg_width, BRW_REGISTER_TYPE_UD), 954 this->result); 955 inst = emit(SHADER_OPCODE_TXF, dst); 956 break; 957 } 958 inst->base_mrf = base_mrf; 959 inst->mlen = mlen; 960 inst->header_present = header_present; 961 962 if (mlen > 11) { 963 fail("Message length >11 disallowed by hardware\n"); 964 } 965 966 return inst; 967} 968 969fs_inst * 970fs_visitor::emit_texture_gen7(ir_texture *ir, fs_reg dst, fs_reg coordinate, 971 int sampler) 972{ 973 int mlen = 0; 974 int base_mrf = 2; 975 int reg_width = c->dispatch_width / 8; 976 bool header_present = false; 977 978 if (ir->offset) { 979 /* The offsets set up by the ir_texture visitor are in the 980 * m1 header, so we can't go headerless. 981 */ 982 header_present = true; 983 mlen++; 984 base_mrf--; 985 } 986 987 if (ir->shadow_comparitor && ir->op != ir_txd) { 988 ir->shadow_comparitor->accept(this); 989 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 990 mlen += reg_width; 991 } 992 993 /* Set up the LOD info */ 994 switch (ir->op) { 995 case ir_tex: 996 break; 997 case ir_txb: 998 ir->lod_info.bias->accept(this); 999 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 1000 mlen += reg_width; 1001 break; 1002 case ir_txl: 1003 ir->lod_info.lod->accept(this); 1004 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 1005 mlen += reg_width; 1006 break; 1007 case ir_txd: { 1008 if (c->dispatch_width == 16) 1009 fail("Gen7 does not support sample_d/sample_d_c in SIMD16 mode."); 1010 1011 ir->lod_info.grad.dPdx->accept(this); 1012 fs_reg dPdx = this->result; 1013 1014 ir->lod_info.grad.dPdy->accept(this); 1015 fs_reg dPdy = this->result; 1016 1017 /* Load dPdx and the coordinate together: 1018 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z 1019 */ 1020 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 1021 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate); 1022 coordinate.reg_offset++; 1023 mlen += reg_width; 1024 1025 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx); 1026 dPdx.reg_offset++; 1027 mlen += reg_width; 1028 1029 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy); 1030 dPdy.reg_offset++; 1031 mlen += reg_width; 1032 } 1033 break; 1034 } 1035 case ir_txs: 1036 ir->lod_info.lod->accept(this); 1037 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result); 1038 mlen += reg_width; 1039 break; 1040 case ir_txf: 1041 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r. */ 1042 emit(BRW_OPCODE_MOV, 1043 fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate); 1044 coordinate.reg_offset++; 1045 mlen += reg_width; 1046 1047 ir->lod_info.lod->accept(this); 1048 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), this->result); 1049 mlen += reg_width; 1050 1051 for (int i = 1; i < ir->coordinate->type->vector_elements; i++) { 1052 emit(BRW_OPCODE_MOV, 1053 fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate); 1054 coordinate.reg_offset++; 1055 mlen += reg_width; 1056 } 1057 break; 1058 } 1059 1060 /* Set up the coordinate (except for cases where it was done above) */ 1061 if (ir->op != ir_txd && ir->op != ir_txs && ir->op != ir_txf) { 1062 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 1063 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate); 1064 coordinate.reg_offset++; 1065 mlen += reg_width; 1066 } 1067 } 1068 1069 /* Generate the SEND */ 1070 fs_inst *inst = NULL; 1071 switch (ir->op) { 1072 case ir_tex: inst = emit(SHADER_OPCODE_TEX, dst); break; 1073 case ir_txb: inst = emit(FS_OPCODE_TXB, dst); break; 1074 case ir_txl: inst = emit(SHADER_OPCODE_TXL, dst); break; 1075 case ir_txd: inst = emit(SHADER_OPCODE_TXD, dst); break; 1076 case ir_txf: inst = emit(SHADER_OPCODE_TXF, dst); break; 1077 case ir_txs: inst = emit(SHADER_OPCODE_TXS, dst); break; 1078 } 1079 inst->base_mrf = base_mrf; 1080 inst->mlen = mlen; 1081 inst->header_present = header_present; 1082 1083 if (mlen > 11) { 1084 fail("Message length >11 disallowed by hardware\n"); 1085 } 1086 1087 return inst; 1088} 1089 1090void 1091fs_visitor::visit(ir_texture *ir) 1092{ 1093 fs_inst *inst = NULL; 1094 1095 int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &fp->Base); 1096 sampler = fp->Base.SamplerUnits[sampler]; 1097 1098 /* Our hardware doesn't have a sample_d_c message, so shadow compares 1099 * for textureGrad/TXD need to be emulated with instructions. 1100 */ 1101 bool hw_compare_supported = ir->op != ir_txd; 1102 if (ir->shadow_comparitor && !hw_compare_supported) { 1103 assert(c->key.tex.compare_funcs[sampler] != GL_NONE); 1104 /* No need to even sample for GL_ALWAYS or GL_NEVER...bail early */ 1105 if (c->key.tex.compare_funcs[sampler] == GL_ALWAYS) 1106 return swizzle_result(ir, fs_reg(1.0f), sampler); 1107 else if (c->key.tex.compare_funcs[sampler] == GL_NEVER) 1108 return swizzle_result(ir, fs_reg(0.0f), sampler); 1109 } 1110 1111 if (ir->coordinate) 1112 ir->coordinate->accept(this); 1113 fs_reg coordinate = this->result; 1114 1115 if (ir->offset != NULL) { 1116 uint32_t offset_bits = brw_texture_offset(ir->offset->as_constant()); 1117 1118 /* Explicitly set up the message header by copying g0 to msg reg m1. */ 1119 emit(BRW_OPCODE_MOV, fs_reg(MRF, 1, BRW_REGISTER_TYPE_UD), 1120 fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD))); 1121 1122 /* Then set the offset bits in DWord 2 of the message header. */ 1123 emit(BRW_OPCODE_MOV, 1124 fs_reg(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 1, 2), 1125 BRW_REGISTER_TYPE_UD)), 1126 fs_reg(brw_imm_uw(offset_bits))); 1127 } 1128 1129 /* Should be lowered by do_lower_texture_projection */ 1130 assert(!ir->projector); 1131 1132 bool needs_gl_clamp = true; 1133 1134 fs_reg scale_x, scale_y; 1135 1136 /* The 965 requires the EU to do the normalization of GL rectangle 1137 * texture coordinates. We use the program parameter state 1138 * tracking to get the scaling factor. 1139 */ 1140 if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT && 1141 (intel->gen < 6 || 1142 (intel->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) || 1143 c->key.tex.gl_clamp_mask[1] & (1 << sampler))))) { 1144 struct gl_program_parameter_list *params = c->fp->program.Base.Parameters; 1145 int tokens[STATE_LENGTH] = { 1146 STATE_INTERNAL, 1147 STATE_TEXRECT_SCALE, 1148 sampler, 1149 0, 1150 0 1151 }; 1152 1153 if (c->dispatch_width == 16) { 1154 fail("rectangle scale uniform setup not supported on 16-wide\n"); 1155 this->result = fs_reg(this, ir->type); 1156 return; 1157 } 1158 1159 c->prog_data.param_convert[c->prog_data.nr_params] = 1160 PARAM_NO_CONVERT; 1161 c->prog_data.param_convert[c->prog_data.nr_params + 1] = 1162 PARAM_NO_CONVERT; 1163 1164 scale_x = fs_reg(UNIFORM, c->prog_data.nr_params); 1165 scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1); 1166 1167 GLuint index = _mesa_add_state_reference(params, 1168 (gl_state_index *)tokens); 1169 1170 this->param_index[c->prog_data.nr_params] = index; 1171 this->param_offset[c->prog_data.nr_params] = 0; 1172 c->prog_data.nr_params++; 1173 this->param_index[c->prog_data.nr_params] = index; 1174 this->param_offset[c->prog_data.nr_params] = 1; 1175 c->prog_data.nr_params++; 1176 } 1177 1178 /* The 965 requires the EU to do the normalization of GL rectangle 1179 * texture coordinates. We use the program parameter state 1180 * tracking to get the scaling factor. 1181 */ 1182 if (intel->gen < 6 && 1183 ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) { 1184 fs_reg dst = fs_reg(this, ir->coordinate->type); 1185 fs_reg src = coordinate; 1186 coordinate = dst; 1187 1188 emit(BRW_OPCODE_MUL, dst, src, scale_x); 1189 dst.reg_offset++; 1190 src.reg_offset++; 1191 emit(BRW_OPCODE_MUL, dst, src, scale_y); 1192 } else if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) { 1193 /* On gen6+, the sampler handles the rectangle coordinates 1194 * natively, without needing rescaling. But that means we have 1195 * to do GL_CLAMP clamping at the [0, width], [0, height] scale, 1196 * not [0, 1] like the default case below. 1197 */ 1198 needs_gl_clamp = false; 1199 1200 for (int i = 0; i < 2; i++) { 1201 if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) { 1202 fs_reg chan = coordinate; 1203 chan.reg_offset += i; 1204 1205 inst = emit(BRW_OPCODE_SEL, chan, chan, brw_imm_f(0.0)); 1206 inst->conditional_mod = BRW_CONDITIONAL_G; 1207 1208 /* Our parameter comes in as 1.0/width or 1.0/height, 1209 * because that's what people normally want for doing 1210 * texture rectangle handling. We need width or height 1211 * for clamping, but we don't care enough to make a new 1212 * parameter type, so just invert back. 1213 */ 1214 fs_reg limit = fs_reg(this, glsl_type::float_type); 1215 emit(BRW_OPCODE_MOV, limit, i == 0 ? scale_x : scale_y); 1216 emit(SHADER_OPCODE_RCP, limit, limit); 1217 1218 inst = emit(BRW_OPCODE_SEL, chan, chan, limit); 1219 inst->conditional_mod = BRW_CONDITIONAL_L; 1220 } 1221 } 1222 } 1223 1224 if (ir->coordinate && needs_gl_clamp) { 1225 for (int i = 0; i < MIN2(ir->coordinate->type->vector_elements, 3); i++) { 1226 if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) { 1227 fs_reg chan = coordinate; 1228 chan.reg_offset += i; 1229 1230 fs_inst *inst = emit(BRW_OPCODE_MOV, chan, chan); 1231 inst->saturate = true; 1232 } 1233 } 1234 } 1235 1236 /* Writemasking doesn't eliminate channels on SIMD8 texture 1237 * samples, so don't worry about them. 1238 */ 1239 fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1)); 1240 1241 if (intel->gen >= 7) { 1242 inst = emit_texture_gen7(ir, dst, coordinate, sampler); 1243 } else if (intel->gen >= 5) { 1244 inst = emit_texture_gen5(ir, dst, coordinate, sampler); 1245 } else { 1246 inst = emit_texture_gen4(ir, dst, coordinate, sampler); 1247 } 1248 1249 /* If there's an offset, we already set up m1. To avoid the implied move, 1250 * use the null register. Otherwise, we want an implied move from g0. 1251 */ 1252 if (ir->offset != NULL || !inst->header_present) 1253 inst->src[0] = reg_undef; 1254 else 1255 inst->src[0] = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW)); 1256 1257 inst->sampler = sampler; 1258 1259 if (ir->shadow_comparitor) { 1260 if (hw_compare_supported) { 1261 inst->shadow_compare = true; 1262 } else { 1263 ir->shadow_comparitor->accept(this); 1264 fs_reg ref = this->result; 1265 1266 fs_reg value = dst; 1267 dst = fs_reg(this, glsl_type::vec4_type); 1268 1269 /* FINISHME: This needs to be done pre-filtering. */ 1270 1271 uint32_t conditional = 0; 1272 switch (c->key.tex.compare_funcs[sampler]) { 1273 /* GL_ALWAYS and GL_NEVER were handled at the top of the function */ 1274 case GL_LESS: conditional = BRW_CONDITIONAL_L; break; 1275 case GL_GREATER: conditional = BRW_CONDITIONAL_G; break; 1276 case GL_LEQUAL: conditional = BRW_CONDITIONAL_LE; break; 1277 case GL_GEQUAL: conditional = BRW_CONDITIONAL_GE; break; 1278 case GL_EQUAL: conditional = BRW_CONDITIONAL_EQ; break; 1279 case GL_NOTEQUAL: conditional = BRW_CONDITIONAL_NEQ; break; 1280 default: assert(!"Should not get here: bad shadow compare function"); 1281 } 1282 1283 /* Use conditional moves to load 0 or 1 as the result */ 1284 this->current_annotation = "manual shadow comparison"; 1285 for (int i = 0; i < 4; i++) { 1286 inst = emit(BRW_OPCODE_MOV, dst, fs_reg(0.0f)); 1287 1288 inst = emit(BRW_OPCODE_CMP, reg_null_f, ref, value); 1289 inst->conditional_mod = conditional; 1290 1291 inst = emit(BRW_OPCODE_MOV, dst, fs_reg(1.0f)); 1292 inst->predicated = true; 1293 1294 dst.reg_offset++; 1295 value.reg_offset++; 1296 } 1297 dst.reg_offset = 0; 1298 } 1299 } 1300 1301 swizzle_result(ir, dst, sampler); 1302} 1303 1304/** 1305 * Swizzle the result of a texture result. This is necessary for 1306 * EXT_texture_swizzle as well as DEPTH_TEXTURE_MODE for shadow comparisons. 1307 */ 1308void 1309fs_visitor::swizzle_result(ir_texture *ir, fs_reg orig_val, int sampler) 1310{ 1311 this->result = orig_val; 1312 1313 if (ir->op == ir_txs) 1314 return; 1315 1316 if (ir->type == glsl_type::float_type) { 1317 /* Ignore DEPTH_TEXTURE_MODE swizzling. */ 1318 assert(ir->sampler->type->sampler_shadow); 1319 } else if (c->key.tex.swizzles[sampler] != SWIZZLE_NOOP) { 1320 fs_reg swizzled_result = fs_reg(this, glsl_type::vec4_type); 1321 1322 for (int i = 0; i < 4; i++) { 1323 int swiz = GET_SWZ(c->key.tex.swizzles[sampler], i); 1324 fs_reg l = swizzled_result; 1325 l.reg_offset += i; 1326 1327 if (swiz == SWIZZLE_ZERO) { 1328 emit(BRW_OPCODE_MOV, l, fs_reg(0.0f)); 1329 } else if (swiz == SWIZZLE_ONE) { 1330 emit(BRW_OPCODE_MOV, l, fs_reg(1.0f)); 1331 } else { 1332 fs_reg r = orig_val; 1333 r.reg_offset += GET_SWZ(c->key.tex.swizzles[sampler], i); 1334 emit(BRW_OPCODE_MOV, l, r); 1335 } 1336 } 1337 this->result = swizzled_result; 1338 } 1339} 1340 1341void 1342fs_visitor::visit(ir_swizzle *ir) 1343{ 1344 ir->val->accept(this); 1345 fs_reg val = this->result; 1346 1347 if (ir->type->vector_elements == 1) { 1348 this->result.reg_offset += ir->mask.x; 1349 return; 1350 } 1351 1352 fs_reg result = fs_reg(this, ir->type); 1353 this->result = result; 1354 1355 for (unsigned int i = 0; i < ir->type->vector_elements; i++) { 1356 fs_reg channel = val; 1357 int swiz = 0; 1358 1359 switch (i) { 1360 case 0: 1361 swiz = ir->mask.x; 1362 break; 1363 case 1: 1364 swiz = ir->mask.y; 1365 break; 1366 case 2: 1367 swiz = ir->mask.z; 1368 break; 1369 case 3: 1370 swiz = ir->mask.w; 1371 break; 1372 } 1373 1374 channel.reg_offset += swiz; 1375 emit(BRW_OPCODE_MOV, result, channel); 1376 result.reg_offset++; 1377 } 1378} 1379 1380void 1381fs_visitor::visit(ir_discard *ir) 1382{ 1383 assert(ir->condition == NULL); /* FINISHME */ 1384 1385 emit(FS_OPCODE_DISCARD); 1386 kill_emitted = true; 1387} 1388 1389void 1390fs_visitor::visit(ir_constant *ir) 1391{ 1392 /* Set this->result to reg at the bottom of the function because some code 1393 * paths will cause this visitor to be applied to other fields. This will 1394 * cause the value stored in this->result to be modified. 1395 * 1396 * Make reg constant so that it doesn't get accidentally modified along the 1397 * way. Yes, I actually had this problem. :( 1398 */ 1399 const fs_reg reg(this, ir->type); 1400 fs_reg dst_reg = reg; 1401 1402 if (ir->type->is_array()) { 1403 const unsigned size = type_size(ir->type->fields.array); 1404 1405 for (unsigned i = 0; i < ir->type->length; i++) { 1406 ir->array_elements[i]->accept(this); 1407 fs_reg src_reg = this->result; 1408 1409 dst_reg.type = src_reg.type; 1410 for (unsigned j = 0; j < size; j++) { 1411 emit(BRW_OPCODE_MOV, dst_reg, src_reg); 1412 src_reg.reg_offset++; 1413 dst_reg.reg_offset++; 1414 } 1415 } 1416 } else if (ir->type->is_record()) { 1417 foreach_list(node, &ir->components) { 1418 ir_instruction *const field = (ir_instruction *) node; 1419 const unsigned size = type_size(field->type); 1420 1421 field->accept(this); 1422 fs_reg src_reg = this->result; 1423 1424 dst_reg.type = src_reg.type; 1425 for (unsigned j = 0; j < size; j++) { 1426 emit(BRW_OPCODE_MOV, dst_reg, src_reg); 1427 src_reg.reg_offset++; 1428 dst_reg.reg_offset++; 1429 } 1430 } 1431 } else { 1432 const unsigned size = type_size(ir->type); 1433 1434 for (unsigned i = 0; i < size; i++) { 1435 switch (ir->type->base_type) { 1436 case GLSL_TYPE_FLOAT: 1437 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i])); 1438 break; 1439 case GLSL_TYPE_UINT: 1440 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i])); 1441 break; 1442 case GLSL_TYPE_INT: 1443 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i])); 1444 break; 1445 case GLSL_TYPE_BOOL: 1446 emit(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i])); 1447 break; 1448 default: 1449 assert(!"Non-float/uint/int/bool constant"); 1450 } 1451 dst_reg.reg_offset++; 1452 } 1453 } 1454 1455 this->result = reg; 1456} 1457 1458void 1459fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir) 1460{ 1461 ir_expression *expr = ir->as_expression(); 1462 1463 if (expr) { 1464 fs_reg op[2]; 1465 fs_inst *inst; 1466 1467 assert(expr->get_num_operands() <= 2); 1468 for (unsigned int i = 0; i < expr->get_num_operands(); i++) { 1469 assert(expr->operands[i]->type->is_scalar()); 1470 1471 expr->operands[i]->accept(this); 1472 op[i] = this->result; 1473 1474 resolve_ud_negate(&op[i]); 1475 } 1476 1477 switch (expr->operation) { 1478 case ir_unop_logic_not: 1479 inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1)); 1480 inst->conditional_mod = BRW_CONDITIONAL_Z; 1481 break; 1482 1483 case ir_binop_logic_xor: 1484 inst = emit(BRW_OPCODE_XOR, reg_null_d, op[0], op[1]); 1485 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1486 break; 1487 1488 case ir_binop_logic_or: 1489 inst = emit(BRW_OPCODE_OR, reg_null_d, op[0], op[1]); 1490 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1491 break; 1492 1493 case ir_binop_logic_and: 1494 inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], op[1]); 1495 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1496 break; 1497 1498 case ir_unop_f2b: 1499 if (intel->gen >= 6) { 1500 inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0.0f)); 1501 } else { 1502 inst = emit(BRW_OPCODE_MOV, reg_null_f, op[0]); 1503 } 1504 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1505 break; 1506 1507 case ir_unop_i2b: 1508 if (intel->gen >= 6) { 1509 inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0)); 1510 } else { 1511 inst = emit(BRW_OPCODE_MOV, reg_null_d, op[0]); 1512 } 1513 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1514 break; 1515 1516 case ir_binop_greater: 1517 case ir_binop_gequal: 1518 case ir_binop_less: 1519 case ir_binop_lequal: 1520 case ir_binop_equal: 1521 case ir_binop_all_equal: 1522 case ir_binop_nequal: 1523 case ir_binop_any_nequal: 1524 inst = emit(BRW_OPCODE_CMP, reg_null_cmp, op[0], op[1]); 1525 inst->conditional_mod = 1526 brw_conditional_for_comparison(expr->operation); 1527 break; 1528 1529 default: 1530 assert(!"not reached"); 1531 fail("bad cond code\n"); 1532 break; 1533 } 1534 return; 1535 } 1536 1537 ir->accept(this); 1538 1539 if (intel->gen >= 6) { 1540 fs_inst *inst = emit(BRW_OPCODE_AND, reg_null_d, this->result, fs_reg(1)); 1541 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1542 } else { 1543 fs_inst *inst = emit(BRW_OPCODE_MOV, reg_null_d, this->result); 1544 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1545 } 1546} 1547 1548/** 1549 * Emit a gen6 IF statement with the comparison folded into the IF 1550 * instruction. 1551 */ 1552void 1553fs_visitor::emit_if_gen6(ir_if *ir) 1554{ 1555 ir_expression *expr = ir->condition->as_expression(); 1556 1557 if (expr) { 1558 fs_reg op[2]; 1559 fs_inst *inst; 1560 fs_reg temp; 1561 1562 assert(expr->get_num_operands() <= 2); 1563 for (unsigned int i = 0; i < expr->get_num_operands(); i++) { 1564 assert(expr->operands[i]->type->is_scalar()); 1565 1566 expr->operands[i]->accept(this); 1567 op[i] = this->result; 1568 } 1569 1570 switch (expr->operation) { 1571 case ir_unop_logic_not: 1572 inst = emit(BRW_OPCODE_IF, temp, op[0], fs_reg(0)); 1573 inst->conditional_mod = BRW_CONDITIONAL_Z; 1574 return; 1575 1576 case ir_binop_logic_xor: 1577 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]); 1578 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1579 return; 1580 1581 case ir_binop_logic_or: 1582 temp = fs_reg(this, glsl_type::bool_type); 1583 emit(BRW_OPCODE_OR, temp, op[0], op[1]); 1584 inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)); 1585 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1586 return; 1587 1588 case ir_binop_logic_and: 1589 temp = fs_reg(this, glsl_type::bool_type); 1590 emit(BRW_OPCODE_AND, temp, op[0], op[1]); 1591 inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)); 1592 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1593 return; 1594 1595 case ir_unop_f2b: 1596 inst = emit(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0)); 1597 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1598 return; 1599 1600 case ir_unop_i2b: 1601 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)); 1602 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1603 return; 1604 1605 case ir_binop_greater: 1606 case ir_binop_gequal: 1607 case ir_binop_less: 1608 case ir_binop_lequal: 1609 case ir_binop_equal: 1610 case ir_binop_all_equal: 1611 case ir_binop_nequal: 1612 case ir_binop_any_nequal: 1613 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]); 1614 inst->conditional_mod = 1615 brw_conditional_for_comparison(expr->operation); 1616 return; 1617 default: 1618 assert(!"not reached"); 1619 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)); 1620 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1621 fail("bad condition\n"); 1622 return; 1623 } 1624 return; 1625 } 1626 1627 ir->condition->accept(this); 1628 1629 fs_inst *inst = emit(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0)); 1630 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1631} 1632 1633void 1634fs_visitor::visit(ir_if *ir) 1635{ 1636 fs_inst *inst; 1637 1638 if (intel->gen < 6 && c->dispatch_width == 16) { 1639 fail("Can't support (non-uniform) control flow on 16-wide\n"); 1640 } 1641 1642 /* Don't point the annotation at the if statement, because then it plus 1643 * the then and else blocks get printed. 1644 */ 1645 this->base_ir = ir->condition; 1646 1647 if (intel->gen == 6) { 1648 emit_if_gen6(ir); 1649 } else { 1650 emit_bool_to_cond_code(ir->condition); 1651 1652 inst = emit(BRW_OPCODE_IF); 1653 inst->predicated = true; 1654 } 1655 1656 foreach_list(node, &ir->then_instructions) { 1657 ir_instruction *ir = (ir_instruction *)node; 1658 this->base_ir = ir; 1659 1660 ir->accept(this); 1661 } 1662 1663 if (!ir->else_instructions.is_empty()) { 1664 emit(BRW_OPCODE_ELSE); 1665 1666 foreach_list(node, &ir->else_instructions) { 1667 ir_instruction *ir = (ir_instruction *)node; 1668 this->base_ir = ir; 1669 1670 ir->accept(this); 1671 } 1672 } 1673 1674 emit(BRW_OPCODE_ENDIF); 1675} 1676 1677void 1678fs_visitor::visit(ir_loop *ir) 1679{ 1680 fs_reg counter = reg_undef; 1681 1682 if (intel->gen < 6 && c->dispatch_width == 16) { 1683 fail("Can't support (non-uniform) control flow on 16-wide\n"); 1684 } 1685 1686 if (ir->counter) { 1687 this->base_ir = ir->counter; 1688 ir->counter->accept(this); 1689 counter = *(variable_storage(ir->counter)); 1690 1691 if (ir->from) { 1692 this->base_ir = ir->from; 1693 ir->from->accept(this); 1694 1695 emit(BRW_OPCODE_MOV, counter, this->result); 1696 } 1697 } 1698 1699 emit(BRW_OPCODE_DO); 1700 1701 if (ir->to) { 1702 this->base_ir = ir->to; 1703 ir->to->accept(this); 1704 1705 fs_inst *inst = emit(BRW_OPCODE_CMP, reg_null_cmp, counter, this->result); 1706 inst->conditional_mod = brw_conditional_for_comparison(ir->cmp); 1707 1708 inst = emit(BRW_OPCODE_BREAK); 1709 inst->predicated = true; 1710 } 1711 1712 foreach_list(node, &ir->body_instructions) { 1713 ir_instruction *ir = (ir_instruction *)node; 1714 1715 this->base_ir = ir; 1716 ir->accept(this); 1717 } 1718 1719 if (ir->increment) { 1720 this->base_ir = ir->increment; 1721 ir->increment->accept(this); 1722 emit(BRW_OPCODE_ADD, counter, counter, this->result); 1723 } 1724 1725 emit(BRW_OPCODE_WHILE); 1726} 1727 1728void 1729fs_visitor::visit(ir_loop_jump *ir) 1730{ 1731 switch (ir->mode) { 1732 case ir_loop_jump::jump_break: 1733 emit(BRW_OPCODE_BREAK); 1734 break; 1735 case ir_loop_jump::jump_continue: 1736 emit(BRW_OPCODE_CONTINUE); 1737 break; 1738 } 1739} 1740 1741void 1742fs_visitor::visit(ir_call *ir) 1743{ 1744 assert(!"FINISHME"); 1745} 1746 1747void 1748fs_visitor::visit(ir_return *ir) 1749{ 1750 assert(!"FINISHME"); 1751} 1752 1753void 1754fs_visitor::visit(ir_function *ir) 1755{ 1756 /* Ignore function bodies other than main() -- we shouldn't see calls to 1757 * them since they should all be inlined before we get to ir_to_mesa. 1758 */ 1759 if (strcmp(ir->name, "main") == 0) { 1760 const ir_function_signature *sig; 1761 exec_list empty; 1762 1763 sig = ir->matching_signature(&empty); 1764 1765 assert(sig); 1766 1767 foreach_list(node, &sig->body) { 1768 ir_instruction *ir = (ir_instruction *)node; 1769 this->base_ir = ir; 1770 1771 ir->accept(this); 1772 } 1773 } 1774} 1775 1776void 1777fs_visitor::visit(ir_function_signature *ir) 1778{ 1779 assert(!"not reached"); 1780 (void)ir; 1781} 1782 1783fs_inst * 1784fs_visitor::emit(fs_inst inst) 1785{ 1786 fs_inst *list_inst = new(mem_ctx) fs_inst; 1787 *list_inst = inst; 1788 1789 if (force_uncompressed_stack > 0) 1790 list_inst->force_uncompressed = true; 1791 else if (force_sechalf_stack > 0) 1792 list_inst->force_sechalf = true; 1793 1794 list_inst->annotation = this->current_annotation; 1795 list_inst->ir = this->base_ir; 1796 1797 this->instructions.push_tail(list_inst); 1798 1799 return list_inst; 1800} 1801 1802/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */ 1803void 1804fs_visitor::emit_dummy_fs() 1805{ 1806 int reg_width = c->dispatch_width / 8; 1807 1808 /* Everyone's favorite color. */ 1809 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 0 * reg_width), fs_reg(1.0f)); 1810 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 1 * reg_width), fs_reg(0.0f)); 1811 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 2 * reg_width), fs_reg(1.0f)); 1812 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 3 * reg_width), fs_reg(0.0f)); 1813 1814 fs_inst *write; 1815 write = emit(FS_OPCODE_FB_WRITE, fs_reg(0), fs_reg(0)); 1816 write->base_mrf = 2; 1817 write->mlen = 4 * reg_width; 1818 write->eot = true; 1819} 1820 1821/* The register location here is relative to the start of the URB 1822 * data. It will get adjusted to be a real location before 1823 * generate_code() time. 1824 */ 1825struct brw_reg 1826fs_visitor::interp_reg(int location, int channel) 1827{ 1828 int regnr = urb_setup[location] * 2 + channel / 2; 1829 int stride = (channel & 1) * 4; 1830 1831 assert(urb_setup[location] != -1); 1832 1833 return brw_vec1_grf(regnr, stride); 1834} 1835 1836/** Emits the interpolation for the varying inputs. */ 1837void 1838fs_visitor::emit_interpolation_setup_gen4() 1839{ 1840 this->current_annotation = "compute pixel centers"; 1841 this->pixel_x = fs_reg(this, glsl_type::uint_type); 1842 this->pixel_y = fs_reg(this, glsl_type::uint_type); 1843 this->pixel_x.type = BRW_REGISTER_TYPE_UW; 1844 this->pixel_y.type = BRW_REGISTER_TYPE_UW; 1845 1846 emit(FS_OPCODE_PIXEL_X, this->pixel_x); 1847 emit(FS_OPCODE_PIXEL_Y, this->pixel_y); 1848 1849 this->current_annotation = "compute pixel deltas from v0"; 1850 if (brw->has_pln) { 1851 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1852 fs_reg(this, glsl_type::vec2_type); 1853 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1854 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC]; 1855 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg_offset++; 1856 } else { 1857 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1858 fs_reg(this, glsl_type::float_type); 1859 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1860 fs_reg(this, glsl_type::float_type); 1861 } 1862 emit(BRW_OPCODE_ADD, this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1863 this->pixel_x, fs_reg(negate(brw_vec1_grf(1, 0)))); 1864 emit(BRW_OPCODE_ADD, this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1865 this->pixel_y, fs_reg(negate(brw_vec1_grf(1, 1)))); 1866 1867 this->current_annotation = "compute pos.w and 1/pos.w"; 1868 /* Compute wpos.w. It's always in our setup, since it's needed to 1869 * interpolate the other attributes. 1870 */ 1871 this->wpos_w = fs_reg(this, glsl_type::float_type); 1872 emit(FS_OPCODE_LINTERP, wpos_w, 1873 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1874 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1875 interp_reg(FRAG_ATTRIB_WPOS, 3)); 1876 /* Compute the pixel 1/W value from wpos.w. */ 1877 this->pixel_w = fs_reg(this, glsl_type::float_type); 1878 emit_math(SHADER_OPCODE_RCP, this->pixel_w, wpos_w); 1879 this->current_annotation = NULL; 1880} 1881 1882/** Emits the interpolation for the varying inputs. */ 1883void 1884fs_visitor::emit_interpolation_setup_gen6() 1885{ 1886 struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW); 1887 1888 /* If the pixel centers end up used, the setup is the same as for gen4. */ 1889 this->current_annotation = "compute pixel centers"; 1890 fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type); 1891 fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type); 1892 int_pixel_x.type = BRW_REGISTER_TYPE_UW; 1893 int_pixel_y.type = BRW_REGISTER_TYPE_UW; 1894 emit(BRW_OPCODE_ADD, 1895 int_pixel_x, 1896 fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)), 1897 fs_reg(brw_imm_v(0x10101010))); 1898 emit(BRW_OPCODE_ADD, 1899 int_pixel_y, 1900 fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)), 1901 fs_reg(brw_imm_v(0x11001100))); 1902 1903 /* As of gen6, we can no longer mix float and int sources. We have 1904 * to turn the integer pixel centers into floats for their actual 1905 * use. 1906 */ 1907 this->pixel_x = fs_reg(this, glsl_type::float_type); 1908 this->pixel_y = fs_reg(this, glsl_type::float_type); 1909 emit(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x); 1910 emit(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y); 1911 1912 this->current_annotation = "compute pos.w"; 1913 this->pixel_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0)); 1914 this->wpos_w = fs_reg(this, glsl_type::float_type); 1915 emit_math(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w); 1916 1917 for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) { 1918 uint8_t reg = c->barycentric_coord_reg[i]; 1919 this->delta_x[i] = fs_reg(brw_vec8_grf(reg, 0)); 1920 this->delta_y[i] = fs_reg(brw_vec8_grf(reg + 1, 0)); 1921 } 1922 1923 this->current_annotation = NULL; 1924} 1925 1926void 1927fs_visitor::emit_color_write(int target, int index, int first_color_mrf) 1928{ 1929 int reg_width = c->dispatch_width / 8; 1930 fs_inst *inst; 1931 fs_reg color = outputs[target]; 1932 fs_reg mrf; 1933 1934 /* If there's no color data to be written, skip it. */ 1935 if (color.file == BAD_FILE) 1936 return; 1937 1938 color.reg_offset += index; 1939 1940 if (c->dispatch_width == 8 || intel->gen >= 6) { 1941 /* SIMD8 write looks like: 1942 * m + 0: r0 1943 * m + 1: r1 1944 * m + 2: g0 1945 * m + 3: g1 1946 * 1947 * gen6 SIMD16 DP write looks like: 1948 * m + 0: r0 1949 * m + 1: r1 1950 * m + 2: g0 1951 * m + 3: g1 1952 * m + 4: b0 1953 * m + 5: b1 1954 * m + 6: a0 1955 * m + 7: a1 1956 */ 1957 inst = emit(BRW_OPCODE_MOV, 1958 fs_reg(MRF, first_color_mrf + index * reg_width, color.type), 1959 color); 1960 inst->saturate = c->key.clamp_fragment_color; 1961 } else { 1962 /* pre-gen6 SIMD16 single source DP write looks like: 1963 * m + 0: r0 1964 * m + 1: g0 1965 * m + 2: b0 1966 * m + 3: a0 1967 * m + 4: r1 1968 * m + 5: g1 1969 * m + 6: b1 1970 * m + 7: a1 1971 */ 1972 if (brw->has_compr4) { 1973 /* By setting the high bit of the MRF register number, we 1974 * indicate that we want COMPR4 mode - instead of doing the 1975 * usual destination + 1 for the second half we get 1976 * destination + 4. 1977 */ 1978 inst = emit(BRW_OPCODE_MOV, 1979 fs_reg(MRF, BRW_MRF_COMPR4 + first_color_mrf + index, 1980 color.type), 1981 color); 1982 inst->saturate = c->key.clamp_fragment_color; 1983 } else { 1984 push_force_uncompressed(); 1985 inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index, 1986 color.type), 1987 color); 1988 inst->saturate = c->key.clamp_fragment_color; 1989 pop_force_uncompressed(); 1990 1991 push_force_sechalf(); 1992 color.sechalf = true; 1993 inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index + 4, 1994 color.type), 1995 color); 1996 inst->saturate = c->key.clamp_fragment_color; 1997 pop_force_sechalf(); 1998 color.sechalf = false; 1999 } 2000 } 2001} 2002 2003void 2004fs_visitor::emit_fb_writes() 2005{ 2006 this->current_annotation = "FB write header"; 2007 bool header_present = true; 2008 int base_mrf = 2; 2009 int nr = base_mrf; 2010 int reg_width = c->dispatch_width / 8; 2011 2012 if (intel->gen >= 6 && 2013 !this->kill_emitted && 2014 c->key.nr_color_regions == 1) { 2015 header_present = false; 2016 } 2017 2018 if (header_present) { 2019 /* m2, m3 header */ 2020 nr += 2; 2021 } 2022 2023 if (c->aa_dest_stencil_reg) { 2024 push_force_uncompressed(); 2025 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr++), 2026 fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0))); 2027 pop_force_uncompressed(); 2028 } 2029 2030 /* Reserve space for color. It'll be filled in per MRT below. */ 2031 int color_mrf = nr; 2032 nr += 4 * reg_width; 2033 2034 if (c->source_depth_to_render_target) { 2035 if (intel->gen == 6 && c->dispatch_width == 16) { 2036 /* For outputting oDepth on gen6, SIMD8 writes have to be 2037 * used. This would require 8-wide moves of each half to 2038 * message regs, kind of like pre-gen5 SIMD16 FB writes. 2039 * Just bail on doing so for now. 2040 */ 2041 fail("Missing support for simd16 depth writes on gen6\n"); 2042 } 2043 2044 if (c->computes_depth) { 2045 /* Hand over gl_FragDepth. */ 2046 assert(this->frag_depth); 2047 fs_reg depth = *(variable_storage(this->frag_depth)); 2048 2049 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), depth); 2050 } else { 2051 /* Pass through the payload depth. */ 2052 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), 2053 fs_reg(brw_vec8_grf(c->source_depth_reg, 0))); 2054 } 2055 nr += reg_width; 2056 } 2057 2058 if (c->dest_depth_reg) { 2059 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), 2060 fs_reg(brw_vec8_grf(c->dest_depth_reg, 0))); 2061 nr += reg_width; 2062 } 2063 2064 for (int target = 0; target < c->key.nr_color_regions; target++) { 2065 this->current_annotation = ralloc_asprintf(this->mem_ctx, 2066 "FB write target %d", 2067 target); 2068 for (int i = 0; i < 4; i++) 2069 emit_color_write(target, i, color_mrf); 2070 2071 fs_inst *inst = emit(FS_OPCODE_FB_WRITE); 2072 inst->target = target; 2073 inst->base_mrf = base_mrf; 2074 inst->mlen = nr - base_mrf; 2075 if (target == c->key.nr_color_regions - 1) 2076 inst->eot = true; 2077 inst->header_present = header_present; 2078 } 2079 2080 if (c->key.nr_color_regions == 0) { 2081 if (c->key.alpha_test) { 2082 /* If the alpha test is enabled but there's no color buffer, 2083 * we still need to send alpha out the pipeline to our null 2084 * renderbuffer. 2085 */ 2086 emit_color_write(0, 3, color_mrf); 2087 } 2088 2089 fs_inst *inst = emit(FS_OPCODE_FB_WRITE); 2090 inst->base_mrf = base_mrf; 2091 inst->mlen = nr - base_mrf; 2092 inst->eot = true; 2093 inst->header_present = header_present; 2094 } 2095 2096 this->current_annotation = NULL; 2097} 2098 2099void 2100fs_visitor::resolve_ud_negate(fs_reg *reg) 2101{ 2102 if (reg->type != BRW_REGISTER_TYPE_UD || 2103 !reg->negate) 2104 return; 2105 2106 fs_reg temp = fs_reg(this, glsl_type::uint_type); 2107 emit(BRW_OPCODE_MOV, temp, *reg); 2108 *reg = temp; 2109} 2110