brw_fs_visitor.cpp revision b443ca96a55a06ee215a3f9a9e7dba558deeb58c
1/* 2 * Copyright © 2010 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24/** @file brw_fs_visitor.cpp 25 * 26 * This file supports generating the FS LIR from the GLSL IR. The LIR 27 * makes it easier to do backend-specific optimizations than doing so 28 * in the GLSL IR or in the native code. 29 */ 30extern "C" { 31 32#include <sys/types.h> 33 34#include "main/macros.h" 35#include "main/shaderobj.h" 36#include "main/uniforms.h" 37#include "program/prog_parameter.h" 38#include "program/prog_print.h" 39#include "program/prog_optimize.h" 40#include "program/register_allocate.h" 41#include "program/sampler.h" 42#include "program/hash_table.h" 43#include "brw_context.h" 44#include "brw_eu.h" 45#include "brw_wm.h" 46} 47#include "brw_shader.h" 48#include "brw_fs.h" 49#include "glsl/glsl_types.h" 50#include "glsl/ir_optimization.h" 51#include "glsl/ir_print_visitor.h" 52 53void 54fs_visitor::visit(ir_variable *ir) 55{ 56 fs_reg *reg = NULL; 57 58 if (variable_storage(ir)) 59 return; 60 61 if (ir->mode == ir_var_in) { 62 if (!strcmp(ir->name, "gl_FragCoord")) { 63 reg = emit_fragcoord_interpolation(ir); 64 } else if (!strcmp(ir->name, "gl_FrontFacing")) { 65 reg = emit_frontfacing_interpolation(ir); 66 } else { 67 reg = emit_general_interpolation(ir); 68 } 69 assert(reg); 70 hash_table_insert(this->variable_ht, reg, ir); 71 return; 72 } else if (ir->mode == ir_var_out) { 73 reg = new(this->mem_ctx) fs_reg(this, ir->type); 74 75 if (ir->location == FRAG_RESULT_COLOR) { 76 /* Writing gl_FragColor outputs to all color regions. */ 77 for (int i = 0; i < MAX2(c->key.nr_color_regions, 1); i++) { 78 this->outputs[i] = *reg; 79 } 80 } else if (ir->location == FRAG_RESULT_DEPTH) { 81 this->frag_depth = ir; 82 } else { 83 /* gl_FragData or a user-defined FS output */ 84 assert(ir->location >= FRAG_RESULT_DATA0 && 85 ir->location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS); 86 87 /* General color output. */ 88 for (unsigned int i = 0; i < MAX2(1, ir->type->length); i++) { 89 int output = ir->location - FRAG_RESULT_DATA0 + i; 90 this->outputs[output] = *reg; 91 this->outputs[output].reg_offset += 4 * i; 92 } 93 } 94 } else if (ir->mode == ir_var_uniform) { 95 int param_index = c->prog_data.nr_params; 96 97 if (c->dispatch_width == 16) { 98 if (!variable_storage(ir)) { 99 fail("Failed to find uniform '%s' in 16-wide\n", ir->name); 100 } 101 return; 102 } 103 104 if (!strncmp(ir->name, "gl_", 3)) { 105 setup_builtin_uniform_values(ir); 106 } else { 107 setup_uniform_values(ir->location, ir->type); 108 } 109 110 reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index); 111 reg->type = brw_type_for_base_type(ir->type); 112 } 113 114 if (!reg) 115 reg = new(this->mem_ctx) fs_reg(this, ir->type); 116 117 hash_table_insert(this->variable_ht, reg, ir); 118} 119 120void 121fs_visitor::visit(ir_dereference_variable *ir) 122{ 123 fs_reg *reg = variable_storage(ir->var); 124 this->result = *reg; 125} 126 127void 128fs_visitor::visit(ir_dereference_record *ir) 129{ 130 const glsl_type *struct_type = ir->record->type; 131 132 ir->record->accept(this); 133 134 unsigned int offset = 0; 135 for (unsigned int i = 0; i < struct_type->length; i++) { 136 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0) 137 break; 138 offset += type_size(struct_type->fields.structure[i].type); 139 } 140 this->result.reg_offset += offset; 141 this->result.type = brw_type_for_base_type(ir->type); 142} 143 144void 145fs_visitor::visit(ir_dereference_array *ir) 146{ 147 ir_constant *index; 148 int element_size; 149 150 ir->array->accept(this); 151 index = ir->array_index->as_constant(); 152 153 element_size = type_size(ir->type); 154 this->result.type = brw_type_for_base_type(ir->type); 155 156 if (index) { 157 assert(this->result.file == UNIFORM || this->result.file == GRF); 158 this->result.reg_offset += index->value.i[0] * element_size; 159 } else { 160 assert(!"FINISHME: non-constant array element"); 161 } 162} 163 164/* Instruction selection: Produce a MOV.sat instead of 165 * MIN(MAX(val, 0), 1) when possible. 166 */ 167bool 168fs_visitor::try_emit_saturate(ir_expression *ir) 169{ 170 ir_rvalue *sat_val = ir->as_rvalue_to_saturate(); 171 172 if (!sat_val) 173 return false; 174 175 fs_inst *pre_inst = (fs_inst *) this->instructions.get_tail(); 176 177 sat_val->accept(this); 178 fs_reg src = this->result; 179 180 fs_inst *last_inst = (fs_inst *) this->instructions.get_tail(); 181 182 /* If the last instruction from our accept() didn't generate our 183 * src, generate a saturated MOV 184 */ 185 fs_inst *modify = get_instruction_generating_reg(pre_inst, last_inst, src); 186 if (!modify || modify->regs_written() != 1) { 187 fs_inst *inst = emit(BRW_OPCODE_MOV, this->result, src); 188 inst->saturate = true; 189 } else { 190 modify->saturate = true; 191 this->result = src; 192 } 193 194 195 return true; 196} 197 198bool 199fs_visitor::try_emit_mad(ir_expression *ir, int mul_arg) 200{ 201 /* 3-src instructions were introduced in gen6. */ 202 if (intel->gen < 6) 203 return false; 204 205 /* MAD can only handle floating-point data. */ 206 if (ir->type != glsl_type::float_type) 207 return false; 208 209 ir_rvalue *nonmul = ir->operands[1 - mul_arg]; 210 ir_expression *mul = ir->operands[mul_arg]->as_expression(); 211 212 if (!mul || mul->operation != ir_binop_mul) 213 return false; 214 215 if (nonmul->as_constant() || 216 mul->operands[0]->as_constant() || 217 mul->operands[1]->as_constant()) 218 return false; 219 220 nonmul->accept(this); 221 fs_reg src0 = this->result; 222 223 mul->operands[0]->accept(this); 224 fs_reg src1 = this->result; 225 226 mul->operands[1]->accept(this); 227 fs_reg src2 = this->result; 228 229 this->result = fs_reg(this, ir->type); 230 emit(BRW_OPCODE_MAD, this->result, src0, src1, src2); 231 232 return true; 233} 234 235void 236fs_visitor::visit(ir_expression *ir) 237{ 238 unsigned int operand; 239 fs_reg op[2], temp; 240 fs_inst *inst; 241 242 assert(ir->get_num_operands() <= 2); 243 244 if (try_emit_saturate(ir)) 245 return; 246 if (ir->operation == ir_binop_add) { 247 if (try_emit_mad(ir, 0) || try_emit_mad(ir, 1)) 248 return; 249 } 250 251 for (operand = 0; operand < ir->get_num_operands(); operand++) { 252 ir->operands[operand]->accept(this); 253 if (this->result.file == BAD_FILE) { 254 ir_print_visitor v; 255 fail("Failed to get tree for expression operand:\n"); 256 ir->operands[operand]->accept(&v); 257 } 258 op[operand] = this->result; 259 260 /* Matrix expression operands should have been broken down to vector 261 * operations already. 262 */ 263 assert(!ir->operands[operand]->type->is_matrix()); 264 /* And then those vector operands should have been broken down to scalar. 265 */ 266 assert(!ir->operands[operand]->type->is_vector()); 267 } 268 269 /* Storage for our result. If our result goes into an assignment, it will 270 * just get copy-propagated out, so no worries. 271 */ 272 this->result = fs_reg(this, ir->type); 273 274 switch (ir->operation) { 275 case ir_unop_logic_not: 276 /* Note that BRW_OPCODE_NOT is not appropriate here, since it is 277 * ones complement of the whole register, not just bit 0. 278 */ 279 emit(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1)); 280 break; 281 case ir_unop_neg: 282 op[0].negate = !op[0].negate; 283 this->result = op[0]; 284 break; 285 case ir_unop_abs: 286 op[0].abs = true; 287 op[0].negate = false; 288 this->result = op[0]; 289 break; 290 case ir_unop_sign: 291 temp = fs_reg(this, ir->type); 292 293 emit(BRW_OPCODE_MOV, this->result, fs_reg(0.0f)); 294 295 inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)); 296 inst->conditional_mod = BRW_CONDITIONAL_G; 297 inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(1.0f)); 298 inst->predicated = true; 299 300 inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f)); 301 inst->conditional_mod = BRW_CONDITIONAL_L; 302 inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f)); 303 inst->predicated = true; 304 305 break; 306 case ir_unop_rcp: 307 emit_math(SHADER_OPCODE_RCP, this->result, op[0]); 308 break; 309 310 case ir_unop_exp2: 311 emit_math(SHADER_OPCODE_EXP2, this->result, op[0]); 312 break; 313 case ir_unop_log2: 314 emit_math(SHADER_OPCODE_LOG2, this->result, op[0]); 315 break; 316 case ir_unop_exp: 317 case ir_unop_log: 318 assert(!"not reached: should be handled by ir_explog_to_explog2"); 319 break; 320 case ir_unop_sin: 321 case ir_unop_sin_reduced: 322 emit_math(SHADER_OPCODE_SIN, this->result, op[0]); 323 break; 324 case ir_unop_cos: 325 case ir_unop_cos_reduced: 326 emit_math(SHADER_OPCODE_COS, this->result, op[0]); 327 break; 328 329 case ir_unop_dFdx: 330 emit(FS_OPCODE_DDX, this->result, op[0]); 331 break; 332 case ir_unop_dFdy: 333 emit(FS_OPCODE_DDY, this->result, op[0]); 334 break; 335 336 case ir_binop_add: 337 emit(BRW_OPCODE_ADD, this->result, op[0], op[1]); 338 break; 339 case ir_binop_sub: 340 assert(!"not reached: should be handled by ir_sub_to_add_neg"); 341 break; 342 343 case ir_binop_mul: 344 if (ir->type->is_integer()) { 345 /* For integer multiplication, the MUL uses the low 16 bits 346 * of one of the operands (src0 on gen6, src1 on gen7). The 347 * MACH accumulates in the contribution of the upper 16 bits 348 * of that operand. 349 * 350 * FINISHME: Emit just the MUL if we know an operand is small 351 * enough. 352 */ 353 if (intel->gen >= 7 && c->dispatch_width == 16) 354 fail("16-wide explicit accumulator operands unsupported\n"); 355 356 struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D); 357 358 emit(BRW_OPCODE_MUL, acc, op[0], op[1]); 359 emit(BRW_OPCODE_MACH, reg_null_d, op[0], op[1]); 360 emit(BRW_OPCODE_MOV, this->result, fs_reg(acc)); 361 } else { 362 emit(BRW_OPCODE_MUL, this->result, op[0], op[1]); 363 } 364 break; 365 case ir_binop_div: 366 if (intel->gen >= 7 && c->dispatch_width == 16) 367 fail("16-wide INTDIV unsupported\n"); 368 369 /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */ 370 assert(ir->type->is_integer()); 371 emit_math(SHADER_OPCODE_INT_QUOTIENT, this->result, op[0], op[1]); 372 break; 373 case ir_binop_mod: 374 if (intel->gen >= 7 && c->dispatch_width == 16) 375 fail("16-wide INTDIV unsupported\n"); 376 377 /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */ 378 assert(ir->type->is_integer()); 379 emit_math(SHADER_OPCODE_INT_REMAINDER, this->result, op[0], op[1]); 380 break; 381 382 case ir_binop_less: 383 case ir_binop_greater: 384 case ir_binop_lequal: 385 case ir_binop_gequal: 386 case ir_binop_equal: 387 case ir_binop_all_equal: 388 case ir_binop_nequal: 389 case ir_binop_any_nequal: 390 temp = this->result; 391 /* original gen4 does implicit conversion before comparison. */ 392 if (intel->gen < 5) 393 temp.type = op[0].type; 394 395 resolve_ud_negate(&op[0]); 396 resolve_ud_negate(&op[1]); 397 398 inst = emit(BRW_OPCODE_CMP, temp, op[0], op[1]); 399 inst->conditional_mod = brw_conditional_for_comparison(ir->operation); 400 break; 401 402 case ir_binop_logic_xor: 403 emit(BRW_OPCODE_XOR, this->result, op[0], op[1]); 404 break; 405 406 case ir_binop_logic_or: 407 emit(BRW_OPCODE_OR, this->result, op[0], op[1]); 408 break; 409 410 case ir_binop_logic_and: 411 emit(BRW_OPCODE_AND, this->result, op[0], op[1]); 412 break; 413 414 case ir_binop_dot: 415 case ir_unop_any: 416 assert(!"not reached: should be handled by brw_fs_channel_expressions"); 417 break; 418 419 case ir_unop_noise: 420 assert(!"not reached: should be handled by lower_noise"); 421 break; 422 423 case ir_quadop_vector: 424 assert(!"not reached: should be handled by lower_quadop_vector"); 425 break; 426 427 case ir_unop_sqrt: 428 emit_math(SHADER_OPCODE_SQRT, this->result, op[0]); 429 break; 430 431 case ir_unop_rsq: 432 emit_math(SHADER_OPCODE_RSQ, this->result, op[0]); 433 break; 434 435 case ir_unop_i2u: 436 op[0].type = BRW_REGISTER_TYPE_UD; 437 this->result = op[0]; 438 break; 439 case ir_unop_u2i: 440 op[0].type = BRW_REGISTER_TYPE_D; 441 this->result = op[0]; 442 break; 443 case ir_unop_i2f: 444 case ir_unop_u2f: 445 case ir_unop_f2i: 446 emit(BRW_OPCODE_MOV, this->result, op[0]); 447 break; 448 449 case ir_unop_b2i: 450 inst = emit(BRW_OPCODE_AND, this->result, op[0], fs_reg(1)); 451 break; 452 case ir_unop_b2f: 453 temp = fs_reg(this, glsl_type::int_type); 454 emit(BRW_OPCODE_AND, temp, op[0], fs_reg(1)); 455 emit(BRW_OPCODE_MOV, this->result, temp); 456 break; 457 458 case ir_unop_f2b: 459 case ir_unop_i2b: 460 temp = this->result; 461 /* original gen4 does implicit conversion before comparison. */ 462 if (intel->gen < 5) 463 temp.type = op[0].type; 464 465 resolve_ud_negate(&op[0]); 466 467 inst = emit(BRW_OPCODE_CMP, temp, op[0], fs_reg(0.0f)); 468 inst->conditional_mod = BRW_CONDITIONAL_NZ; 469 break; 470 471 case ir_unop_trunc: 472 emit(BRW_OPCODE_RNDZ, this->result, op[0]); 473 break; 474 case ir_unop_ceil: 475 op[0].negate = !op[0].negate; 476 inst = emit(BRW_OPCODE_RNDD, this->result, op[0]); 477 this->result.negate = true; 478 break; 479 case ir_unop_floor: 480 inst = emit(BRW_OPCODE_RNDD, this->result, op[0]); 481 break; 482 case ir_unop_fract: 483 inst = emit(BRW_OPCODE_FRC, this->result, op[0]); 484 break; 485 case ir_unop_round_even: 486 emit(BRW_OPCODE_RNDE, this->result, op[0]); 487 break; 488 489 case ir_binop_min: 490 resolve_ud_negate(&op[0]); 491 resolve_ud_negate(&op[1]); 492 493 if (intel->gen >= 6) { 494 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 495 inst->conditional_mod = BRW_CONDITIONAL_L; 496 } else { 497 /* Unalias the destination */ 498 this->result = fs_reg(this, ir->type); 499 500 inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]); 501 inst->conditional_mod = BRW_CONDITIONAL_L; 502 503 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 504 inst->predicated = true; 505 } 506 break; 507 case ir_binop_max: 508 resolve_ud_negate(&op[0]); 509 resolve_ud_negate(&op[1]); 510 511 if (intel->gen >= 6) { 512 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 513 inst->conditional_mod = BRW_CONDITIONAL_GE; 514 } else { 515 /* Unalias the destination */ 516 this->result = fs_reg(this, ir->type); 517 518 inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]); 519 inst->conditional_mod = BRW_CONDITIONAL_G; 520 521 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 522 inst->predicated = true; 523 } 524 break; 525 526 case ir_binop_pow: 527 emit_math(SHADER_OPCODE_POW, this->result, op[0], op[1]); 528 break; 529 530 case ir_unop_bit_not: 531 inst = emit(BRW_OPCODE_NOT, this->result, op[0]); 532 break; 533 case ir_binop_bit_and: 534 inst = emit(BRW_OPCODE_AND, this->result, op[0], op[1]); 535 break; 536 case ir_binop_bit_xor: 537 inst = emit(BRW_OPCODE_XOR, this->result, op[0], op[1]); 538 break; 539 case ir_binop_bit_or: 540 inst = emit(BRW_OPCODE_OR, this->result, op[0], op[1]); 541 break; 542 543 case ir_binop_lshift: 544 inst = emit(BRW_OPCODE_SHL, this->result, op[0], op[1]); 545 break; 546 547 case ir_binop_rshift: 548 if (ir->type->base_type == GLSL_TYPE_INT) 549 inst = emit(BRW_OPCODE_ASR, this->result, op[0], op[1]); 550 else 551 inst = emit(BRW_OPCODE_SHR, this->result, op[0], op[1]); 552 break; 553 } 554} 555 556void 557fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r, 558 const glsl_type *type, bool predicated) 559{ 560 switch (type->base_type) { 561 case GLSL_TYPE_FLOAT: 562 case GLSL_TYPE_UINT: 563 case GLSL_TYPE_INT: 564 case GLSL_TYPE_BOOL: 565 for (unsigned int i = 0; i < type->components(); i++) { 566 l.type = brw_type_for_base_type(type); 567 r.type = brw_type_for_base_type(type); 568 569 if (predicated || !l.equals(&r)) { 570 fs_inst *inst = emit(BRW_OPCODE_MOV, l, r); 571 inst->predicated = predicated; 572 } 573 574 l.reg_offset++; 575 r.reg_offset++; 576 } 577 break; 578 case GLSL_TYPE_ARRAY: 579 for (unsigned int i = 0; i < type->length; i++) { 580 emit_assignment_writes(l, r, type->fields.array, predicated); 581 } 582 break; 583 584 case GLSL_TYPE_STRUCT: 585 for (unsigned int i = 0; i < type->length; i++) { 586 emit_assignment_writes(l, r, type->fields.structure[i].type, 587 predicated); 588 } 589 break; 590 591 case GLSL_TYPE_SAMPLER: 592 break; 593 594 default: 595 assert(!"not reached"); 596 break; 597 } 598} 599 600/* If the RHS processing resulted in an instruction generating a 601 * temporary value, and it would be easy to rewrite the instruction to 602 * generate its result right into the LHS instead, do so. This ends 603 * up reliably removing instructions where it can be tricky to do so 604 * later without real UD chain information. 605 */ 606bool 607fs_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir, 608 fs_reg dst, 609 fs_reg src, 610 fs_inst *pre_rhs_inst, 611 fs_inst *last_rhs_inst) 612{ 613 /* Only attempt if we're doing a direct assignment. */ 614 if (ir->condition || 615 !(ir->lhs->type->is_scalar() || 616 (ir->lhs->type->is_vector() && 617 ir->write_mask == (1 << ir->lhs->type->vector_elements) - 1))) 618 return false; 619 620 /* Make sure the last instruction generated our source reg. */ 621 fs_inst *modify = get_instruction_generating_reg(pre_rhs_inst, 622 last_rhs_inst, 623 src); 624 if (!modify) 625 return false; 626 627 /* If last_rhs_inst wrote a different number of components than our LHS, 628 * we can't safely rewrite it. 629 */ 630 if (ir->lhs->type->vector_elements != modify->regs_written()) 631 return false; 632 633 /* Success! Rewrite the instruction. */ 634 modify->dst = dst; 635 636 return true; 637} 638 639void 640fs_visitor::visit(ir_assignment *ir) 641{ 642 fs_reg l, r; 643 fs_inst *inst; 644 645 /* FINISHME: arrays on the lhs */ 646 ir->lhs->accept(this); 647 l = this->result; 648 649 fs_inst *pre_rhs_inst = (fs_inst *) this->instructions.get_tail(); 650 651 ir->rhs->accept(this); 652 r = this->result; 653 654 fs_inst *last_rhs_inst = (fs_inst *) this->instructions.get_tail(); 655 656 assert(l.file != BAD_FILE); 657 assert(r.file != BAD_FILE); 658 659 if (try_rewrite_rhs_to_dst(ir, l, r, pre_rhs_inst, last_rhs_inst)) 660 return; 661 662 if (ir->condition) { 663 emit_bool_to_cond_code(ir->condition); 664 } 665 666 if (ir->lhs->type->is_scalar() || 667 ir->lhs->type->is_vector()) { 668 for (int i = 0; i < ir->lhs->type->vector_elements; i++) { 669 if (ir->write_mask & (1 << i)) { 670 inst = emit(BRW_OPCODE_MOV, l, r); 671 if (ir->condition) 672 inst->predicated = true; 673 r.reg_offset++; 674 } 675 l.reg_offset++; 676 } 677 } else { 678 emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL); 679 } 680} 681 682fs_inst * 683fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate, 684 int sampler) 685{ 686 int mlen; 687 int base_mrf = 1; 688 bool simd16 = false; 689 fs_reg orig_dst; 690 691 /* g0 header. */ 692 mlen = 1; 693 694 if (ir->shadow_comparitor && ir->op != ir_txd) { 695 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 696 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate); 697 coordinate.reg_offset++; 698 } 699 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */ 700 mlen += 3; 701 702 if (ir->op == ir_tex) { 703 /* There's no plain shadow compare message, so we use shadow 704 * compare with a bias of 0.0. 705 */ 706 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), fs_reg(0.0f)); 707 mlen++; 708 } else if (ir->op == ir_txb) { 709 ir->lod_info.bias->accept(this); 710 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 711 mlen++; 712 } else { 713 assert(ir->op == ir_txl); 714 ir->lod_info.lod->accept(this); 715 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 716 mlen++; 717 } 718 719 ir->shadow_comparitor->accept(this); 720 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 721 mlen++; 722 } else if (ir->op == ir_tex) { 723 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 724 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate); 725 coordinate.reg_offset++; 726 } 727 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */ 728 mlen += 3; 729 } else if (ir->op == ir_txd) { 730 ir->lod_info.grad.dPdx->accept(this); 731 fs_reg dPdx = this->result; 732 733 ir->lod_info.grad.dPdy->accept(this); 734 fs_reg dPdy = this->result; 735 736 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 737 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate); 738 coordinate.reg_offset++; 739 } 740 /* the slots for u and v are always present, but r is optional */ 741 mlen += MAX2(ir->coordinate->type->vector_elements, 2); 742 743 /* P = u, v, r 744 * dPdx = dudx, dvdx, drdx 745 * dPdy = dudy, dvdy, drdy 746 * 747 * 1-arg: Does not exist. 748 * 749 * 2-arg: dudx dvdx dudy dvdy 750 * dPdx.x dPdx.y dPdy.x dPdy.y 751 * m4 m5 m6 m7 752 * 753 * 3-arg: dudx dvdx drdx dudy dvdy drdy 754 * dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z 755 * m5 m6 m7 m8 m9 m10 756 */ 757 for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) { 758 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx); 759 dPdx.reg_offset++; 760 } 761 mlen += MAX2(ir->lod_info.grad.dPdx->type->vector_elements, 2); 762 763 for (int i = 0; i < ir->lod_info.grad.dPdy->type->vector_elements; i++) { 764 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy); 765 dPdy.reg_offset++; 766 } 767 mlen += MAX2(ir->lod_info.grad.dPdy->type->vector_elements, 2); 768 } else if (ir->op == ir_txs) { 769 /* There's no SIMD8 resinfo message on Gen4. Use SIMD16 instead. */ 770 simd16 = true; 771 ir->lod_info.lod->accept(this); 772 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result); 773 mlen += 2; 774 } else { 775 /* Oh joy. gen4 doesn't have SIMD8 non-shadow-compare bias/lod 776 * instructions. We'll need to do SIMD16 here. 777 */ 778 simd16 = true; 779 assert(ir->op == ir_txb || ir->op == ir_txl || ir->op == ir_txf); 780 781 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 782 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2, coordinate.type), 783 coordinate); 784 coordinate.reg_offset++; 785 } 786 787 /* Initialize the rest of u/v/r with 0.0. Empirically, this seems to 788 * be necessary for TXF (ld), but seems wise to do for all messages. 789 */ 790 for (int i = ir->coordinate->type->vector_elements; i < 3; i++) { 791 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2), fs_reg(0.0f)); 792 } 793 794 /* lod/bias appears after u/v/r. */ 795 mlen += 6; 796 797 if (ir->op == ir_txb) { 798 ir->lod_info.bias->accept(this); 799 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 800 mlen++; 801 } else { 802 ir->lod_info.lod->accept(this); 803 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, this->result.type), 804 this->result); 805 mlen++; 806 } 807 808 /* The unused upper half. */ 809 mlen++; 810 } 811 812 if (simd16) { 813 /* Now, since we're doing simd16, the return is 2 interleaved 814 * vec4s where the odd-indexed ones are junk. We'll need to move 815 * this weirdness around to the expected layout. 816 */ 817 orig_dst = dst; 818 const glsl_type *vec_type = 819 glsl_type::get_instance(ir->type->base_type, 4, 1); 820 dst = fs_reg(this, glsl_type::get_array_instance(vec_type, 2)); 821 dst.type = intel->is_g4x ? brw_type_for_base_type(ir->type) 822 : BRW_REGISTER_TYPE_F; 823 } 824 825 fs_inst *inst = NULL; 826 switch (ir->op) { 827 case ir_tex: 828 inst = emit(SHADER_OPCODE_TEX, dst); 829 break; 830 case ir_txb: 831 inst = emit(FS_OPCODE_TXB, dst); 832 break; 833 case ir_txl: 834 inst = emit(SHADER_OPCODE_TXL, dst); 835 break; 836 case ir_txd: 837 inst = emit(SHADER_OPCODE_TXD, dst); 838 break; 839 case ir_txs: 840 inst = emit(SHADER_OPCODE_TXS, dst); 841 break; 842 case ir_txf: 843 inst = emit(SHADER_OPCODE_TXF, dst); 844 break; 845 } 846 inst->base_mrf = base_mrf; 847 inst->mlen = mlen; 848 inst->header_present = true; 849 850 if (simd16) { 851 for (int i = 0; i < 4; i++) { 852 emit(BRW_OPCODE_MOV, orig_dst, dst); 853 orig_dst.reg_offset++; 854 dst.reg_offset += 2; 855 } 856 } 857 858 return inst; 859} 860 861/* gen5's sampler has slots for u, v, r, array index, then optional 862 * parameters like shadow comparitor or LOD bias. If optional 863 * parameters aren't present, those base slots are optional and don't 864 * need to be included in the message. 865 * 866 * We don't fill in the unnecessary slots regardless, which may look 867 * surprising in the disassembly. 868 */ 869fs_inst * 870fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate, 871 int sampler) 872{ 873 int mlen = 0; 874 int base_mrf = 2; 875 int reg_width = c->dispatch_width / 8; 876 bool header_present = false; 877 const int vector_elements = 878 ir->coordinate ? ir->coordinate->type->vector_elements : 0; 879 880 if (ir->offset) { 881 /* The offsets set up by the ir_texture visitor are in the 882 * m1 header, so we can't go headerless. 883 */ 884 header_present = true; 885 mlen++; 886 base_mrf--; 887 } 888 889 for (int i = 0; i < vector_elements; i++) { 890 emit(BRW_OPCODE_MOV, 891 fs_reg(MRF, base_mrf + mlen + i * reg_width, coordinate.type), 892 coordinate); 893 coordinate.reg_offset++; 894 } 895 mlen += vector_elements * reg_width; 896 897 if (ir->shadow_comparitor && ir->op != ir_txd) { 898 mlen = MAX2(mlen, header_present + 4 * reg_width); 899 900 ir->shadow_comparitor->accept(this); 901 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 902 mlen += reg_width; 903 } 904 905 fs_inst *inst = NULL; 906 switch (ir->op) { 907 case ir_tex: 908 inst = emit(SHADER_OPCODE_TEX, dst); 909 break; 910 case ir_txb: 911 ir->lod_info.bias->accept(this); 912 mlen = MAX2(mlen, header_present + 4 * reg_width); 913 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 914 mlen += reg_width; 915 916 inst = emit(FS_OPCODE_TXB, dst); 917 918 break; 919 case ir_txl: 920 ir->lod_info.lod->accept(this); 921 mlen = MAX2(mlen, header_present + 4 * reg_width); 922 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 923 mlen += reg_width; 924 925 inst = emit(SHADER_OPCODE_TXL, dst); 926 break; 927 case ir_txd: { 928 ir->lod_info.grad.dPdx->accept(this); 929 fs_reg dPdx = this->result; 930 931 ir->lod_info.grad.dPdy->accept(this); 932 fs_reg dPdy = this->result; 933 934 mlen = MAX2(mlen, header_present + 4 * reg_width); /* skip over 'ai' */ 935 936 /** 937 * P = u, v, r 938 * dPdx = dudx, dvdx, drdx 939 * dPdy = dudy, dvdy, drdy 940 * 941 * Load up these values: 942 * - dudx dudy dvdx dvdy drdx drdy 943 * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z 944 */ 945 for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) { 946 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx); 947 dPdx.reg_offset++; 948 mlen += reg_width; 949 950 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy); 951 dPdy.reg_offset++; 952 mlen += reg_width; 953 } 954 955 inst = emit(SHADER_OPCODE_TXD, dst); 956 break; 957 } 958 case ir_txs: 959 ir->lod_info.lod->accept(this); 960 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result); 961 mlen += reg_width; 962 inst = emit(SHADER_OPCODE_TXS, dst); 963 break; 964 case ir_txf: 965 mlen = header_present + 4 * reg_width; 966 967 ir->lod_info.lod->accept(this); 968 emit(BRW_OPCODE_MOV, 969 fs_reg(MRF, base_mrf + mlen - reg_width, BRW_REGISTER_TYPE_UD), 970 this->result); 971 inst = emit(SHADER_OPCODE_TXF, dst); 972 break; 973 } 974 inst->base_mrf = base_mrf; 975 inst->mlen = mlen; 976 inst->header_present = header_present; 977 978 if (mlen > 11) { 979 fail("Message length >11 disallowed by hardware\n"); 980 } 981 982 return inst; 983} 984 985fs_inst * 986fs_visitor::emit_texture_gen7(ir_texture *ir, fs_reg dst, fs_reg coordinate, 987 int sampler) 988{ 989 int mlen = 0; 990 int base_mrf = 2; 991 int reg_width = c->dispatch_width / 8; 992 bool header_present = false; 993 int offsets[3]; 994 995 if (ir->offset && ir->op != ir_txf) { 996 /* The offsets set up by the ir_texture visitor are in the 997 * m1 header, so we can't go headerless. 998 */ 999 header_present = true; 1000 mlen++; 1001 base_mrf--; 1002 } 1003 1004 if (ir->shadow_comparitor && ir->op != ir_txd) { 1005 ir->shadow_comparitor->accept(this); 1006 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 1007 mlen += reg_width; 1008 } 1009 1010 /* Set up the LOD info */ 1011 switch (ir->op) { 1012 case ir_tex: 1013 break; 1014 case ir_txb: 1015 ir->lod_info.bias->accept(this); 1016 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 1017 mlen += reg_width; 1018 break; 1019 case ir_txl: 1020 ir->lod_info.lod->accept(this); 1021 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 1022 mlen += reg_width; 1023 break; 1024 case ir_txd: { 1025 if (c->dispatch_width == 16) 1026 fail("Gen7 does not support sample_d/sample_d_c in SIMD16 mode."); 1027 1028 ir->lod_info.grad.dPdx->accept(this); 1029 fs_reg dPdx = this->result; 1030 1031 ir->lod_info.grad.dPdy->accept(this); 1032 fs_reg dPdy = this->result; 1033 1034 /* Load dPdx and the coordinate together: 1035 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z 1036 */ 1037 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 1038 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate); 1039 coordinate.reg_offset++; 1040 mlen += reg_width; 1041 1042 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx); 1043 dPdx.reg_offset++; 1044 mlen += reg_width; 1045 1046 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy); 1047 dPdy.reg_offset++; 1048 mlen += reg_width; 1049 } 1050 break; 1051 } 1052 case ir_txs: 1053 ir->lod_info.lod->accept(this); 1054 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result); 1055 mlen += reg_width; 1056 break; 1057 case ir_txf: 1058 /* It appears that the ld instruction used for txf does its 1059 * address bounds check before adding in the offset. To work 1060 * around this, just add the integer offset to the integer texel 1061 * coordinate, and don't put the offset in the header. 1062 */ 1063 if (ir->offset) { 1064 ir_constant *offset = ir->offset->as_constant(); 1065 offsets[0] = offset->value.i[0]; 1066 offsets[1] = offset->value.i[1]; 1067 offsets[2] = offset->value.i[2]; 1068 } else { 1069 memset(offsets, 0, sizeof(offsets)); 1070 } 1071 1072 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r. */ 1073 emit(BRW_OPCODE_ADD, 1074 fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate, offsets[0]); 1075 coordinate.reg_offset++; 1076 mlen += reg_width; 1077 1078 ir->lod_info.lod->accept(this); 1079 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), this->result); 1080 mlen += reg_width; 1081 1082 for (int i = 1; i < ir->coordinate->type->vector_elements; i++) { 1083 emit(BRW_OPCODE_ADD, 1084 fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate, offsets[i]); 1085 coordinate.reg_offset++; 1086 mlen += reg_width; 1087 } 1088 break; 1089 } 1090 1091 /* Set up the coordinate (except for cases where it was done above) */ 1092 if (ir->op != ir_txd && ir->op != ir_txs && ir->op != ir_txf) { 1093 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 1094 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate); 1095 coordinate.reg_offset++; 1096 mlen += reg_width; 1097 } 1098 } 1099 1100 /* Generate the SEND */ 1101 fs_inst *inst = NULL; 1102 switch (ir->op) { 1103 case ir_tex: inst = emit(SHADER_OPCODE_TEX, dst); break; 1104 case ir_txb: inst = emit(FS_OPCODE_TXB, dst); break; 1105 case ir_txl: inst = emit(SHADER_OPCODE_TXL, dst); break; 1106 case ir_txd: inst = emit(SHADER_OPCODE_TXD, dst); break; 1107 case ir_txf: inst = emit(SHADER_OPCODE_TXF, dst); break; 1108 case ir_txs: inst = emit(SHADER_OPCODE_TXS, dst); break; 1109 } 1110 inst->base_mrf = base_mrf; 1111 inst->mlen = mlen; 1112 inst->header_present = header_present; 1113 1114 if (mlen > 11) { 1115 fail("Message length >11 disallowed by hardware\n"); 1116 } 1117 1118 return inst; 1119} 1120 1121void 1122fs_visitor::visit(ir_texture *ir) 1123{ 1124 fs_inst *inst = NULL; 1125 1126 int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &fp->Base); 1127 sampler = fp->Base.SamplerUnits[sampler]; 1128 1129 /* Our hardware doesn't have a sample_d_c message, so shadow compares 1130 * for textureGrad/TXD need to be emulated with instructions. 1131 */ 1132 bool hw_compare_supported = ir->op != ir_txd; 1133 if (ir->shadow_comparitor && !hw_compare_supported) { 1134 assert(c->key.tex.compare_funcs[sampler] != GL_NONE); 1135 /* No need to even sample for GL_ALWAYS or GL_NEVER...bail early */ 1136 if (c->key.tex.compare_funcs[sampler] == GL_ALWAYS) 1137 return swizzle_result(ir, fs_reg(1.0f), sampler); 1138 else if (c->key.tex.compare_funcs[sampler] == GL_NEVER) 1139 return swizzle_result(ir, fs_reg(0.0f), sampler); 1140 } 1141 1142 if (ir->coordinate) 1143 ir->coordinate->accept(this); 1144 fs_reg coordinate = this->result; 1145 1146 if (ir->offset != NULL && !(intel->gen == 7 && ir->op == ir_txf)) { 1147 uint32_t offset_bits = brw_texture_offset(ir->offset->as_constant()); 1148 1149 /* Explicitly set up the message header by copying g0 to msg reg m1. */ 1150 emit(BRW_OPCODE_MOV, fs_reg(MRF, 1, BRW_REGISTER_TYPE_UD), 1151 fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD))); 1152 1153 /* Then set the offset bits in DWord 2 of the message header. */ 1154 emit(BRW_OPCODE_MOV, 1155 fs_reg(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 1, 2), 1156 BRW_REGISTER_TYPE_UD)), 1157 fs_reg(brw_imm_uw(offset_bits))); 1158 } 1159 1160 /* Should be lowered by do_lower_texture_projection */ 1161 assert(!ir->projector); 1162 1163 bool needs_gl_clamp = true; 1164 1165 fs_reg scale_x, scale_y; 1166 1167 /* The 965 requires the EU to do the normalization of GL rectangle 1168 * texture coordinates. We use the program parameter state 1169 * tracking to get the scaling factor. 1170 */ 1171 if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT && 1172 (intel->gen < 6 || 1173 (intel->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) || 1174 c->key.tex.gl_clamp_mask[1] & (1 << sampler))))) { 1175 struct gl_program_parameter_list *params = c->fp->program.Base.Parameters; 1176 int tokens[STATE_LENGTH] = { 1177 STATE_INTERNAL, 1178 STATE_TEXRECT_SCALE, 1179 sampler, 1180 0, 1181 0 1182 }; 1183 1184 if (c->dispatch_width == 16) { 1185 fail("rectangle scale uniform setup not supported on 16-wide\n"); 1186 this->result = fs_reg(this, ir->type); 1187 return; 1188 } 1189 1190 c->prog_data.param_convert[c->prog_data.nr_params] = 1191 PARAM_NO_CONVERT; 1192 c->prog_data.param_convert[c->prog_data.nr_params + 1] = 1193 PARAM_NO_CONVERT; 1194 1195 scale_x = fs_reg(UNIFORM, c->prog_data.nr_params); 1196 scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1); 1197 1198 GLuint index = _mesa_add_state_reference(params, 1199 (gl_state_index *)tokens); 1200 1201 this->param_index[c->prog_data.nr_params] = index; 1202 this->param_offset[c->prog_data.nr_params] = 0; 1203 c->prog_data.nr_params++; 1204 this->param_index[c->prog_data.nr_params] = index; 1205 this->param_offset[c->prog_data.nr_params] = 1; 1206 c->prog_data.nr_params++; 1207 } 1208 1209 /* The 965 requires the EU to do the normalization of GL rectangle 1210 * texture coordinates. We use the program parameter state 1211 * tracking to get the scaling factor. 1212 */ 1213 if (intel->gen < 6 && 1214 ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) { 1215 fs_reg dst = fs_reg(this, ir->coordinate->type); 1216 fs_reg src = coordinate; 1217 coordinate = dst; 1218 1219 emit(BRW_OPCODE_MUL, dst, src, scale_x); 1220 dst.reg_offset++; 1221 src.reg_offset++; 1222 emit(BRW_OPCODE_MUL, dst, src, scale_y); 1223 } else if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) { 1224 /* On gen6+, the sampler handles the rectangle coordinates 1225 * natively, without needing rescaling. But that means we have 1226 * to do GL_CLAMP clamping at the [0, width], [0, height] scale, 1227 * not [0, 1] like the default case below. 1228 */ 1229 needs_gl_clamp = false; 1230 1231 for (int i = 0; i < 2; i++) { 1232 if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) { 1233 fs_reg chan = coordinate; 1234 chan.reg_offset += i; 1235 1236 inst = emit(BRW_OPCODE_SEL, chan, chan, brw_imm_f(0.0)); 1237 inst->conditional_mod = BRW_CONDITIONAL_G; 1238 1239 /* Our parameter comes in as 1.0/width or 1.0/height, 1240 * because that's what people normally want for doing 1241 * texture rectangle handling. We need width or height 1242 * for clamping, but we don't care enough to make a new 1243 * parameter type, so just invert back. 1244 */ 1245 fs_reg limit = fs_reg(this, glsl_type::float_type); 1246 emit(BRW_OPCODE_MOV, limit, i == 0 ? scale_x : scale_y); 1247 emit(SHADER_OPCODE_RCP, limit, limit); 1248 1249 inst = emit(BRW_OPCODE_SEL, chan, chan, limit); 1250 inst->conditional_mod = BRW_CONDITIONAL_L; 1251 } 1252 } 1253 } 1254 1255 if (ir->coordinate && needs_gl_clamp) { 1256 for (int i = 0; i < MIN2(ir->coordinate->type->vector_elements, 3); i++) { 1257 if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) { 1258 fs_reg chan = coordinate; 1259 chan.reg_offset += i; 1260 1261 fs_inst *inst = emit(BRW_OPCODE_MOV, chan, chan); 1262 inst->saturate = true; 1263 } 1264 } 1265 } 1266 1267 /* Writemasking doesn't eliminate channels on SIMD8 texture 1268 * samples, so don't worry about them. 1269 */ 1270 fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1)); 1271 1272 if (intel->gen >= 7) { 1273 inst = emit_texture_gen7(ir, dst, coordinate, sampler); 1274 } else if (intel->gen >= 5) { 1275 inst = emit_texture_gen5(ir, dst, coordinate, sampler); 1276 } else { 1277 inst = emit_texture_gen4(ir, dst, coordinate, sampler); 1278 } 1279 1280 /* If there's an offset, we already set up m1. To avoid the implied move, 1281 * use the null register. Otherwise, we want an implied move from g0. 1282 */ 1283 if (ir->offset != NULL || !inst->header_present) 1284 inst->src[0] = reg_undef; 1285 else 1286 inst->src[0] = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW)); 1287 1288 inst->sampler = sampler; 1289 1290 if (ir->shadow_comparitor) { 1291 if (hw_compare_supported) { 1292 inst->shadow_compare = true; 1293 } else { 1294 ir->shadow_comparitor->accept(this); 1295 fs_reg ref = this->result; 1296 1297 fs_reg value = dst; 1298 dst = fs_reg(this, glsl_type::vec4_type); 1299 1300 /* FINISHME: This needs to be done pre-filtering. */ 1301 1302 uint32_t conditional = 0; 1303 switch (c->key.tex.compare_funcs[sampler]) { 1304 /* GL_ALWAYS and GL_NEVER were handled at the top of the function */ 1305 case GL_LESS: conditional = BRW_CONDITIONAL_L; break; 1306 case GL_GREATER: conditional = BRW_CONDITIONAL_G; break; 1307 case GL_LEQUAL: conditional = BRW_CONDITIONAL_LE; break; 1308 case GL_GEQUAL: conditional = BRW_CONDITIONAL_GE; break; 1309 case GL_EQUAL: conditional = BRW_CONDITIONAL_EQ; break; 1310 case GL_NOTEQUAL: conditional = BRW_CONDITIONAL_NEQ; break; 1311 default: assert(!"Should not get here: bad shadow compare function"); 1312 } 1313 1314 /* Use conditional moves to load 0 or 1 as the result */ 1315 this->current_annotation = "manual shadow comparison"; 1316 for (int i = 0; i < 4; i++) { 1317 inst = emit(BRW_OPCODE_MOV, dst, fs_reg(0.0f)); 1318 1319 inst = emit(BRW_OPCODE_CMP, reg_null_f, ref, value); 1320 inst->conditional_mod = conditional; 1321 1322 inst = emit(BRW_OPCODE_MOV, dst, fs_reg(1.0f)); 1323 inst->predicated = true; 1324 1325 dst.reg_offset++; 1326 value.reg_offset++; 1327 } 1328 dst.reg_offset = 0; 1329 } 1330 } 1331 1332 swizzle_result(ir, dst, sampler); 1333} 1334 1335/** 1336 * Swizzle the result of a texture result. This is necessary for 1337 * EXT_texture_swizzle as well as DEPTH_TEXTURE_MODE for shadow comparisons. 1338 */ 1339void 1340fs_visitor::swizzle_result(ir_texture *ir, fs_reg orig_val, int sampler) 1341{ 1342 this->result = orig_val; 1343 1344 if (ir->op == ir_txs) 1345 return; 1346 1347 if (ir->type == glsl_type::float_type) { 1348 /* Ignore DEPTH_TEXTURE_MODE swizzling. */ 1349 assert(ir->sampler->type->sampler_shadow); 1350 } else if (c->key.tex.swizzles[sampler] != SWIZZLE_NOOP) { 1351 fs_reg swizzled_result = fs_reg(this, glsl_type::vec4_type); 1352 1353 for (int i = 0; i < 4; i++) { 1354 int swiz = GET_SWZ(c->key.tex.swizzles[sampler], i); 1355 fs_reg l = swizzled_result; 1356 l.reg_offset += i; 1357 1358 if (swiz == SWIZZLE_ZERO) { 1359 emit(BRW_OPCODE_MOV, l, fs_reg(0.0f)); 1360 } else if (swiz == SWIZZLE_ONE) { 1361 emit(BRW_OPCODE_MOV, l, fs_reg(1.0f)); 1362 } else { 1363 fs_reg r = orig_val; 1364 r.reg_offset += GET_SWZ(c->key.tex.swizzles[sampler], i); 1365 emit(BRW_OPCODE_MOV, l, r); 1366 } 1367 } 1368 this->result = swizzled_result; 1369 } 1370} 1371 1372void 1373fs_visitor::visit(ir_swizzle *ir) 1374{ 1375 ir->val->accept(this); 1376 fs_reg val = this->result; 1377 1378 if (ir->type->vector_elements == 1) { 1379 this->result.reg_offset += ir->mask.x; 1380 return; 1381 } 1382 1383 fs_reg result = fs_reg(this, ir->type); 1384 this->result = result; 1385 1386 for (unsigned int i = 0; i < ir->type->vector_elements; i++) { 1387 fs_reg channel = val; 1388 int swiz = 0; 1389 1390 switch (i) { 1391 case 0: 1392 swiz = ir->mask.x; 1393 break; 1394 case 1: 1395 swiz = ir->mask.y; 1396 break; 1397 case 2: 1398 swiz = ir->mask.z; 1399 break; 1400 case 3: 1401 swiz = ir->mask.w; 1402 break; 1403 } 1404 1405 channel.reg_offset += swiz; 1406 emit(BRW_OPCODE_MOV, result, channel); 1407 result.reg_offset++; 1408 } 1409} 1410 1411void 1412fs_visitor::visit(ir_discard *ir) 1413{ 1414 assert(ir->condition == NULL); /* FINISHME */ 1415 1416 emit(FS_OPCODE_DISCARD); 1417 kill_emitted = true; 1418} 1419 1420void 1421fs_visitor::visit(ir_constant *ir) 1422{ 1423 /* Set this->result to reg at the bottom of the function because some code 1424 * paths will cause this visitor to be applied to other fields. This will 1425 * cause the value stored in this->result to be modified. 1426 * 1427 * Make reg constant so that it doesn't get accidentally modified along the 1428 * way. Yes, I actually had this problem. :( 1429 */ 1430 const fs_reg reg(this, ir->type); 1431 fs_reg dst_reg = reg; 1432 1433 if (ir->type->is_array()) { 1434 const unsigned size = type_size(ir->type->fields.array); 1435 1436 for (unsigned i = 0; i < ir->type->length; i++) { 1437 ir->array_elements[i]->accept(this); 1438 fs_reg src_reg = this->result; 1439 1440 dst_reg.type = src_reg.type; 1441 for (unsigned j = 0; j < size; j++) { 1442 emit(BRW_OPCODE_MOV, dst_reg, src_reg); 1443 src_reg.reg_offset++; 1444 dst_reg.reg_offset++; 1445 } 1446 } 1447 } else if (ir->type->is_record()) { 1448 foreach_list(node, &ir->components) { 1449 ir_constant *const field = (ir_constant *) node; 1450 const unsigned size = type_size(field->type); 1451 1452 field->accept(this); 1453 fs_reg src_reg = this->result; 1454 1455 dst_reg.type = src_reg.type; 1456 for (unsigned j = 0; j < size; j++) { 1457 emit(BRW_OPCODE_MOV, dst_reg, src_reg); 1458 src_reg.reg_offset++; 1459 dst_reg.reg_offset++; 1460 } 1461 } 1462 } else { 1463 const unsigned size = type_size(ir->type); 1464 1465 for (unsigned i = 0; i < size; i++) { 1466 switch (ir->type->base_type) { 1467 case GLSL_TYPE_FLOAT: 1468 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i])); 1469 break; 1470 case GLSL_TYPE_UINT: 1471 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i])); 1472 break; 1473 case GLSL_TYPE_INT: 1474 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i])); 1475 break; 1476 case GLSL_TYPE_BOOL: 1477 emit(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i])); 1478 break; 1479 default: 1480 assert(!"Non-float/uint/int/bool constant"); 1481 } 1482 dst_reg.reg_offset++; 1483 } 1484 } 1485 1486 this->result = reg; 1487} 1488 1489void 1490fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir) 1491{ 1492 ir_expression *expr = ir->as_expression(); 1493 1494 if (expr) { 1495 fs_reg op[2]; 1496 fs_inst *inst; 1497 1498 assert(expr->get_num_operands() <= 2); 1499 for (unsigned int i = 0; i < expr->get_num_operands(); i++) { 1500 assert(expr->operands[i]->type->is_scalar()); 1501 1502 expr->operands[i]->accept(this); 1503 op[i] = this->result; 1504 1505 resolve_ud_negate(&op[i]); 1506 } 1507 1508 switch (expr->operation) { 1509 case ir_unop_logic_not: 1510 inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1)); 1511 inst->conditional_mod = BRW_CONDITIONAL_Z; 1512 break; 1513 1514 case ir_binop_logic_xor: 1515 case ir_binop_logic_or: 1516 case ir_binop_logic_and: 1517 goto out; 1518 1519 case ir_unop_f2b: 1520 if (intel->gen >= 6) { 1521 inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0.0f)); 1522 } else { 1523 inst = emit(BRW_OPCODE_MOV, reg_null_f, op[0]); 1524 } 1525 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1526 break; 1527 1528 case ir_unop_i2b: 1529 if (intel->gen >= 6) { 1530 inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0)); 1531 } else { 1532 inst = emit(BRW_OPCODE_MOV, reg_null_d, op[0]); 1533 } 1534 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1535 break; 1536 1537 case ir_binop_greater: 1538 case ir_binop_gequal: 1539 case ir_binop_less: 1540 case ir_binop_lequal: 1541 case ir_binop_equal: 1542 case ir_binop_all_equal: 1543 case ir_binop_nequal: 1544 case ir_binop_any_nequal: 1545 inst = emit(BRW_OPCODE_CMP, reg_null_cmp, op[0], op[1]); 1546 inst->conditional_mod = 1547 brw_conditional_for_comparison(expr->operation); 1548 break; 1549 1550 default: 1551 assert(!"not reached"); 1552 fail("bad cond code\n"); 1553 break; 1554 } 1555 return; 1556 } 1557 1558out: 1559 ir->accept(this); 1560 1561 fs_inst *inst = emit(BRW_OPCODE_AND, reg_null_d, this->result, fs_reg(1)); 1562 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1563} 1564 1565/** 1566 * Emit a gen6 IF statement with the comparison folded into the IF 1567 * instruction. 1568 */ 1569void 1570fs_visitor::emit_if_gen6(ir_if *ir) 1571{ 1572 ir_expression *expr = ir->condition->as_expression(); 1573 1574 if (expr) { 1575 fs_reg op[2]; 1576 fs_inst *inst; 1577 fs_reg temp; 1578 1579 assert(expr->get_num_operands() <= 2); 1580 for (unsigned int i = 0; i < expr->get_num_operands(); i++) { 1581 assert(expr->operands[i]->type->is_scalar()); 1582 1583 expr->operands[i]->accept(this); 1584 op[i] = this->result; 1585 } 1586 1587 switch (expr->operation) { 1588 case ir_unop_logic_not: 1589 inst = emit(BRW_OPCODE_IF, temp, op[0], fs_reg(0)); 1590 inst->conditional_mod = BRW_CONDITIONAL_Z; 1591 return; 1592 1593 case ir_binop_logic_xor: 1594 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]); 1595 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1596 return; 1597 1598 case ir_binop_logic_or: 1599 temp = fs_reg(this, glsl_type::bool_type); 1600 emit(BRW_OPCODE_OR, temp, op[0], op[1]); 1601 inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)); 1602 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1603 return; 1604 1605 case ir_binop_logic_and: 1606 temp = fs_reg(this, glsl_type::bool_type); 1607 emit(BRW_OPCODE_AND, temp, op[0], op[1]); 1608 inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)); 1609 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1610 return; 1611 1612 case ir_unop_f2b: 1613 inst = emit(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0)); 1614 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1615 return; 1616 1617 case ir_unop_i2b: 1618 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)); 1619 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1620 return; 1621 1622 case ir_binop_greater: 1623 case ir_binop_gequal: 1624 case ir_binop_less: 1625 case ir_binop_lequal: 1626 case ir_binop_equal: 1627 case ir_binop_all_equal: 1628 case ir_binop_nequal: 1629 case ir_binop_any_nequal: 1630 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]); 1631 inst->conditional_mod = 1632 brw_conditional_for_comparison(expr->operation); 1633 return; 1634 default: 1635 assert(!"not reached"); 1636 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)); 1637 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1638 fail("bad condition\n"); 1639 return; 1640 } 1641 return; 1642 } 1643 1644 ir->condition->accept(this); 1645 1646 fs_inst *inst = emit(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0)); 1647 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1648} 1649 1650void 1651fs_visitor::visit(ir_if *ir) 1652{ 1653 fs_inst *inst; 1654 1655 if (intel->gen < 6 && c->dispatch_width == 16) { 1656 fail("Can't support (non-uniform) control flow on 16-wide\n"); 1657 } 1658 1659 /* Don't point the annotation at the if statement, because then it plus 1660 * the then and else blocks get printed. 1661 */ 1662 this->base_ir = ir->condition; 1663 1664 if (intel->gen == 6) { 1665 emit_if_gen6(ir); 1666 } else { 1667 emit_bool_to_cond_code(ir->condition); 1668 1669 inst = emit(BRW_OPCODE_IF); 1670 inst->predicated = true; 1671 } 1672 1673 foreach_list(node, &ir->then_instructions) { 1674 ir_instruction *ir = (ir_instruction *)node; 1675 this->base_ir = ir; 1676 1677 ir->accept(this); 1678 } 1679 1680 if (!ir->else_instructions.is_empty()) { 1681 emit(BRW_OPCODE_ELSE); 1682 1683 foreach_list(node, &ir->else_instructions) { 1684 ir_instruction *ir = (ir_instruction *)node; 1685 this->base_ir = ir; 1686 1687 ir->accept(this); 1688 } 1689 } 1690 1691 emit(BRW_OPCODE_ENDIF); 1692} 1693 1694void 1695fs_visitor::visit(ir_loop *ir) 1696{ 1697 fs_reg counter = reg_undef; 1698 1699 if (intel->gen < 6 && c->dispatch_width == 16) { 1700 fail("Can't support (non-uniform) control flow on 16-wide\n"); 1701 } 1702 1703 if (ir->counter) { 1704 this->base_ir = ir->counter; 1705 ir->counter->accept(this); 1706 counter = *(variable_storage(ir->counter)); 1707 1708 if (ir->from) { 1709 this->base_ir = ir->from; 1710 ir->from->accept(this); 1711 1712 emit(BRW_OPCODE_MOV, counter, this->result); 1713 } 1714 } 1715 1716 this->base_ir = NULL; 1717 emit(BRW_OPCODE_DO); 1718 1719 if (ir->to) { 1720 this->base_ir = ir->to; 1721 ir->to->accept(this); 1722 1723 fs_inst *inst = emit(BRW_OPCODE_CMP, reg_null_cmp, counter, this->result); 1724 inst->conditional_mod = brw_conditional_for_comparison(ir->cmp); 1725 1726 inst = emit(BRW_OPCODE_BREAK); 1727 inst->predicated = true; 1728 } 1729 1730 foreach_list(node, &ir->body_instructions) { 1731 ir_instruction *ir = (ir_instruction *)node; 1732 1733 this->base_ir = ir; 1734 ir->accept(this); 1735 } 1736 1737 if (ir->increment) { 1738 this->base_ir = ir->increment; 1739 ir->increment->accept(this); 1740 emit(BRW_OPCODE_ADD, counter, counter, this->result); 1741 } 1742 1743 this->base_ir = NULL; 1744 emit(BRW_OPCODE_WHILE); 1745} 1746 1747void 1748fs_visitor::visit(ir_loop_jump *ir) 1749{ 1750 switch (ir->mode) { 1751 case ir_loop_jump::jump_break: 1752 emit(BRW_OPCODE_BREAK); 1753 break; 1754 case ir_loop_jump::jump_continue: 1755 emit(BRW_OPCODE_CONTINUE); 1756 break; 1757 } 1758} 1759 1760void 1761fs_visitor::visit(ir_call *ir) 1762{ 1763 assert(!"FINISHME"); 1764} 1765 1766void 1767fs_visitor::visit(ir_return *ir) 1768{ 1769 assert(!"FINISHME"); 1770} 1771 1772void 1773fs_visitor::visit(ir_function *ir) 1774{ 1775 /* Ignore function bodies other than main() -- we shouldn't see calls to 1776 * them since they should all be inlined before we get to ir_to_mesa. 1777 */ 1778 if (strcmp(ir->name, "main") == 0) { 1779 const ir_function_signature *sig; 1780 exec_list empty; 1781 1782 sig = ir->matching_signature(&empty); 1783 1784 assert(sig); 1785 1786 foreach_list(node, &sig->body) { 1787 ir_instruction *ir = (ir_instruction *)node; 1788 this->base_ir = ir; 1789 1790 ir->accept(this); 1791 } 1792 } 1793} 1794 1795void 1796fs_visitor::visit(ir_function_signature *ir) 1797{ 1798 assert(!"not reached"); 1799 (void)ir; 1800} 1801 1802fs_inst * 1803fs_visitor::emit(fs_inst inst) 1804{ 1805 fs_inst *list_inst = new(mem_ctx) fs_inst; 1806 *list_inst = inst; 1807 1808 if (force_uncompressed_stack > 0) 1809 list_inst->force_uncompressed = true; 1810 else if (force_sechalf_stack > 0) 1811 list_inst->force_sechalf = true; 1812 1813 list_inst->annotation = this->current_annotation; 1814 list_inst->ir = this->base_ir; 1815 1816 this->instructions.push_tail(list_inst); 1817 1818 return list_inst; 1819} 1820 1821/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */ 1822void 1823fs_visitor::emit_dummy_fs() 1824{ 1825 int reg_width = c->dispatch_width / 8; 1826 1827 /* Everyone's favorite color. */ 1828 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 0 * reg_width), fs_reg(1.0f)); 1829 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 1 * reg_width), fs_reg(0.0f)); 1830 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 2 * reg_width), fs_reg(1.0f)); 1831 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 3 * reg_width), fs_reg(0.0f)); 1832 1833 fs_inst *write; 1834 write = emit(FS_OPCODE_FB_WRITE, fs_reg(0), fs_reg(0)); 1835 write->base_mrf = 2; 1836 write->mlen = 4 * reg_width; 1837 write->eot = true; 1838} 1839 1840/* The register location here is relative to the start of the URB 1841 * data. It will get adjusted to be a real location before 1842 * generate_code() time. 1843 */ 1844struct brw_reg 1845fs_visitor::interp_reg(int location, int channel) 1846{ 1847 int regnr = urb_setup[location] * 2 + channel / 2; 1848 int stride = (channel & 1) * 4; 1849 1850 assert(urb_setup[location] != -1); 1851 1852 return brw_vec1_grf(regnr, stride); 1853} 1854 1855/** Emits the interpolation for the varying inputs. */ 1856void 1857fs_visitor::emit_interpolation_setup_gen4() 1858{ 1859 this->current_annotation = "compute pixel centers"; 1860 this->pixel_x = fs_reg(this, glsl_type::uint_type); 1861 this->pixel_y = fs_reg(this, glsl_type::uint_type); 1862 this->pixel_x.type = BRW_REGISTER_TYPE_UW; 1863 this->pixel_y.type = BRW_REGISTER_TYPE_UW; 1864 1865 emit(FS_OPCODE_PIXEL_X, this->pixel_x); 1866 emit(FS_OPCODE_PIXEL_Y, this->pixel_y); 1867 1868 this->current_annotation = "compute pixel deltas from v0"; 1869 if (brw->has_pln) { 1870 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1871 fs_reg(this, glsl_type::vec2_type); 1872 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1873 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC]; 1874 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg_offset++; 1875 } else { 1876 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1877 fs_reg(this, glsl_type::float_type); 1878 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1879 fs_reg(this, glsl_type::float_type); 1880 } 1881 emit(BRW_OPCODE_ADD, this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1882 this->pixel_x, fs_reg(negate(brw_vec1_grf(1, 0)))); 1883 emit(BRW_OPCODE_ADD, this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1884 this->pixel_y, fs_reg(negate(brw_vec1_grf(1, 1)))); 1885 1886 this->current_annotation = "compute pos.w and 1/pos.w"; 1887 /* Compute wpos.w. It's always in our setup, since it's needed to 1888 * interpolate the other attributes. 1889 */ 1890 this->wpos_w = fs_reg(this, glsl_type::float_type); 1891 emit(FS_OPCODE_LINTERP, wpos_w, 1892 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1893 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1894 interp_reg(FRAG_ATTRIB_WPOS, 3)); 1895 /* Compute the pixel 1/W value from wpos.w. */ 1896 this->pixel_w = fs_reg(this, glsl_type::float_type); 1897 emit_math(SHADER_OPCODE_RCP, this->pixel_w, wpos_w); 1898 this->current_annotation = NULL; 1899} 1900 1901/** Emits the interpolation for the varying inputs. */ 1902void 1903fs_visitor::emit_interpolation_setup_gen6() 1904{ 1905 struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW); 1906 1907 /* If the pixel centers end up used, the setup is the same as for gen4. */ 1908 this->current_annotation = "compute pixel centers"; 1909 fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type); 1910 fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type); 1911 int_pixel_x.type = BRW_REGISTER_TYPE_UW; 1912 int_pixel_y.type = BRW_REGISTER_TYPE_UW; 1913 emit(BRW_OPCODE_ADD, 1914 int_pixel_x, 1915 fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)), 1916 fs_reg(brw_imm_v(0x10101010))); 1917 emit(BRW_OPCODE_ADD, 1918 int_pixel_y, 1919 fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)), 1920 fs_reg(brw_imm_v(0x11001100))); 1921 1922 /* As of gen6, we can no longer mix float and int sources. We have 1923 * to turn the integer pixel centers into floats for their actual 1924 * use. 1925 */ 1926 this->pixel_x = fs_reg(this, glsl_type::float_type); 1927 this->pixel_y = fs_reg(this, glsl_type::float_type); 1928 emit(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x); 1929 emit(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y); 1930 1931 this->current_annotation = "compute pos.w"; 1932 this->pixel_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0)); 1933 this->wpos_w = fs_reg(this, glsl_type::float_type); 1934 emit_math(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w); 1935 1936 for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) { 1937 uint8_t reg = c->barycentric_coord_reg[i]; 1938 this->delta_x[i] = fs_reg(brw_vec8_grf(reg, 0)); 1939 this->delta_y[i] = fs_reg(brw_vec8_grf(reg + 1, 0)); 1940 } 1941 1942 this->current_annotation = NULL; 1943} 1944 1945void 1946fs_visitor::emit_color_write(int target, int index, int first_color_mrf) 1947{ 1948 int reg_width = c->dispatch_width / 8; 1949 fs_inst *inst; 1950 fs_reg color = outputs[target]; 1951 fs_reg mrf; 1952 1953 /* If there's no color data to be written, skip it. */ 1954 if (color.file == BAD_FILE) 1955 return; 1956 1957 color.reg_offset += index; 1958 1959 if (c->dispatch_width == 8 || intel->gen >= 6) { 1960 /* SIMD8 write looks like: 1961 * m + 0: r0 1962 * m + 1: r1 1963 * m + 2: g0 1964 * m + 3: g1 1965 * 1966 * gen6 SIMD16 DP write looks like: 1967 * m + 0: r0 1968 * m + 1: r1 1969 * m + 2: g0 1970 * m + 3: g1 1971 * m + 4: b0 1972 * m + 5: b1 1973 * m + 6: a0 1974 * m + 7: a1 1975 */ 1976 inst = emit(BRW_OPCODE_MOV, 1977 fs_reg(MRF, first_color_mrf + index * reg_width, color.type), 1978 color); 1979 inst->saturate = c->key.clamp_fragment_color; 1980 } else { 1981 /* pre-gen6 SIMD16 single source DP write looks like: 1982 * m + 0: r0 1983 * m + 1: g0 1984 * m + 2: b0 1985 * m + 3: a0 1986 * m + 4: r1 1987 * m + 5: g1 1988 * m + 6: b1 1989 * m + 7: a1 1990 */ 1991 if (brw->has_compr4) { 1992 /* By setting the high bit of the MRF register number, we 1993 * indicate that we want COMPR4 mode - instead of doing the 1994 * usual destination + 1 for the second half we get 1995 * destination + 4. 1996 */ 1997 inst = emit(BRW_OPCODE_MOV, 1998 fs_reg(MRF, BRW_MRF_COMPR4 + first_color_mrf + index, 1999 color.type), 2000 color); 2001 inst->saturate = c->key.clamp_fragment_color; 2002 } else { 2003 push_force_uncompressed(); 2004 inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index, 2005 color.type), 2006 color); 2007 inst->saturate = c->key.clamp_fragment_color; 2008 pop_force_uncompressed(); 2009 2010 push_force_sechalf(); 2011 color.sechalf = true; 2012 inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index + 4, 2013 color.type), 2014 color); 2015 inst->saturate = c->key.clamp_fragment_color; 2016 pop_force_sechalf(); 2017 color.sechalf = false; 2018 } 2019 } 2020} 2021 2022void 2023fs_visitor::emit_fb_writes() 2024{ 2025 this->current_annotation = "FB write header"; 2026 bool header_present = true; 2027 /* We can potentially have a message length of up to 15, so we have to set 2028 * base_mrf to either 0 or 1 in order to fit in m0..m15. 2029 */ 2030 int base_mrf = 1; 2031 int nr = base_mrf; 2032 int reg_width = c->dispatch_width / 8; 2033 2034 if (intel->gen >= 6 && 2035 !this->kill_emitted && 2036 c->key.nr_color_regions == 1) { 2037 header_present = false; 2038 } 2039 2040 if (header_present) { 2041 /* m2, m3 header */ 2042 nr += 2; 2043 } 2044 2045 if (c->aa_dest_stencil_reg) { 2046 push_force_uncompressed(); 2047 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr++), 2048 fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0))); 2049 pop_force_uncompressed(); 2050 } 2051 2052 /* Reserve space for color. It'll be filled in per MRT below. */ 2053 int color_mrf = nr; 2054 nr += 4 * reg_width; 2055 2056 if (c->source_depth_to_render_target) { 2057 if (intel->gen == 6 && c->dispatch_width == 16) { 2058 /* For outputting oDepth on gen6, SIMD8 writes have to be 2059 * used. This would require 8-wide moves of each half to 2060 * message regs, kind of like pre-gen5 SIMD16 FB writes. 2061 * Just bail on doing so for now. 2062 */ 2063 fail("Missing support for simd16 depth writes on gen6\n"); 2064 } 2065 2066 if (c->computes_depth) { 2067 /* Hand over gl_FragDepth. */ 2068 assert(this->frag_depth); 2069 fs_reg depth = *(variable_storage(this->frag_depth)); 2070 2071 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), depth); 2072 } else { 2073 /* Pass through the payload depth. */ 2074 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), 2075 fs_reg(brw_vec8_grf(c->source_depth_reg, 0))); 2076 } 2077 nr += reg_width; 2078 } 2079 2080 if (c->dest_depth_reg) { 2081 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), 2082 fs_reg(brw_vec8_grf(c->dest_depth_reg, 0))); 2083 nr += reg_width; 2084 } 2085 2086 for (int target = 0; target < c->key.nr_color_regions; target++) { 2087 this->current_annotation = ralloc_asprintf(this->mem_ctx, 2088 "FB write target %d", 2089 target); 2090 for (int i = 0; i < 4; i++) 2091 emit_color_write(target, i, color_mrf); 2092 2093 fs_inst *inst = emit(FS_OPCODE_FB_WRITE); 2094 inst->target = target; 2095 inst->base_mrf = base_mrf; 2096 inst->mlen = nr - base_mrf; 2097 if (target == c->key.nr_color_regions - 1) 2098 inst->eot = true; 2099 inst->header_present = header_present; 2100 } 2101 2102 if (c->key.nr_color_regions == 0) { 2103 if (c->key.alpha_test) { 2104 /* If the alpha test is enabled but there's no color buffer, 2105 * we still need to send alpha out the pipeline to our null 2106 * renderbuffer. 2107 */ 2108 emit_color_write(0, 3, color_mrf); 2109 } 2110 2111 fs_inst *inst = emit(FS_OPCODE_FB_WRITE); 2112 inst->base_mrf = base_mrf; 2113 inst->mlen = nr - base_mrf; 2114 inst->eot = true; 2115 inst->header_present = header_present; 2116 } 2117 2118 this->current_annotation = NULL; 2119} 2120 2121void 2122fs_visitor::resolve_ud_negate(fs_reg *reg) 2123{ 2124 if (reg->type != BRW_REGISTER_TYPE_UD || 2125 !reg->negate) 2126 return; 2127 2128 fs_reg temp = fs_reg(this, glsl_type::uint_type); 2129 emit(BRW_OPCODE_MOV, temp, *reg); 2130 *reg = temp; 2131} 2132