brw_fs_visitor.cpp revision 4b274068204c7f0bacaa4639f24feb433353b861
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR.  The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
extern "C" {

#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_wm.h"
}
#include "brw_shader.h"
#include "brw_fs.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
#include "glsl/ir_print_visitor.h"

void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   if (ir->mode == ir_var_in) {
      if (!strcmp(ir->name, "gl_FragCoord")) {
         reg = emit_fragcoord_interpolation(ir);
      } else if (!strcmp(ir->name, "gl_FrontFacing")) {
         reg = emit_frontfacing_interpolation(ir);
      } else {
         reg = emit_general_interpolation(ir);
      }
      assert(reg);
      hash_table_insert(this->variable_ht, reg, ir);
      return;
   } else if (ir->mode == ir_var_out) {
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

      if (ir->location == FRAG_RESULT_COLOR) {
         /* Writing gl_FragColor outputs to all color regions. */
         for (int i = 0; i < MAX2(c->key.nr_color_regions, 1); i++) {
            this->outputs[i] = *reg;
         }
      } else if (ir->location == FRAG_RESULT_DEPTH) {
         this->frag_depth = ir;
      } else {
         /* gl_FragData or a user-defined FS output */
         assert(ir->location >= FRAG_RESULT_DATA0 &&
                ir->location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

         /* General color output. */
         for (unsigned int i = 0; i < MAX2(1, ir->type->length); i++) {
            int output = ir->location - FRAG_RESULT_DATA0 + i;
            this->outputs[output] = *reg;
            this->outputs[output].reg_offset += 4 * i;
         }
      }
   } else if (ir->mode == ir_var_uniform) {
      int param_index = c->prog_data.nr_params;

      if (c->dispatch_width == 16) {
         if (!variable_storage(ir)) {
            fail("Failed to find uniform '%s' in 16-wide\n", ir->name);
         }
         return;
      }

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
      reg->type = brw_type_for_base_type(ir->type);
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

   hash_table_insert(this->variable_ht, reg, ir);
}

void
fs_visitor::visit(ir_dereference_variable *ir)
{
   fs_reg *reg = variable_storage(ir->var);
   this->result = *reg;
}

void
fs_visitor::visit(ir_dereference_record *ir)
{
   const glsl_type *struct_type = ir->record->type;

   ir->record->accept(this);

   unsigned int offset = 0;
   for (unsigned int i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }
   this->result.reg_offset += offset;
   this->result.type = brw_type_for_base_type(ir->type);
}

void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   int element_size;

   ir->array->accept(this);
   index = ir->array_index->as_constant();

   element_size = type_size(ir->type);
   this->result.type = brw_type_for_base_type(ir->type);

   if (index) {
      assert(this->result.file == UNIFORM || this->result.file == GRF);
      this->result.reg_offset += index->value.i[0] * element_size;
   } else {
      assert(!"FINISHME: non-constant array element");
   }
}

/* Instruction selection: Produce a MOV.sat instead of
 * MIN(MAX(val, 0), 1) when possible.
 */
bool
fs_visitor::try_emit_saturate(ir_expression *ir)
{
   ir_rvalue *sat_val = ir->as_rvalue_to_saturate();

   if (!sat_val)
      return false;

   sat_val->accept(this);
   fs_reg src = this->result;

   this->result = fs_reg(this, ir->type);
   fs_inst *inst = emit(BRW_OPCODE_MOV, this->result, src);
   inst->saturate = true;

   return true;
}

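/* Try to fold one side of an ir_binop_add that is a multiply into a
 * single three-source MAD.  For example, with none of the operands
 * constant,
 *
 *    x + y * z
 *
 * selects to "MAD dst, x, y, z" instead of a MUL into a temporary
 * followed by an ADD.
 */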
bool
fs_visitor::try_emit_mad(ir_expression *ir, int mul_arg)
{
   /* 3-src instructions were introduced in gen6. */
   if (intel->gen < 6)
      return false;

   /* MAD can only handle floating-point data. */
   if (ir->type != glsl_type::float_type)
      return false;

   ir_rvalue *nonmul = ir->operands[1 - mul_arg];
   ir_expression *mul = ir->operands[mul_arg]->as_expression();

   if (!mul || mul->operation != ir_binop_mul)
      return false;

   if (nonmul->as_constant() ||
       mul->operands[0]->as_constant() ||
       mul->operands[1]->as_constant())
      return false;

   nonmul->accept(this);
   fs_reg src0 = this->result;

   mul->operands[0]->accept(this);
   fs_reg src1 = this->result;

   mul->operands[1]->accept(this);
   fs_reg src2 = this->result;

   this->result = fs_reg(this, ir->type);
   emit(BRW_OPCODE_MAD, this->result, src0, src1, src2);

   return true;
}

void
fs_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   fs_reg op[2], temp;
   fs_inst *inst;

   assert(ir->get_num_operands() <= 2);

   if (try_emit_saturate(ir))
      return;
   if (ir->operation == ir_binop_add) {
      if (try_emit_mad(ir, 0) || try_emit_mad(ir, 1))
         return;
   }

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         ir_print_visitor v;
         fail("Failed to get tree for expression operand:\n");
         ir->operands[operand]->accept(&v);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
      /* And then those vector operands should have been broken down to
       * scalar.
       */
      assert(!ir->operands[operand]->type->is_vector());
   }

   /* Storage for our result.  If our result goes into an assignment, it will
    * just get copy-propagated out, so no worries.
    */
   this->result = fs_reg(this, ir->type);

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
       * one's complement of the whole register, not just bit 0.
       */
      emit(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      this->result = op[0];
      break;

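   /* sign(x) has no direct hardware mapping, so it expands to a
    * compare-and-predicated-move sequence: start from 0.0, then
    * predicated MOVs overwrite the result with 1.0 where x > 0 and
    * with -1.0 where x < 0.
    */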
   case ir_unop_sign:
      temp = fs_reg(this, ir->type);

      emit(BRW_OPCODE_MOV, this->result, fs_reg(0.0f));

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(1.0f));
      inst->predicated = true;

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f));
      inst->predicated = true;

      break;
   case ir_unop_rcp:
      emit_math(SHADER_OPCODE_RCP, this->result, op[0]);
      break;

   case ir_unop_exp2:
      emit_math(SHADER_OPCODE_EXP2, this->result, op[0]);
      break;
   case ir_unop_log2:
      emit_math(SHADER_OPCODE_LOG2, this->result, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(SHADER_OPCODE_SIN, this->result, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(SHADER_OPCODE_COS, this->result, op[0]);
      break;

   case ir_unop_dFdx:
      emit(FS_OPCODE_DDX, this->result, op[0]);
      break;
   case ir_unop_dFdy:
      emit(FS_OPCODE_DDY, this->result, op[0]);
      break;

   case ir_binop_add:
      emit(BRW_OPCODE_ADD, this->result, op[0], op[1]);
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;

   case ir_binop_mul:
      if (ir->type->is_integer()) {
         /* For integer multiplication, the MUL uses the low 16 bits
          * of one of the operands (src0 on gen6, src1 on gen7).  The
          * MACH accumulates in the contribution of the upper 16 bits
          * of that operand.
          *
          * FINISHME: Emit just the MUL if we know an operand is small
          * enough.
          */
         struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);

         emit(BRW_OPCODE_MUL, acc, op[0], op[1]);
         emit(BRW_OPCODE_MACH, reg_null_d, op[0], op[1]);
         emit(BRW_OPCODE_MOV, this->result, fs_reg(acc));
      } else {
         emit(BRW_OPCODE_MUL, this->result, op[0], op[1]);
      }
      break;
   case ir_binop_div:
      if (intel->gen >= 7 && c->dispatch_width == 16)
         fail("16-wide INTDIV unsupported\n");

      /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_QUOTIENT, this->result, op[0], op[1]);
      break;
   case ir_binop_mod:
      if (intel->gen >= 7 && c->dispatch_width == 16)
         fail("16-wide INTDIV unsupported\n");

      /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_REMAINDER, this->result, op[0], op[1]);
      break;

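   /* A passing CMP writes all ones (~0) to the destination channel and
    * a failing one writes 0, so the AND with 0x1 below masks that down
    * to the 0/1 encoding the rest of the compiler uses for booleans.
    */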
   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_all_equal:
   case ir_binop_nequal:
   case ir_binop_any_nequal:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
         temp.type = op[0].type;

      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      inst = emit(BRW_OPCODE_CMP, temp, op[0], op[1]);
      inst->conditional_mod = brw_conditional_for_comparison(ir->operation);
      emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1));
      break;

   case ir_binop_logic_xor:
      emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
      break;

   case ir_binop_logic_or:
      emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
      break;

   case ir_binop_logic_and:
      emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
      break;

   case ir_binop_dot:
   case ir_unop_any:
      assert(!"not reached: should be handled by brw_fs_channel_expressions");
      break;

   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;

   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;

   case ir_unop_sqrt:
      emit_math(SHADER_OPCODE_SQRT, this->result, op[0]);
      break;

   case ir_unop_rsq:
      emit_math(SHADER_OPCODE_RSQ, this->result, op[0]);
      break;

   case ir_unop_i2u:
      op[0].type = BRW_REGISTER_TYPE_UD;
      this->result = op[0];
      break;
   case ir_unop_u2i:
      op[0].type = BRW_REGISTER_TYPE_D;
      this->result = op[0];
      break;
   case ir_unop_i2f:
   case ir_unop_u2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
   case ir_unop_f2i:
      emit(BRW_OPCODE_MOV, this->result, op[0]);
      break;
   case ir_unop_f2b:
   case ir_unop_i2b:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
         temp.type = op[0].type;

      resolve_ud_negate(&op[0]);

      inst = emit(BRW_OPCODE_CMP, temp, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      inst = emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(1));
      break;

   case ir_unop_trunc:
      emit(BRW_OPCODE_RNDZ, this->result, op[0]);
      break;

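   /* There is no round-toward-positive-infinity instruction, so ceil()
    * leans on the identity ceil(x) == -floor(-x): negate the source,
    * round down with RNDD, and negate the result.
    */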
   case ir_unop_ceil:
      op[0].negate = !op[0].negate;
      inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
      break;
   case ir_unop_fract:
      inst = emit(BRW_OPCODE_FRC, this->result, op[0]);
      break;
   case ir_unop_round_even:
      emit(BRW_OPCODE_RNDE, this->result, op[0]);
      break;

   case ir_binop_min:
      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         /* Unalias the destination */
         this->result = fs_reg(this, ir->type);

         inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;

         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->predicated = true;
      }
      break;
   case ir_binop_max:
      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_GE;
      } else {
         /* Unalias the destination */
         this->result = fs_reg(this, ir->type);

         inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_G;

         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->predicated = true;
      }
      break;

   case ir_binop_pow:
      emit_math(SHADER_OPCODE_POW, this->result, op[0], op[1]);
      break;

   case ir_unop_bit_not:
      inst = emit(BRW_OPCODE_NOT, this->result, op[0]);
      break;
   case ir_binop_bit_and:
      inst = emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
      break;
   case ir_binop_bit_xor:
      inst = emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
      break;
   case ir_binop_bit_or:
      inst = emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
      break;

   case ir_binop_lshift:
      inst = emit(BRW_OPCODE_SHL, this->result, op[0], op[1]);
      break;

   case ir_binop_rshift:
      if (ir->type->base_type == GLSL_TYPE_INT)
         inst = emit(BRW_OPCODE_ASR, this->result, op[0], op[1]);
      else
         inst = emit(BRW_OPCODE_SHR, this->result, op[0], op[1]);
      break;
   }
}

void
fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
                                   const glsl_type *type, bool predicated)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->components(); i++) {
         l.type = brw_type_for_base_type(type);
         r.type = brw_type_for_base_type(type);

         if (predicated || !l.equals(&r)) {
            fs_inst *inst = emit(BRW_OPCODE_MOV, l, r);
            inst->predicated = predicated;
         }

         l.reg_offset++;
         r.reg_offset++;
      }
      break;
   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.array, predicated);
      }
      break;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.structure[i].type,
                                predicated);
      }
      break;

   case GLSL_TYPE_SAMPLER:
      break;

   default:
      assert(!"not reached");
      break;
   }
}

/* If the RHS processing resulted in an instruction generating a
 * temporary value, and it would be easy to rewrite the instruction to
 * generate its result right into the LHS instead, do so.  This ends
 * up reliably removing instructions where it can be tricky to do so
 * later without real UD chain information.
 */
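/* For example, for an unconditional full write
 *
 *    x = a + b;
 *
 * the RHS visit leaves "ADD tmp, a, b" as the last instruction emitted,
 * and the rewrite retargets it to "ADD x, a, b", making the trailing
 * MOV into the LHS unnecessary.
 */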
bool
fs_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
                                   fs_reg dst,
                                   fs_reg src,
                                   fs_inst *pre_rhs_inst,
                                   fs_inst *last_rhs_inst)
{
   if (pre_rhs_inst == last_rhs_inst)
      return false; /* No instructions generated to work with. */

   /* Only attempt if we're doing a direct assignment. */
   if (ir->condition ||
       !(ir->lhs->type->is_scalar() ||
         (ir->lhs->type->is_vector() &&
          ir->write_mask == (1 << ir->lhs->type->vector_elements) - 1)))
      return false;

   /* Make sure the last instruction generated our source reg. */
   if (last_rhs_inst->predicated ||
       last_rhs_inst->force_uncompressed ||
       last_rhs_inst->force_sechalf ||
       !src.equals(&last_rhs_inst->dst))
      return false;

   /* If last_rhs_inst wrote a different number of components than our LHS,
    * we can't safely rewrite it.
    */
   if (ir->lhs->type->vector_elements != last_rhs_inst->regs_written())
      return false;

   /* Success!  Rewrite the instruction. */
   last_rhs_inst->dst = dst;

   return true;
}

void
fs_visitor::visit(ir_assignment *ir)
{
   fs_reg l, r;
   fs_inst *inst;

   /* FINISHME: arrays on the lhs */
   ir->lhs->accept(this);
   l = this->result;

   fs_inst *pre_rhs_inst = (fs_inst *) this->instructions.get_tail();

   ir->rhs->accept(this);
   r = this->result;

   fs_inst *last_rhs_inst = (fs_inst *) this->instructions.get_tail();

   assert(l.file != BAD_FILE);
   assert(r.file != BAD_FILE);

   if (try_rewrite_rhs_to_dst(ir, l, r, pre_rhs_inst, last_rhs_inst))
      return;

   if (ir->condition) {
      emit_bool_to_cond_code(ir->condition);
   }

   if (ir->lhs->type->is_scalar() ||
       ir->lhs->type->is_vector()) {
      for (int i = 0; i < ir->lhs->type->vector_elements; i++) {
         if (ir->write_mask & (1 << i)) {
            inst = emit(BRW_OPCODE_MOV, l, r);
            if (ir->condition)
               inst->predicated = true;
            r.reg_offset++;
         }
         l.reg_offset++;
      }
   } else {
      emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL);
   }
}

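/* Gen4 sampler messages always carry the g0 header, and the SIMD8
 * variants always include all three u/v/r coordinate slots even for
 * 1D/2D surfaces.  Operations with no SIMD8 encoding (non-shadow
 * bias/lod/ld and resinfo) are emitted below as SIMD16 messages, with
 * the interleaved return values untangled afterward.
 */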
fs_inst *
fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate,
                              int sampler)
{
   int mlen;
   int base_mrf = 1;
   bool simd16 = false;
   fs_reg orig_dst;

   /* g0 header. */
   mlen = 1;

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;

      if (ir->op == ir_tex) {
         /* There's no plain shadow compare message, so we use shadow
          * compare with a bias of 0.0.
          */
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), fs_reg(0.0f));
         mlen++;
      } else if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
         mlen++;
      } else {
         assert(ir->op == ir_txl);
         ir->lod_info.lod->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
         mlen++;
      }

      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen++;
   } else if (ir->op == ir_tex) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;
   } else if (ir->op == ir_txd) {
      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
         coordinate.reg_offset++;
      }
      /* the slots for u and v are always present, but r is optional */
      mlen += MAX2(ir->coordinate->type->vector_elements, 2);

      /* P    = u, v, r
       * dPdx = dudx, dvdx, drdx
       * dPdy = dudy, dvdy, drdy
       *
       * 1-arg: Does not exist.
       *
       * 2-arg: dudx   dvdx   dudy   dvdy
       *        dPdx.x dPdx.y dPdy.x dPdy.y
       *        m4     m5     m6     m7
       *
       * 3-arg: dudx   dvdx   drdx   dudy   dvdy   drdy
       *        dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
       *        m5     m6     m7     m8     m9     m10
       */
      for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
         dPdx.reg_offset++;
      }
      mlen += MAX2(ir->lod_info.grad.dPdx->type->vector_elements, 2);

      for (int i = 0; i < ir->lod_info.grad.dPdy->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
         dPdy.reg_offset++;
      }
      mlen += MAX2(ir->lod_info.grad.dPdy->type->vector_elements, 2);
   } else if (ir->op == ir_txs) {
      /* There's no SIMD8 resinfo message on Gen4.  Use SIMD16 instead. */
      simd16 = true;
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD),
           this->result);
      mlen += 2;
   } else {
      /* Oh joy.  gen4 doesn't have SIMD8 non-shadow-compare bias/lod
       * instructions.  We'll need to do SIMD16 here.
       */
      simd16 = true;
      assert(ir->op == ir_txb || ir->op == ir_txl || ir->op == ir_txf);

      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV,
              fs_reg(MRF, base_mrf + mlen + i * 2, coordinate.type),
              coordinate);
         coordinate.reg_offset++;
      }

      /* Initialize the rest of u/v/r with 0.0.  Empirically, this seems to
       * be necessary for TXF (ld), but seems wise to do for all messages.
       */
      for (int i = ir->coordinate->type->vector_elements; i < 3; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2),
              fs_reg(0.0f));
      }

      /* lod/bias appears after u/v/r. */
      mlen += 6;

      if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
         mlen++;
      } else {
         ir->lod_info.lod->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, this->result.type),
              this->result);
         mlen++;
      }

      /* The unused upper half. */
      mlen++;
   }

   if (simd16) {
      /* Now, since we're doing simd16, the return is 2 interleaved
       * vec4s where the odd-indexed ones are junk.  We'll need to move
       * this weirdness around to the expected layout.
       */
      orig_dst = dst;
      const glsl_type *vec_type =
         glsl_type::get_instance(ir->type->base_type, 4, 1);
      dst = fs_reg(this, glsl_type::get_array_instance(vec_type, 2));
      dst.type = intel->is_g4x ? brw_type_for_base_type(ir->type)
                               : BRW_REGISTER_TYPE_F;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(SHADER_OPCODE_TEX, dst);
      break;
   case ir_txb:
      inst = emit(FS_OPCODE_TXB, dst);
      break;
   case ir_txl:
      inst = emit(SHADER_OPCODE_TXL, dst);
      break;
   case ir_txd:
      inst = emit(SHADER_OPCODE_TXD, dst);
      break;
   case ir_txs:
      inst = emit(SHADER_OPCODE_TXS, dst);
      break;
   case ir_txf:
      inst = emit(SHADER_OPCODE_TXF, dst);
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = true;

   if (simd16) {
      for (int i = 0; i < 4; i++) {
         emit(BRW_OPCODE_MOV, orig_dst, dst);
         orig_dst.reg_offset++;
         dst.reg_offset += 2;
      }
   }

   return inst;
}

/* gen5's sampler has slots for u, v, r, array index, then optional
 * parameters like shadow comparitor or LOD bias.  If optional
 * parameters aren't present, those base slots are optional and don't
 * need to be included in the message.
 *
 * We don't fill in the unnecessary slots regardless, which may look
 * surprising in the disassembly.
 */
fs_inst *
fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate,
                              int sampler)
{
   int mlen = 0;
   int base_mrf = 2;
   int reg_width = c->dispatch_width / 8;
   bool header_present = false;
   const int vector_elements =
      ir->coordinate ? ir->coordinate->type->vector_elements : 0;

   if (ir->offset) {
      /* The offsets set up by the ir_texture visitor are in the
       * m1 header, so we can't go headerless.
       */
      header_present = true;
      mlen++;
      base_mrf--;
   }

   for (int i = 0; i < vector_elements; i++) {
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen + i * reg_width, coordinate.type),
           coordinate);
      coordinate.reg_offset++;
   }
   mlen += vector_elements * reg_width;

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      mlen = MAX2(mlen, header_present + 4 * reg_width);

      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(SHADER_OPCODE_TEX, dst);
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      mlen = MAX2(mlen, header_present + 4 * reg_width);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;

      inst = emit(FS_OPCODE_TXB, dst);

      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      mlen = MAX2(mlen, header_present + 4 * reg_width);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;

      inst = emit(SHADER_OPCODE_TXL, dst);
      break;
   case ir_txd: {
      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      mlen = MAX2(mlen, header_present + 4 * reg_width); /* skip over 'ai' */

      /**
       * P    = u, v, r
       * dPdx = dudx, dvdx, drdx
       * dPdy = dudy, dvdy, drdy
       *
       * Load up these values:
       * - dudx   dudy   dvdx   dvdy   drdx   drdy
       * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
       */
      for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
         dPdx.reg_offset++;
         mlen += reg_width;

         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
         dPdy.reg_offset++;
         mlen += reg_width;
      }

      inst = emit(SHADER_OPCODE_TXD, dst);
      break;
   }
   case ir_txs:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD),
           this->result);
      mlen += reg_width;
      inst = emit(SHADER_OPCODE_TXS, dst);
      break;
   case ir_txf:
      mlen = header_present + 4 * reg_width;

      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen - reg_width, BRW_REGISTER_TYPE_UD),
           this->result);
      inst = emit(SHADER_OPCODE_TXF, dst);
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = header_present;

   if (mlen > 11) {
      fail("Message length >11 disallowed by hardware\n");
   }

   return inst;
}

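/* Gen7 sampler messages are tightly packed: parameters are laid out per
 * dispatch width with no fixed u/v/r padding, and the header is only
 * needed when nonzero texel offsets have to ride along in m1.
 */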
fs_inst *
fs_visitor::emit_texture_gen7(ir_texture *ir, fs_reg dst, fs_reg coordinate,
                              int sampler)
{
   int mlen = 0;
   int base_mrf = 2;
   int reg_width = c->dispatch_width / 8;
   bool header_present = false;

   if (ir->offset) {
      /* The offsets set up by the ir_texture visitor are in the
       * m1 header, so we can't go headerless.
       */
      header_present = true;
      mlen++;
      base_mrf--;
   }

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
   }

   /* Set up the LOD info */
   switch (ir->op) {
   case ir_tex:
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
      break;
   case ir_txd: {
      if (c->dispatch_width == 16)
         fail("Gen7 does not support sample_d/sample_d_c in SIMD16 mode.");

      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      /* Load dPdx and the coordinate together:
       * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
       */
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate);
         coordinate.reg_offset++;
         mlen += reg_width;

         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
         dPdx.reg_offset++;
         mlen += reg_width;

         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
         dPdy.reg_offset++;
         mlen += reg_width;
      }
      break;
   }
   case ir_txs:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD),
           this->result);
      mlen += reg_width;
      break;
   case ir_txf:
      /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r. */
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate);
      coordinate.reg_offset++;
      mlen += reg_width;

      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D),
           this->result);
      mlen += reg_width;

      for (int i = 1; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV,
              fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate);
         coordinate.reg_offset++;
         mlen += reg_width;
      }
      break;
   }

   /* Set up the coordinate (except for cases where it was done above) */
   if (ir->op != ir_txd && ir->op != ir_txs && ir->op != ir_txf) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate);
         coordinate.reg_offset++;
         mlen += reg_width;
      }
   }

   /* Generate the SEND */
   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex: inst = emit(SHADER_OPCODE_TEX, dst); break;
   case ir_txb: inst = emit(FS_OPCODE_TXB, dst); break;
   case ir_txl: inst = emit(SHADER_OPCODE_TXL, dst); break;
   case ir_txd: inst = emit(SHADER_OPCODE_TXD, dst); break;
   case ir_txf: inst = emit(SHADER_OPCODE_TXF, dst); break;
   case ir_txs: inst = emit(SHADER_OPCODE_TXS, dst); break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = header_present;

   if (mlen > 11) {
      fail("Message length >11 disallowed by hardware\n");
   }

   return inst;
}

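/* Texture visits proceed roughly as follows: resolve the sampler unit,
 * short-circuit shadow compares the hardware can't do, build the
 * constant-offset header if needed, rescale and/or clamp rectangle
 * coordinates, emit the generation-specific sampler message, and
 * finally apply any manual shadow comparison and result swizzling.
 */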
void
fs_visitor::visit(ir_texture *ir)
{
   fs_inst *inst = NULL;

   int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &fp->Base);
   sampler = fp->Base.SamplerUnits[sampler];

   /* Our hardware doesn't have a sample_d_c message, so shadow compares
    * for textureGrad/TXD need to be emulated with instructions.
    */
   bool hw_compare_supported = ir->op != ir_txd;
   if (ir->shadow_comparitor && !hw_compare_supported) {
      assert(c->key.tex.compare_funcs[sampler] != GL_NONE);
      /* No need to even sample for GL_ALWAYS or GL_NEVER...bail early */
      if (c->key.tex.compare_funcs[sampler] == GL_ALWAYS)
         return swizzle_result(ir, fs_reg(1.0f), sampler);
      else if (c->key.tex.compare_funcs[sampler] == GL_NEVER)
         return swizzle_result(ir, fs_reg(0.0f), sampler);
   }

   if (ir->coordinate)
      ir->coordinate->accept(this);
   fs_reg coordinate = this->result;

   if (ir->offset != NULL) {
      uint32_t offset_bits = brw_texture_offset(ir->offset->as_constant());

      /* Explicitly set up the message header by copying g0 to msg reg m1. */
      emit(BRW_OPCODE_MOV, fs_reg(MRF, 1, BRW_REGISTER_TYPE_UD),
           fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)));

      /* Then set the offset bits in DWord 2 of the message header. */
      emit(BRW_OPCODE_MOV,
           fs_reg(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 1, 2),
                         BRW_REGISTER_TYPE_UD)),
           fs_reg(brw_imm_uw(offset_bits)));
   }

   /* Should be lowered by do_lower_texture_projection */
   assert(!ir->projector);

   bool needs_gl_clamp = true;

   fs_reg scale_x, scale_y;

   /* The 965 requires the EU to do the normalization of GL rectangle
    * texture coordinates.  We use the program parameter state
    * tracking to get the scaling factor.
    */
   if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT &&
       (intel->gen < 6 ||
        (intel->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) ||
                             c->key.tex.gl_clamp_mask[1] & (1 << sampler))))) {
      struct gl_program_parameter_list *params = c->fp->program.Base.Parameters;
      int tokens[STATE_LENGTH] = {
         STATE_INTERNAL,
         STATE_TEXRECT_SCALE,
         sampler,
         0,
         0
      };

      if (c->dispatch_width == 16) {
         fail("rectangle scale uniform setup not supported on 16-wide\n");
         this->result = fs_reg(this, ir->type);
         return;
      }

      c->prog_data.param_convert[c->prog_data.nr_params] =
         PARAM_NO_CONVERT;
      c->prog_data.param_convert[c->prog_data.nr_params + 1] =
         PARAM_NO_CONVERT;

      scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
      scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);

      GLuint index = _mesa_add_state_reference(params,
                                               (gl_state_index *)tokens);

      this->param_index[c->prog_data.nr_params] = index;
      this->param_offset[c->prog_data.nr_params] = 0;
      c->prog_data.nr_params++;
      this->param_index[c->prog_data.nr_params] = index;
      this->param_offset[c->prog_data.nr_params] = 1;
      c->prog_data.nr_params++;
   }

   /* The 965 requires the EU to do the normalization of GL rectangle
    * texture coordinates.  We use the program parameter state
    * tracking to get the scaling factor.
    */
   if (intel->gen < 6 &&
       ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
      fs_reg dst = fs_reg(this, ir->coordinate->type);
      fs_reg src = coordinate;
      coordinate = dst;

      emit(BRW_OPCODE_MUL, dst, src, scale_x);
      dst.reg_offset++;
      src.reg_offset++;
      emit(BRW_OPCODE_MUL, dst, src, scale_y);
   } else if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
      /* On gen6+, the sampler handles the rectangle coordinates
       * natively, without needing rescaling.  But that means we have
       * to do GL_CLAMP clamping at the [0, width], [0, height] scale,
       * not [0, 1] like the default case below.
       */
      needs_gl_clamp = false;

      for (int i = 0; i < 2; i++) {
         if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) {
            fs_reg chan = coordinate;
            chan.reg_offset += i;

            inst = emit(BRW_OPCODE_SEL, chan, chan, brw_imm_f(0.0));
            inst->conditional_mod = BRW_CONDITIONAL_G;

            /* Our parameter comes in as 1.0/width or 1.0/height,
             * because that's what people normally want for doing
             * texture rectangle handling.  We need width or height
             * for clamping, but we don't care enough to make a new
             * parameter type, so just invert back.
             */
            fs_reg limit = fs_reg(this, glsl_type::float_type);
            emit(BRW_OPCODE_MOV, limit, i == 0 ? scale_x : scale_y);
            emit(SHADER_OPCODE_RCP, limit, limit);

            inst = emit(BRW_OPCODE_SEL, chan, chan, limit);
            inst->conditional_mod = BRW_CONDITIONAL_L;
         }
      }
   }

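   /* For normalized coordinates a saturating MOV is all GL_CLAMP
    * needs: it pins each requested channel to [0, 1].
    */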
   if (ir->coordinate && needs_gl_clamp) {
      for (int i = 0; i < MIN2(ir->coordinate->type->vector_elements, 3); i++) {
         if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) {
            fs_reg chan = coordinate;
            chan.reg_offset += i;

            fs_inst *inst = emit(BRW_OPCODE_MOV, chan, chan);
            inst->saturate = true;
         }
      }
   }

   /* Writemasking doesn't eliminate channels on SIMD8 texture
    * samples, so don't worry about them.
    */
   fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1));

   if (intel->gen >= 7) {
      inst = emit_texture_gen7(ir, dst, coordinate, sampler);
   } else if (intel->gen >= 5) {
      inst = emit_texture_gen5(ir, dst, coordinate, sampler);
   } else {
      inst = emit_texture_gen4(ir, dst, coordinate, sampler);
   }

   /* If there's an offset, we already set up m1.  To avoid the implied move,
    * use the null register.  Otherwise, we want an implied move from g0.
    */
   if (ir->offset != NULL || !inst->header_present)
      inst->src[0] = reg_undef;
   else
      inst->src[0] = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW));

   inst->sampler = sampler;

   if (ir->shadow_comparitor) {
      if (hw_compare_supported) {
         inst->shadow_compare = true;
      } else {
         ir->shadow_comparitor->accept(this);
         fs_reg ref = this->result;

         fs_reg value = dst;
         dst = fs_reg(this, glsl_type::vec4_type);

         /* FINISHME: This needs to be done pre-filtering. */

         uint32_t conditional = 0;
         switch (c->key.tex.compare_funcs[sampler]) {
         /* GL_ALWAYS and GL_NEVER were handled at the top of the function */
         case GL_LESS:     conditional = BRW_CONDITIONAL_L;   break;
         case GL_GREATER:  conditional = BRW_CONDITIONAL_G;   break;
         case GL_LEQUAL:   conditional = BRW_CONDITIONAL_LE;  break;
         case GL_GEQUAL:   conditional = BRW_CONDITIONAL_GE;  break;
         case GL_EQUAL:    conditional = BRW_CONDITIONAL_EQ;  break;
         case GL_NOTEQUAL: conditional = BRW_CONDITIONAL_NEQ; break;
         default: assert(!"Should not get here: bad shadow compare function");
         }

         /* Use conditional moves to load 0 or 1 as the result */
         this->current_annotation = "manual shadow comparison";
         for (int i = 0; i < 4; i++) {
            inst = emit(BRW_OPCODE_MOV, dst, fs_reg(0.0f));

            inst = emit(BRW_OPCODE_CMP, reg_null_f, ref, value);
            inst->conditional_mod = conditional;

            inst = emit(BRW_OPCODE_MOV, dst, fs_reg(1.0f));
            inst->predicated = true;

            dst.reg_offset++;
            value.reg_offset++;
         }
         dst.reg_offset = 0;
      }
   }

   swizzle_result(ir, dst, sampler);
}

/**
 * Swizzle the result of a texture lookup.  This is necessary for
 * EXT_texture_swizzle as well as DEPTH_TEXTURE_MODE for shadow comparisons.
 */
void
fs_visitor::swizzle_result(ir_texture *ir, fs_reg orig_val, int sampler)
{
   this->result = orig_val;

   if (ir->op == ir_txs)
      return;

   if (ir->type == glsl_type::float_type) {
      /* Ignore DEPTH_TEXTURE_MODE swizzling. */
      assert(ir->sampler->type->sampler_shadow);
   } else if (c->key.tex.swizzles[sampler] != SWIZZLE_NOOP) {
      fs_reg swizzled_result = fs_reg(this, glsl_type::vec4_type);

      for (int i = 0; i < 4; i++) {
         int swiz = GET_SWZ(c->key.tex.swizzles[sampler], i);
         fs_reg l = swizzled_result;
         l.reg_offset += i;

         if (swiz == SWIZZLE_ZERO) {
            emit(BRW_OPCODE_MOV, l, fs_reg(0.0f));
         } else if (swiz == SWIZZLE_ONE) {
            emit(BRW_OPCODE_MOV, l, fs_reg(1.0f));
         } else {
            fs_reg r = orig_val;
            r.reg_offset += GET_SWZ(c->key.tex.swizzles[sampler], i);
            emit(BRW_OPCODE_MOV, l, r);
         }
      }
      this->result = swizzled_result;
   }
}

void
fs_visitor::visit(ir_swizzle *ir)
{
   ir->val->accept(this);
   fs_reg val = this->result;

   if (ir->type->vector_elements == 1) {
      this->result.reg_offset += ir->mask.x;
      return;
   }

   fs_reg result = fs_reg(this, ir->type);
   this->result = result;

   for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
      fs_reg channel = val;
      int swiz = 0;

      switch (i) {
      case 0:
         swiz = ir->mask.x;
         break;
      case 1:
         swiz = ir->mask.y;
         break;
      case 2:
         swiz = ir->mask.z;
         break;
      case 3:
         swiz = ir->mask.w;
         break;
      }

      channel.reg_offset += swiz;
      emit(BRW_OPCODE_MOV, result, channel);
      result.reg_offset++;
   }
}

void
fs_visitor::visit(ir_discard *ir)
{
   assert(ir->condition == NULL); /* FINISHME */

   emit(FS_OPCODE_DISCARD);
   kill_emitted = true;
}

void
fs_visitor::visit(ir_constant *ir)
{
   /* Set this->result to reg at the bottom of the function because some code
    * paths will cause this visitor to be applied to other fields.  This will
    * cause the value stored in this->result to be modified.
    *
    * Make reg constant so that it doesn't get accidentally modified along the
    * way.  Yes, I actually had this problem. :(
    */
   const fs_reg reg(this, ir->type);
   fs_reg dst_reg = reg;

   if (ir->type->is_array()) {
      const unsigned size = type_size(ir->type->fields.array);

      for (unsigned i = 0; i < ir->type->length; i++) {
         ir->array_elements[i]->accept(this);
         fs_reg src_reg = this->result;

         dst_reg.type = src_reg.type;
         for (unsigned j = 0; j < size; j++) {
            emit(BRW_OPCODE_MOV, dst_reg, src_reg);
            src_reg.reg_offset++;
            dst_reg.reg_offset++;
         }
      }
   } else if (ir->type->is_record()) {
      foreach_list(node, &ir->components) {
         ir_instruction *const field = (ir_instruction *) node;
         const unsigned size = type_size(field->type);

         field->accept(this);
         fs_reg src_reg = this->result;

         dst_reg.type = src_reg.type;
         for (unsigned j = 0; j < size; j++) {
            emit(BRW_OPCODE_MOV, dst_reg, src_reg);
            src_reg.reg_offset++;
            dst_reg.reg_offset++;
         }
      }
   } else {
      const unsigned size = type_size(ir->type);

      for (unsigned i = 0; i < size; i++) {
         switch (ir->type->base_type) {
         case GLSL_TYPE_FLOAT:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i]));
            break;
         case GLSL_TYPE_UINT:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i]));
            break;
         case GLSL_TYPE_INT:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i]));
            break;
         case GLSL_TYPE_BOOL:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i]));
            break;
         default:
            assert(!"Non-float/uint/int/bool constant");
         }
         dst_reg.reg_offset++;
      }
   }

   this->result = reg;
}

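/* Lower a boolean rvalue into the flag register so a following
 * predicated instruction can key off it.  A scalar comparison like
 * "a < b" folds into a single flag-setting "CMP.l.f0 null, a, b";
 * other booleans are ANDed with 1 and tested for nonzero (gen6+), or
 * written to the null register with a NZ conditional mod (gen4/5).
 */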
void
fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
{
   ir_expression *expr = ir->as_expression();

   if (expr) {
      fs_reg op[2];
      fs_inst *inst;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         assert(expr->operands[i]->type->is_scalar());

         expr->operands[i]->accept(this);
         op[i] = this->result;

         resolve_ud_negate(&op[i]);
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;

      case ir_binop_logic_xor:
         inst = emit(BRW_OPCODE_XOR, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_or:
         inst = emit(BRW_OPCODE_OR, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_and:
         inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_f2b:
         if (intel->gen >= 6) {
            inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0.0f));
         } else {
            inst = emit(BRW_OPCODE_MOV, reg_null_f, op[0]);
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_i2b:
         if (intel->gen >= 6) {
            inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0));
         } else {
            inst = emit(BRW_OPCODE_MOV, reg_null_d, op[0]);
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_all_equal:
      case ir_binop_nequal:
      case ir_binop_any_nequal:
         inst = emit(BRW_OPCODE_CMP, reg_null_cmp, op[0], op[1]);
         inst->conditional_mod =
            brw_conditional_for_comparison(expr->operation);
         break;

      default:
         assert(!"not reached");
         fail("bad cond code\n");
         break;
      }
      return;
   }

   ir->accept(this);

   if (intel->gen >= 6) {
      fs_inst *inst = emit(BRW_OPCODE_AND, reg_null_d, this->result, fs_reg(1));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   } else {
      fs_inst *inst = emit(BRW_OPCODE_MOV, reg_null_d, this->result);
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }
}

/**
 * Emit a gen6 IF statement with the comparison folded into the IF
 * instruction.
 */
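/* For example, "if (a < b)" becomes a single "IF.l.f0 null, a, b"
 * rather than a flag-setting CMP followed by a predicated IF.
 */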
void
fs_visitor::emit_if_gen6(ir_if *ir)
{
   ir_expression *expr = ir->condition->as_expression();

   if (expr) {
      fs_reg op[2];
      fs_inst *inst;
      fs_reg temp;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         assert(expr->operands[i]->type->is_scalar());

         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(BRW_OPCODE_IF, temp, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         return;

      case ir_binop_logic_xor:
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_logic_or:
         temp = fs_reg(this, glsl_type::bool_type);
         emit(BRW_OPCODE_OR, temp, op[0], op[1]);
         inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_logic_and:
         temp = fs_reg(this, glsl_type::bool_type);
         emit(BRW_OPCODE_AND, temp, op[0], op[1]);
         inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_unop_f2b:
         inst = emit(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_unop_i2b:
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_all_equal:
      case ir_binop_nequal:
      case ir_binop_any_nequal:
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
         inst->conditional_mod =
            brw_conditional_for_comparison(expr->operation);
         return;
      default:
         assert(!"not reached");
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         fail("bad condition\n");
         return;
      }
      return;
   }

   ir->condition->accept(this);

   fs_inst *inst = emit(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;
}

void
fs_visitor::visit(ir_if *ir)
{
   fs_inst *inst;

   if (intel->gen < 6 && c->dispatch_width == 16) {
      fail("Can't support (non-uniform) control flow on 16-wide\n");
   }

   /* Don't point the annotation at the if statement, because then it plus
    * the then and else blocks get printed.
    */
   this->base_ir = ir->condition;

   if (intel->gen == 6) {
      emit_if_gen6(ir);
   } else {
      emit_bool_to_cond_code(ir->condition);

      inst = emit(BRW_OPCODE_IF);
      inst->predicated = true;
   }

   foreach_list(node, &ir->then_instructions) {
      ir_instruction *ir = (ir_instruction *)node;
      this->base_ir = ir;

      ir->accept(this);
   }

   if (!ir->else_instructions.is_empty()) {
      emit(BRW_OPCODE_ELSE);

      foreach_list(node, &ir->else_instructions) {
         ir_instruction *ir = (ir_instruction *)node;
         this->base_ir = ir;

         ir->accept(this);
      }
   }

   emit(BRW_OPCODE_ENDIF);
}

void
fs_visitor::visit(ir_loop *ir)
{
   fs_reg counter = reg_undef;

   if (c->dispatch_width == 16) {
      fail("Can't support (non-uniform) control flow on 16-wide\n");
   }

   if (ir->counter) {
      this->base_ir = ir->counter;
      ir->counter->accept(this);
      counter = *(variable_storage(ir->counter));

      if (ir->from) {
         this->base_ir = ir->from;
         ir->from->accept(this);

         emit(BRW_OPCODE_MOV, counter, this->result);
      }
   }

   emit(BRW_OPCODE_DO);

   if (ir->to) {
      this->base_ir = ir->to;
      ir->to->accept(this);

      fs_inst *inst = emit(BRW_OPCODE_CMP, reg_null_cmp, counter, this->result);
      inst->conditional_mod = brw_conditional_for_comparison(ir->cmp);

      inst = emit(BRW_OPCODE_BREAK);
      inst->predicated = true;
   }

   foreach_list(node, &ir->body_instructions) {
      ir_instruction *ir = (ir_instruction *)node;

      this->base_ir = ir;
      ir->accept(this);
   }

   if (ir->increment) {
      this->base_ir = ir->increment;
      ir->increment->accept(this);
      emit(BRW_OPCODE_ADD, counter, counter, this->result);
   }

   emit(BRW_OPCODE_WHILE);
}

void
fs_visitor::visit(ir_loop_jump *ir)
{
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      emit(BRW_OPCODE_BREAK);
      break;
   case ir_loop_jump::jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;
   }
}

void
fs_visitor::visit(ir_call *ir)
{
   assert(!"FINISHME");
}

void
fs_visitor::visit(ir_return *ir)
{
   assert(!"FINISHME");
}

void
fs_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all be inlined before we get to ir_to_mesa.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      sig = ir->matching_signature(&empty);

      assert(sig);

      foreach_list(node, &sig->body) {
         ir_instruction *ir = (ir_instruction *)node;
         this->base_ir = ir;

         ir->accept(this);
      }
   }
}

void
fs_visitor::visit(ir_function_signature *ir)
{
   assert(!"not reached");
   (void)ir;
}

fs_inst *
fs_visitor::emit(fs_inst inst)
{
   fs_inst *list_inst = new(mem_ctx) fs_inst;
   *list_inst = inst;

   if (force_uncompressed_stack > 0)
      list_inst->force_uncompressed = true;
   else if (force_sechalf_stack > 0)
      list_inst->force_sechalf = true;

   list_inst->annotation = this->current_annotation;
   list_inst->ir = this->base_ir;

   this->instructions.push_tail(list_inst);

   return list_inst;
}

/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   /* Everyone's favorite color. */
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 2), fs_reg(1.0f));
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 3), fs_reg(0.0f));
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 4), fs_reg(1.0f));
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 5), fs_reg(0.0f));

   fs_inst *write;
   write = emit(FS_OPCODE_FB_WRITE, fs_reg(0), fs_reg(0));
   write->base_mrf = 2;
}

/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
   int regnr = urb_setup[location] * 2 + channel / 2;
   int stride = (channel & 1) * 4;

   assert(urb_setup[location] != -1);

   return brw_vec1_grf(regnr, stride);
}

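/* Two channels of an attribute's setup data share one register: for a
 * varying in urb_setup slot 1, channel 3 maps to register 3 (relative
 * to the URB data start) at float offset 4, since
 * regnr = 1 * 2 + 3 / 2 = 3 and (3 & 1) * 4 = 4.
 */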
/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   this->current_annotation = "compute pixel centers";
   this->pixel_x = fs_reg(this, glsl_type::uint_type);
   this->pixel_y = fs_reg(this, glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;

   emit(FS_OPCODE_PIXEL_X, this->pixel_x);
   emit(FS_OPCODE_PIXEL_Y, this->pixel_y);

   this->current_annotation = "compute pixel deltas from v0";
   if (brw->has_pln) {
      this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         fs_reg(this, glsl_type::vec2_type);
      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC];
      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg_offset++;
   } else {
      this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         fs_reg(this, glsl_type::float_type);
      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         fs_reg(this, glsl_type::float_type);
   }
   emit(BRW_OPCODE_ADD, this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        this->pixel_x, fs_reg(negate(brw_vec1_grf(1, 0))));
   emit(BRW_OPCODE_ADD, this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        this->pixel_y, fs_reg(negate(brw_vec1_grf(1, 1))));

   this->current_annotation = "compute pos.w and 1/pos.w";
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = fs_reg(this, glsl_type::float_type);
   emit(FS_OPCODE_LINTERP, wpos_w,
        this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        interp_reg(FRAG_ATTRIB_WPOS, 3));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = fs_reg(this, glsl_type::float_type);
   emit_math(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
   this->current_annotation = NULL;
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   /* If the pixel centers end up used, the setup is the same as for gen4. */
   this->current_annotation = "compute pixel centers";
   fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type);
   fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type);
   int_pixel_x.type = BRW_REGISTER_TYPE_UW;
   int_pixel_y.type = BRW_REGISTER_TYPE_UW;

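   /* g1 carries the 16-bit x/y origins of the subspans; the vector
    * immediates below add the per-pixel offsets within each 2x2
    * subspan (0,1,0,1,... in x and 0,0,1,1,... in y) to produce the
    * integer pixel centers.
    */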
   emit(BRW_OPCODE_ADD,
        int_pixel_x,
        fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
        fs_reg(brw_imm_v(0x10101010)));
   emit(BRW_OPCODE_ADD,
        int_pixel_y,
        fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
        fs_reg(brw_imm_v(0x11001100)));

   /* As of gen6, we can no longer mix float and int sources.  We have
    * to turn the integer pixel centers into floats for their actual
    * use.
    */
   this->pixel_x = fs_reg(this, glsl_type::float_type);
   this->pixel_y = fs_reg(this, glsl_type::float_type);
   emit(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x);
   emit(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y);

   this->current_annotation = "compute pos.w";
   this->pixel_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0));
   this->wpos_w = fs_reg(this, glsl_type::float_type);
   emit_math(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w);

   for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
      uint8_t reg = c->barycentric_coord_reg[i];
      this->delta_x[i] = fs_reg(brw_vec8_grf(reg, 0));
      this->delta_y[i] = fs_reg(brw_vec8_grf(reg + 1, 0));
   }

   this->current_annotation = NULL;
}

void
fs_visitor::emit_color_write(int target, int index, int first_color_mrf)
{
   int reg_width = c->dispatch_width / 8;
   fs_inst *inst;
   fs_reg color = outputs[target];
   fs_reg mrf;

   /* If there's no color data to be written, skip it. */
   if (color.file == BAD_FILE)
      return;

   color.reg_offset += index;

   if (c->dispatch_width == 8 || intel->gen >= 6) {
      /* SIMD8 write looks like:
       * m + 0: r0
       * m + 1: r1
       * m + 2: g0
       * m + 3: g1
       *
       * gen6 SIMD16 DP write looks like:
       * m + 0: r0
       * m + 1: r1
       * m + 2: g0
       * m + 3: g1
       * m + 4: b0
       * m + 5: b1
       * m + 6: a0
       * m + 7: a1
       */
      inst = emit(BRW_OPCODE_MOV,
                  fs_reg(MRF, first_color_mrf + index * reg_width, color.type),
                  color);
      inst->saturate = c->key.clamp_fragment_color;
   } else {
      /* pre-gen6 SIMD16 single source DP write looks like:
       * m + 0: r0
       * m + 1: g0
       * m + 2: b0
       * m + 3: a0
       * m + 4: r1
       * m + 5: g1
       * m + 6: b1
       * m + 7: a1
       */
      if (brw->has_compr4) {
         /* By setting the high bit of the MRF register number, we
          * indicate that we want COMPR4 mode - instead of doing the
          * usual destination + 1 for the second half we get
          * destination + 4.
          */
         inst = emit(BRW_OPCODE_MOV,
                     fs_reg(MRF, BRW_MRF_COMPR4 + first_color_mrf + index,
                            color.type),
                     color);
         inst->saturate = c->key.clamp_fragment_color;
      } else {
         push_force_uncompressed();
         inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index,
                                            color.type),
                     color);
         inst->saturate = c->key.clamp_fragment_color;
         pop_force_uncompressed();

         push_force_sechalf();
         color.sechalf = true;
         inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index + 4,
                                            color.type),
                     color);
         inst->saturate = c->key.clamp_fragment_color;
         pop_force_sechalf();
         color.sechalf = false;
      }
   }
}

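/* In the common gen6+ case of a single render target and no discard,
 * the write goes headerless and the payload is just the four color
 * channels starting at m2, one MRF each in SIMD8 and two in SIMD16.
 */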
void
fs_visitor::emit_fb_writes()
{
   this->current_annotation = "FB write header";
   bool header_present = true;
   int base_mrf = 2;
   int nr = base_mrf;
   int reg_width = c->dispatch_width / 8;

   if (intel->gen >= 6 &&
       !this->kill_emitted &&
       c->key.nr_color_regions == 1) {
      header_present = false;
   }

   if (header_present) {
      /* m2, m3 header */
      nr += 2;
   }

   if (c->aa_dest_stencil_reg) {
      push_force_uncompressed();
      emit(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
           fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0)));
      pop_force_uncompressed();
   }

   /* Reserve space for color.  It'll be filled in per MRT below. */
   int color_mrf = nr;
   nr += 4 * reg_width;

   if (c->source_depth_to_render_target) {
      if (intel->gen == 6 && c->dispatch_width == 16) {
         /* For outputting oDepth on gen6, SIMD8 writes have to be
          * used.  This would require 8-wide moves of each half to
          * message regs, kind of like pre-gen5 SIMD16 FB writes.
          * Just bail on doing so for now.
          */
         fail("Missing support for simd16 depth writes on gen6\n");
      }

      if (c->computes_depth) {
         /* Hand over gl_FragDepth. */
         assert(this->frag_depth);
         fs_reg depth = *(variable_storage(this->frag_depth));

         emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), depth);
      } else {
         /* Pass through the payload depth. */
         emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
              fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
      }
      nr += reg_width;
   }

   if (c->dest_depth_reg) {
      emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
           fs_reg(brw_vec8_grf(c->dest_depth_reg, 0)));
      nr += reg_width;
   }

   for (int target = 0; target < c->key.nr_color_regions; target++) {
      this->current_annotation = ralloc_asprintf(this->mem_ctx,
                                                 "FB write target %d",
                                                 target);
      for (int i = 0; i < 4; i++)
         emit_color_write(target, i, color_mrf);

      fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
      inst->target = target;
      inst->base_mrf = base_mrf;
      inst->mlen = nr - base_mrf;
      if (target == c->key.nr_color_regions - 1)
         inst->eot = true;
      inst->header_present = header_present;
   }

   if (c->key.nr_color_regions == 0) {
      if (c->key.alpha_test) {
         /* If the alpha test is enabled but there's no color buffer,
          * we still need to send alpha out the pipeline to our null
          * renderbuffer.
          */
         emit_color_write(0, 3, color_mrf);
      }

      fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
      inst->base_mrf = base_mrf;
      inst->mlen = nr - base_mrf;
      inst->eot = true;
      inst->header_present = header_present;
   }

   this->current_annotation = NULL;
}

void
fs_visitor::resolve_ud_negate(fs_reg *reg)
{
   if (reg->type != BRW_REGISTER_TYPE_UD ||
       !reg->negate)
      return;

   fs_reg temp = fs_reg(this, glsl_type::uint_type);
   emit(BRW_OPCODE_MOV, temp, *reg);
   *reg = temp;
}