brw_fs_visitor.cpp revision 80ecb8f15b9ad7d6edcc85bd19f1867c368b09b6
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR. The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
extern "C" {

#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_wm.h"
}
#include "brw_shader.h"
#include "brw_fs.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
#include "glsl/ir_print_visitor.h"

void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   if (ir->mode == ir_var_in) {
      if (!strcmp(ir->name, "gl_FragCoord")) {
         reg = emit_fragcoord_interpolation(ir);
      } else if (!strcmp(ir->name, "gl_FrontFacing")) {
         reg = emit_frontfacing_interpolation(ir);
      } else {
         reg = emit_general_interpolation(ir);
      }
      assert(reg);
      hash_table_insert(this->variable_ht, reg, ir);
      return;
   } else if (ir->mode == ir_var_out) {
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

      if (ir->location == FRAG_RESULT_COLOR) {
         /* Writing gl_FragColor outputs to all color regions. */
         for (int i = 0; i < MAX2(c->key.nr_color_regions, 1); i++) {
            this->outputs[i] = *reg;
         }
      } else if (ir->location == FRAG_RESULT_DEPTH) {
         this->frag_depth = ir;
      } else {
         /* gl_FragData or a user-defined FS output */
         assert(ir->location >= FRAG_RESULT_DATA0 &&
                ir->location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

         /* General color output. */
         for (unsigned int i = 0; i < MAX2(1, ir->type->length); i++) {
            int output = ir->location - FRAG_RESULT_DATA0 + i;
            this->outputs[output] = *reg;
            this->outputs[output].reg_offset += 4 * i;
         }
      }
   } else if (ir->mode == ir_var_uniform) {
      int param_index = c->prog_data.nr_params;

      if (c->dispatch_width == 16) {
         if (!variable_storage(ir)) {
            fail("Failed to find uniform '%s' in 16-wide\n", ir->name);
         }
         return;
      }

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
      reg->type = brw_type_for_base_type(ir->type);
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

   hash_table_insert(this->variable_ht, reg, ir);
}

void
fs_visitor::visit(ir_dereference_variable *ir)
{
   fs_reg *reg = variable_storage(ir->var);
   this->result = *reg;
}

void
fs_visitor::visit(ir_dereference_record *ir)
{
   const glsl_type *struct_type = ir->record->type;

   ir->record->accept(this);

   unsigned int offset = 0;
   for (unsigned int i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }
   this->result.reg_offset += offset;
   this->result.type = brw_type_for_base_type(ir->type);
}

void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   int element_size;

   ir->array->accept(this);
   index = ir->array_index->as_constant();

   element_size = type_size(ir->type);
   this->result.type = brw_type_for_base_type(ir->type);

   if (index) {
      assert(this->result.file == UNIFORM || this->result.file == GRF);
      this->result.reg_offset += index->value.i[0] * element_size;
   } else {
      assert(!"FINISHME: non-constant array element");
   }
}

/* Instruction selection: Produce a MOV.sat instead of
 * MIN(MAX(val, 0), 1) when possible.
 */
bool
fs_visitor::try_emit_saturate(ir_expression *ir)
{
   ir_rvalue *sat_val = ir->as_rvalue_to_saturate();

   if (!sat_val)
      return false;

   fs_inst *pre_inst = (fs_inst *) this->instructions.get_tail();

   sat_val->accept(this);
   fs_reg src = this->result;

   fs_inst *last_inst = (fs_inst *) this->instructions.get_tail();

   /* If the last instruction from our accept() didn't generate our
    * src, generate a saturated MOV
    */
   fs_inst *modify = get_instruction_generating_reg(pre_inst, last_inst, src);
   if (!modify || modify->regs_written() != 1) {
      fs_inst *inst = emit(BRW_OPCODE_MOV, this->result, src);
      inst->saturate = true;
   } else {
      modify->saturate = true;
      this->result = src;
   }

   return true;
}

bool
fs_visitor::try_emit_mad(ir_expression *ir, int mul_arg)
{
   /* 3-src instructions were introduced in gen6. */
   if (intel->gen < 6)
      return false;

   /* MAD can only handle floating-point data. */
   if (ir->type != glsl_type::float_type)
      return false;

   ir_rvalue *nonmul = ir->operands[1 - mul_arg];
   ir_expression *mul = ir->operands[mul_arg]->as_expression();

   if (!mul || mul->operation != ir_binop_mul)
      return false;

   if (nonmul->as_constant() ||
       mul->operands[0]->as_constant() ||
       mul->operands[1]->as_constant())
      return false;

   nonmul->accept(this);
   fs_reg src0 = this->result;

   mul->operands[0]->accept(this);
   fs_reg src1 = this->result;

   mul->operands[1]->accept(this);
   fs_reg src2 = this->result;

   this->result = fs_reg(this, ir->type);
   emit(BRW_OPCODE_MAD, this->result, src0, src1, src2);

   return true;
}

void
fs_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   fs_reg op[2], temp;
   fs_inst *inst;

   assert(ir->get_num_operands() <= 2);

   if (try_emit_saturate(ir))
      return;
   if (ir->operation == ir_binop_add) {
      if (try_emit_mad(ir, 0) || try_emit_mad(ir, 1))
         return;
   }

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         ir_print_visitor v;
         fail("Failed to get tree for expression operand:\n");
         ir->operands[operand]->accept(&v);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
      /* And then those vector operands should have been broken down to scalar.
       */
      assert(!ir->operands[operand]->type->is_vector());
   }

   /* Storage for our result. If our result goes into an assignment, it will
    * just get copy-propagated out, so no worries.
    */
   this->result = fs_reg(this, ir->type);

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
       * ones complement of the whole register, not just bit 0.
       */
      emit(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      this->result = op[0];
      break;
   case ir_unop_sign:
      temp = fs_reg(this, ir->type);

      emit(BRW_OPCODE_MOV, this->result, fs_reg(0.0f));

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(1.0f));
      inst->predicated = true;

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f));
      inst->predicated = true;

      break;
   case ir_unop_rcp:
      emit_math(SHADER_OPCODE_RCP, this->result, op[0]);
      break;

   case ir_unop_exp2:
      emit_math(SHADER_OPCODE_EXP2, this->result, op[0]);
      break;
   case ir_unop_log2:
      emit_math(SHADER_OPCODE_LOG2, this->result, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(SHADER_OPCODE_SIN, this->result, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(SHADER_OPCODE_COS, this->result, op[0]);
      break;

   case ir_unop_dFdx:
      emit(FS_OPCODE_DDX, this->result, op[0]);
      break;
   case ir_unop_dFdy:
      emit(FS_OPCODE_DDY, this->result, op[0]);
      break;

   case ir_binop_add:
      emit(BRW_OPCODE_ADD, this->result, op[0], op[1]);
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;

   case ir_binop_mul:
      if (ir->type->is_integer()) {
         /* For integer multiplication, the MUL uses the low 16 bits
          * of one of the operands (src0 on gen6, src1 on gen7). The
          * MACH accumulates in the contribution of the upper 16 bits
          * of that operand.
          *
          * FINISHME: Emit just the MUL if we know an operand is small
          * enough.
          */
         if (intel->gen >= 7 && c->dispatch_width == 16)
            fail("16-wide explicit accumulator operands unsupported\n");

         struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);

         emit(BRW_OPCODE_MUL, acc, op[0], op[1]);
         emit(BRW_OPCODE_MACH, reg_null_d, op[0], op[1]);
         emit(BRW_OPCODE_MOV, this->result, fs_reg(acc));
      } else {
         emit(BRW_OPCODE_MUL, this->result, op[0], op[1]);
      }
      break;
   case ir_binop_div:
      if (intel->gen >= 7 && c->dispatch_width == 16)
         fail("16-wide INTDIV unsupported\n");

      /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_QUOTIENT, this->result, op[0], op[1]);
      break;
   case ir_binop_mod:
      if (intel->gen >= 7 && c->dispatch_width == 16)
         fail("16-wide INTDIV unsupported\n");

      /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_REMAINDER, this->result, op[0], op[1]);
      break;

   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_all_equal:
   case ir_binop_nequal:
   case ir_binop_any_nequal:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
         temp.type = op[0].type;

      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      inst = emit(BRW_OPCODE_CMP, temp, op[0], op[1]);
      inst->conditional_mod = brw_conditional_for_comparison(ir->operation);
      break;

   case ir_binop_logic_xor:
      emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
      break;

   case ir_binop_logic_or:
      emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
      break;

   case ir_binop_logic_and:
      emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
      break;

   case ir_binop_dot:
   case ir_unop_any:
      assert(!"not reached: should be handled by brw_fs_channel_expressions");
      break;

   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;

   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;

   case ir_unop_sqrt:
      emit_math(SHADER_OPCODE_SQRT, this->result, op[0]);
      break;

   case ir_unop_rsq:
      emit_math(SHADER_OPCODE_RSQ, this->result, op[0]);
      break;

   case ir_unop_i2u:
      op[0].type = BRW_REGISTER_TYPE_UD;
      this->result = op[0];
      break;
   case ir_unop_u2i:
      op[0].type = BRW_REGISTER_TYPE_D;
      this->result = op[0];
      break;
   case ir_unop_i2f:
   case ir_unop_u2f:
   case ir_unop_f2i:
      emit(BRW_OPCODE_MOV, this->result, op[0]);
      break;

   case ir_unop_b2i:
      inst = emit(BRW_OPCODE_AND, this->result, op[0], fs_reg(1));
      break;
   case ir_unop_b2f:
      temp = fs_reg(this, glsl_type::int_type);
      emit(BRW_OPCODE_AND, temp, op[0], fs_reg(1));
      emit(BRW_OPCODE_MOV, this->result, temp);
      break;

   case ir_unop_f2b:
   case ir_unop_i2b:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison.
*/ 462 if (intel->gen < 5) 463 temp.type = op[0].type; 464 465 resolve_ud_negate(&op[0]); 466 467 inst = emit(BRW_OPCODE_CMP, temp, op[0], fs_reg(0.0f)); 468 inst->conditional_mod = BRW_CONDITIONAL_NZ; 469 break; 470 471 case ir_unop_trunc: 472 emit(BRW_OPCODE_RNDZ, this->result, op[0]); 473 break; 474 case ir_unop_ceil: 475 op[0].negate = !op[0].negate; 476 inst = emit(BRW_OPCODE_RNDD, this->result, op[0]); 477 this->result.negate = true; 478 break; 479 case ir_unop_floor: 480 inst = emit(BRW_OPCODE_RNDD, this->result, op[0]); 481 break; 482 case ir_unop_fract: 483 inst = emit(BRW_OPCODE_FRC, this->result, op[0]); 484 break; 485 case ir_unop_round_even: 486 emit(BRW_OPCODE_RNDE, this->result, op[0]); 487 break; 488 489 case ir_binop_min: 490 resolve_ud_negate(&op[0]); 491 resolve_ud_negate(&op[1]); 492 493 if (intel->gen >= 6) { 494 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 495 inst->conditional_mod = BRW_CONDITIONAL_L; 496 } else { 497 /* Unalias the destination */ 498 this->result = fs_reg(this, ir->type); 499 500 inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]); 501 inst->conditional_mod = BRW_CONDITIONAL_L; 502 503 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 504 inst->predicated = true; 505 } 506 break; 507 case ir_binop_max: 508 resolve_ud_negate(&op[0]); 509 resolve_ud_negate(&op[1]); 510 511 if (intel->gen >= 6) { 512 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 513 inst->conditional_mod = BRW_CONDITIONAL_GE; 514 } else { 515 /* Unalias the destination */ 516 this->result = fs_reg(this, ir->type); 517 518 inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]); 519 inst->conditional_mod = BRW_CONDITIONAL_G; 520 521 inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]); 522 inst->predicated = true; 523 } 524 break; 525 526 case ir_binop_pow: 527 emit_math(SHADER_OPCODE_POW, this->result, op[0], op[1]); 528 break; 529 530 case ir_unop_bit_not: 531 inst = emit(BRW_OPCODE_NOT, this->result, op[0]); 532 break; 533 case ir_binop_bit_and: 534 inst = emit(BRW_OPCODE_AND, this->result, op[0], op[1]); 535 break; 536 case ir_binop_bit_xor: 537 inst = emit(BRW_OPCODE_XOR, this->result, op[0], op[1]); 538 break; 539 case ir_binop_bit_or: 540 inst = emit(BRW_OPCODE_OR, this->result, op[0], op[1]); 541 break; 542 543 case ir_binop_lshift: 544 inst = emit(BRW_OPCODE_SHL, this->result, op[0], op[1]); 545 break; 546 547 case ir_binop_rshift: 548 if (ir->type->base_type == GLSL_TYPE_INT) 549 inst = emit(BRW_OPCODE_ASR, this->result, op[0], op[1]); 550 else 551 inst = emit(BRW_OPCODE_SHR, this->result, op[0], op[1]); 552 break; 553 } 554} 555 556void 557fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r, 558 const glsl_type *type, bool predicated) 559{ 560 switch (type->base_type) { 561 case GLSL_TYPE_FLOAT: 562 case GLSL_TYPE_UINT: 563 case GLSL_TYPE_INT: 564 case GLSL_TYPE_BOOL: 565 for (unsigned int i = 0; i < type->components(); i++) { 566 l.type = brw_type_for_base_type(type); 567 r.type = brw_type_for_base_type(type); 568 569 if (predicated || !l.equals(&r)) { 570 fs_inst *inst = emit(BRW_OPCODE_MOV, l, r); 571 inst->predicated = predicated; 572 } 573 574 l.reg_offset++; 575 r.reg_offset++; 576 } 577 break; 578 case GLSL_TYPE_ARRAY: 579 for (unsigned int i = 0; i < type->length; i++) { 580 emit_assignment_writes(l, r, type->fields.array, predicated); 581 } 582 break; 583 584 case GLSL_TYPE_STRUCT: 585 for (unsigned int i = 0; i < type->length; i++) { 586 emit_assignment_writes(l, r, type->fields.structure[i].type, 587 predicated); 588 } 589 
break; 590 591 case GLSL_TYPE_SAMPLER: 592 break; 593 594 default: 595 assert(!"not reached"); 596 break; 597 } 598} 599 600/* If the RHS processing resulted in an instruction generating a 601 * temporary value, and it would be easy to rewrite the instruction to 602 * generate its result right into the LHS instead, do so. This ends 603 * up reliably removing instructions where it can be tricky to do so 604 * later without real UD chain information. 605 */ 606bool 607fs_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir, 608 fs_reg dst, 609 fs_reg src, 610 fs_inst *pre_rhs_inst, 611 fs_inst *last_rhs_inst) 612{ 613 /* Only attempt if we're doing a direct assignment. */ 614 if (ir->condition || 615 !(ir->lhs->type->is_scalar() || 616 (ir->lhs->type->is_vector() && 617 ir->write_mask == (1 << ir->lhs->type->vector_elements) - 1))) 618 return false; 619 620 /* Make sure the last instruction generated our source reg. */ 621 fs_inst *modify = get_instruction_generating_reg(pre_rhs_inst, 622 last_rhs_inst, 623 src); 624 if (!modify) 625 return false; 626 627 /* If last_rhs_inst wrote a different number of components than our LHS, 628 * we can't safely rewrite it. 629 */ 630 if (ir->lhs->type->vector_elements != modify->regs_written()) 631 return false; 632 633 /* Success! Rewrite the instruction. */ 634 modify->dst = dst; 635 636 return true; 637} 638 639void 640fs_visitor::visit(ir_assignment *ir) 641{ 642 fs_reg l, r; 643 fs_inst *inst; 644 645 /* FINISHME: arrays on the lhs */ 646 ir->lhs->accept(this); 647 l = this->result; 648 649 fs_inst *pre_rhs_inst = (fs_inst *) this->instructions.get_tail(); 650 651 ir->rhs->accept(this); 652 r = this->result; 653 654 fs_inst *last_rhs_inst = (fs_inst *) this->instructions.get_tail(); 655 656 assert(l.file != BAD_FILE); 657 assert(r.file != BAD_FILE); 658 659 if (try_rewrite_rhs_to_dst(ir, l, r, pre_rhs_inst, last_rhs_inst)) 660 return; 661 662 if (ir->condition) { 663 emit_bool_to_cond_code(ir->condition); 664 } 665 666 if (ir->lhs->type->is_scalar() || 667 ir->lhs->type->is_vector()) { 668 for (int i = 0; i < ir->lhs->type->vector_elements; i++) { 669 if (ir->write_mask & (1 << i)) { 670 inst = emit(BRW_OPCODE_MOV, l, r); 671 if (ir->condition) 672 inst->predicated = true; 673 r.reg_offset++; 674 } 675 l.reg_offset++; 676 } 677 } else { 678 emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL); 679 } 680} 681 682fs_inst * 683fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate, 684 int sampler) 685{ 686 int mlen; 687 int base_mrf = 1; 688 bool simd16 = false; 689 fs_reg orig_dst; 690 691 /* g0 header. */ 692 mlen = 1; 693 694 if (ir->shadow_comparitor && ir->op != ir_txd) { 695 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 696 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate); 697 coordinate.reg_offset++; 698 } 699 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */ 700 mlen += 3; 701 702 if (ir->op == ir_tex) { 703 /* There's no plain shadow compare message, so we use shadow 704 * compare with a bias of 0.0. 
705 */ 706 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), fs_reg(0.0f)); 707 mlen++; 708 } else if (ir->op == ir_txb) { 709 ir->lod_info.bias->accept(this); 710 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 711 mlen++; 712 } else { 713 assert(ir->op == ir_txl); 714 ir->lod_info.lod->accept(this); 715 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 716 mlen++; 717 } 718 719 ir->shadow_comparitor->accept(this); 720 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 721 mlen++; 722 } else if (ir->op == ir_tex) { 723 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 724 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate); 725 coordinate.reg_offset++; 726 } 727 /* gen4's SIMD8 sampler always has the slots for u,v,r present. */ 728 mlen += 3; 729 } else if (ir->op == ir_txd) { 730 ir->lod_info.grad.dPdx->accept(this); 731 fs_reg dPdx = this->result; 732 733 ir->lod_info.grad.dPdy->accept(this); 734 fs_reg dPdy = this->result; 735 736 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 737 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate); 738 coordinate.reg_offset++; 739 } 740 /* the slots for u and v are always present, but r is optional */ 741 mlen += MAX2(ir->coordinate->type->vector_elements, 2); 742 743 /* P = u, v, r 744 * dPdx = dudx, dvdx, drdx 745 * dPdy = dudy, dvdy, drdy 746 * 747 * 1-arg: Does not exist. 748 * 749 * 2-arg: dudx dvdx dudy dvdy 750 * dPdx.x dPdx.y dPdy.x dPdy.y 751 * m4 m5 m6 m7 752 * 753 * 3-arg: dudx dvdx drdx dudy dvdy drdy 754 * dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z 755 * m5 m6 m7 m8 m9 m10 756 */ 757 for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) { 758 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx); 759 dPdx.reg_offset++; 760 } 761 mlen += MAX2(ir->lod_info.grad.dPdx->type->vector_elements, 2); 762 763 for (int i = 0; i < ir->lod_info.grad.dPdy->type->vector_elements; i++) { 764 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy); 765 dPdy.reg_offset++; 766 } 767 mlen += MAX2(ir->lod_info.grad.dPdy->type->vector_elements, 2); 768 } else if (ir->op == ir_txs) { 769 /* There's no SIMD8 resinfo message on Gen4. Use SIMD16 instead. */ 770 simd16 = true; 771 ir->lod_info.lod->accept(this); 772 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result); 773 mlen += 2; 774 } else { 775 /* Oh joy. gen4 doesn't have SIMD8 non-shadow-compare bias/lod 776 * instructions. We'll need to do SIMD16 here. 777 */ 778 simd16 = true; 779 assert(ir->op == ir_txb || ir->op == ir_txl || ir->op == ir_txf); 780 781 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 782 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2, coordinate.type), 783 coordinate); 784 coordinate.reg_offset++; 785 } 786 787 /* Initialize the rest of u/v/r with 0.0. Empirically, this seems to 788 * be necessary for TXF (ld), but seems wise to do for all messages. 789 */ 790 for (int i = ir->coordinate->type->vector_elements; i < 3; i++) { 791 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2), fs_reg(0.0f)); 792 } 793 794 /* lod/bias appears after u/v/r. 
*/ 795 mlen += 6; 796 797 if (ir->op == ir_txb) { 798 ir->lod_info.bias->accept(this); 799 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 800 mlen++; 801 } else { 802 ir->lod_info.lod->accept(this); 803 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, this->result.type), 804 this->result); 805 mlen++; 806 } 807 808 /* The unused upper half. */ 809 mlen++; 810 } 811 812 if (simd16) { 813 /* Now, since we're doing simd16, the return is 2 interleaved 814 * vec4s where the odd-indexed ones are junk. We'll need to move 815 * this weirdness around to the expected layout. 816 */ 817 orig_dst = dst; 818 const glsl_type *vec_type = 819 glsl_type::get_instance(ir->type->base_type, 4, 1); 820 dst = fs_reg(this, glsl_type::get_array_instance(vec_type, 2)); 821 dst.type = intel->is_g4x ? brw_type_for_base_type(ir->type) 822 : BRW_REGISTER_TYPE_F; 823 } 824 825 fs_inst *inst = NULL; 826 switch (ir->op) { 827 case ir_tex: 828 inst = emit(SHADER_OPCODE_TEX, dst); 829 break; 830 case ir_txb: 831 inst = emit(FS_OPCODE_TXB, dst); 832 break; 833 case ir_txl: 834 inst = emit(SHADER_OPCODE_TXL, dst); 835 break; 836 case ir_txd: 837 inst = emit(SHADER_OPCODE_TXD, dst); 838 break; 839 case ir_txs: 840 inst = emit(SHADER_OPCODE_TXS, dst); 841 break; 842 case ir_txf: 843 inst = emit(SHADER_OPCODE_TXF, dst); 844 break; 845 } 846 inst->base_mrf = base_mrf; 847 inst->mlen = mlen; 848 inst->header_present = true; 849 850 if (simd16) { 851 for (int i = 0; i < 4; i++) { 852 emit(BRW_OPCODE_MOV, orig_dst, dst); 853 orig_dst.reg_offset++; 854 dst.reg_offset += 2; 855 } 856 } 857 858 return inst; 859} 860 861/* gen5's sampler has slots for u, v, r, array index, then optional 862 * parameters like shadow comparitor or LOD bias. If optional 863 * parameters aren't present, those base slots are optional and don't 864 * need to be included in the message. 865 * 866 * We don't fill in the unnecessary slots regardless, which may look 867 * surprising in the disassembly. 868 */ 869fs_inst * 870fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate, 871 int sampler) 872{ 873 int mlen = 0; 874 int base_mrf = 2; 875 int reg_width = c->dispatch_width / 8; 876 bool header_present = false; 877 const int vector_elements = 878 ir->coordinate ? ir->coordinate->type->vector_elements : 0; 879 880 if (ir->offset) { 881 /* The offsets set up by the ir_texture visitor are in the 882 * m1 header, so we can't go headerless. 
883 */ 884 header_present = true; 885 mlen++; 886 base_mrf--; 887 } 888 889 for (int i = 0; i < vector_elements; i++) { 890 emit(BRW_OPCODE_MOV, 891 fs_reg(MRF, base_mrf + mlen + i * reg_width, coordinate.type), 892 coordinate); 893 coordinate.reg_offset++; 894 } 895 mlen += vector_elements * reg_width; 896 897 if (ir->shadow_comparitor && ir->op != ir_txd) { 898 mlen = MAX2(mlen, header_present + 4 * reg_width); 899 900 ir->shadow_comparitor->accept(this); 901 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 902 mlen += reg_width; 903 } 904 905 fs_inst *inst = NULL; 906 switch (ir->op) { 907 case ir_tex: 908 inst = emit(SHADER_OPCODE_TEX, dst); 909 break; 910 case ir_txb: 911 ir->lod_info.bias->accept(this); 912 mlen = MAX2(mlen, header_present + 4 * reg_width); 913 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 914 mlen += reg_width; 915 916 inst = emit(FS_OPCODE_TXB, dst); 917 918 break; 919 case ir_txl: 920 ir->lod_info.lod->accept(this); 921 mlen = MAX2(mlen, header_present + 4 * reg_width); 922 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 923 mlen += reg_width; 924 925 inst = emit(SHADER_OPCODE_TXL, dst); 926 break; 927 case ir_txd: { 928 ir->lod_info.grad.dPdx->accept(this); 929 fs_reg dPdx = this->result; 930 931 ir->lod_info.grad.dPdy->accept(this); 932 fs_reg dPdy = this->result; 933 934 mlen = MAX2(mlen, header_present + 4 * reg_width); /* skip over 'ai' */ 935 936 /** 937 * P = u, v, r 938 * dPdx = dudx, dvdx, drdx 939 * dPdy = dudy, dvdy, drdy 940 * 941 * Load up these values: 942 * - dudx dudy dvdx dvdy drdx drdy 943 * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z 944 */ 945 for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) { 946 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx); 947 dPdx.reg_offset++; 948 mlen += reg_width; 949 950 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy); 951 dPdy.reg_offset++; 952 mlen += reg_width; 953 } 954 955 inst = emit(SHADER_OPCODE_TXD, dst); 956 break; 957 } 958 case ir_txs: 959 ir->lod_info.lod->accept(this); 960 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result); 961 mlen += reg_width; 962 inst = emit(SHADER_OPCODE_TXS, dst); 963 break; 964 case ir_txf: 965 mlen = header_present + 4 * reg_width; 966 967 ir->lod_info.lod->accept(this); 968 emit(BRW_OPCODE_MOV, 969 fs_reg(MRF, base_mrf + mlen - reg_width, BRW_REGISTER_TYPE_UD), 970 this->result); 971 inst = emit(SHADER_OPCODE_TXF, dst); 972 break; 973 } 974 inst->base_mrf = base_mrf; 975 inst->mlen = mlen; 976 inst->header_present = header_present; 977 978 if (mlen > 11) { 979 fail("Message length >11 disallowed by hardware\n"); 980 } 981 982 return inst; 983} 984 985fs_inst * 986fs_visitor::emit_texture_gen7(ir_texture *ir, fs_reg dst, fs_reg coordinate, 987 int sampler) 988{ 989 int mlen = 0; 990 int base_mrf = 2; 991 int reg_width = c->dispatch_width / 8; 992 bool header_present = false; 993 994 if (ir->offset) { 995 /* The offsets set up by the ir_texture visitor are in the 996 * m1 header, so we can't go headerless. 
997 */ 998 header_present = true; 999 mlen++; 1000 base_mrf--; 1001 } 1002 1003 if (ir->shadow_comparitor && ir->op != ir_txd) { 1004 ir->shadow_comparitor->accept(this); 1005 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 1006 mlen += reg_width; 1007 } 1008 1009 /* Set up the LOD info */ 1010 switch (ir->op) { 1011 case ir_tex: 1012 break; 1013 case ir_txb: 1014 ir->lod_info.bias->accept(this); 1015 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 1016 mlen += reg_width; 1017 break; 1018 case ir_txl: 1019 ir->lod_info.lod->accept(this); 1020 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result); 1021 mlen += reg_width; 1022 break; 1023 case ir_txd: { 1024 if (c->dispatch_width == 16) 1025 fail("Gen7 does not support sample_d/sample_d_c in SIMD16 mode."); 1026 1027 ir->lod_info.grad.dPdx->accept(this); 1028 fs_reg dPdx = this->result; 1029 1030 ir->lod_info.grad.dPdy->accept(this); 1031 fs_reg dPdy = this->result; 1032 1033 /* Load dPdx and the coordinate together: 1034 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z 1035 */ 1036 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 1037 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate); 1038 coordinate.reg_offset++; 1039 mlen += reg_width; 1040 1041 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx); 1042 dPdx.reg_offset++; 1043 mlen += reg_width; 1044 1045 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy); 1046 dPdy.reg_offset++; 1047 mlen += reg_width; 1048 } 1049 break; 1050 } 1051 case ir_txs: 1052 ir->lod_info.lod->accept(this); 1053 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD), this->result); 1054 mlen += reg_width; 1055 break; 1056 case ir_txf: 1057 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r. 
*/ 1058 emit(BRW_OPCODE_MOV, 1059 fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate); 1060 coordinate.reg_offset++; 1061 mlen += reg_width; 1062 1063 ir->lod_info.lod->accept(this); 1064 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), this->result); 1065 mlen += reg_width; 1066 1067 for (int i = 1; i < ir->coordinate->type->vector_elements; i++) { 1068 emit(BRW_OPCODE_MOV, 1069 fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate); 1070 coordinate.reg_offset++; 1071 mlen += reg_width; 1072 } 1073 break; 1074 } 1075 1076 /* Set up the coordinate (except for cases where it was done above) */ 1077 if (ir->op != ir_txd && ir->op != ir_txs && ir->op != ir_txf) { 1078 for (int i = 0; i < ir->coordinate->type->vector_elements; i++) { 1079 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate); 1080 coordinate.reg_offset++; 1081 mlen += reg_width; 1082 } 1083 } 1084 1085 /* Generate the SEND */ 1086 fs_inst *inst = NULL; 1087 switch (ir->op) { 1088 case ir_tex: inst = emit(SHADER_OPCODE_TEX, dst); break; 1089 case ir_txb: inst = emit(FS_OPCODE_TXB, dst); break; 1090 case ir_txl: inst = emit(SHADER_OPCODE_TXL, dst); break; 1091 case ir_txd: inst = emit(SHADER_OPCODE_TXD, dst); break; 1092 case ir_txf: inst = emit(SHADER_OPCODE_TXF, dst); break; 1093 case ir_txs: inst = emit(SHADER_OPCODE_TXS, dst); break; 1094 } 1095 inst->base_mrf = base_mrf; 1096 inst->mlen = mlen; 1097 inst->header_present = header_present; 1098 1099 if (mlen > 11) { 1100 fail("Message length >11 disallowed by hardware\n"); 1101 } 1102 1103 return inst; 1104} 1105 1106void 1107fs_visitor::visit(ir_texture *ir) 1108{ 1109 fs_inst *inst = NULL; 1110 1111 int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &fp->Base); 1112 sampler = fp->Base.SamplerUnits[sampler]; 1113 1114 /* Our hardware doesn't have a sample_d_c message, so shadow compares 1115 * for textureGrad/TXD need to be emulated with instructions. 1116 */ 1117 bool hw_compare_supported = ir->op != ir_txd; 1118 if (ir->shadow_comparitor && !hw_compare_supported) { 1119 assert(c->key.tex.compare_funcs[sampler] != GL_NONE); 1120 /* No need to even sample for GL_ALWAYS or GL_NEVER...bail early */ 1121 if (c->key.tex.compare_funcs[sampler] == GL_ALWAYS) 1122 return swizzle_result(ir, fs_reg(1.0f), sampler); 1123 else if (c->key.tex.compare_funcs[sampler] == GL_NEVER) 1124 return swizzle_result(ir, fs_reg(0.0f), sampler); 1125 } 1126 1127 if (ir->coordinate) 1128 ir->coordinate->accept(this); 1129 fs_reg coordinate = this->result; 1130 1131 if (ir->offset != NULL) { 1132 uint32_t offset_bits = brw_texture_offset(ir->offset->as_constant()); 1133 1134 /* Explicitly set up the message header by copying g0 to msg reg m1. */ 1135 emit(BRW_OPCODE_MOV, fs_reg(MRF, 1, BRW_REGISTER_TYPE_UD), 1136 fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD))); 1137 1138 /* Then set the offset bits in DWord 2 of the message header. */ 1139 emit(BRW_OPCODE_MOV, 1140 fs_reg(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 1, 2), 1141 BRW_REGISTER_TYPE_UD)), 1142 fs_reg(brw_imm_uw(offset_bits))); 1143 } 1144 1145 /* Should be lowered by do_lower_texture_projection */ 1146 assert(!ir->projector); 1147 1148 bool needs_gl_clamp = true; 1149 1150 fs_reg scale_x, scale_y; 1151 1152 /* The 965 requires the EU to do the normalization of GL rectangle 1153 * texture coordinates. We use the program parameter state 1154 * tracking to get the scaling factor. 
1155 */ 1156 if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT && 1157 (intel->gen < 6 || 1158 (intel->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) || 1159 c->key.tex.gl_clamp_mask[1] & (1 << sampler))))) { 1160 struct gl_program_parameter_list *params = c->fp->program.Base.Parameters; 1161 int tokens[STATE_LENGTH] = { 1162 STATE_INTERNAL, 1163 STATE_TEXRECT_SCALE, 1164 sampler, 1165 0, 1166 0 1167 }; 1168 1169 if (c->dispatch_width == 16) { 1170 fail("rectangle scale uniform setup not supported on 16-wide\n"); 1171 this->result = fs_reg(this, ir->type); 1172 return; 1173 } 1174 1175 c->prog_data.param_convert[c->prog_data.nr_params] = 1176 PARAM_NO_CONVERT; 1177 c->prog_data.param_convert[c->prog_data.nr_params + 1] = 1178 PARAM_NO_CONVERT; 1179 1180 scale_x = fs_reg(UNIFORM, c->prog_data.nr_params); 1181 scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1); 1182 1183 GLuint index = _mesa_add_state_reference(params, 1184 (gl_state_index *)tokens); 1185 1186 this->param_index[c->prog_data.nr_params] = index; 1187 this->param_offset[c->prog_data.nr_params] = 0; 1188 c->prog_data.nr_params++; 1189 this->param_index[c->prog_data.nr_params] = index; 1190 this->param_offset[c->prog_data.nr_params] = 1; 1191 c->prog_data.nr_params++; 1192 } 1193 1194 /* The 965 requires the EU to do the normalization of GL rectangle 1195 * texture coordinates. We use the program parameter state 1196 * tracking to get the scaling factor. 1197 */ 1198 if (intel->gen < 6 && 1199 ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) { 1200 fs_reg dst = fs_reg(this, ir->coordinate->type); 1201 fs_reg src = coordinate; 1202 coordinate = dst; 1203 1204 emit(BRW_OPCODE_MUL, dst, src, scale_x); 1205 dst.reg_offset++; 1206 src.reg_offset++; 1207 emit(BRW_OPCODE_MUL, dst, src, scale_y); 1208 } else if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) { 1209 /* On gen6+, the sampler handles the rectangle coordinates 1210 * natively, without needing rescaling. But that means we have 1211 * to do GL_CLAMP clamping at the [0, width], [0, height] scale, 1212 * not [0, 1] like the default case below. 1213 */ 1214 needs_gl_clamp = false; 1215 1216 for (int i = 0; i < 2; i++) { 1217 if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) { 1218 fs_reg chan = coordinate; 1219 chan.reg_offset += i; 1220 1221 inst = emit(BRW_OPCODE_SEL, chan, chan, brw_imm_f(0.0)); 1222 inst->conditional_mod = BRW_CONDITIONAL_G; 1223 1224 /* Our parameter comes in as 1.0/width or 1.0/height, 1225 * because that's what people normally want for doing 1226 * texture rectangle handling. We need width or height 1227 * for clamping, but we don't care enough to make a new 1228 * parameter type, so just invert back. 1229 */ 1230 fs_reg limit = fs_reg(this, glsl_type::float_type); 1231 emit(BRW_OPCODE_MOV, limit, i == 0 ? scale_x : scale_y); 1232 emit(SHADER_OPCODE_RCP, limit, limit); 1233 1234 inst = emit(BRW_OPCODE_SEL, chan, chan, limit); 1235 inst->conditional_mod = BRW_CONDITIONAL_L; 1236 } 1237 } 1238 } 1239 1240 if (ir->coordinate && needs_gl_clamp) { 1241 for (int i = 0; i < MIN2(ir->coordinate->type->vector_elements, 3); i++) { 1242 if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) { 1243 fs_reg chan = coordinate; 1244 chan.reg_offset += i; 1245 1246 fs_inst *inst = emit(BRW_OPCODE_MOV, chan, chan); 1247 inst->saturate = true; 1248 } 1249 } 1250 } 1251 1252 /* Writemasking doesn't eliminate channels on SIMD8 texture 1253 * samples, so don't worry about them. 
1254 */ 1255 fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1)); 1256 1257 if (intel->gen >= 7) { 1258 inst = emit_texture_gen7(ir, dst, coordinate, sampler); 1259 } else if (intel->gen >= 5) { 1260 inst = emit_texture_gen5(ir, dst, coordinate, sampler); 1261 } else { 1262 inst = emit_texture_gen4(ir, dst, coordinate, sampler); 1263 } 1264 1265 /* If there's an offset, we already set up m1. To avoid the implied move, 1266 * use the null register. Otherwise, we want an implied move from g0. 1267 */ 1268 if (ir->offset != NULL || !inst->header_present) 1269 inst->src[0] = reg_undef; 1270 else 1271 inst->src[0] = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW)); 1272 1273 inst->sampler = sampler; 1274 1275 if (ir->shadow_comparitor) { 1276 if (hw_compare_supported) { 1277 inst->shadow_compare = true; 1278 } else { 1279 ir->shadow_comparitor->accept(this); 1280 fs_reg ref = this->result; 1281 1282 fs_reg value = dst; 1283 dst = fs_reg(this, glsl_type::vec4_type); 1284 1285 /* FINISHME: This needs to be done pre-filtering. */ 1286 1287 uint32_t conditional = 0; 1288 switch (c->key.tex.compare_funcs[sampler]) { 1289 /* GL_ALWAYS and GL_NEVER were handled at the top of the function */ 1290 case GL_LESS: conditional = BRW_CONDITIONAL_L; break; 1291 case GL_GREATER: conditional = BRW_CONDITIONAL_G; break; 1292 case GL_LEQUAL: conditional = BRW_CONDITIONAL_LE; break; 1293 case GL_GEQUAL: conditional = BRW_CONDITIONAL_GE; break; 1294 case GL_EQUAL: conditional = BRW_CONDITIONAL_EQ; break; 1295 case GL_NOTEQUAL: conditional = BRW_CONDITIONAL_NEQ; break; 1296 default: assert(!"Should not get here: bad shadow compare function"); 1297 } 1298 1299 /* Use conditional moves to load 0 or 1 as the result */ 1300 this->current_annotation = "manual shadow comparison"; 1301 for (int i = 0; i < 4; i++) { 1302 inst = emit(BRW_OPCODE_MOV, dst, fs_reg(0.0f)); 1303 1304 inst = emit(BRW_OPCODE_CMP, reg_null_f, ref, value); 1305 inst->conditional_mod = conditional; 1306 1307 inst = emit(BRW_OPCODE_MOV, dst, fs_reg(1.0f)); 1308 inst->predicated = true; 1309 1310 dst.reg_offset++; 1311 value.reg_offset++; 1312 } 1313 dst.reg_offset = 0; 1314 } 1315 } 1316 1317 swizzle_result(ir, dst, sampler); 1318} 1319 1320/** 1321 * Swizzle the result of a texture result. This is necessary for 1322 * EXT_texture_swizzle as well as DEPTH_TEXTURE_MODE for shadow comparisons. 1323 */ 1324void 1325fs_visitor::swizzle_result(ir_texture *ir, fs_reg orig_val, int sampler) 1326{ 1327 this->result = orig_val; 1328 1329 if (ir->op == ir_txs) 1330 return; 1331 1332 if (ir->type == glsl_type::float_type) { 1333 /* Ignore DEPTH_TEXTURE_MODE swizzling. 
*/ 1334 assert(ir->sampler->type->sampler_shadow); 1335 } else if (c->key.tex.swizzles[sampler] != SWIZZLE_NOOP) { 1336 fs_reg swizzled_result = fs_reg(this, glsl_type::vec4_type); 1337 1338 for (int i = 0; i < 4; i++) { 1339 int swiz = GET_SWZ(c->key.tex.swizzles[sampler], i); 1340 fs_reg l = swizzled_result; 1341 l.reg_offset += i; 1342 1343 if (swiz == SWIZZLE_ZERO) { 1344 emit(BRW_OPCODE_MOV, l, fs_reg(0.0f)); 1345 } else if (swiz == SWIZZLE_ONE) { 1346 emit(BRW_OPCODE_MOV, l, fs_reg(1.0f)); 1347 } else { 1348 fs_reg r = orig_val; 1349 r.reg_offset += GET_SWZ(c->key.tex.swizzles[sampler], i); 1350 emit(BRW_OPCODE_MOV, l, r); 1351 } 1352 } 1353 this->result = swizzled_result; 1354 } 1355} 1356 1357void 1358fs_visitor::visit(ir_swizzle *ir) 1359{ 1360 ir->val->accept(this); 1361 fs_reg val = this->result; 1362 1363 if (ir->type->vector_elements == 1) { 1364 this->result.reg_offset += ir->mask.x; 1365 return; 1366 } 1367 1368 fs_reg result = fs_reg(this, ir->type); 1369 this->result = result; 1370 1371 for (unsigned int i = 0; i < ir->type->vector_elements; i++) { 1372 fs_reg channel = val; 1373 int swiz = 0; 1374 1375 switch (i) { 1376 case 0: 1377 swiz = ir->mask.x; 1378 break; 1379 case 1: 1380 swiz = ir->mask.y; 1381 break; 1382 case 2: 1383 swiz = ir->mask.z; 1384 break; 1385 case 3: 1386 swiz = ir->mask.w; 1387 break; 1388 } 1389 1390 channel.reg_offset += swiz; 1391 emit(BRW_OPCODE_MOV, result, channel); 1392 result.reg_offset++; 1393 } 1394} 1395 1396void 1397fs_visitor::visit(ir_discard *ir) 1398{ 1399 assert(ir->condition == NULL); /* FINISHME */ 1400 1401 emit(FS_OPCODE_DISCARD); 1402 kill_emitted = true; 1403} 1404 1405void 1406fs_visitor::visit(ir_constant *ir) 1407{ 1408 /* Set this->result to reg at the bottom of the function because some code 1409 * paths will cause this visitor to be applied to other fields. This will 1410 * cause the value stored in this->result to be modified. 1411 * 1412 * Make reg constant so that it doesn't get accidentally modified along the 1413 * way. Yes, I actually had this problem. 
:( 1414 */ 1415 const fs_reg reg(this, ir->type); 1416 fs_reg dst_reg = reg; 1417 1418 if (ir->type->is_array()) { 1419 const unsigned size = type_size(ir->type->fields.array); 1420 1421 for (unsigned i = 0; i < ir->type->length; i++) { 1422 ir->array_elements[i]->accept(this); 1423 fs_reg src_reg = this->result; 1424 1425 dst_reg.type = src_reg.type; 1426 for (unsigned j = 0; j < size; j++) { 1427 emit(BRW_OPCODE_MOV, dst_reg, src_reg); 1428 src_reg.reg_offset++; 1429 dst_reg.reg_offset++; 1430 } 1431 } 1432 } else if (ir->type->is_record()) { 1433 foreach_list(node, &ir->components) { 1434 ir_constant *const field = (ir_constant *) node; 1435 const unsigned size = type_size(field->type); 1436 1437 field->accept(this); 1438 fs_reg src_reg = this->result; 1439 1440 dst_reg.type = src_reg.type; 1441 for (unsigned j = 0; j < size; j++) { 1442 emit(BRW_OPCODE_MOV, dst_reg, src_reg); 1443 src_reg.reg_offset++; 1444 dst_reg.reg_offset++; 1445 } 1446 } 1447 } else { 1448 const unsigned size = type_size(ir->type); 1449 1450 for (unsigned i = 0; i < size; i++) { 1451 switch (ir->type->base_type) { 1452 case GLSL_TYPE_FLOAT: 1453 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i])); 1454 break; 1455 case GLSL_TYPE_UINT: 1456 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i])); 1457 break; 1458 case GLSL_TYPE_INT: 1459 emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i])); 1460 break; 1461 case GLSL_TYPE_BOOL: 1462 emit(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i])); 1463 break; 1464 default: 1465 assert(!"Non-float/uint/int/bool constant"); 1466 } 1467 dst_reg.reg_offset++; 1468 } 1469 } 1470 1471 this->result = reg; 1472} 1473 1474void 1475fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir) 1476{ 1477 ir_expression *expr = ir->as_expression(); 1478 1479 if (expr) { 1480 fs_reg op[2]; 1481 fs_inst *inst; 1482 1483 assert(expr->get_num_operands() <= 2); 1484 for (unsigned int i = 0; i < expr->get_num_operands(); i++) { 1485 assert(expr->operands[i]->type->is_scalar()); 1486 1487 expr->operands[i]->accept(this); 1488 op[i] = this->result; 1489 1490 resolve_ud_negate(&op[i]); 1491 } 1492 1493 switch (expr->operation) { 1494 case ir_unop_logic_not: 1495 inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1)); 1496 inst->conditional_mod = BRW_CONDITIONAL_Z; 1497 break; 1498 1499 case ir_binop_logic_xor: 1500 case ir_binop_logic_or: 1501 case ir_binop_logic_and: 1502 goto out; 1503 1504 case ir_unop_f2b: 1505 if (intel->gen >= 6) { 1506 inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0.0f)); 1507 } else { 1508 inst = emit(BRW_OPCODE_MOV, reg_null_f, op[0]); 1509 } 1510 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1511 break; 1512 1513 case ir_unop_i2b: 1514 if (intel->gen >= 6) { 1515 inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0)); 1516 } else { 1517 inst = emit(BRW_OPCODE_MOV, reg_null_d, op[0]); 1518 } 1519 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1520 break; 1521 1522 case ir_binop_greater: 1523 case ir_binop_gequal: 1524 case ir_binop_less: 1525 case ir_binop_lequal: 1526 case ir_binop_equal: 1527 case ir_binop_all_equal: 1528 case ir_binop_nequal: 1529 case ir_binop_any_nequal: 1530 inst = emit(BRW_OPCODE_CMP, reg_null_cmp, op[0], op[1]); 1531 inst->conditional_mod = 1532 brw_conditional_for_comparison(expr->operation); 1533 break; 1534 1535 default: 1536 assert(!"not reached"); 1537 fail("bad cond code\n"); 1538 break; 1539 } 1540 return; 1541 } 1542 1543out: 1544 ir->accept(this); 1545 1546 fs_inst *inst = emit(BRW_OPCODE_AND, reg_null_d, this->result, fs_reg(1)); 1547 
inst->conditional_mod = BRW_CONDITIONAL_NZ; 1548} 1549 1550/** 1551 * Emit a gen6 IF statement with the comparison folded into the IF 1552 * instruction. 1553 */ 1554void 1555fs_visitor::emit_if_gen6(ir_if *ir) 1556{ 1557 ir_expression *expr = ir->condition->as_expression(); 1558 1559 if (expr) { 1560 fs_reg op[2]; 1561 fs_inst *inst; 1562 fs_reg temp; 1563 1564 assert(expr->get_num_operands() <= 2); 1565 for (unsigned int i = 0; i < expr->get_num_operands(); i++) { 1566 assert(expr->operands[i]->type->is_scalar()); 1567 1568 expr->operands[i]->accept(this); 1569 op[i] = this->result; 1570 } 1571 1572 switch (expr->operation) { 1573 case ir_unop_logic_not: 1574 inst = emit(BRW_OPCODE_IF, temp, op[0], fs_reg(0)); 1575 inst->conditional_mod = BRW_CONDITIONAL_Z; 1576 return; 1577 1578 case ir_binop_logic_xor: 1579 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]); 1580 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1581 return; 1582 1583 case ir_binop_logic_or: 1584 temp = fs_reg(this, glsl_type::bool_type); 1585 emit(BRW_OPCODE_OR, temp, op[0], op[1]); 1586 inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)); 1587 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1588 return; 1589 1590 case ir_binop_logic_and: 1591 temp = fs_reg(this, glsl_type::bool_type); 1592 emit(BRW_OPCODE_AND, temp, op[0], op[1]); 1593 inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0)); 1594 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1595 return; 1596 1597 case ir_unop_f2b: 1598 inst = emit(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0)); 1599 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1600 return; 1601 1602 case ir_unop_i2b: 1603 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)); 1604 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1605 return; 1606 1607 case ir_binop_greater: 1608 case ir_binop_gequal: 1609 case ir_binop_less: 1610 case ir_binop_lequal: 1611 case ir_binop_equal: 1612 case ir_binop_all_equal: 1613 case ir_binop_nequal: 1614 case ir_binop_any_nequal: 1615 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]); 1616 inst->conditional_mod = 1617 brw_conditional_for_comparison(expr->operation); 1618 return; 1619 default: 1620 assert(!"not reached"); 1621 inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0)); 1622 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1623 fail("bad condition\n"); 1624 return; 1625 } 1626 return; 1627 } 1628 1629 ir->condition->accept(this); 1630 1631 fs_inst *inst = emit(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0)); 1632 inst->conditional_mod = BRW_CONDITIONAL_NZ; 1633} 1634 1635void 1636fs_visitor::visit(ir_if *ir) 1637{ 1638 fs_inst *inst; 1639 1640 if (intel->gen < 6 && c->dispatch_width == 16) { 1641 fail("Can't support (non-uniform) control flow on 16-wide\n"); 1642 } 1643 1644 /* Don't point the annotation at the if statement, because then it plus 1645 * the then and else blocks get printed. 
1646 */ 1647 this->base_ir = ir->condition; 1648 1649 if (intel->gen == 6) { 1650 emit_if_gen6(ir); 1651 } else { 1652 emit_bool_to_cond_code(ir->condition); 1653 1654 inst = emit(BRW_OPCODE_IF); 1655 inst->predicated = true; 1656 } 1657 1658 foreach_list(node, &ir->then_instructions) { 1659 ir_instruction *ir = (ir_instruction *)node; 1660 this->base_ir = ir; 1661 1662 ir->accept(this); 1663 } 1664 1665 if (!ir->else_instructions.is_empty()) { 1666 emit(BRW_OPCODE_ELSE); 1667 1668 foreach_list(node, &ir->else_instructions) { 1669 ir_instruction *ir = (ir_instruction *)node; 1670 this->base_ir = ir; 1671 1672 ir->accept(this); 1673 } 1674 } 1675 1676 emit(BRW_OPCODE_ENDIF); 1677} 1678 1679void 1680fs_visitor::visit(ir_loop *ir) 1681{ 1682 fs_reg counter = reg_undef; 1683 1684 if (intel->gen < 6 && c->dispatch_width == 16) { 1685 fail("Can't support (non-uniform) control flow on 16-wide\n"); 1686 } 1687 1688 if (ir->counter) { 1689 this->base_ir = ir->counter; 1690 ir->counter->accept(this); 1691 counter = *(variable_storage(ir->counter)); 1692 1693 if (ir->from) { 1694 this->base_ir = ir->from; 1695 ir->from->accept(this); 1696 1697 emit(BRW_OPCODE_MOV, counter, this->result); 1698 } 1699 } 1700 1701 emit(BRW_OPCODE_DO); 1702 1703 if (ir->to) { 1704 this->base_ir = ir->to; 1705 ir->to->accept(this); 1706 1707 fs_inst *inst = emit(BRW_OPCODE_CMP, reg_null_cmp, counter, this->result); 1708 inst->conditional_mod = brw_conditional_for_comparison(ir->cmp); 1709 1710 inst = emit(BRW_OPCODE_BREAK); 1711 inst->predicated = true; 1712 } 1713 1714 foreach_list(node, &ir->body_instructions) { 1715 ir_instruction *ir = (ir_instruction *)node; 1716 1717 this->base_ir = ir; 1718 ir->accept(this); 1719 } 1720 1721 if (ir->increment) { 1722 this->base_ir = ir->increment; 1723 ir->increment->accept(this); 1724 emit(BRW_OPCODE_ADD, counter, counter, this->result); 1725 } 1726 1727 emit(BRW_OPCODE_WHILE); 1728} 1729 1730void 1731fs_visitor::visit(ir_loop_jump *ir) 1732{ 1733 switch (ir->mode) { 1734 case ir_loop_jump::jump_break: 1735 emit(BRW_OPCODE_BREAK); 1736 break; 1737 case ir_loop_jump::jump_continue: 1738 emit(BRW_OPCODE_CONTINUE); 1739 break; 1740 } 1741} 1742 1743void 1744fs_visitor::visit(ir_call *ir) 1745{ 1746 assert(!"FINISHME"); 1747} 1748 1749void 1750fs_visitor::visit(ir_return *ir) 1751{ 1752 assert(!"FINISHME"); 1753} 1754 1755void 1756fs_visitor::visit(ir_function *ir) 1757{ 1758 /* Ignore function bodies other than main() -- we shouldn't see calls to 1759 * them since they should all be inlined before we get to ir_to_mesa. 
1760 */ 1761 if (strcmp(ir->name, "main") == 0) { 1762 const ir_function_signature *sig; 1763 exec_list empty; 1764 1765 sig = ir->matching_signature(&empty); 1766 1767 assert(sig); 1768 1769 foreach_list(node, &sig->body) { 1770 ir_instruction *ir = (ir_instruction *)node; 1771 this->base_ir = ir; 1772 1773 ir->accept(this); 1774 } 1775 } 1776} 1777 1778void 1779fs_visitor::visit(ir_function_signature *ir) 1780{ 1781 assert(!"not reached"); 1782 (void)ir; 1783} 1784 1785fs_inst * 1786fs_visitor::emit(fs_inst inst) 1787{ 1788 fs_inst *list_inst = new(mem_ctx) fs_inst; 1789 *list_inst = inst; 1790 1791 if (force_uncompressed_stack > 0) 1792 list_inst->force_uncompressed = true; 1793 else if (force_sechalf_stack > 0) 1794 list_inst->force_sechalf = true; 1795 1796 list_inst->annotation = this->current_annotation; 1797 list_inst->ir = this->base_ir; 1798 1799 this->instructions.push_tail(list_inst); 1800 1801 return list_inst; 1802} 1803 1804/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */ 1805void 1806fs_visitor::emit_dummy_fs() 1807{ 1808 int reg_width = c->dispatch_width / 8; 1809 1810 /* Everyone's favorite color. */ 1811 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 0 * reg_width), fs_reg(1.0f)); 1812 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 1 * reg_width), fs_reg(0.0f)); 1813 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 2 * reg_width), fs_reg(1.0f)); 1814 emit(BRW_OPCODE_MOV, fs_reg(MRF, 2 + 3 * reg_width), fs_reg(0.0f)); 1815 1816 fs_inst *write; 1817 write = emit(FS_OPCODE_FB_WRITE, fs_reg(0), fs_reg(0)); 1818 write->base_mrf = 2; 1819 write->mlen = 4 * reg_width; 1820 write->eot = true; 1821} 1822 1823/* The register location here is relative to the start of the URB 1824 * data. It will get adjusted to be a real location before 1825 * generate_code() time. 1826 */ 1827struct brw_reg 1828fs_visitor::interp_reg(int location, int channel) 1829{ 1830 int regnr = urb_setup[location] * 2 + channel / 2; 1831 int stride = (channel & 1) * 4; 1832 1833 assert(urb_setup[location] != -1); 1834 1835 return brw_vec1_grf(regnr, stride); 1836} 1837 1838/** Emits the interpolation for the varying inputs. */ 1839void 1840fs_visitor::emit_interpolation_setup_gen4() 1841{ 1842 this->current_annotation = "compute pixel centers"; 1843 this->pixel_x = fs_reg(this, glsl_type::uint_type); 1844 this->pixel_y = fs_reg(this, glsl_type::uint_type); 1845 this->pixel_x.type = BRW_REGISTER_TYPE_UW; 1846 this->pixel_y.type = BRW_REGISTER_TYPE_UW; 1847 1848 emit(FS_OPCODE_PIXEL_X, this->pixel_x); 1849 emit(FS_OPCODE_PIXEL_Y, this->pixel_y); 1850 1851 this->current_annotation = "compute pixel deltas from v0"; 1852 if (brw->has_pln) { 1853 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1854 fs_reg(this, glsl_type::vec2_type); 1855 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1856 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC]; 1857 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg_offset++; 1858 } else { 1859 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1860 fs_reg(this, glsl_type::float_type); 1861 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] = 1862 fs_reg(this, glsl_type::float_type); 1863 } 1864 emit(BRW_OPCODE_ADD, this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1865 this->pixel_x, fs_reg(negate(brw_vec1_grf(1, 0)))); 1866 emit(BRW_OPCODE_ADD, this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1867 this->pixel_y, fs_reg(negate(brw_vec1_grf(1, 1)))); 1868 1869 this->current_annotation = "compute pos.w and 1/pos.w"; 1870 /* Compute wpos.w. 
It's always in our setup, since it's needed to 1871 * interpolate the other attributes. 1872 */ 1873 this->wpos_w = fs_reg(this, glsl_type::float_type); 1874 emit(FS_OPCODE_LINTERP, wpos_w, 1875 this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1876 this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC], 1877 interp_reg(FRAG_ATTRIB_WPOS, 3)); 1878 /* Compute the pixel 1/W value from wpos.w. */ 1879 this->pixel_w = fs_reg(this, glsl_type::float_type); 1880 emit_math(SHADER_OPCODE_RCP, this->pixel_w, wpos_w); 1881 this->current_annotation = NULL; 1882} 1883 1884/** Emits the interpolation for the varying inputs. */ 1885void 1886fs_visitor::emit_interpolation_setup_gen6() 1887{ 1888 struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW); 1889 1890 /* If the pixel centers end up used, the setup is the same as for gen4. */ 1891 this->current_annotation = "compute pixel centers"; 1892 fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type); 1893 fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type); 1894 int_pixel_x.type = BRW_REGISTER_TYPE_UW; 1895 int_pixel_y.type = BRW_REGISTER_TYPE_UW; 1896 emit(BRW_OPCODE_ADD, 1897 int_pixel_x, 1898 fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)), 1899 fs_reg(brw_imm_v(0x10101010))); 1900 emit(BRW_OPCODE_ADD, 1901 int_pixel_y, 1902 fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)), 1903 fs_reg(brw_imm_v(0x11001100))); 1904 1905 /* As of gen6, we can no longer mix float and int sources. We have 1906 * to turn the integer pixel centers into floats for their actual 1907 * use. 1908 */ 1909 this->pixel_x = fs_reg(this, glsl_type::float_type); 1910 this->pixel_y = fs_reg(this, glsl_type::float_type); 1911 emit(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x); 1912 emit(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y); 1913 1914 this->current_annotation = "compute pos.w"; 1915 this->pixel_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0)); 1916 this->wpos_w = fs_reg(this, glsl_type::float_type); 1917 emit_math(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w); 1918 1919 for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) { 1920 uint8_t reg = c->barycentric_coord_reg[i]; 1921 this->delta_x[i] = fs_reg(brw_vec8_grf(reg, 0)); 1922 this->delta_y[i] = fs_reg(brw_vec8_grf(reg + 1, 0)); 1923 } 1924 1925 this->current_annotation = NULL; 1926} 1927 1928void 1929fs_visitor::emit_color_write(int target, int index, int first_color_mrf) 1930{ 1931 int reg_width = c->dispatch_width / 8; 1932 fs_inst *inst; 1933 fs_reg color = outputs[target]; 1934 fs_reg mrf; 1935 1936 /* If there's no color data to be written, skip it. 
*/ 1937 if (color.file == BAD_FILE) 1938 return; 1939 1940 color.reg_offset += index; 1941 1942 if (c->dispatch_width == 8 || intel->gen >= 6) { 1943 /* SIMD8 write looks like: 1944 * m + 0: r0 1945 * m + 1: r1 1946 * m + 2: g0 1947 * m + 3: g1 1948 * 1949 * gen6 SIMD16 DP write looks like: 1950 * m + 0: r0 1951 * m + 1: r1 1952 * m + 2: g0 1953 * m + 3: g1 1954 * m + 4: b0 1955 * m + 5: b1 1956 * m + 6: a0 1957 * m + 7: a1 1958 */ 1959 inst = emit(BRW_OPCODE_MOV, 1960 fs_reg(MRF, first_color_mrf + index * reg_width, color.type), 1961 color); 1962 inst->saturate = c->key.clamp_fragment_color; 1963 } else { 1964 /* pre-gen6 SIMD16 single source DP write looks like: 1965 * m + 0: r0 1966 * m + 1: g0 1967 * m + 2: b0 1968 * m + 3: a0 1969 * m + 4: r1 1970 * m + 5: g1 1971 * m + 6: b1 1972 * m + 7: a1 1973 */ 1974 if (brw->has_compr4) { 1975 /* By setting the high bit of the MRF register number, we 1976 * indicate that we want COMPR4 mode - instead of doing the 1977 * usual destination + 1 for the second half we get 1978 * destination + 4. 1979 */ 1980 inst = emit(BRW_OPCODE_MOV, 1981 fs_reg(MRF, BRW_MRF_COMPR4 + first_color_mrf + index, 1982 color.type), 1983 color); 1984 inst->saturate = c->key.clamp_fragment_color; 1985 } else { 1986 push_force_uncompressed(); 1987 inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index, 1988 color.type), 1989 color); 1990 inst->saturate = c->key.clamp_fragment_color; 1991 pop_force_uncompressed(); 1992 1993 push_force_sechalf(); 1994 color.sechalf = true; 1995 inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index + 4, 1996 color.type), 1997 color); 1998 inst->saturate = c->key.clamp_fragment_color; 1999 pop_force_sechalf(); 2000 color.sechalf = false; 2001 } 2002 } 2003} 2004 2005void 2006fs_visitor::emit_fb_writes() 2007{ 2008 this->current_annotation = "FB write header"; 2009 bool header_present = true; 2010 int base_mrf = 2; 2011 int nr = base_mrf; 2012 int reg_width = c->dispatch_width / 8; 2013 2014 if (intel->gen >= 6 && 2015 !this->kill_emitted && 2016 c->key.nr_color_regions == 1) { 2017 header_present = false; 2018 } 2019 2020 if (header_present) { 2021 /* m2, m3 header */ 2022 nr += 2; 2023 } 2024 2025 if (c->aa_dest_stencil_reg) { 2026 push_force_uncompressed(); 2027 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr++), 2028 fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0))); 2029 pop_force_uncompressed(); 2030 } 2031 2032 /* Reserve space for color. It'll be filled in per MRT below. */ 2033 int color_mrf = nr; 2034 nr += 4 * reg_width; 2035 2036 if (c->source_depth_to_render_target) { 2037 if (intel->gen == 6 && c->dispatch_width == 16) { 2038 /* For outputting oDepth on gen6, SIMD8 writes have to be 2039 * used. This would require 8-wide moves of each half to 2040 * message regs, kind of like pre-gen5 SIMD16 FB writes. 2041 * Just bail on doing so for now. 2042 */ 2043 fail("Missing support for simd16 depth writes on gen6\n"); 2044 } 2045 2046 if (c->computes_depth) { 2047 /* Hand over gl_FragDepth. */ 2048 assert(this->frag_depth); 2049 fs_reg depth = *(variable_storage(this->frag_depth)); 2050 2051 emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), depth); 2052 } else { 2053 /* Pass through the payload depth. 
          */
         emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
              fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
      }
      nr += reg_width;
   }

   if (c->dest_depth_reg) {
      emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
           fs_reg(brw_vec8_grf(c->dest_depth_reg, 0)));
      nr += reg_width;
   }

   for (int target = 0; target < c->key.nr_color_regions; target++) {
      this->current_annotation = ralloc_asprintf(this->mem_ctx,
                                                 "FB write target %d",
                                                 target);
      for (int i = 0; i < 4; i++)
         emit_color_write(target, i, color_mrf);

      fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
      inst->target = target;
      inst->base_mrf = base_mrf;
      inst->mlen = nr - base_mrf;
      if (target == c->key.nr_color_regions - 1)
         inst->eot = true;
      inst->header_present = header_present;
   }

   if (c->key.nr_color_regions == 0) {
      if (c->key.alpha_test) {
         /* If the alpha test is enabled but there's no color buffer,
          * we still need to send alpha out the pipeline to our null
          * renderbuffer.
          */
         emit_color_write(0, 3, color_mrf);
      }

      fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
      inst->base_mrf = base_mrf;
      inst->mlen = nr - base_mrf;
      inst->eot = true;
      inst->header_present = header_present;
   }

   this->current_annotation = NULL;
}

void
fs_visitor::resolve_ud_negate(fs_reg *reg)
{
   if (reg->type != BRW_REGISTER_TYPE_UD ||
       !reg->negate)
      return;

   fs_reg temp = fs_reg(this, glsl_type::uint_type);
   emit(BRW_OPCODE_MOV, temp, *reg);
   *reg = temp;
}
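For orientation, a minimal sketch of the traversal pattern the visit() overloads above rely on: each GLSL IR node's accept() double-dispatches back into fs_visitor, which appends fs_inst records to this->instructions. The fs_visitor, exec_list, ir_instruction, base_ir, foreach_list, and accept() names come from the file itself; the walk_main() harness is purely illustrative and not part of this revision.

/* Illustrative only -- mirrors the loop in fs_visitor::visit(ir_function) above. */
static void
walk_main(fs_visitor *v, exec_list *body)
{
   foreach_list(node, body) {
      ir_instruction *ir = (ir_instruction *) node;

      v->base_ir = ir;   /* remembered so each emitted fs_inst can be annotated */
      ir->accept(v);     /* dispatches to the matching visit() overload */
   }
}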