brw_fs_visitor.cpp revision 7d55f37b0e87db9b3806088797075161a1c9a8bb
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_visitor.cpp
 *
 * This file supports generating the FS LIR from the GLSL IR.  The LIR
 * makes it easier to do backend-specific optimizations than doing so
 * in the GLSL IR or in the native code.
 */
extern "C" {

#include <sys/types.h>

#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "program/prog_optimize.h"
#include "program/register_allocate.h"
#include "program/sampler.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "brw_eu.h"
#include "brw_wm.h"
}
#include "brw_shader.h"
#include "brw_fs.h"
#include "glsl/glsl_types.h"
#include "glsl/ir_optimization.h"
#include "glsl/ir_print_visitor.h"

void
fs_visitor::visit(ir_variable *ir)
{
   fs_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   if (ir->mode == ir_var_in) {
      if (!strcmp(ir->name, "gl_FragCoord")) {
         reg = emit_fragcoord_interpolation(ir);
      } else if (!strcmp(ir->name, "gl_FrontFacing")) {
         reg = emit_frontfacing_interpolation(ir);
      } else {
         reg = emit_general_interpolation(ir);
      }
      assert(reg);
      hash_table_insert(this->variable_ht, reg, ir);
      return;
   } else if (ir->mode == ir_var_out) {
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

      if (ir->location == FRAG_RESULT_COLOR) {
         /* Writing gl_FragColor outputs to all color regions. */
         for (int i = 0; i < MAX2(c->key.nr_color_regions, 1); i++) {
            this->outputs[i] = *reg;
         }
      } else if (ir->location == FRAG_RESULT_DEPTH) {
         this->frag_depth = ir;
      } else {
         /* gl_FragData or a user-defined FS output */
         assert(ir->location >= FRAG_RESULT_DATA0 &&
                ir->location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

         /* General color output. */
         for (unsigned int i = 0; i < MAX2(1, ir->type->length); i++) {
            int output = ir->location - FRAG_RESULT_DATA0 + i;
            this->outputs[output] = *reg;
            this->outputs[output].reg_offset += 4 * i;
         }
      }
   } else if (ir->mode == ir_var_uniform) {
      int param_index = c->prog_data.nr_params;

      if (c->dispatch_width == 16) {
         if (!variable_storage(ir)) {
            fail("Failed to find uniform '%s' in 16-wide\n", ir->name);
         }
         return;
      }

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }

      reg = new(this->mem_ctx) fs_reg(UNIFORM, param_index);
      reg->type = brw_type_for_base_type(ir->type);
   }

   if (!reg)
      reg = new(this->mem_ctx) fs_reg(this, ir->type);

   hash_table_insert(this->variable_ht, reg, ir);
}

void
fs_visitor::visit(ir_dereference_variable *ir)
{
   fs_reg *reg = variable_storage(ir->var);
   this->result = *reg;
}

void
fs_visitor::visit(ir_dereference_record *ir)
{
   const glsl_type *struct_type = ir->record->type;

   ir->record->accept(this);

   unsigned int offset = 0;
   for (unsigned int i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }
   this->result.reg_offset += offset;
   this->result.type = brw_type_for_base_type(ir->type);
}

void
fs_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   int element_size;

   ir->array->accept(this);
   index = ir->array_index->as_constant();

   element_size = type_size(ir->type);
   this->result.type = brw_type_for_base_type(ir->type);

   if (index) {
      assert(this->result.file == UNIFORM || this->result.file == GRF);
      this->result.reg_offset += index->value.i[0] * element_size;
   } else {
      assert(!"FINISHME: non-constant array element");
   }
}

/* Instruction selection: Produce a MOV.sat instead of
 * MIN(MAX(val, 0), 1) when possible.
 */
bool
fs_visitor::try_emit_saturate(ir_expression *ir)
{
   ir_rvalue *sat_val = ir->as_rvalue_to_saturate();

   if (!sat_val)
      return false;

   sat_val->accept(this);
   fs_reg src = this->result;

   this->result = fs_reg(this, ir->type);
   fs_inst *inst = emit(BRW_OPCODE_MOV, this->result, src);
   inst->saturate = true;

   return true;
}

bool
fs_visitor::try_emit_mad(ir_expression *ir, int mul_arg)
{
   /* 3-src instructions were introduced in gen6. */
   if (intel->gen < 6)
      return false;

   /* MAD can only handle floating-point data. */
   if (ir->type != glsl_type::float_type)
      return false;

   ir_rvalue *nonmul = ir->operands[1 - mul_arg];
   ir_expression *mul = ir->operands[mul_arg]->as_expression();

   if (!mul || mul->operation != ir_binop_mul)
      return false;

   if (nonmul->as_constant() ||
       mul->operands[0]->as_constant() ||
       mul->operands[1]->as_constant())
      return false;

   nonmul->accept(this);
   fs_reg src0 = this->result;

   mul->operands[0]->accept(this);
   fs_reg src1 = this->result;

   mul->operands[1]->accept(this);
   fs_reg src2 = this->result;

   this->result = fs_reg(this, ir->type);
   emit(BRW_OPCODE_MAD, this->result, src0, src1, src2);

   return true;
}

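/* For illustration (not part of the original file): with MAD fusion, a
 * GLSL expression such as
 *
 *    result = a + b * c;
 *
 * is selected as a single three-source instruction,
 *
 *    mad result, a, b, c
 *
 * rather than a MUL into a temporary followed by an ADD.  The constant
 * checks above skip fusion when any operand is a constant; 3-src
 * instructions can't take immediate operands, so fusing would first
 * require loading the constant into a register.
 */
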
void
fs_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   fs_reg op[2], temp;
   fs_inst *inst;

   assert(ir->get_num_operands() <= 2);

   if (try_emit_saturate(ir))
      return;
   if (ir->operation == ir_binop_add) {
      if (try_emit_mad(ir, 0) || try_emit_mad(ir, 1))
         return;
   }

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         ir_print_visitor v;
         fail("Failed to get tree for expression operand:\n");
         ir->operands[operand]->accept(&v);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
      /* And then those vector operands should have been broken down to
       * scalar.
       */
      assert(!ir->operands[operand]->type->is_vector());
   }

   /* Storage for our result.  If our result goes into an assignment, it will
    * just get copy-propagated out, so no worries.
    */
   this->result = fs_reg(this, ir->type);

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
       * the one's complement of the whole register, not just bit 0.
       */
      emit(BRW_OPCODE_XOR, this->result, op[0], fs_reg(1));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      this->result = op[0];
      break;
   case ir_unop_sign:
      temp = fs_reg(this, ir->type);

      emit(BRW_OPCODE_MOV, this->result, fs_reg(0.0f));

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_G;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(1.0f));
      inst->predicated = true;

      inst = emit(BRW_OPCODE_CMP, reg_null_f, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_L;
      inst = emit(BRW_OPCODE_MOV, this->result, fs_reg(-1.0f));
      inst->predicated = true;

      break;
   case ir_unop_rcp:
      emit_math(SHADER_OPCODE_RCP, this->result, op[0]);
      break;

   case ir_unop_exp2:
      emit_math(SHADER_OPCODE_EXP2, this->result, op[0]);
      break;
   case ir_unop_log2:
      emit_math(SHADER_OPCODE_LOG2, this->result, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(SHADER_OPCODE_SIN, this->result, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(SHADER_OPCODE_COS, this->result, op[0]);
      break;

   case ir_unop_dFdx:
      emit(FS_OPCODE_DDX, this->result, op[0]);
      break;
   case ir_unop_dFdy:
      emit(FS_OPCODE_DDY, this->result, op[0]);
      break;

   case ir_binop_add:
      emit(BRW_OPCODE_ADD, this->result, op[0], op[1]);
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;

   case ir_binop_mul:
      if (ir->type->is_integer()) {
         /* For integer multiplication, the MUL uses the low 16 bits
          * of one of the operands (src0 on gen6, src1 on gen7).  The
          * MACH accumulates in the contribution of the upper 16 bits
          * of that operand.
          *
          * FINISHME: Emit just the MUL if we know an operand is small
          * enough.
          */
         struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);

         emit(BRW_OPCODE_MUL, acc, op[0], op[1]);
         emit(BRW_OPCODE_MACH, reg_null_d, op[0], op[1]);
         emit(BRW_OPCODE_MOV, this->result, fs_reg(acc));
      } else {
         emit(BRW_OPCODE_MUL, this->result, op[0], op[1]);
      }
      break;
   case ir_binop_div:
      if (intel->gen >= 7 && c->dispatch_width == 16)
         fail("16-wide INTDIV unsupported\n");

      /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_QUOTIENT, this->result, op[0], op[1]);
      break;
   case ir_binop_mod:
      if (intel->gen >= 7 && c->dispatch_width == 16)
         fail("16-wide INTDIV unsupported\n");

      /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */
      assert(ir->type->is_integer());
      emit_math(SHADER_OPCODE_INT_REMAINDER, this->result, op[0], op[1]);
      break;

   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_all_equal:
   case ir_binop_nequal:
   case ir_binop_any_nequal:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
         temp.type = op[0].type;

      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      inst = emit(BRW_OPCODE_CMP, temp, op[0], op[1]);
      inst->conditional_mod = brw_conditional_for_comparison(ir->operation);
      emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(0x1));
      break;

   case ir_binop_logic_xor:
      emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
      break;

   case ir_binop_logic_or:
      emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
      break;

   case ir_binop_logic_and:
      emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
      break;

   case ir_binop_dot:
   case ir_unop_any:
      assert(!"not reached: should be handled by brw_fs_channel_expressions");
      break;

   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;

   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;

   case ir_unop_sqrt:
      emit_math(SHADER_OPCODE_SQRT, this->result, op[0]);
      break;

   case ir_unop_rsq:
      emit_math(SHADER_OPCODE_RSQ, this->result, op[0]);
      break;

   case ir_unop_i2u:
      op[0].type = BRW_REGISTER_TYPE_UD;
      this->result = op[0];
      break;
   case ir_unop_u2i:
      op[0].type = BRW_REGISTER_TYPE_D;
      this->result = op[0];
      break;
   case ir_unop_i2f:
   case ir_unop_u2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
   case ir_unop_f2i:
      emit(BRW_OPCODE_MOV, this->result, op[0]);
      break;
   case ir_unop_f2b:
   case ir_unop_i2b:
      temp = this->result;
      /* original gen4 does implicit conversion before comparison. */
      if (intel->gen < 5)
         temp.type = op[0].type;

      resolve_ud_negate(&op[0]);

      inst = emit(BRW_OPCODE_CMP, temp, op[0], fs_reg(0.0f));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
      inst = emit(BRW_OPCODE_AND, this->result, this->result, fs_reg(1));
      break;

   case ir_unop_trunc:
      emit(BRW_OPCODE_RNDZ, this->result, op[0]);
      break;
   case ir_unop_ceil:
      op[0].negate = !op[0].negate;
      inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(BRW_OPCODE_RNDD, this->result, op[0]);
      break;
   case ir_unop_fract:
      inst = emit(BRW_OPCODE_FRC, this->result, op[0]);
      break;
   case ir_unop_round_even:
      emit(BRW_OPCODE_RNDE, this->result, op[0]);
      break;

   case ir_binop_min:
      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         /* Unalias the destination */
         this->result = fs_reg(this, ir->type);

         inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;

         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->predicated = true;
      }
      break;
   case ir_binop_max:
      resolve_ud_negate(&op[0]);
      resolve_ud_negate(&op[1]);

      if (intel->gen >= 6) {
         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_GE;
      } else {
         /* Unalias the destination */
         this->result = fs_reg(this, ir->type);

         inst = emit(BRW_OPCODE_CMP, this->result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_G;

         inst = emit(BRW_OPCODE_SEL, this->result, op[0], op[1]);
         inst->predicated = true;
      }
      break;

   case ir_binop_pow:
      emit_math(SHADER_OPCODE_POW, this->result, op[0], op[1]);
      break;

   case ir_unop_bit_not:
      inst = emit(BRW_OPCODE_NOT, this->result, op[0]);
      break;
   case ir_binop_bit_and:
      inst = emit(BRW_OPCODE_AND, this->result, op[0], op[1]);
      break;
   case ir_binop_bit_xor:
      inst = emit(BRW_OPCODE_XOR, this->result, op[0], op[1]);
      break;
   case ir_binop_bit_or:
      inst = emit(BRW_OPCODE_OR, this->result, op[0], op[1]);
      break;

   case ir_binop_lshift:
      inst = emit(BRW_OPCODE_SHL, this->result, op[0], op[1]);
      break;

   case ir_binop_rshift:
      if (ir->type->base_type == GLSL_TYPE_INT)
         inst = emit(BRW_OPCODE_ASR, this->result, op[0], op[1]);
      else
         inst = emit(BRW_OPCODE_SHR, this->result, op[0], op[1]);
      break;
   }
}

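/* For illustration (not part of the original file): the comparison cases
 * above lower a scalar GLSL expression like
 *
 *    bool b = x < y;
 *
 * to roughly
 *
 *    cmp.l.f0  tmp, x, y     // writes all-ones or all-zeros per channel
 *    and       b, tmp, 1     // normalize to the API-visible 0 or 1
 *
 * which is why an AND with 1 follows each CMP.
 */
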
void
fs_visitor::emit_assignment_writes(fs_reg &l, fs_reg &r,
                                   const glsl_type *type, bool predicated)
{
   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->components(); i++) {
         l.type = brw_type_for_base_type(type);
         r.type = brw_type_for_base_type(type);

         if (predicated || !l.equals(&r)) {
            fs_inst *inst = emit(BRW_OPCODE_MOV, l, r);
            inst->predicated = predicated;
         }

         l.reg_offset++;
         r.reg_offset++;
      }
      break;
   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.array, predicated);
      }
      break;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         emit_assignment_writes(l, r, type->fields.structure[i].type,
                                predicated);
      }
      break;

   case GLSL_TYPE_SAMPLER:
      break;

   default:
      assert(!"not reached");
      break;
   }
}

/* If the RHS processing resulted in an instruction generating a
 * temporary value, and it would be easy to rewrite the instruction to
 * generate its result right into the LHS instead, do so.  This ends
 * up reliably removing instructions where it can be tricky to do so
 * later without real UD chain information.
 */
bool
fs_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
                                   fs_reg dst,
                                   fs_reg src,
                                   fs_inst *pre_rhs_inst,
                                   fs_inst *last_rhs_inst)
{
   if (pre_rhs_inst == last_rhs_inst)
      return false; /* No instructions generated to work with. */

   /* Only attempt if we're doing a direct assignment. */
   if (ir->condition ||
       !(ir->lhs->type->is_scalar() ||
         (ir->lhs->type->is_vector() &&
          ir->write_mask == (1 << ir->lhs->type->vector_elements) - 1)))
      return false;

   /* Make sure the last instruction generated our source reg. */
   if (last_rhs_inst->predicated ||
       last_rhs_inst->force_uncompressed ||
       last_rhs_inst->force_sechalf ||
       !src.equals(&last_rhs_inst->dst))
      return false;

   /* Success!  Rewrite the instruction. */
   last_rhs_inst->dst = dst;

   return true;
}

void
fs_visitor::visit(ir_assignment *ir)
{
   fs_reg l, r;
   fs_inst *inst;

   /* FINISHME: arrays on the lhs */
   ir->lhs->accept(this);
   l = this->result;

   fs_inst *pre_rhs_inst = (fs_inst *) this->instructions.get_tail();

   ir->rhs->accept(this);
   r = this->result;

   fs_inst *last_rhs_inst = (fs_inst *) this->instructions.get_tail();

   assert(l.file != BAD_FILE);
   assert(r.file != BAD_FILE);

   if (try_rewrite_rhs_to_dst(ir, l, r, pre_rhs_inst, last_rhs_inst))
      return;

   if (ir->condition) {
      emit_bool_to_cond_code(ir->condition);
   }

   if (ir->lhs->type->is_scalar() ||
       ir->lhs->type->is_vector()) {
      for (int i = 0; i < ir->lhs->type->vector_elements; i++) {
         if (ir->write_mask & (1 << i)) {
            inst = emit(BRW_OPCODE_MOV, l, r);
            if (ir->condition)
               inst->predicated = true;
            r.reg_offset++;
         }
         l.reg_offset++;
      }
   } else {
      emit_assignment_writes(l, r, ir->lhs->type, ir->condition != NULL);
   }
}

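/* For illustration (not part of the original file): try_rewrite_rhs_to_dst()
 * turns a sequence like
 *
 *    add tmp, a, b          // generated while visiting the RHS
 *    mov dst, tmp           // the assignment's copy
 *
 * into a single
 *
 *    add dst, a, b
 *
 * by retargeting the last RHS instruction's destination, provided the
 * assignment is unconditional and covers the full writemask.
 */
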
fs_inst *
fs_visitor::emit_texture_gen4(ir_texture *ir, fs_reg dst, fs_reg coordinate,
                              int sampler)
{
   int mlen;
   int base_mrf = 1;
   bool simd16 = false;
   fs_reg orig_dst;

   /* g0 header. */
   mlen = 1;

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;

      if (ir->op == ir_tex) {
         /* There's no plain shadow compare message, so we use shadow
          * compare with a bias of 0.0.
          */
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), fs_reg(0.0f));
         mlen++;
      } else if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
         mlen++;
      } else {
         assert(ir->op == ir_txl);
         ir->lod_info.lod->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
         mlen++;
      }

      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen++;
   } else if (ir->op == ir_tex) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
         coordinate.reg_offset++;
      }
      /* gen4's SIMD8 sampler always has the slots for u,v,r present. */
      mlen += 3;
   } else if (ir->op == ir_txd) {
      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i), coordinate);
         coordinate.reg_offset++;
      }
      /* the slots for u and v are always present, but r is optional */
      mlen += MAX2(ir->coordinate->type->vector_elements, 2);

      /* P    = u, v, r
       * dPdx = dudx, dvdx, drdx
       * dPdy = dudy, dvdy, drdy
       *
       * 1-arg: Does not exist.
       *
       * 2-arg: dudx   dvdx   dudy   dvdy
       *        dPdx.x dPdx.y dPdy.x dPdy.y
       *        m4     m5     m6     m7
       *
       * 3-arg: dudx   dvdx   drdx   dudy   dvdy   drdy
       *        dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
       *        m5     m6     m7     m8     m9     m10
       */
      for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
         dPdx.reg_offset++;
      }
      mlen += MAX2(ir->lod_info.grad.dPdx->type->vector_elements, 2);

      for (int i = 0; i < ir->lod_info.grad.dPdy->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
         dPdy.reg_offset++;
      }
      mlen += MAX2(ir->lod_info.grad.dPdy->type->vector_elements, 2);
   } else if (ir->op == ir_txs) {
      /* There's no SIMD8 resinfo message on Gen4.  Use SIMD16 instead. */
      simd16 = true;
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD),
           this->result);
      mlen += 2;
   } else {
      /* Oh joy.  gen4 doesn't have SIMD8 non-shadow-compare bias/lod
       * instructions.  We'll need to do SIMD16 here.
       */
      simd16 = true;
      assert(ir->op == ir_txb || ir->op == ir_txl || ir->op == ir_txf);

      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV,
              fs_reg(MRF, base_mrf + mlen + i * 2, coordinate.type),
              coordinate);
         coordinate.reg_offset++;
      }

      /* Initialize the rest of u/v/r with 0.0.  Empirically, this seems to
       * be necessary for TXF (ld), but seems wise to do for all messages.
       */
      for (int i = ir->coordinate->type->vector_elements; i < 3; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen + i * 2),
              fs_reg(0.0f));
      }

      /* lod/bias appears after u/v/r. */
      mlen += 6;

      if (ir->op == ir_txb) {
         ir->lod_info.bias->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
         mlen++;
      } else {
         ir->lod_info.lod->accept(this);
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen, this->result.type),
              this->result);
         mlen++;
      }

      /* The unused upper half. */
      mlen++;
   }

   if (simd16) {
      /* Now, since we're doing simd16, the return is 2 interleaved
       * vec4s where the odd-indexed ones are junk.  We'll need to move
       * this weirdness around to the expected layout.
       */
      orig_dst = dst;
      const glsl_type *vec_type =
         glsl_type::get_instance(ir->type->base_type, 4, 1);
      dst = fs_reg(this, glsl_type::get_array_instance(vec_type, 2));
      dst.type = intel->is_g4x ? brw_type_for_base_type(ir->type)
                               : BRW_REGISTER_TYPE_F;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(SHADER_OPCODE_TEX, dst);
      break;
   case ir_txb:
      inst = emit(FS_OPCODE_TXB, dst);
      break;
   case ir_txl:
      inst = emit(SHADER_OPCODE_TXL, dst);
      break;
   case ir_txd:
      inst = emit(SHADER_OPCODE_TXD, dst);
      break;
   case ir_txs:
      inst = emit(SHADER_OPCODE_TXS, dst);
      break;
   case ir_txf:
      inst = emit(SHADER_OPCODE_TXF, dst);
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = true;

   if (simd16) {
      for (int i = 0; i < 4; i++) {
         emit(BRW_OPCODE_MOV, orig_dst, dst);
         orig_dst.reg_offset++;
         dst.reg_offset += 2;
      }
   }

   return inst;
}

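/* For illustration (not part of the original file): in the simd16 fallback
 * above, the sampler returns the channels interleaved,
 *
 *    reg_offset: 0  1  2  3  4  5  6  7
 *    contents:   R  x  G  x  B  x  A  x     (x = junk second half)
 *
 * so the final loop copies offsets 0, 2, 4, 6 into the four consecutive
 * registers the rest of the compiler expects.
 */
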
/* gen5's sampler has slots for u, v, r, array index, then optional
 * parameters like shadow comparitor or LOD bias.  If optional
 * parameters aren't present, those base slots are optional and don't
 * need to be included in the message.
 *
 * We don't fill in the unnecessary slots regardless, which may look
 * surprising in the disassembly.
 */
fs_inst *
fs_visitor::emit_texture_gen5(ir_texture *ir, fs_reg dst, fs_reg coordinate,
                              int sampler)
{
   int mlen = 0;
   int base_mrf = 2;
   int reg_width = c->dispatch_width / 8;
   bool header_present = false;
   const int vector_elements =
      ir->coordinate ? ir->coordinate->type->vector_elements : 0;

   if (ir->offset) {
      /* The offsets set up by the ir_texture visitor are in the
       * m1 header, so we can't go headerless.
       */
      header_present = true;
      mlen++;
      base_mrf--;
   }

   for (int i = 0; i < vector_elements; i++) {
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen + i * reg_width, coordinate.type),
           coordinate);
      coordinate.reg_offset++;
   }
   mlen += vector_elements * reg_width;

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      mlen = MAX2(mlen, header_present + 4 * reg_width);

      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
   }

   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex:
      inst = emit(SHADER_OPCODE_TEX, dst);
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      mlen = MAX2(mlen, header_present + 4 * reg_width);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;

      inst = emit(FS_OPCODE_TXB, dst);

      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      mlen = MAX2(mlen, header_present + 4 * reg_width);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;

      inst = emit(SHADER_OPCODE_TXL, dst);
      break;
   case ir_txd: {
      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      mlen = MAX2(mlen, header_present + 4 * reg_width); /* skip over 'ai' */

      /**
       * P    = u, v, r
       * dPdx = dudx, dvdx, drdx
       * dPdy = dudy, dvdy, drdy
       *
       * Load up these values:
       * - dudx   dudy   dvdx   dvdy   drdx   drdy
       * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
       */
      for (int i = 0; i < ir->lod_info.grad.dPdx->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
         dPdx.reg_offset++;
         mlen += reg_width;

         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
         dPdy.reg_offset++;
         mlen += reg_width;
      }

      inst = emit(SHADER_OPCODE_TXD, dst);
      break;
   }
   case ir_txs:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD),
           this->result);
      mlen += reg_width;
      inst = emit(SHADER_OPCODE_TXS, dst);
      break;
   case ir_txf:
      mlen = header_present + 4 * reg_width;

      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen - reg_width, BRW_REGISTER_TYPE_UD),
           this->result);
      inst = emit(SHADER_OPCODE_TXF, dst);
      break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = header_present;

   if (mlen > 11) {
      fail("Message length >11 disallowed by hardware\n");
   }

   return inst;
}

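/* For illustration (not part of the original file): the MAX2() bumps above
 * implement the "optional base slots" rule.  For a SIMD8 sample_b from a 2D
 * texture (reg_width == 1, no header), the message ends up as
 *
 *    m2: u    m3: v    m4: <skipped r>    m5: <skipped ai>    m6: bias
 *
 * with mlen advanced past the unwritten r/ai slots rather than packing the
 * bias immediately after v.
 */
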
fs_inst *
fs_visitor::emit_texture_gen7(ir_texture *ir, fs_reg dst, fs_reg coordinate,
                              int sampler)
{
   int mlen = 0;
   int base_mrf = 2;
   int reg_width = c->dispatch_width / 8;
   bool header_present = false;

   if (ir->offset) {
      /* The offsets set up by the ir_texture visitor are in the
       * m1 header, so we can't go headerless.
       */
      header_present = true;
      mlen++;
      base_mrf--;
   }

   if (ir->shadow_comparitor && ir->op != ir_txd) {
      ir->shadow_comparitor->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
   }

   /* Set up the LOD info */
   switch (ir->op) {
   case ir_tex:
      break;
   case ir_txb:
      ir->lod_info.bias->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
      break;
   case ir_txl:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), this->result);
      mlen += reg_width;
      break;
   case ir_txd: {
      if (c->dispatch_width == 16)
         fail("Gen7 does not support sample_d/sample_d_c in SIMD16 mode.");

      ir->lod_info.grad.dPdx->accept(this);
      fs_reg dPdx = this->result;

      ir->lod_info.grad.dPdy->accept(this);
      fs_reg dPdy = this->result;

      /* Load dPdx and the coordinate together:
       * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
       */
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate);
         coordinate.reg_offset++;
         mlen += reg_width;

         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdx);
         dPdx.reg_offset++;
         mlen += reg_width;

         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), dPdy);
         dPdy.reg_offset++;
         mlen += reg_width;
      }
      break;
   }
   case ir_txs:
      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_UD),
           this->result);
      mlen += reg_width;
      break;
   case ir_txf:
      /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r. */
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate);
      coordinate.reg_offset++;
      mlen += reg_width;

      ir->lod_info.lod->accept(this);
      emit(BRW_OPCODE_MOV,
           fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D),
           this->result);
      mlen += reg_width;

      for (int i = 1; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV,
              fs_reg(MRF, base_mrf + mlen, BRW_REGISTER_TYPE_D), coordinate);
         coordinate.reg_offset++;
         mlen += reg_width;
      }
      break;
   }

   /* Set up the coordinate (except for cases where it was done above) */
   if (ir->op != ir_txd && ir->op != ir_txs && ir->op != ir_txf) {
      for (int i = 0; i < ir->coordinate->type->vector_elements; i++) {
         emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + mlen), coordinate);
         coordinate.reg_offset++;
         mlen += reg_width;
      }
   }

   /* Generate the SEND */
   fs_inst *inst = NULL;
   switch (ir->op) {
   case ir_tex: inst = emit(SHADER_OPCODE_TEX, dst); break;
   case ir_txb: inst = emit(FS_OPCODE_TXB, dst); break;
   case ir_txl: inst = emit(SHADER_OPCODE_TXL, dst); break;
   case ir_txd: inst = emit(SHADER_OPCODE_TXD, dst); break;
   case ir_txf: inst = emit(SHADER_OPCODE_TXF, dst); break;
   case ir_txs: inst = emit(SHADER_OPCODE_TXS, dst); break;
   }
   inst->base_mrf = base_mrf;
   inst->mlen = mlen;
   inst->header_present = header_present;

   if (mlen > 11) {
      fail("Message length >11 disallowed by hardware\n");
   }

   return inst;
}

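/* For illustration (not part of the original file): for a SIMD8 gen7
 * texelFetch() from a 3D texture with no offset, the txf case above packs
 * the payload in the hardware's intermixed order,
 *
 *    m2: u    m3: lod    m4: v    m5: r
 *
 * rather than the u, v, r, lod order the other messages use.
 */
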
void
fs_visitor::visit(ir_texture *ir)
{
   fs_inst *inst = NULL;

   int sampler = _mesa_get_sampler_uniform_value(ir->sampler, prog, &fp->Base);
   sampler = fp->Base.SamplerUnits[sampler];

   /* Our hardware doesn't have a sample_d_c message, so shadow compares
    * for textureGrad/TXD need to be emulated with instructions.
    */
   bool hw_compare_supported = ir->op != ir_txd;
   if (ir->shadow_comparitor && !hw_compare_supported) {
      assert(c->key.tex.compare_funcs[sampler] != GL_NONE);
      /* No need to even sample for GL_ALWAYS or GL_NEVER...bail early */
      if (c->key.tex.compare_funcs[sampler] == GL_ALWAYS)
         return swizzle_result(ir, fs_reg(1.0f), sampler);
      else if (c->key.tex.compare_funcs[sampler] == GL_NEVER)
         return swizzle_result(ir, fs_reg(0.0f), sampler);
   }

   if (ir->coordinate)
      ir->coordinate->accept(this);
   fs_reg coordinate = this->result;

   if (ir->offset != NULL) {
      uint32_t offset_bits = brw_texture_offset(ir->offset->as_constant());

      /* Explicitly set up the message header by copying g0 to msg reg m1. */
      emit(BRW_OPCODE_MOV, fs_reg(MRF, 1, BRW_REGISTER_TYPE_UD),
           fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD)));

      /* Then set the offset bits in DWord 2 of the message header. */
      emit(BRW_OPCODE_MOV,
           fs_reg(retype(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, 1, 2),
                         BRW_REGISTER_TYPE_UD)),
           fs_reg(brw_imm_uw(offset_bits)));
   }

   /* Should be lowered by do_lower_texture_projection */
   assert(!ir->projector);

   bool needs_gl_clamp = true;

   fs_reg scale_x, scale_y;

   /* The 965 requires the EU to do the normalization of GL rectangle
    * texture coordinates.  We use the program parameter state
    * tracking to get the scaling factor.
    */
   if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT &&
       (intel->gen < 6 ||
        (intel->gen >= 6 && (c->key.tex.gl_clamp_mask[0] & (1 << sampler) ||
                             c->key.tex.gl_clamp_mask[1] & (1 << sampler))))) {
      struct gl_program_parameter_list *params = c->fp->program.Base.Parameters;
      int tokens[STATE_LENGTH] = {
         STATE_INTERNAL,
         STATE_TEXRECT_SCALE,
         sampler,
         0,
         0
      };

      if (c->dispatch_width == 16) {
         fail("rectangle scale uniform setup not supported on 16-wide\n");
         this->result = fs_reg(this, ir->type);
         return;
      }

      c->prog_data.param_convert[c->prog_data.nr_params] =
         PARAM_NO_CONVERT;
      c->prog_data.param_convert[c->prog_data.nr_params + 1] =
         PARAM_NO_CONVERT;

      scale_x = fs_reg(UNIFORM, c->prog_data.nr_params);
      scale_y = fs_reg(UNIFORM, c->prog_data.nr_params + 1);

      GLuint index = _mesa_add_state_reference(params,
                                               (gl_state_index *)tokens);

      this->param_index[c->prog_data.nr_params] = index;
      this->param_offset[c->prog_data.nr_params] = 0;
      c->prog_data.nr_params++;
      this->param_index[c->prog_data.nr_params] = index;
      this->param_offset[c->prog_data.nr_params] = 1;
      c->prog_data.nr_params++;
   }

   /* Apply the rectangle coordinate scaling set up above.  Only gen < 6
    * needs the multiply; later hardware samples rectangle textures with
    * unnormalized coordinates natively.
    */
   if (intel->gen < 6 &&
       ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
      fs_reg dst = fs_reg(this, ir->coordinate->type);
      fs_reg src = coordinate;
      coordinate = dst;

      emit(BRW_OPCODE_MUL, dst, src, scale_x);
      dst.reg_offset++;
      src.reg_offset++;
      emit(BRW_OPCODE_MUL, dst, src, scale_y);
   } else if (ir->sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_RECT) {
      /* On gen6+, the sampler handles the rectangle coordinates
       * natively, without needing rescaling.  But that means we have
       * to do GL_CLAMP clamping at the [0, width], [0, height] scale,
       * not [0, 1] like the default case below.
       */
      needs_gl_clamp = false;

      for (int i = 0; i < 2; i++) {
         if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) {
            fs_reg chan = coordinate;
            chan.reg_offset += i;

            inst = emit(BRW_OPCODE_SEL, chan, chan, brw_imm_f(0.0));
            inst->conditional_mod = BRW_CONDITIONAL_G;

            /* Our parameter comes in as 1.0/width or 1.0/height,
             * because that's what people normally want for doing
             * texture rectangle handling.  We need width or height
             * for clamping, but we don't care enough to make a new
             * parameter type, so just invert back.
             */
            fs_reg limit = fs_reg(this, glsl_type::float_type);
            emit(BRW_OPCODE_MOV, limit, i == 0 ? scale_x : scale_y);
            emit(SHADER_OPCODE_RCP, limit, limit);

            inst = emit(BRW_OPCODE_SEL, chan, chan, limit);
            inst->conditional_mod = BRW_CONDITIONAL_L;
         }
      }
   }

   if (ir->coordinate && needs_gl_clamp) {
      for (int i = 0; i < MIN2(ir->coordinate->type->vector_elements, 3); i++) {
         if (c->key.tex.gl_clamp_mask[i] & (1 << sampler)) {
            fs_reg chan = coordinate;
            chan.reg_offset += i;

            fs_inst *inst = emit(BRW_OPCODE_MOV, chan, chan);
            inst->saturate = true;
         }
      }
   }

   /* Writemasking doesn't eliminate channels on SIMD8 texture
    * samples, so don't worry about them.
    */
   fs_reg dst = fs_reg(this, glsl_type::get_instance(ir->type->base_type, 4, 1));

   if (intel->gen >= 7) {
      inst = emit_texture_gen7(ir, dst, coordinate, sampler);
   } else if (intel->gen >= 5) {
      inst = emit_texture_gen5(ir, dst, coordinate, sampler);
   } else {
      inst = emit_texture_gen4(ir, dst, coordinate, sampler);
   }

   /* If there's an offset, we already set up m1.  To avoid the implied move,
    * use the null register.  Otherwise, we want an implied move from g0.
    */
   if (ir->offset != NULL || !inst->header_present)
      inst->src[0] = reg_undef;
   else
      inst->src[0] = fs_reg(retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UW));

   inst->sampler = sampler;

   if (ir->shadow_comparitor) {
      if (hw_compare_supported) {
         inst->shadow_compare = true;
      } else {
         ir->shadow_comparitor->accept(this);
         fs_reg ref = this->result;

         fs_reg value = dst;
         dst = fs_reg(this, glsl_type::vec4_type);

         /* FINISHME: This needs to be done pre-filtering. */

         uint32_t conditional = 0;
         switch (c->key.tex.compare_funcs[sampler]) {
         /* GL_ALWAYS and GL_NEVER were handled at the top of the function */
         case GL_LESS:     conditional = BRW_CONDITIONAL_L;   break;
         case GL_GREATER:  conditional = BRW_CONDITIONAL_G;   break;
         case GL_LEQUAL:   conditional = BRW_CONDITIONAL_LE;  break;
         case GL_GEQUAL:   conditional = BRW_CONDITIONAL_GE;  break;
         case GL_EQUAL:    conditional = BRW_CONDITIONAL_EQ;  break;
         case GL_NOTEQUAL: conditional = BRW_CONDITIONAL_NEQ; break;
         default: assert(!"Should not get here: bad shadow compare function");
         }

         /* Use conditional moves to load 0 or 1 as the result */
         this->current_annotation = "manual shadow comparison";
         for (int i = 0; i < 4; i++) {
            inst = emit(BRW_OPCODE_MOV, dst, fs_reg(0.0f));

            inst = emit(BRW_OPCODE_CMP, reg_null_f, ref, value);
            inst->conditional_mod = conditional;

            inst = emit(BRW_OPCODE_MOV, dst, fs_reg(1.0f));
            inst->predicated = true;

            dst.reg_offset++;
            value.reg_offset++;
         }
         dst.reg_offset = 0;
      }
   }

   swizzle_result(ir, dst, sampler);
}

/**
 * Swizzle the result of a texture lookup.  This is necessary for
 * EXT_texture_swizzle as well as DEPTH_TEXTURE_MODE for shadow comparisons.
 */
void
fs_visitor::swizzle_result(ir_texture *ir, fs_reg orig_val, int sampler)
{
   this->result = orig_val;

   /* txs returns the texture size, not texel data, so don't swizzle it. */
   if (ir->op == ir_txs)
      return;

   if (ir->type == glsl_type::float_type) {
      /* Ignore DEPTH_TEXTURE_MODE swizzling. */
      assert(ir->sampler->type->sampler_shadow);
   } else if (c->key.tex.swizzles[sampler] != SWIZZLE_NOOP) {
      fs_reg swizzled_result = fs_reg(this, glsl_type::vec4_type);

      for (int i = 0; i < 4; i++) {
         int swiz = GET_SWZ(c->key.tex.swizzles[sampler], i);
         fs_reg l = swizzled_result;
         l.reg_offset += i;

         if (swiz == SWIZZLE_ZERO) {
            emit(BRW_OPCODE_MOV, l, fs_reg(0.0f));
         } else if (swiz == SWIZZLE_ONE) {
            emit(BRW_OPCODE_MOV, l, fs_reg(1.0f));
         } else {
            fs_reg r = orig_val;
            r.reg_offset += GET_SWZ(c->key.tex.swizzles[sampler], i);
            emit(BRW_OPCODE_MOV, l, r);
         }
      }
      this->result = swizzled_result;
   }
}

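/* For illustration (not part of the original file): with a hypothetical
 * swizzle key of SWIZZLE_ZERO / SWIZZLE_X / SWIZZLE_X / SWIZZLE_ONE, the
 * loop above expands to
 *
 *    mov result.x, 0.0f
 *    mov result.y, orig.x
 *    mov result.z, orig.x
 *    mov result.w, 1.0f
 *
 * one MOV per destination channel, with ZERO/ONE handled as immediates.
 */
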
void
fs_visitor::visit(ir_swizzle *ir)
{
   ir->val->accept(this);
   fs_reg val = this->result;

   if (ir->type->vector_elements == 1) {
      this->result.reg_offset += ir->mask.x;
      return;
   }

   fs_reg result = fs_reg(this, ir->type);
   this->result = result;

   for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
      fs_reg channel = val;
      int swiz = 0;

      switch (i) {
      case 0:
         swiz = ir->mask.x;
         break;
      case 1:
         swiz = ir->mask.y;
         break;
      case 2:
         swiz = ir->mask.z;
         break;
      case 3:
         swiz = ir->mask.w;
         break;
      }

      channel.reg_offset += swiz;
      emit(BRW_OPCODE_MOV, result, channel);
      result.reg_offset++;
   }
}

void
fs_visitor::visit(ir_discard *ir)
{
   assert(ir->condition == NULL); /* FINISHME */

   emit(FS_OPCODE_DISCARD);
   kill_emitted = true;
}

void
fs_visitor::visit(ir_constant *ir)
{
   /* Set this->result to reg at the bottom of the function because some code
    * paths will cause this visitor to be applied to other fields.  This will
    * cause the value stored in this->result to be modified.
    *
    * Make reg constant so that it doesn't get accidentally modified along the
    * way.  Yes, I actually had this problem. :(
    */
   const fs_reg reg(this, ir->type);
   fs_reg dst_reg = reg;

   if (ir->type->is_array()) {
      const unsigned size = type_size(ir->type->fields.array);

      for (unsigned i = 0; i < ir->type->length; i++) {
         ir->array_elements[i]->accept(this);
         fs_reg src_reg = this->result;

         dst_reg.type = src_reg.type;
         for (unsigned j = 0; j < size; j++) {
            emit(BRW_OPCODE_MOV, dst_reg, src_reg);
            src_reg.reg_offset++;
            dst_reg.reg_offset++;
         }
      }
   } else if (ir->type->is_record()) {
      foreach_list(node, &ir->components) {
         ir_instruction *const field = (ir_instruction *) node;
         const unsigned size = type_size(field->type);

         field->accept(this);
         fs_reg src_reg = this->result;

         dst_reg.type = src_reg.type;
         for (unsigned j = 0; j < size; j++) {
            emit(BRW_OPCODE_MOV, dst_reg, src_reg);
            src_reg.reg_offset++;
            dst_reg.reg_offset++;
         }
      }
   } else {
      const unsigned size = type_size(ir->type);

      for (unsigned i = 0; i < size; i++) {
         switch (ir->type->base_type) {
         case GLSL_TYPE_FLOAT:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.f[i]));
            break;
         case GLSL_TYPE_UINT:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.u[i]));
            break;
         case GLSL_TYPE_INT:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg(ir->value.i[i]));
            break;
         case GLSL_TYPE_BOOL:
            emit(BRW_OPCODE_MOV, dst_reg, fs_reg((int)ir->value.b[i]));
            break;
         default:
            assert(!"Non-float/uint/int/bool constant");
         }
         dst_reg.reg_offset++;
      }
   }

   this->result = reg;
}

void
fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
{
   ir_expression *expr = ir->as_expression();

   if (expr) {
      fs_reg op[2];
      fs_inst *inst;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         assert(expr->operands[i]->type->is_scalar());

         expr->operands[i]->accept(this);
         op[i] = this->result;

         resolve_ud_negate(&op[i]);
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], fs_reg(1));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;

      case ir_binop_logic_xor:
         inst = emit(BRW_OPCODE_XOR, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_or:
         inst = emit(BRW_OPCODE_OR, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_and:
         inst = emit(BRW_OPCODE_AND, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_f2b:
         if (intel->gen >= 6) {
            inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0.0f));
         } else {
            inst = emit(BRW_OPCODE_MOV, reg_null_f, op[0]);
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_i2b:
         if (intel->gen >= 6) {
            inst = emit(BRW_OPCODE_CMP, reg_null_d, op[0], fs_reg(0));
         } else {
            inst = emit(BRW_OPCODE_MOV, reg_null_d, op[0]);
         }
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_all_equal:
      case ir_binop_nequal:
      case ir_binop_any_nequal:
         inst = emit(BRW_OPCODE_CMP, reg_null_cmp, op[0], op[1]);
         inst->conditional_mod =
            brw_conditional_for_comparison(expr->operation);
         break;

      default:
         assert(!"not reached");
         fail("bad cond code\n");
         break;
      }
      return;
   }

   ir->accept(this);

   if (intel->gen >= 6) {
      fs_inst *inst = emit(BRW_OPCODE_AND, reg_null_d, this->result, fs_reg(1));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   } else {
      fs_inst *inst = emit(BRW_OPCODE_MOV, reg_null_d, this->result);
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }
}

/**
 * Emit a gen6 IF statement with the comparison folded into the IF
 * instruction.
 */
void
fs_visitor::emit_if_gen6(ir_if *ir)
{
   ir_expression *expr = ir->condition->as_expression();

   if (expr) {
      fs_reg op[2];
      fs_inst *inst;
      fs_reg temp;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         assert(expr->operands[i]->type->is_scalar());

         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(BRW_OPCODE_IF, temp, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         return;

      case ir_binop_logic_xor:
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_logic_or:
         temp = fs_reg(this, glsl_type::bool_type);
         emit(BRW_OPCODE_OR, temp, op[0], op[1]);
         inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_logic_and:
         temp = fs_reg(this, glsl_type::bool_type);
         emit(BRW_OPCODE_AND, temp, op[0], op[1]);
         inst = emit(BRW_OPCODE_IF, reg_null_d, temp, fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_unop_f2b:
         inst = emit(BRW_OPCODE_IF, reg_null_f, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_unop_i2b:
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         return;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_all_equal:
      case ir_binop_nequal:
      case ir_binop_any_nequal:
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], op[1]);
         inst->conditional_mod =
            brw_conditional_for_comparison(expr->operation);
         return;
      default:
         assert(!"not reached");
         inst = emit(BRW_OPCODE_IF, reg_null_d, op[0], fs_reg(0));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         fail("bad condition\n");
         return;
      }
      return;
   }

   ir->condition->accept(this);

   fs_inst *inst = emit(BRW_OPCODE_IF, reg_null_d, this->result, fs_reg(0));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;
}

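/* For illustration (not part of the original file): on gen6 a condition
 * like "if (a < b)" becomes, schematically, a single
 *
 *    if.l.f0  null, a, b
 *
 * folding the comparison into the IF itself, whereas other generations
 * first materialize the condition flag with emit_bool_to_cond_code() and
 * then emit a predicated IF.
 */
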
void
fs_visitor::visit(ir_if *ir)
{
   fs_inst *inst;

   if (intel->gen < 6 && c->dispatch_width == 16) {
      fail("Can't support (non-uniform) control flow on 16-wide\n");
   }

   /* Don't point the annotation at the if statement, because then it plus
    * the then and else blocks get printed.
    */
   this->base_ir = ir->condition;

   if (intel->gen == 6) {
      emit_if_gen6(ir);
   } else {
      emit_bool_to_cond_code(ir->condition);

      inst = emit(BRW_OPCODE_IF);
      inst->predicated = true;
   }

   foreach_list(node, &ir->then_instructions) {
      ir_instruction *ir = (ir_instruction *)node;
      this->base_ir = ir;

      ir->accept(this);
   }

   if (!ir->else_instructions.is_empty()) {
      emit(BRW_OPCODE_ELSE);

      foreach_list(node, &ir->else_instructions) {
         ir_instruction *ir = (ir_instruction *)node;
         this->base_ir = ir;

         ir->accept(this);
      }
   }

   emit(BRW_OPCODE_ENDIF);
}

void
fs_visitor::visit(ir_loop *ir)
{
   fs_reg counter = reg_undef;

   if (c->dispatch_width == 16) {
      fail("Can't support (non-uniform) control flow on 16-wide\n");
   }

   if (ir->counter) {
      this->base_ir = ir->counter;
      ir->counter->accept(this);
      counter = *(variable_storage(ir->counter));

      if (ir->from) {
         this->base_ir = ir->from;
         ir->from->accept(this);

         emit(BRW_OPCODE_MOV, counter, this->result);
      }
   }

   emit(BRW_OPCODE_DO);

   if (ir->to) {
      this->base_ir = ir->to;
      ir->to->accept(this);

      fs_inst *inst = emit(BRW_OPCODE_CMP, reg_null_cmp, counter, this->result);
      inst->conditional_mod = brw_conditional_for_comparison(ir->cmp);

      inst = emit(BRW_OPCODE_BREAK);
      inst->predicated = true;
   }

   foreach_list(node, &ir->body_instructions) {
      ir_instruction *ir = (ir_instruction *)node;

      this->base_ir = ir;
      ir->accept(this);
   }

   if (ir->increment) {
      this->base_ir = ir->increment;
      ir->increment->accept(this);
      emit(BRW_OPCODE_ADD, counter, counter, this->result);
   }

   emit(BRW_OPCODE_WHILE);
}

void
fs_visitor::visit(ir_loop_jump *ir)
{
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      emit(BRW_OPCODE_BREAK);
      break;
   case ir_loop_jump::jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;
   }
}

void
fs_visitor::visit(ir_call *ir)
{
   assert(!"FINISHME");
}

void
fs_visitor::visit(ir_return *ir)
{
   assert(!"FINISHME");
}

void
fs_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all be inlined before we get to ir_to_mesa.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      sig = ir->matching_signature(&empty);

      assert(sig);

      foreach_list(node, &sig->body) {
         ir_instruction *ir = (ir_instruction *)node;
         this->base_ir = ir;

         ir->accept(this);
      }
   }
}

void
fs_visitor::visit(ir_function_signature *ir)
{
   assert(!"not reached");
   (void)ir;
}

fs_inst *
fs_visitor::emit(fs_inst inst)
{
   fs_inst *list_inst = new(mem_ctx) fs_inst;
   *list_inst = inst;

   if (force_uncompressed_stack > 0)
      list_inst->force_uncompressed = true;
   else if (force_sechalf_stack > 0)
      list_inst->force_sechalf = true;

   list_inst->annotation = this->current_annotation;
   list_inst->ir = this->base_ir;

   this->instructions.push_tail(list_inst);

   return list_inst;
}

/** Emits a dummy fragment shader consisting of magenta for bringup purposes. */
void
fs_visitor::emit_dummy_fs()
{
   /* Everyone's favorite color. */
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 2), fs_reg(1.0f));
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 3), fs_reg(0.0f));
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 4), fs_reg(1.0f));
   emit(BRW_OPCODE_MOV, fs_reg(MRF, 5), fs_reg(0.0f));

   fs_inst *write;
   write = emit(FS_OPCODE_FB_WRITE, fs_reg(0), fs_reg(0));
   write->base_mrf = 2;
}

/* The register location here is relative to the start of the URB
 * data.  It will get adjusted to be a real location before
 * generate_code() time.
 */
struct brw_reg
fs_visitor::interp_reg(int location, int channel)
{
   int regnr = urb_setup[location] * 2 + channel / 2;
   int stride = (channel & 1) * 4;

   assert(urb_setup[location] != -1);

   return brw_vec1_grf(regnr, stride);
}

/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen4()
{
   this->current_annotation = "compute pixel centers";
   this->pixel_x = fs_reg(this, glsl_type::uint_type);
   this->pixel_y = fs_reg(this, glsl_type::uint_type);
   this->pixel_x.type = BRW_REGISTER_TYPE_UW;
   this->pixel_y.type = BRW_REGISTER_TYPE_UW;

   emit(FS_OPCODE_PIXEL_X, this->pixel_x);
   emit(FS_OPCODE_PIXEL_Y, this->pixel_y);

   this->current_annotation = "compute pixel deltas from v0";
   if (brw->has_pln) {
      this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         fs_reg(this, glsl_type::vec2_type);
      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC];
      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC].reg_offset++;
   } else {
      this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         fs_reg(this, glsl_type::float_type);
      this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC] =
         fs_reg(this, glsl_type::float_type);
   }
   emit(BRW_OPCODE_ADD, this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        this->pixel_x, fs_reg(negate(brw_vec1_grf(1, 0))));
   emit(BRW_OPCODE_ADD, this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        this->pixel_y, fs_reg(negate(brw_vec1_grf(1, 1))));

   this->current_annotation = "compute pos.w and 1/pos.w";
   /* Compute wpos.w.  It's always in our setup, since it's needed to
    * interpolate the other attributes.
    */
   this->wpos_w = fs_reg(this, glsl_type::float_type);
   emit(FS_OPCODE_LINTERP, wpos_w,
        this->delta_x[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        this->delta_y[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
        interp_reg(FRAG_ATTRIB_WPOS, 3));
   /* Compute the pixel 1/W value from wpos.w. */
   this->pixel_w = fs_reg(this, glsl_type::float_type);
   emit_math(SHADER_OPCODE_RCP, this->pixel_w, wpos_w);
   this->current_annotation = NULL;
}

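/* For illustration (not part of the original file): interp_reg() above packs
 * two channels of attribute setup per register.  If urb_setup[location] were
 * 3, channel 2 (the .z interpolation coefficients) would come from
 *
 *    regnr  = 3 * 2 + 2 / 2 = 7
 *    stride = (2 & 1) * 4   = 0
 *
 * i.e. brw_vec1_grf(7, 0), with channel 3 at brw_vec1_grf(7, 4).
 */
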
/** Emits the interpolation for the varying inputs. */
void
fs_visitor::emit_interpolation_setup_gen6()
{
   struct brw_reg g1_uw = retype(brw_vec1_grf(1, 0), BRW_REGISTER_TYPE_UW);

   /* If the pixel centers end up used, the setup is the same as for gen4. */
   this->current_annotation = "compute pixel centers";
   fs_reg int_pixel_x = fs_reg(this, glsl_type::uint_type);
   fs_reg int_pixel_y = fs_reg(this, glsl_type::uint_type);
   int_pixel_x.type = BRW_REGISTER_TYPE_UW;
   int_pixel_y.type = BRW_REGISTER_TYPE_UW;
   emit(BRW_OPCODE_ADD,
        int_pixel_x,
        fs_reg(stride(suboffset(g1_uw, 4), 2, 4, 0)),
        fs_reg(brw_imm_v(0x10101010)));
   emit(BRW_OPCODE_ADD,
        int_pixel_y,
        fs_reg(stride(suboffset(g1_uw, 5), 2, 4, 0)),
        fs_reg(brw_imm_v(0x11001100)));

   /* As of gen6, we can no longer mix float and int sources.  We have
    * to turn the integer pixel centers into floats for their actual
    * use.
    */
   this->pixel_x = fs_reg(this, glsl_type::float_type);
   this->pixel_y = fs_reg(this, glsl_type::float_type);
   emit(BRW_OPCODE_MOV, this->pixel_x, int_pixel_x);
   emit(BRW_OPCODE_MOV, this->pixel_y, int_pixel_y);

   this->current_annotation = "compute pos.w";
   this->pixel_w = fs_reg(brw_vec8_grf(c->source_w_reg, 0));
   this->wpos_w = fs_reg(this, glsl_type::float_type);
   emit_math(SHADER_OPCODE_RCP, this->wpos_w, this->pixel_w);

   for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
      uint8_t reg = c->barycentric_coord_reg[i];
      this->delta_x[i] = fs_reg(brw_vec8_grf(reg, 0));
      this->delta_y[i] = fs_reg(brw_vec8_grf(reg + 1, 0));
   }

   this->current_annotation = NULL;
}

void
fs_visitor::emit_color_write(int target, int index, int first_color_mrf)
{
   int reg_width = c->dispatch_width / 8;
   fs_inst *inst;
   fs_reg color = outputs[target];
   fs_reg mrf;

   /* If there's no color data to be written, skip it. */
   if (color.file == BAD_FILE)
      return;

   color.reg_offset += index;

   if (c->dispatch_width == 8 || intel->gen >= 6) {
      /* SIMD8 write looks like:
       * m + 0: r0
       * m + 1: r1
       * m + 2: g0
       * m + 3: g1
       *
       * gen6 SIMD16 DP write looks like:
       * m + 0: r0
       * m + 1: r1
       * m + 2: g0
       * m + 3: g1
       * m + 4: b0
       * m + 5: b1
       * m + 6: a0
       * m + 7: a1
       */
      inst = emit(BRW_OPCODE_MOV,
                  fs_reg(MRF, first_color_mrf + index * reg_width, color.type),
                  color);
      inst->saturate = c->key.clamp_fragment_color;
   } else {
      /* pre-gen6 SIMD16 single source DP write looks like:
       * m + 0: r0
       * m + 1: g0
       * m + 2: b0
       * m + 3: a0
       * m + 4: r1
       * m + 5: g1
       * m + 6: b1
       * m + 7: a1
       */
      if (brw->has_compr4) {
         /* By setting the high bit of the MRF register number, we
          * indicate that we want COMPR4 mode - instead of doing the
          * usual destination + 1 for the second half we get
          * destination + 4.
          */
         inst = emit(BRW_OPCODE_MOV,
                     fs_reg(MRF, BRW_MRF_COMPR4 + first_color_mrf + index,
                            color.type),
                     color);
         inst->saturate = c->key.clamp_fragment_color;
      } else {
         push_force_uncompressed();
         inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index,
                                            color.type),
                     color);
         inst->saturate = c->key.clamp_fragment_color;
         pop_force_uncompressed();

         push_force_sechalf();
         color.sechalf = true;
         inst = emit(BRW_OPCODE_MOV, fs_reg(MRF, first_color_mrf + index + 4,
                                            color.type),
                     color);
         inst->saturate = c->key.clamp_fragment_color;
         pop_force_sechalf();
         color.sechalf = false;
      }
   }
}

void
fs_visitor::emit_fb_writes()
{
   this->current_annotation = "FB write header";
   bool header_present = true;
   int base_mrf = 2;
   int nr = base_mrf;
   int reg_width = c->dispatch_width / 8;

   if (intel->gen >= 6 &&
       !this->kill_emitted &&
       c->key.nr_color_regions == 1) {
      header_present = false;
   }

   if (header_present) {
      /* m2, m3 header */
      nr += 2;
   }

   if (c->aa_dest_stencil_reg) {
      push_force_uncompressed();
      emit(BRW_OPCODE_MOV, fs_reg(MRF, nr++),
           fs_reg(brw_vec8_grf(c->aa_dest_stencil_reg, 0)));
      pop_force_uncompressed();
   }

   /* Reserve space for color.  It'll be filled in per MRT below. */
   int color_mrf = nr;
   nr += 4 * reg_width;

   if (c->source_depth_to_render_target) {
      if (intel->gen == 6 && c->dispatch_width == 16) {
         /* For outputting oDepth on gen6, SIMD8 writes have to be
          * used.  This would require 8-wide moves of each half to
          * message regs, kind of like pre-gen5 SIMD16 FB writes.
          * Just bail on doing so for now.
          */
         fail("Missing support for simd16 depth writes on gen6\n");
      }

      if (c->computes_depth) {
         /* Hand over gl_FragDepth. */
         assert(this->frag_depth);
         fs_reg depth = *(variable_storage(this->frag_depth));

         emit(BRW_OPCODE_MOV, fs_reg(MRF, nr), depth);
      } else {
         /* Pass through the payload depth. */
         emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
              fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
      }
      nr += reg_width;
   }

   if (c->dest_depth_reg) {
      emit(BRW_OPCODE_MOV, fs_reg(MRF, nr),
           fs_reg(brw_vec8_grf(c->dest_depth_reg, 0)));
      nr += reg_width;
   }

   for (int target = 0; target < c->key.nr_color_regions; target++) {
      this->current_annotation = ralloc_asprintf(this->mem_ctx,
                                                 "FB write target %d",
                                                 target);
      for (int i = 0; i < 4; i++)
         emit_color_write(target, i, color_mrf);

      fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
      inst->target = target;
      inst->base_mrf = base_mrf;
      inst->mlen = nr - base_mrf;
      if (target == c->key.nr_color_regions - 1)
         inst->eot = true;
      inst->header_present = header_present;
   }

   if (c->key.nr_color_regions == 0) {
      if (c->key.alpha_test) {
         /* If the alpha test is enabled but there's no color buffer,
          * we still need to send alpha out the pipeline to our null
          * renderbuffer.
          */
         emit_color_write(0, 3, color_mrf);
      }

      fs_inst *inst = emit(FS_OPCODE_FB_WRITE);
      inst->base_mrf = base_mrf;
      inst->mlen = nr - base_mrf;
      inst->eot = true;
      inst->header_present = header_present;
   }

   this->current_annotation = NULL;
}

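/* For illustration (not part of the original file): with COMPR4, a pre-gen6
 * SIMD16 color write needs only four compressed MOVs.  Writing red with a
 * hypothetical first_color_mrf of 4 and index 0 targets MRF 4 with the
 * COMPR4 bit set, so the hardware lands the first half in m4 and the second
 * half in m8, matching the r0 ... r1 layout shown above without the
 * force_uncompressed / force_sechalf pair of moves.
 */
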
void
fs_visitor::resolve_ud_negate(fs_reg *reg)
{
   if (reg->type != BRW_REGISTER_TYPE_UD ||
       !reg->negate)
      return;

   fs_reg temp = fs_reg(this, glsl_type::uint_type);
   emit(BRW_OPCODE_MOV, temp, *reg);
   *reg = temp;
}

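/* For illustration (not part of the original file): the source negate
 * modifier computes a two's-complement negation, which doesn't make sense
 * for unsigned (UD) comparisons.  resolve_ud_negate() therefore resolves a
 * negated UD value through a MOV into a fresh temporary first, so callers
 * such as the CMP and SEL paths above always see a plain UD source with no
 * modifier attached.
 */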