brw_vec4_emit.cpp revision 1d4f3ca8f0442821c914b758b323e6e5124149a3
/* Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
#include "glsl/ir_print_visitor.h"

extern "C" {
#include "brw_eu.h"
};

using namespace brw;

namespace brw {

int
vec4_visitor::setup_attributes(int payload_reg)
{
   int nr_attributes;
   int attribute_map[VERT_ATTRIB_MAX];

   nr_attributes = 0;
   for (int i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (prog_data->inputs_read & BITFIELD64_BIT(i)) {
         attribute_map[i] = payload_reg + nr_attributes;
         nr_attributes++;
      }
   }

   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      /* We have to support ATTR as a destination for GL_FIXED fixup. */
      if (inst->dst.file == ATTR) {
         int grf = attribute_map[inst->dst.reg + inst->dst.reg_offset];

         struct brw_reg reg = brw_vec8_grf(grf, 0);
         reg.dw1.bits.writemask = inst->dst.writemask;

         inst->dst.file = HW_REG;
         inst->dst.fixed_hw_reg = reg;
      }

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != ATTR)
            continue;

         int grf = attribute_map[inst->src[i].reg + inst->src[i].reg_offset];

         struct brw_reg reg = brw_vec8_grf(grf, 0);
         reg.dw1.bits.swizzle = inst->src[i].swizzle;
         if (inst->src[i].abs)
            reg = brw_abs(reg);
         if (inst->src[i].negate)
            reg = negate(reg);

         inst->src[i].file = HW_REG;
         inst->src[i].fixed_hw_reg = reg;
      }
   }

   /* The BSpec says we always have to read at least one thing from
    * the VF, and it appears that the hardware wedges otherwise.
    */
   if (nr_attributes == 0)
      nr_attributes = 1;

   prog_data->urb_read_length = (nr_attributes + 1) / 2;

   return payload_reg + nr_attributes;
}
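
/* Worked example (added for illustration; the numbers are hypothetical):
 * with payload_reg = 2 and only VERT_ATTRIB_POS and VERT_ATTRIB_GENERIC0 set
 * in inputs_read, setup_attributes() maps them to g2 and g3, rewrites ATTR
 * operands to those fixed HW registers, sets urb_read_length to
 * (2 + 1) / 2 = 1, and returns 4, which later becomes first_non_payload_grf.
 */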

int
vec4_visitor::setup_uniforms(int reg)
{
   /* The pre-gen6 VS requires that some push constants get loaded no
    * matter what, or the GPU would hang.
    */
   if (intel->gen < 6 && this->uniforms == 0) {
      this->uniform_vector_size[this->uniforms] = 1;

      for (unsigned int i = 0; i < 4; i++) {
         unsigned int slot = this->uniforms * 4 + i;
         static float zero = 0.0;
         c->prog_data.param[slot] = &zero;
      }

      this->uniforms++;
      reg++;
   } else {
      reg += ALIGN(uniforms, 2) / 2;
   }

   c->prog_data.nr_params = this->uniforms * 4;

   c->prog_data.curb_read_length = reg - 1;
   c->prog_data.uses_new_param_layout = true;

   return reg;
}

void
vec4_visitor::setup_payload(void)
{
   int reg = 0;

   /* The payload always contains important data in g0, which contains
    * the URB handles that are passed on to the URB write at the end
    * of the thread.  So, we always start push constants at g1.
    */
   reg++;

   reg = setup_uniforms(reg);

   reg = setup_attributes(reg);

   this->first_non_payload_grf = reg;
}

struct brw_reg
vec4_instruction::get_dst(void)
{
   struct brw_reg brw_reg;

   switch (dst.file) {
   case GRF:
      brw_reg = brw_vec8_grf(dst.reg + dst.reg_offset, 0);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case MRF:
      brw_reg = brw_message_reg(dst.reg + dst.reg_offset);
      brw_reg = retype(brw_reg, dst.type);
      brw_reg.dw1.bits.writemask = dst.writemask;
      break;

   case HW_REG:
      brw_reg = dst.fixed_hw_reg;
      break;

   case BAD_FILE:
      brw_reg = brw_null_reg();
      break;

   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }
   return brw_reg;
}

struct brw_reg
vec4_instruction::get_src(int i)
{
   struct brw_reg brw_reg;

   switch (src[i].file) {
   case GRF:
      brw_reg = brw_vec8_grf(src[i].reg + src[i].reg_offset, 0);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);
      break;

   case IMM:
      switch (src[i].type) {
      case BRW_REGISTER_TYPE_F:
         brw_reg = brw_imm_f(src[i].imm.f);
         break;
      case BRW_REGISTER_TYPE_D:
         brw_reg = brw_imm_d(src[i].imm.i);
         break;
      case BRW_REGISTER_TYPE_UD:
         brw_reg = brw_imm_ud(src[i].imm.u);
         break;
      default:
         assert(!"not reached");
         brw_reg = brw_null_reg();
         break;
      }
      break;

   case UNIFORM:
      brw_reg = stride(brw_vec4_grf(1 + (src[i].reg + src[i].reg_offset) / 2,
                                    ((src[i].reg + src[i].reg_offset) % 2) * 4),
                       0, 4, 1);
      brw_reg = retype(brw_reg, src[i].type);
      brw_reg.dw1.bits.swizzle = src[i].swizzle;
      if (src[i].abs)
         brw_reg = brw_abs(brw_reg);
      if (src[i].negate)
         brw_reg = negate(brw_reg);

      /* This should have been moved to pull constants. */
      assert(!src[i].reladdr);
      break;

   case HW_REG:
      brw_reg = src[i].fixed_hw_reg;
      break;

   case BAD_FILE:
      /* Probably unused. */
      brw_reg = brw_null_reg();
      break;
   case ATTR:
   default:
      assert(!"not reached");
      brw_reg = brw_null_reg();
      break;
   }

   return brw_reg;
}
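
/* Worked example for the UNIFORM case above (added for illustration): push
 * constants start at g1 and are packed two vec4 uniforms per GRF, so a
 * uniform with reg + reg_offset == 5 resolves to GRF 1 + 5/2 = g3 at dword
 * offset (5 % 2) * 4 = 4, and the <0,4,1> region repeats that vec4 for both
 * vertices of the align16 execution group.
 */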

void
vec4_visitor::generate_math1_gen4(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src)
{
   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            BRW_MATH_SATURATE_NONE,
            inst->base_mrf,
            src,
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}

static void
check_gen6_math_src_arg(struct brw_reg src)
{
   /* Source swizzles are ignored. */
   assert(!src.abs);
   assert(!src.negate);
   assert(src.dw1.bits.swizzle == BRW_SWIZZLE_XYZW);
}

void
vec4_visitor::generate_math1_gen6(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   check_gen6_math_src_arg(src);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            BRW_MATH_SATURATE_NONE,
            inst->base_mrf,
            src,
            BRW_MATH_DATA_SCALAR,
            BRW_MATH_PRECISION_FULL);
   brw_set_access_mode(p, BRW_ALIGN_16);
}

void
vec4_visitor::generate_math2_gen6(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src0,
                                  struct brw_reg src1)
{
   /* Can't do writemask because math can't be align16. */
   assert(dst.dw1.bits.writemask == WRITEMASK_XYZW);
   /* Source swizzles are ignored. */
   check_gen6_math_src_arg(src0);
   check_gen6_math_src_arg(src1);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math2(p,
             dst,
             brw_math_function(inst->opcode),
             src0, src1);
   brw_set_access_mode(p, BRW_ALIGN_16);
}

void
vec4_visitor::generate_math2_gen4(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg src0,
                                  struct brw_reg src1)
{
   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), src1.type), src1);

   brw_math(p,
            dst,
            brw_math_function(inst->opcode),
            BRW_MATH_SATURATE_NONE,
            inst->base_mrf,
            src0,
            BRW_MATH_DATA_VECTOR,
            BRW_MATH_PRECISION_FULL);
}

void
vec4_visitor::generate_urb_write(vec4_instruction *inst)
{
   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 inst->base_mrf, /* starting mrf reg nr */
                 brw_vec8_grf(0, 0), /* src */
                 false,         /* allocate */
                 true,          /* used */
                 inst->mlen,
                 0,             /* response len */
                 inst->eot,     /* eot */
                 inst->eot,     /* writes complete */
                 inst->offset,  /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);
}

void
vec4_visitor::generate_oword_dual_block_offsets(struct brw_reg m1,
                                                struct brw_reg index)
{
   int second_vertex_offset;

   if (intel->gen >= 6)
      second_vertex_offset = 1;
   else
      second_vertex_offset = 16;

   m1 = retype(m1, BRW_REGISTER_TYPE_D);

   /* Set up M1 (message payload).  Only the block offsets in M1.0 and
    * M1.4 are used, and the rest are ignored.
    */
   struct brw_reg m1_0 = suboffset(vec1(m1), 0);
   struct brw_reg m1_4 = suboffset(vec1(m1), 4);
   struct brw_reg index_0 = suboffset(vec1(index), 0);
   struct brw_reg index_4 = suboffset(vec1(index), 4);

   brw_push_insn_state(p);
   brw_set_mask_control(p, BRW_MASK_DISABLE);
   brw_set_access_mode(p, BRW_ALIGN_1);

   brw_MOV(p, m1_0, index_0);

   brw_set_predicate_inverse(p, true);
   if (index.file == BRW_IMMEDIATE_VALUE) {
      index_4.dw1.ud += second_vertex_offset;
      brw_MOV(p, m1_4, index_4);
   } else {
      brw_ADD(p, m1_4, index_4, brw_imm_d(second_vertex_offset));
   }

   brw_pop_insn_state(p);
}
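
/* Overview of the scratch read/write helpers below (summary added for
 * clarity; the unit interpretation is an inference from the constants used):
 * both messages carry the g0 header plus one register of per-vertex block
 * offsets built by generate_oword_dual_block_offsets() above, with the
 * second vertex's offset one OWord (16 bytes) past the first -- hence
 * second_vertex_offset of 1 on gen6+ (offsets presumably counted in OWords)
 * versus 16 on earlier parts (offsets presumably in bytes).  A read is
 * mlen 2 / rlen 1; a write adds a third register holding the vec4 data for
 * both vertices (mlen 3) and only requests a response register when a write
 * commit is needed.  Binding table index 255 is the stateless-access index,
 * per the comments on the sends.
 */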

void
vec4_visitor::generate_scratch_read(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   uint32_t msg_type;

   if (intel->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (intel->gen == 5 || intel->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           255, /* binding table index: stateless access */
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
                           2, /* mlen */
                           1 /* rlen */);
}

void
vec4_visitor::generate_scratch_write(vec4_instruction *inst,
                                     struct brw_reg dst,
                                     struct brw_reg src,
                                     struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);
   bool write_commit;

   /* If the instruction is predicated, we'll predicate the send, not
    * the header setup.
    */
   brw_set_predicate_control(p, false);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   generate_oword_dual_block_offsets(brw_message_reg(inst->base_mrf + 1),
                                     index);

   brw_MOV(p,
           retype(brw_message_reg(inst->base_mrf + 2), BRW_REGISTER_TYPE_D),
           retype(src, BRW_REGISTER_TYPE_D));

   uint32_t msg_type;

   if (intel->gen >= 6)
      msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;
   else
      msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE;

   brw_set_predicate_control(p, inst->predicate);

   /* Pre-gen6, we have to specify write commits to ensure ordering
    * between reads and writes within a thread.  Afterwards, that's
    * guaranteed and write commits only matter for inter-thread
    * synchronization.
    */
   if (intel->gen >= 6) {
      write_commit = false;
   } else {
      /* The visitor set up our destination register to be g0.  This
       * means that when the next read comes along, we will end up
       * reading from g0 and causing a block on the write commit.  For
       * write-after-read, we are relying on the value of the previous
       * read being used (and thus blocking on completion) before our
       * write is executed.  This means we have to be careful in
       * instruction scheduling to not violate this assumption.
       */
      write_commit = true;
   }

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_write_message(p, send,
                            255, /* binding table index: stateless access */
                            BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                            msg_type,
                            3, /* mlen */
                            true, /* header present */
                            false, /* pixel scoreboard */
                            write_commit, /* rlen */
                            false, /* eot */
                            write_commit);
}

void
vec4_visitor::generate_pull_constant_load(vec4_instruction *inst,
                                          struct brw_reg dst,
                                          struct brw_reg index)
{
   struct brw_reg header = brw_vec8_grf(0, 0);

   gen6_resolve_implied_move(p, &header, inst->base_mrf);

   brw_MOV(p, retype(brw_message_reg(inst->base_mrf + 1), BRW_REGISTER_TYPE_D),
           index);

   uint32_t msg_type;

   if (intel->gen >= 6)
      msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else if (intel->gen == 5 || intel->is_g4x)
      msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
   else
      msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;

   /* Each of the 8 channel enables is considered for whether each
    * dword is written.
    */
   struct brw_instruction *send = brw_next_insn(p, BRW_OPCODE_SEND);
   brw_set_dest(p, send, dst);
   brw_set_src0(p, send, header);
   if (intel->gen < 6)
      send->header.destreg__conditionalmod = inst->base_mrf;
   brw_set_dp_read_message(p, send,
                           SURF_INDEX_VERT_CONST_BUFFER,
                           BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
                           msg_type,
                           BRW_DATAPORT_READ_TARGET_DATA_CACHE,
                           2, /* mlen */
                           1 /* rlen */);
}

void
vec4_visitor::generate_vs_instruction(vec4_instruction *instruction,
                                      struct brw_reg dst,
                                      struct brw_reg *src)
{
   vec4_instruction *inst = (vec4_instruction *)instruction;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      if (intel->gen >= 6) {
         generate_math1_gen6(inst, dst, src[0]);
      } else {
         generate_math1_gen4(inst, dst, src[0]);
      }
      break;

   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      if (intel->gen >= 6) {
         generate_math2_gen6(inst, dst, src[0], src[1]);
      } else {
         generate_math2_gen4(inst, dst, src[0], src[1]);
      }
      break;

   case VS_OPCODE_URB_WRITE:
      generate_urb_write(inst);
      break;

   case VS_OPCODE_SCRATCH_READ:
      generate_scratch_read(inst, dst, src[0]);
      break;

   case VS_OPCODE_SCRATCH_WRITE:
      generate_scratch_write(inst, dst, src[0], src[1]);
      break;

   case VS_OPCODE_PULL_CONSTANT_LOAD:
      generate_pull_constant_load(inst, dst, src[0]);
      break;

   default:
      if (inst->opcode < (int)ARRAY_SIZE(brw_opcodes)) {
         fail("unsupported opcode in `%s' in VS\n",
              brw_opcodes[inst->opcode].name);
      } else {
         fail("Unsupported opcode %d in VS", inst->opcode);
      }
   }
}

bool
vec4_visitor::run()
{
   if (c->key.nr_userclip && !c->key.uses_clip_distance)
      setup_uniform_clipplane_values();

   /* Generate VS IR for main().  (the visitor only descends into
    * functions called "main").
    */
   visit_instructions(shader->ir);

   emit_urb_writes();

   /* Before any optimization, push array accesses out to scratch
    * space where we need them to be.  This pass may allocate new
    * virtual GRFs, so we want to do it early.  It also makes sure
    * that we have reladdr computations available for CSE, since we'll
    * often do repeated subexpressions for those.
    */
   move_grf_array_access_to_scratch();
   move_uniform_array_access_to_pull_constants();
   pack_uniform_registers();
   move_push_constants_to_pull_constants();

   bool progress;
   do {
      progress = false;
      progress = dead_code_eliminate() || progress;
      progress = opt_copy_propagation() || progress;
      progress = opt_algebraic() || progress;
      progress = opt_compute_to_mrf() || progress;
   } while (progress);

   if (failed)
      return false;

   setup_payload();
   reg_allocate();

   if (failed)
      return false;

   brw_set_access_mode(p, BRW_ALIGN_16);

   generate_code();

   return !failed;
}

void
vec4_visitor::generate_code()
{
   int last_native_inst = 0;
   const char *last_annotation_string = NULL;
   ir_instruction *last_annotation_ir = NULL;

   int loop_stack_array_size = 16;
   int loop_stack_depth = 0;
   brw_instruction **loop_stack =
      rzalloc_array(this->mem_ctx, brw_instruction *, loop_stack_array_size);
   int *if_depth_in_loop =
      rzalloc_array(this->mem_ctx, int, loop_stack_array_size);

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("Native code for vertex shader %d:\n", prog->Name);
   }

   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;
      struct brw_reg src[3], dst;

      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         if (last_annotation_ir != inst->ir) {
            last_annotation_ir = inst->ir;
            if (last_annotation_ir) {
               printf(" ");
               last_annotation_ir->print();
               printf("\n");
            }
         }
         if (last_annotation_string != inst->annotation) {
            last_annotation_string = inst->annotation;
            if (last_annotation_string)
               printf(" %s\n", last_annotation_string);
         }
      }

      for (unsigned int i = 0; i < 3; i++) {
         src[i] = inst->get_src(i);
      }
      dst = inst->get_dst();

      brw_set_conditionalmod(p, inst->conditional_mod);
      brw_set_predicate_control(p, inst->predicate);
      brw_set_predicate_inverse(p, inst->predicate_inverse);
      brw_set_saturate(p, inst->saturate);

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
         brw_MOV(p, dst, src[0]);
         break;
      case BRW_OPCODE_ADD:
         brw_ADD(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MUL:
         brw_MUL(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_MACH:
         brw_set_acc_write_control(p, 1);
         brw_MACH(p, dst, src[0], src[1]);
         brw_set_acc_write_control(p, 0);
         break;

      case BRW_OPCODE_FRC:
         brw_FRC(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDD:
         brw_RNDD(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDE:
         brw_RNDE(p, dst, src[0]);
         break;
      case BRW_OPCODE_RNDZ:
         brw_RNDZ(p, dst, src[0]);
         break;

      case BRW_OPCODE_AND:
         brw_AND(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_OR:
         brw_OR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_XOR:
         brw_XOR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_NOT:
         brw_NOT(p, dst, src[0]);
         break;
      case BRW_OPCODE_ASR:
         brw_ASR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHR:
         brw_SHR(p, dst, src[0], src[1]);
         break;
      case BRW_OPCODE_SHL:
         brw_SHL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_CMP:
         brw_CMP(p, dst, inst->conditional_mod, src[0], src[1]);
         break;
      case BRW_OPCODE_SEL:
         brw_SEL(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP4:
         brw_DP4(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP3:
         brw_DP3(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_DP2:
         brw_DP2(p, dst, src[0], src[1]);
         break;

      case BRW_OPCODE_IF:
         if (inst->src[0].file != BAD_FILE) {
            /* The instruction has an embedded compare (only allowed on gen6) */
            assert(intel->gen == 6);
            gen6_IF(p, inst->conditional_mod, src[0], src[1]);
         } else {
            struct brw_instruction *brw_inst = brw_IF(p, BRW_EXECUTE_8);
            brw_inst->header.predicate_control = inst->predicate;
         }
         if_depth_in_loop[loop_stack_depth]++;
         break;

      case BRW_OPCODE_ELSE:
         brw_ELSE(p);
         break;
      case BRW_OPCODE_ENDIF:
         brw_ENDIF(p);
         if_depth_in_loop[loop_stack_depth]--;
         break;

      case BRW_OPCODE_DO:
         loop_stack[loop_stack_depth++] = brw_DO(p, BRW_EXECUTE_8);
         if (loop_stack_array_size <= loop_stack_depth) {
            loop_stack_array_size *= 2;
            loop_stack = reralloc(this->mem_ctx, loop_stack, brw_instruction *,
                                  loop_stack_array_size);
            if_depth_in_loop = reralloc(this->mem_ctx, if_depth_in_loop, int,
                                        loop_stack_array_size);
         }
         if_depth_in_loop[loop_stack_depth] = 0;
         break;

      case BRW_OPCODE_BREAK:
         brw_BREAK(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
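
      /* Note on the loop handling below (added explanation; the jump-count
       * units are an assumption): on pre-gen6, BREAK and CONT are emitted
       * with a zero jump_count and the matching WHILE case patches them to
       * point past (or at) the WHILE once its location is known.  br is the
       * scale factor for those counts, presumably 2 on gen5 because jump
       * distances there are measured in 64-bit half-instruction units.
       */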
      case BRW_OPCODE_CONTINUE:
         /* FINISHME: We need to write the loop instruction support still. */
         if (intel->gen >= 6)
            gen6_CONT(p, loop_stack[loop_stack_depth - 1]);
         else
            brw_CONT(p, if_depth_in_loop[loop_stack_depth]);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case BRW_OPCODE_WHILE: {
         struct brw_instruction *inst0, *inst1;
         GLuint br = 1;

         if (intel->gen >= 5)
            br = 2;

         assert(loop_stack_depth > 0);
         loop_stack_depth--;
         inst0 = inst1 = brw_WHILE(p, loop_stack[loop_stack_depth]);
         if (intel->gen < 6) {
            /* patch all the BREAK/CONT instructions from last BGNLOOP */
            while (inst0 > loop_stack[loop_stack_depth]) {
               inst0--;
               if (inst0->header.opcode == BRW_OPCODE_BREAK &&
                   inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0 + 1);
               }
               else if (inst0->header.opcode == BRW_OPCODE_CONTINUE &&
                        inst0->bits3.if_else.jump_count == 0) {
                  inst0->bits3.if_else.jump_count = br * (inst1 - inst0);
               }
            }
         }
      }
      break;

      default:
         generate_vs_instruction(inst, dst, src);
         break;
      }

      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
            if (0) {
               printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                      ((uint32_t *)&p->store[i])[3],
                      ((uint32_t *)&p->store[i])[2],
                      ((uint32_t *)&p->store[i])[1],
                      ((uint32_t *)&p->store[i])[0]);
            }
            brw_disasm(stdout, &p->store[i], intel->gen);
         }
      }

      last_native_inst = p->nr_insn;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("\n");
   }

   ralloc_free(loop_stack);
   ralloc_free(if_depth_in_loop);

   brw_set_uip_jip(p);

   /* OK, while the INTEL_DEBUG=vs above is very nice for debugging VS
    * emit issues, it doesn't get the jump distances into the output,
    * which is often something we want to debug.  So this is here in
    * case you're doing that.
    */
   if (0) {
      if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
         for (unsigned int i = 0; i < p->nr_insn; i++) {
            printf("0x%08x 0x%08x 0x%08x 0x%08x ",
                   ((uint32_t *)&p->store[i])[3],
                   ((uint32_t *)&p->store[i])[2],
                   ((uint32_t *)&p->store[i])[1],
                   ((uint32_t *)&p->store[i])[0]);
            brw_disasm(stdout, &p->store[i], intel->gen);
         }
      }
   }
}

extern "C" {

bool
brw_vs_emit(struct gl_shader_program *prog, struct brw_vs_compile *c)
{
   if (!prog)
      return false;

   struct brw_shader *shader =
      (brw_shader *) prog->_LinkedShaders[MESA_SHADER_VERTEX];
   if (!shader)
      return false;

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("GLSL IR for native vertex shader %d:\n", prog->Name);
      _mesa_print_ir(shader->ir, NULL);
      printf("\n\n");
   }

   vec4_visitor v(c, prog, shader);
   if (!v.run()) {
      prog->LinkStatus = GL_FALSE;
      ralloc_strcat(&prog->InfoLog, v.fail_msg);
      return false;
   }

   return true;
}

} /* extern "C" */

} /* namespace brw */
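
/* Note (added for reference): the DEBUG_VS dumps above are enabled at runtime
 * with the INTEL_DEBUG=vs environment variable, which prints the incoming
 * GLSL IR and the disassembled native code for each compiled vertex shader.
 */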