brw_vec4_visitor.cpp revision 29e2bc8b13be0f7ec48f8514e47322353e041365
1/* 2 * Copyright © 2011 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24#include "brw_vec4.h" 25extern "C" { 26#include "main/macros.h" 27#include "program/prog_parameter.h" 28} 29 30namespace brw { 31 32src_reg::src_reg(dst_reg reg) 33{ 34 init(); 35 36 this->file = reg.file; 37 this->reg = reg.reg; 38 this->reg_offset = reg.reg_offset; 39 this->type = reg.type; 40 this->reladdr = reg.reladdr; 41 this->fixed_hw_reg = reg.fixed_hw_reg; 42 43 int swizzles[4]; 44 int next_chan = 0; 45 int last = 0; 46 47 for (int i = 0; i < 4; i++) { 48 if (!(reg.writemask & (1 << i))) 49 continue; 50 51 swizzles[next_chan++] = last = i; 52 } 53 54 for (; next_chan < 4; next_chan++) { 55 swizzles[next_chan] = last; 56 } 57 58 this->swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1], 59 swizzles[2], swizzles[3]); 60} 61 62dst_reg::dst_reg(src_reg reg) 63{ 64 init(); 65 66 this->file = reg.file; 67 this->reg = reg.reg; 68 this->reg_offset = reg.reg_offset; 69 this->type = reg.type; 70 this->writemask = WRITEMASK_XYZW; 71 this->reladdr = reg.reladdr; 72 this->fixed_hw_reg = reg.fixed_hw_reg; 73} 74 75vec4_instruction::vec4_instruction(vec4_visitor *v, 76 enum opcode opcode, dst_reg dst, 77 src_reg src0, src_reg src1, src_reg src2) 78{ 79 this->opcode = opcode; 80 this->dst = dst; 81 this->src[0] = src0; 82 this->src[1] = src1; 83 this->src[2] = src2; 84 this->ir = v->base_ir; 85 this->annotation = v->current_annotation; 86} 87 88vec4_instruction * 89vec4_visitor::emit(vec4_instruction *inst) 90{ 91 this->instructions.push_tail(inst); 92 93 return inst; 94} 95 96vec4_instruction * 97vec4_visitor::emit_before(vec4_instruction *inst, vec4_instruction *new_inst) 98{ 99 new_inst->ir = inst->ir; 100 new_inst->annotation = inst->annotation; 101 102 inst->insert_before(new_inst); 103 104 return inst; 105} 106 107vec4_instruction * 108vec4_visitor::emit(enum opcode opcode, dst_reg dst, 109 src_reg src0, src_reg src1, src_reg src2) 110{ 111 return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, 112 src0, src1, src2)); 113} 114 115 116vec4_instruction * 117vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1) 118{ 119 return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0, src1)); 120} 121 122vec4_instruction * 123vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0) 124{ 125 return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0)); 126} 127 128vec4_instruction * 129vec4_visitor::emit(enum opcode opcode) 130{ 131 return emit(new(mem_ctx) vec4_instruction(this, opcode, dst_reg())); 132} 133 134#define ALU1(op) \ 135 vec4_instruction * \ 136 vec4_visitor::op(dst_reg dst, src_reg src0) \ 137 { \ 138 return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst, \ 139 src0); \ 140 } 141 142#define ALU2(op) \ 143 vec4_instruction * \ 144 vec4_visitor::op(dst_reg dst, src_reg src0, src_reg src1) \ 145 { \ 146 return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst, \ 147 src0, src1); \ 148 } 149 150ALU1(NOT) 151ALU1(MOV) 152ALU1(FRC) 153ALU1(RNDD) 154ALU1(RNDE) 155ALU1(RNDZ) 156ALU2(ADD) 157ALU2(MUL) 158ALU2(MACH) 159ALU2(AND) 160ALU2(OR) 161ALU2(XOR) 162ALU2(DP3) 163ALU2(DP4) 164 165/** Gen4 predicated IF. */ 166vec4_instruction * 167vec4_visitor::IF(uint32_t predicate) 168{ 169 vec4_instruction *inst; 170 171 inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF); 172 inst->predicate = predicate; 173 174 return inst; 175} 176 177/** Gen6+ IF with embedded comparison. */ 178vec4_instruction * 179vec4_visitor::IF(src_reg src0, src_reg src1, uint32_t condition) 180{ 181 assert(intel->gen >= 6); 182 183 vec4_instruction *inst; 184 185 resolve_ud_negate(&src0); 186 resolve_ud_negate(&src1); 187 188 inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF, dst_null_d(), 189 src0, src1); 190 inst->conditional_mod = condition; 191 192 return inst; 193} 194 195/** 196 * CMP: Sets the low bit of the destination channels with the result 197 * of the comparison, while the upper bits are undefined, and updates 198 * the flag register with the packed 16 bits of the result. 199 */ 200vec4_instruction * 201vec4_visitor::CMP(dst_reg dst, src_reg src0, src_reg src1, uint32_t condition) 202{ 203 vec4_instruction *inst; 204 205 /* original gen4 does type conversion to the destination type 206 * before before comparison, producing garbage results for floating 207 * point comparisons. 208 */ 209 if (intel->gen == 4) { 210 dst.type = src0.type; 211 if (dst.file == HW_REG) 212 dst.fixed_hw_reg.type = dst.type; 213 } 214 215 resolve_ud_negate(&src0); 216 resolve_ud_negate(&src1); 217 218 inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_CMP, dst, src0, src1); 219 inst->conditional_mod = condition; 220 221 return inst; 222} 223 224vec4_instruction * 225vec4_visitor::SCRATCH_READ(dst_reg dst, src_reg index) 226{ 227 vec4_instruction *inst; 228 229 inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_READ, 230 dst, index); 231 inst->base_mrf = 14; 232 inst->mlen = 1; 233 234 return inst; 235} 236 237vec4_instruction * 238vec4_visitor::SCRATCH_WRITE(dst_reg dst, src_reg src, src_reg index) 239{ 240 vec4_instruction *inst; 241 242 inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_WRITE, 243 dst, src, index); 244 inst->base_mrf = 13; 245 inst->mlen = 2; 246 247 return inst; 248} 249 250void 251vec4_visitor::emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements) 252{ 253 static enum opcode dot_opcodes[] = { 254 BRW_OPCODE_DP2, BRW_OPCODE_DP3, BRW_OPCODE_DP4 255 }; 256 257 emit(dot_opcodes[elements - 2], dst, src0, src1); 258} 259 260void 261vec4_visitor::emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src) 262{ 263 /* The gen6 math instruction ignores the source modifiers -- 264 * swizzle, abs, negate, and at least some parts of the register 265 * region description. 266 * 267 * While it would seem that this MOV could be avoided at this point 268 * in the case that the swizzle is matched up with the destination 269 * writemask, note that uniform packing and register allocation 270 * could rearrange our swizzle, so let's leave this matter up to 271 * copy propagation later. 272 */ 273 src_reg temp_src = src_reg(this, glsl_type::vec4_type); 274 emit(MOV(dst_reg(temp_src), src)); 275 276 if (dst.writemask != WRITEMASK_XYZW) { 277 /* The gen6 math instruction must be align1, so we can't do 278 * writemasks. 279 */ 280 dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type); 281 282 emit(opcode, temp_dst, temp_src); 283 284 emit(MOV(dst, src_reg(temp_dst))); 285 } else { 286 emit(opcode, dst, temp_src); 287 } 288} 289 290void 291vec4_visitor::emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src) 292{ 293 vec4_instruction *inst = emit(opcode, dst, src); 294 inst->base_mrf = 1; 295 inst->mlen = 1; 296} 297 298void 299vec4_visitor::emit_math(opcode opcode, dst_reg dst, src_reg src) 300{ 301 switch (opcode) { 302 case SHADER_OPCODE_RCP: 303 case SHADER_OPCODE_RSQ: 304 case SHADER_OPCODE_SQRT: 305 case SHADER_OPCODE_EXP2: 306 case SHADER_OPCODE_LOG2: 307 case SHADER_OPCODE_SIN: 308 case SHADER_OPCODE_COS: 309 break; 310 default: 311 assert(!"not reached: bad math opcode"); 312 return; 313 } 314 315 if (intel->gen >= 6) { 316 return emit_math1_gen6(opcode, dst, src); 317 } else { 318 return emit_math1_gen4(opcode, dst, src); 319 } 320} 321 322void 323vec4_visitor::emit_math2_gen6(enum opcode opcode, 324 dst_reg dst, src_reg src0, src_reg src1) 325{ 326 src_reg expanded; 327 328 /* The gen6 math instruction ignores the source modifiers -- 329 * swizzle, abs, negate, and at least some parts of the register 330 * region description. Move the sources to temporaries to make it 331 * generally work. 332 */ 333 334 expanded = src_reg(this, glsl_type::vec4_type); 335 expanded.type = src0.type; 336 emit(MOV(dst_reg(expanded), src0)); 337 src0 = expanded; 338 339 expanded = src_reg(this, glsl_type::vec4_type); 340 expanded.type = src1.type; 341 emit(MOV(dst_reg(expanded), src1)); 342 src1 = expanded; 343 344 if (dst.writemask != WRITEMASK_XYZW) { 345 /* The gen6 math instruction must be align1, so we can't do 346 * writemasks. 347 */ 348 dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type); 349 temp_dst.type = dst.type; 350 351 emit(opcode, temp_dst, src0, src1); 352 353 emit(MOV(dst, src_reg(temp_dst))); 354 } else { 355 emit(opcode, dst, src0, src1); 356 } 357} 358 359void 360vec4_visitor::emit_math2_gen4(enum opcode opcode, 361 dst_reg dst, src_reg src0, src_reg src1) 362{ 363 vec4_instruction *inst = emit(opcode, dst, src0, src1); 364 inst->base_mrf = 1; 365 inst->mlen = 2; 366} 367 368void 369vec4_visitor::emit_math(enum opcode opcode, 370 dst_reg dst, src_reg src0, src_reg src1) 371{ 372 switch (opcode) { 373 case SHADER_OPCODE_POW: 374 case SHADER_OPCODE_INT_QUOTIENT: 375 case SHADER_OPCODE_INT_REMAINDER: 376 break; 377 default: 378 assert(!"not reached: unsupported binary math opcode"); 379 return; 380 } 381 382 if (intel->gen >= 6) { 383 return emit_math2_gen6(opcode, dst, src0, src1); 384 } else { 385 return emit_math2_gen4(opcode, dst, src0, src1); 386 } 387} 388 389void 390vec4_visitor::visit_instructions(const exec_list *list) 391{ 392 foreach_list(node, list) { 393 ir_instruction *ir = (ir_instruction *)node; 394 395 base_ir = ir; 396 ir->accept(this); 397 } 398} 399 400 401static int 402type_size(const struct glsl_type *type) 403{ 404 unsigned int i; 405 int size; 406 407 switch (type->base_type) { 408 case GLSL_TYPE_UINT: 409 case GLSL_TYPE_INT: 410 case GLSL_TYPE_FLOAT: 411 case GLSL_TYPE_BOOL: 412 if (type->is_matrix()) { 413 return type->matrix_columns; 414 } else { 415 /* Regardless of size of vector, it gets a vec4. This is bad 416 * packing for things like floats, but otherwise arrays become a 417 * mess. Hopefully a later pass over the code can pack scalars 418 * down if appropriate. 419 */ 420 return 1; 421 } 422 case GLSL_TYPE_ARRAY: 423 assert(type->length > 0); 424 return type_size(type->fields.array) * type->length; 425 case GLSL_TYPE_STRUCT: 426 size = 0; 427 for (i = 0; i < type->length; i++) { 428 size += type_size(type->fields.structure[i].type); 429 } 430 return size; 431 case GLSL_TYPE_SAMPLER: 432 /* Samplers take up one slot in UNIFORMS[], but they're baked in 433 * at link time. 434 */ 435 return 1; 436 default: 437 assert(0); 438 return 0; 439 } 440} 441 442int 443vec4_visitor::virtual_grf_alloc(int size) 444{ 445 if (virtual_grf_array_size <= virtual_grf_count) { 446 if (virtual_grf_array_size == 0) 447 virtual_grf_array_size = 16; 448 else 449 virtual_grf_array_size *= 2; 450 virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int, 451 virtual_grf_array_size); 452 virtual_grf_reg_map = reralloc(mem_ctx, virtual_grf_reg_map, int, 453 virtual_grf_array_size); 454 } 455 virtual_grf_reg_map[virtual_grf_count] = virtual_grf_reg_count; 456 virtual_grf_reg_count += size; 457 virtual_grf_sizes[virtual_grf_count] = size; 458 return virtual_grf_count++; 459} 460 461src_reg::src_reg(class vec4_visitor *v, const struct glsl_type *type) 462{ 463 init(); 464 465 this->file = GRF; 466 this->reg = v->virtual_grf_alloc(type_size(type)); 467 468 if (type->is_array() || type->is_record()) { 469 this->swizzle = BRW_SWIZZLE_NOOP; 470 } else { 471 this->swizzle = swizzle_for_size(type->vector_elements); 472 } 473 474 this->type = brw_type_for_base_type(type); 475} 476 477dst_reg::dst_reg(class vec4_visitor *v, const struct glsl_type *type) 478{ 479 init(); 480 481 this->file = GRF; 482 this->reg = v->virtual_grf_alloc(type_size(type)); 483 484 if (type->is_array() || type->is_record()) { 485 this->writemask = WRITEMASK_XYZW; 486 } else { 487 this->writemask = (1 << type->vector_elements) - 1; 488 } 489 490 this->type = brw_type_for_base_type(type); 491} 492 493/* Our support for uniforms is piggy-backed on the struct 494 * gl_fragment_program, because that's where the values actually 495 * get stored, rather than in some global gl_shader_program uniform 496 * store. 497 */ 498int 499vec4_visitor::setup_uniform_values(int loc, const glsl_type *type) 500{ 501 unsigned int offset = 0; 502 float *values = &this->vp->Base.Parameters->ParameterValues[loc][0].f; 503 504 if (type->is_matrix()) { 505 const glsl_type *column = type->column_type(); 506 507 for (unsigned int i = 0; i < type->matrix_columns; i++) { 508 offset += setup_uniform_values(loc + offset, column); 509 } 510 511 return offset; 512 } 513 514 switch (type->base_type) { 515 case GLSL_TYPE_FLOAT: 516 case GLSL_TYPE_UINT: 517 case GLSL_TYPE_INT: 518 case GLSL_TYPE_BOOL: 519 for (unsigned int i = 0; i < type->vector_elements; i++) { 520 c->prog_data.param[this->uniforms * 4 + i] = &values[i]; 521 } 522 523 /* Set up pad elements to get things aligned to a vec4 boundary. */ 524 for (unsigned int i = type->vector_elements; i < 4; i++) { 525 static float zero = 0; 526 527 c->prog_data.param[this->uniforms * 4 + i] = &zero; 528 } 529 530 /* Track the size of this uniform vector, for future packing of 531 * uniforms. 532 */ 533 this->uniform_vector_size[this->uniforms] = type->vector_elements; 534 this->uniforms++; 535 536 return 1; 537 538 case GLSL_TYPE_STRUCT: 539 for (unsigned int i = 0; i < type->length; i++) { 540 offset += setup_uniform_values(loc + offset, 541 type->fields.structure[i].type); 542 } 543 return offset; 544 545 case GLSL_TYPE_ARRAY: 546 for (unsigned int i = 0; i < type->length; i++) { 547 offset += setup_uniform_values(loc + offset, type->fields.array); 548 } 549 return offset; 550 551 case GLSL_TYPE_SAMPLER: 552 /* The sampler takes up a slot, but we don't use any values from it. */ 553 return 1; 554 555 default: 556 assert(!"not reached"); 557 return 0; 558 } 559} 560 561void 562vec4_visitor::setup_uniform_clipplane_values() 563{ 564 gl_clip_plane *clip_planes = brw_select_clip_planes(ctx); 565 566 /* Pre-Gen6, we compact clip planes. For example, if the user 567 * enables just clip planes 0, 1, and 3, we will enable clip planes 568 * 0, 1, and 2 in the hardware, and we'll move clip plane 3 to clip 569 * plane 2. This simplifies the implementation of the Gen6 clip 570 * thread. 571 * 572 * In Gen6 and later, we don't compact clip planes, because this 573 * simplifies the implementation of gl_ClipDistance. 574 */ 575 int compacted_clipplane_index = 0; 576 for (int i = 0; i < c->key.nr_userclip_plane_consts; ++i) { 577 if (intel->gen < 6 && 578 !(c->key.userclip_planes_enabled_gen_4_5 & (1 << i))) { 579 continue; 580 } 581 this->uniform_vector_size[this->uniforms] = 4; 582 this->userplane[compacted_clipplane_index] = dst_reg(UNIFORM, this->uniforms); 583 this->userplane[compacted_clipplane_index].type = BRW_REGISTER_TYPE_F; 584 for (int j = 0; j < 4; ++j) { 585 c->prog_data.param[this->uniforms * 4 + j] = &clip_planes[i][j]; 586 } 587 ++compacted_clipplane_index; 588 ++this->uniforms; 589 } 590} 591 592/* Our support for builtin uniforms is even scarier than non-builtin. 593 * It sits on top of the PROG_STATE_VAR parameters that are 594 * automatically updated from GL context state. 595 */ 596void 597vec4_visitor::setup_builtin_uniform_values(ir_variable *ir) 598{ 599 const ir_state_slot *const slots = ir->state_slots; 600 assert(ir->state_slots != NULL); 601 602 for (unsigned int i = 0; i < ir->num_state_slots; i++) { 603 /* This state reference has already been setup by ir_to_mesa, 604 * but we'll get the same index back here. We can reference 605 * ParameterValues directly, since unlike brw_fs.cpp, we never 606 * add new state references during compile. 607 */ 608 int index = _mesa_add_state_reference(this->vp->Base.Parameters, 609 (gl_state_index *)slots[i].tokens); 610 float *values = &this->vp->Base.Parameters->ParameterValues[index][0].f; 611 612 this->uniform_vector_size[this->uniforms] = 0; 613 /* Add each of the unique swizzled channels of the element. 614 * This will end up matching the size of the glsl_type of this field. 615 */ 616 int last_swiz = -1; 617 for (unsigned int j = 0; j < 4; j++) { 618 int swiz = GET_SWZ(slots[i].swizzle, j); 619 last_swiz = swiz; 620 621 c->prog_data.param[this->uniforms * 4 + j] = &values[swiz]; 622 if (swiz <= last_swiz) 623 this->uniform_vector_size[this->uniforms]++; 624 } 625 this->uniforms++; 626 } 627} 628 629dst_reg * 630vec4_visitor::variable_storage(ir_variable *var) 631{ 632 return (dst_reg *)hash_table_find(this->variable_ht, var); 633} 634 635void 636vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate) 637{ 638 ir_expression *expr = ir->as_expression(); 639 640 *predicate = BRW_PREDICATE_NORMAL; 641 642 if (expr) { 643 src_reg op[2]; 644 vec4_instruction *inst; 645 646 assert(expr->get_num_operands() <= 2); 647 for (unsigned int i = 0; i < expr->get_num_operands(); i++) { 648 expr->operands[i]->accept(this); 649 op[i] = this->result; 650 651 resolve_ud_negate(&op[i]); 652 } 653 654 switch (expr->operation) { 655 case ir_unop_logic_not: 656 inst = emit(AND(dst_null_d(), op[0], src_reg(1))); 657 inst->conditional_mod = BRW_CONDITIONAL_Z; 658 break; 659 660 case ir_binop_logic_xor: 661 inst = emit(XOR(dst_null_d(), op[0], op[1])); 662 inst->conditional_mod = BRW_CONDITIONAL_NZ; 663 break; 664 665 case ir_binop_logic_or: 666 inst = emit(OR(dst_null_d(), op[0], op[1])); 667 inst->conditional_mod = BRW_CONDITIONAL_NZ; 668 break; 669 670 case ir_binop_logic_and: 671 inst = emit(AND(dst_null_d(), op[0], op[1])); 672 inst->conditional_mod = BRW_CONDITIONAL_NZ; 673 break; 674 675 case ir_unop_f2b: 676 if (intel->gen >= 6) { 677 emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ)); 678 } else { 679 inst = emit(MOV(dst_null_f(), op[0])); 680 inst->conditional_mod = BRW_CONDITIONAL_NZ; 681 } 682 break; 683 684 case ir_unop_i2b: 685 if (intel->gen >= 6) { 686 emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 687 } else { 688 inst = emit(MOV(dst_null_d(), op[0])); 689 inst->conditional_mod = BRW_CONDITIONAL_NZ; 690 } 691 break; 692 693 case ir_binop_all_equal: 694 inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z)); 695 *predicate = BRW_PREDICATE_ALIGN16_ALL4H; 696 break; 697 698 case ir_binop_any_nequal: 699 inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ)); 700 *predicate = BRW_PREDICATE_ALIGN16_ANY4H; 701 break; 702 703 case ir_unop_any: 704 inst = emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 705 *predicate = BRW_PREDICATE_ALIGN16_ANY4H; 706 break; 707 708 case ir_binop_greater: 709 case ir_binop_gequal: 710 case ir_binop_less: 711 case ir_binop_lequal: 712 case ir_binop_equal: 713 case ir_binop_nequal: 714 emit(CMP(dst_null_d(), op[0], op[1], 715 brw_conditional_for_comparison(expr->operation))); 716 break; 717 718 default: 719 assert(!"not reached"); 720 break; 721 } 722 return; 723 } 724 725 ir->accept(this); 726 727 resolve_ud_negate(&this->result); 728 729 if (intel->gen >= 6) { 730 vec4_instruction *inst = emit(AND(dst_null_d(), 731 this->result, src_reg(1))); 732 inst->conditional_mod = BRW_CONDITIONAL_NZ; 733 } else { 734 vec4_instruction *inst = emit(MOV(dst_null_d(), this->result)); 735 inst->conditional_mod = BRW_CONDITIONAL_NZ; 736 } 737} 738 739/** 740 * Emit a gen6 IF statement with the comparison folded into the IF 741 * instruction. 742 */ 743void 744vec4_visitor::emit_if_gen6(ir_if *ir) 745{ 746 ir_expression *expr = ir->condition->as_expression(); 747 748 if (expr) { 749 src_reg op[2]; 750 dst_reg temp; 751 752 assert(expr->get_num_operands() <= 2); 753 for (unsigned int i = 0; i < expr->get_num_operands(); i++) { 754 expr->operands[i]->accept(this); 755 op[i] = this->result; 756 } 757 758 switch (expr->operation) { 759 case ir_unop_logic_not: 760 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_Z)); 761 return; 762 763 case ir_binop_logic_xor: 764 emit(IF(op[0], op[1], BRW_CONDITIONAL_NZ)); 765 return; 766 767 case ir_binop_logic_or: 768 temp = dst_reg(this, glsl_type::bool_type); 769 emit(OR(temp, op[0], op[1])); 770 emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ)); 771 return; 772 773 case ir_binop_logic_and: 774 temp = dst_reg(this, glsl_type::bool_type); 775 emit(AND(temp, op[0], op[1])); 776 emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ)); 777 return; 778 779 case ir_unop_f2b: 780 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 781 return; 782 783 case ir_unop_i2b: 784 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 785 return; 786 787 case ir_binop_greater: 788 case ir_binop_gequal: 789 case ir_binop_less: 790 case ir_binop_lequal: 791 case ir_binop_equal: 792 case ir_binop_nequal: 793 emit(IF(op[0], op[1], 794 brw_conditional_for_comparison(expr->operation))); 795 return; 796 797 case ir_binop_all_equal: 798 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z)); 799 emit(IF(BRW_PREDICATE_ALIGN16_ALL4H)); 800 return; 801 802 case ir_binop_any_nequal: 803 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ)); 804 emit(IF(BRW_PREDICATE_ALIGN16_ANY4H)); 805 return; 806 807 case ir_unop_any: 808 emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 809 emit(IF(BRW_PREDICATE_ALIGN16_ANY4H)); 810 return; 811 812 default: 813 assert(!"not reached"); 814 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 815 return; 816 } 817 return; 818 } 819 820 ir->condition->accept(this); 821 822 emit(IF(this->result, src_reg(0), BRW_CONDITIONAL_NZ)); 823} 824 825void 826vec4_visitor::visit(ir_variable *ir) 827{ 828 dst_reg *reg = NULL; 829 830 if (variable_storage(ir)) 831 return; 832 833 switch (ir->mode) { 834 case ir_var_in: 835 reg = new(mem_ctx) dst_reg(ATTR, ir->location); 836 837 /* Do GL_FIXED rescaling for GLES2.0. Our GL_FIXED attributes 838 * come in as floating point conversions of the integer values. 839 */ 840 for (int i = ir->location; i < ir->location + type_size(ir->type); i++) { 841 if (!c->key.gl_fixed_input_size[i]) 842 continue; 843 844 dst_reg dst = *reg; 845 dst.type = brw_type_for_base_type(ir->type); 846 dst.writemask = (1 << c->key.gl_fixed_input_size[i]) - 1; 847 emit(MUL(dst, src_reg(dst), src_reg(1.0f / 65536.0f))); 848 } 849 break; 850 851 case ir_var_out: 852 reg = new(mem_ctx) dst_reg(this, ir->type); 853 854 for (int i = 0; i < type_size(ir->type); i++) { 855 output_reg[ir->location + i] = *reg; 856 output_reg[ir->location + i].reg_offset = i; 857 output_reg[ir->location + i].type = 858 brw_type_for_base_type(ir->type->get_scalar_type()); 859 output_reg_annotation[ir->location + i] = ir->name; 860 } 861 break; 862 863 case ir_var_auto: 864 case ir_var_temporary: 865 reg = new(mem_ctx) dst_reg(this, ir->type); 866 break; 867 868 case ir_var_uniform: 869 reg = new(this->mem_ctx) dst_reg(UNIFORM, this->uniforms); 870 871 /* Track how big the whole uniform variable is, in case we need to put a 872 * copy of its data into pull constants for array access. 873 */ 874 this->uniform_size[this->uniforms] = type_size(ir->type); 875 876 if (!strncmp(ir->name, "gl_", 3)) { 877 setup_builtin_uniform_values(ir); 878 } else { 879 setup_uniform_values(ir->location, ir->type); 880 } 881 break; 882 883 default: 884 assert(!"not reached"); 885 } 886 887 reg->type = brw_type_for_base_type(ir->type); 888 hash_table_insert(this->variable_ht, reg, ir); 889} 890 891void 892vec4_visitor::visit(ir_loop *ir) 893{ 894 dst_reg counter; 895 896 /* We don't want debugging output to print the whole body of the 897 * loop as the annotation. 898 */ 899 this->base_ir = NULL; 900 901 if (ir->counter != NULL) { 902 this->base_ir = ir->counter; 903 ir->counter->accept(this); 904 counter = *(variable_storage(ir->counter)); 905 906 if (ir->from != NULL) { 907 this->base_ir = ir->from; 908 ir->from->accept(this); 909 910 emit(MOV(counter, this->result)); 911 } 912 } 913 914 emit(BRW_OPCODE_DO); 915 916 if (ir->to) { 917 this->base_ir = ir->to; 918 ir->to->accept(this); 919 920 emit(CMP(dst_null_d(), src_reg(counter), this->result, 921 brw_conditional_for_comparison(ir->cmp))); 922 923 vec4_instruction *inst = emit(BRW_OPCODE_BREAK); 924 inst->predicate = BRW_PREDICATE_NORMAL; 925 } 926 927 visit_instructions(&ir->body_instructions); 928 929 930 if (ir->increment) { 931 this->base_ir = ir->increment; 932 ir->increment->accept(this); 933 emit(ADD(counter, src_reg(counter), this->result)); 934 } 935 936 emit(BRW_OPCODE_WHILE); 937} 938 939void 940vec4_visitor::visit(ir_loop_jump *ir) 941{ 942 switch (ir->mode) { 943 case ir_loop_jump::jump_break: 944 emit(BRW_OPCODE_BREAK); 945 break; 946 case ir_loop_jump::jump_continue: 947 emit(BRW_OPCODE_CONTINUE); 948 break; 949 } 950} 951 952 953void 954vec4_visitor::visit(ir_function_signature *ir) 955{ 956 assert(0); 957 (void)ir; 958} 959 960void 961vec4_visitor::visit(ir_function *ir) 962{ 963 /* Ignore function bodies other than main() -- we shouldn't see calls to 964 * them since they should all be inlined. 965 */ 966 if (strcmp(ir->name, "main") == 0) { 967 const ir_function_signature *sig; 968 exec_list empty; 969 970 sig = ir->matching_signature(&empty); 971 972 assert(sig); 973 974 visit_instructions(&sig->body); 975 } 976} 977 978bool 979vec4_visitor::try_emit_sat(ir_expression *ir) 980{ 981 ir_rvalue *sat_src = ir->as_rvalue_to_saturate(); 982 if (!sat_src) 983 return false; 984 985 sat_src->accept(this); 986 src_reg src = this->result; 987 988 this->result = src_reg(this, ir->type); 989 vec4_instruction *inst; 990 inst = emit(MOV(dst_reg(this->result), src)); 991 inst->saturate = true; 992 993 return true; 994} 995 996void 997vec4_visitor::emit_bool_comparison(unsigned int op, 998 dst_reg dst, src_reg src0, src_reg src1) 999{ 1000 /* original gen4 does destination conversion before comparison. */ 1001 if (intel->gen < 5) 1002 dst.type = src0.type; 1003 1004 emit(CMP(dst, src0, src1, brw_conditional_for_comparison(op))); 1005 1006 dst.type = BRW_REGISTER_TYPE_D; 1007 emit(AND(dst, src_reg(dst), src_reg(0x1))); 1008} 1009 1010void 1011vec4_visitor::visit(ir_expression *ir) 1012{ 1013 unsigned int operand; 1014 src_reg op[Elements(ir->operands)]; 1015 src_reg result_src; 1016 dst_reg result_dst; 1017 vec4_instruction *inst; 1018 1019 if (try_emit_sat(ir)) 1020 return; 1021 1022 for (operand = 0; operand < ir->get_num_operands(); operand++) { 1023 this->result.file = BAD_FILE; 1024 ir->operands[operand]->accept(this); 1025 if (this->result.file == BAD_FILE) { 1026 printf("Failed to get tree for expression operand:\n"); 1027 ir->operands[operand]->print(); 1028 exit(1); 1029 } 1030 op[operand] = this->result; 1031 1032 /* Matrix expression operands should have been broken down to vector 1033 * operations already. 1034 */ 1035 assert(!ir->operands[operand]->type->is_matrix()); 1036 } 1037 1038 int vector_elements = ir->operands[0]->type->vector_elements; 1039 if (ir->operands[1]) { 1040 vector_elements = MAX2(vector_elements, 1041 ir->operands[1]->type->vector_elements); 1042 } 1043 1044 this->result.file = BAD_FILE; 1045 1046 /* Storage for our result. Ideally for an assignment we'd be using 1047 * the actual storage for the result here, instead. 1048 */ 1049 result_src = src_reg(this, ir->type); 1050 /* convenience for the emit functions below. */ 1051 result_dst = dst_reg(result_src); 1052 /* If nothing special happens, this is the result. */ 1053 this->result = result_src; 1054 /* Limit writes to the channels that will be used by result_src later. 1055 * This does limit this temp's use as a temporary for multi-instruction 1056 * sequences. 1057 */ 1058 result_dst.writemask = (1 << ir->type->vector_elements) - 1; 1059 1060 switch (ir->operation) { 1061 case ir_unop_logic_not: 1062 /* Note that BRW_OPCODE_NOT is not appropriate here, since it is 1063 * ones complement of the whole register, not just bit 0. 1064 */ 1065 emit(XOR(result_dst, op[0], src_reg(1))); 1066 break; 1067 case ir_unop_neg: 1068 op[0].negate = !op[0].negate; 1069 this->result = op[0]; 1070 break; 1071 case ir_unop_abs: 1072 op[0].abs = true; 1073 op[0].negate = false; 1074 this->result = op[0]; 1075 break; 1076 1077 case ir_unop_sign: 1078 emit(MOV(result_dst, src_reg(0.0f))); 1079 1080 emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_G)); 1081 inst = emit(MOV(result_dst, src_reg(1.0f))); 1082 inst->predicate = BRW_PREDICATE_NORMAL; 1083 1084 emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_L)); 1085 inst = emit(MOV(result_dst, src_reg(-1.0f))); 1086 inst->predicate = BRW_PREDICATE_NORMAL; 1087 1088 break; 1089 1090 case ir_unop_rcp: 1091 emit_math(SHADER_OPCODE_RCP, result_dst, op[0]); 1092 break; 1093 1094 case ir_unop_exp2: 1095 emit_math(SHADER_OPCODE_EXP2, result_dst, op[0]); 1096 break; 1097 case ir_unop_log2: 1098 emit_math(SHADER_OPCODE_LOG2, result_dst, op[0]); 1099 break; 1100 case ir_unop_exp: 1101 case ir_unop_log: 1102 assert(!"not reached: should be handled by ir_explog_to_explog2"); 1103 break; 1104 case ir_unop_sin: 1105 case ir_unop_sin_reduced: 1106 emit_math(SHADER_OPCODE_SIN, result_dst, op[0]); 1107 break; 1108 case ir_unop_cos: 1109 case ir_unop_cos_reduced: 1110 emit_math(SHADER_OPCODE_COS, result_dst, op[0]); 1111 break; 1112 1113 case ir_unop_dFdx: 1114 case ir_unop_dFdy: 1115 assert(!"derivatives not valid in vertex shader"); 1116 break; 1117 1118 case ir_unop_noise: 1119 assert(!"not reached: should be handled by lower_noise"); 1120 break; 1121 1122 case ir_binop_add: 1123 emit(ADD(result_dst, op[0], op[1])); 1124 break; 1125 case ir_binop_sub: 1126 assert(!"not reached: should be handled by ir_sub_to_add_neg"); 1127 break; 1128 1129 case ir_binop_mul: 1130 if (ir->type->is_integer()) { 1131 /* For integer multiplication, the MUL uses the low 16 bits 1132 * of one of the operands (src0 on gen6, src1 on gen7). The 1133 * MACH accumulates in the contribution of the upper 16 bits 1134 * of that operand. 1135 * 1136 * FINISHME: Emit just the MUL if we know an operand is small 1137 * enough. 1138 */ 1139 struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D); 1140 1141 emit(MUL(acc, op[0], op[1])); 1142 emit(MACH(dst_null_d(), op[0], op[1])); 1143 emit(MOV(result_dst, src_reg(acc))); 1144 } else { 1145 emit(MUL(result_dst, op[0], op[1])); 1146 } 1147 break; 1148 case ir_binop_div: 1149 /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */ 1150 assert(ir->type->is_integer()); 1151 emit_math(SHADER_OPCODE_INT_QUOTIENT, result_dst, op[0], op[1]); 1152 break; 1153 case ir_binop_mod: 1154 /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */ 1155 assert(ir->type->is_integer()); 1156 emit_math(SHADER_OPCODE_INT_REMAINDER, result_dst, op[0], op[1]); 1157 break; 1158 1159 case ir_binop_less: 1160 case ir_binop_greater: 1161 case ir_binop_lequal: 1162 case ir_binop_gequal: 1163 case ir_binop_equal: 1164 case ir_binop_nequal: { 1165 emit(CMP(result_dst, op[0], op[1], 1166 brw_conditional_for_comparison(ir->operation))); 1167 emit(AND(result_dst, result_src, src_reg(0x1))); 1168 break; 1169 } 1170 1171 case ir_binop_all_equal: 1172 /* "==" operator producing a scalar boolean. */ 1173 if (ir->operands[0]->type->is_vector() || 1174 ir->operands[1]->type->is_vector()) { 1175 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z)); 1176 emit(MOV(result_dst, src_reg(0))); 1177 inst = emit(MOV(result_dst, src_reg(1))); 1178 inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H; 1179 } else { 1180 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_Z)); 1181 emit(AND(result_dst, result_src, src_reg(0x1))); 1182 } 1183 break; 1184 case ir_binop_any_nequal: 1185 /* "!=" operator producing a scalar boolean. */ 1186 if (ir->operands[0]->type->is_vector() || 1187 ir->operands[1]->type->is_vector()) { 1188 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ)); 1189 1190 emit(MOV(result_dst, src_reg(0))); 1191 inst = emit(MOV(result_dst, src_reg(1))); 1192 inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H; 1193 } else { 1194 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_NZ)); 1195 emit(AND(result_dst, result_src, src_reg(0x1))); 1196 } 1197 break; 1198 1199 case ir_unop_any: 1200 emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 1201 emit(MOV(result_dst, src_reg(0))); 1202 1203 inst = emit(MOV(result_dst, src_reg(1))); 1204 inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H; 1205 break; 1206 1207 case ir_binop_logic_xor: 1208 emit(XOR(result_dst, op[0], op[1])); 1209 break; 1210 1211 case ir_binop_logic_or: 1212 emit(OR(result_dst, op[0], op[1])); 1213 break; 1214 1215 case ir_binop_logic_and: 1216 emit(AND(result_dst, op[0], op[1])); 1217 break; 1218 1219 case ir_binop_dot: 1220 assert(ir->operands[0]->type->is_vector()); 1221 assert(ir->operands[0]->type == ir->operands[1]->type); 1222 emit_dp(result_dst, op[0], op[1], ir->operands[0]->type->vector_elements); 1223 break; 1224 1225 case ir_unop_sqrt: 1226 emit_math(SHADER_OPCODE_SQRT, result_dst, op[0]); 1227 break; 1228 case ir_unop_rsq: 1229 emit_math(SHADER_OPCODE_RSQ, result_dst, op[0]); 1230 break; 1231 case ir_unop_i2f: 1232 case ir_unop_i2u: 1233 case ir_unop_u2i: 1234 case ir_unop_u2f: 1235 case ir_unop_b2f: 1236 case ir_unop_b2i: 1237 case ir_unop_f2i: 1238 emit(MOV(result_dst, op[0])); 1239 break; 1240 case ir_unop_f2b: 1241 case ir_unop_i2b: { 1242 emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ)); 1243 emit(AND(result_dst, result_src, src_reg(1))); 1244 break; 1245 } 1246 1247 case ir_unop_trunc: 1248 emit(RNDZ(result_dst, op[0])); 1249 break; 1250 case ir_unop_ceil: 1251 op[0].negate = !op[0].negate; 1252 inst = emit(RNDD(result_dst, op[0])); 1253 this->result.negate = true; 1254 break; 1255 case ir_unop_floor: 1256 inst = emit(RNDD(result_dst, op[0])); 1257 break; 1258 case ir_unop_fract: 1259 inst = emit(FRC(result_dst, op[0])); 1260 break; 1261 case ir_unop_round_even: 1262 emit(RNDE(result_dst, op[0])); 1263 break; 1264 1265 case ir_binop_min: 1266 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_L)); 1267 1268 inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]); 1269 inst->predicate = BRW_PREDICATE_NORMAL; 1270 break; 1271 case ir_binop_max: 1272 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_G)); 1273 1274 inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]); 1275 inst->predicate = BRW_PREDICATE_NORMAL; 1276 break; 1277 1278 case ir_binop_pow: 1279 emit_math(SHADER_OPCODE_POW, result_dst, op[0], op[1]); 1280 break; 1281 1282 case ir_unop_bit_not: 1283 inst = emit(NOT(result_dst, op[0])); 1284 break; 1285 case ir_binop_bit_and: 1286 inst = emit(AND(result_dst, op[0], op[1])); 1287 break; 1288 case ir_binop_bit_xor: 1289 inst = emit(XOR(result_dst, op[0], op[1])); 1290 break; 1291 case ir_binop_bit_or: 1292 inst = emit(OR(result_dst, op[0], op[1])); 1293 break; 1294 1295 case ir_binop_lshift: 1296 inst = emit(BRW_OPCODE_SHL, result_dst, op[0], op[1]); 1297 break; 1298 1299 case ir_binop_rshift: 1300 if (ir->type->base_type == GLSL_TYPE_INT) 1301 inst = emit(BRW_OPCODE_ASR, result_dst, op[0], op[1]); 1302 else 1303 inst = emit(BRW_OPCODE_SHR, result_dst, op[0], op[1]); 1304 break; 1305 1306 case ir_quadop_vector: 1307 assert(!"not reached: should be handled by lower_quadop_vector"); 1308 break; 1309 } 1310} 1311 1312 1313void 1314vec4_visitor::visit(ir_swizzle *ir) 1315{ 1316 src_reg src; 1317 int i = 0; 1318 int swizzle[4]; 1319 1320 /* Note that this is only swizzles in expressions, not those on the left 1321 * hand side of an assignment, which do write masking. See ir_assignment 1322 * for that. 1323 */ 1324 1325 ir->val->accept(this); 1326 src = this->result; 1327 assert(src.file != BAD_FILE); 1328 1329 for (i = 0; i < ir->type->vector_elements; i++) { 1330 switch (i) { 1331 case 0: 1332 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.x); 1333 break; 1334 case 1: 1335 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.y); 1336 break; 1337 case 2: 1338 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.z); 1339 break; 1340 case 3: 1341 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.w); 1342 break; 1343 } 1344 } 1345 for (; i < 4; i++) { 1346 /* Replicate the last channel out. */ 1347 swizzle[i] = swizzle[ir->type->vector_elements - 1]; 1348 } 1349 1350 src.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]); 1351 1352 this->result = src; 1353} 1354 1355void 1356vec4_visitor::visit(ir_dereference_variable *ir) 1357{ 1358 const struct glsl_type *type = ir->type; 1359 dst_reg *reg = variable_storage(ir->var); 1360 1361 if (!reg) { 1362 fail("Failed to find variable storage for %s\n", ir->var->name); 1363 this->result = src_reg(brw_null_reg()); 1364 return; 1365 } 1366 1367 this->result = src_reg(*reg); 1368 1369 if (type->is_scalar() || type->is_vector() || type->is_matrix()) 1370 this->result.swizzle = swizzle_for_size(type->vector_elements); 1371} 1372 1373void 1374vec4_visitor::visit(ir_dereference_array *ir) 1375{ 1376 ir_constant *constant_index; 1377 src_reg src; 1378 int element_size = type_size(ir->type); 1379 1380 constant_index = ir->array_index->constant_expression_value(); 1381 1382 ir->array->accept(this); 1383 src = this->result; 1384 1385 if (constant_index) { 1386 src.reg_offset += constant_index->value.i[0] * element_size; 1387 } else { 1388 /* Variable index array dereference. It eats the "vec4" of the 1389 * base of the array and an index that offsets the Mesa register 1390 * index. 1391 */ 1392 ir->array_index->accept(this); 1393 1394 src_reg index_reg; 1395 1396 if (element_size == 1) { 1397 index_reg = this->result; 1398 } else { 1399 index_reg = src_reg(this, glsl_type::int_type); 1400 1401 emit(MUL(dst_reg(index_reg), this->result, src_reg(element_size))); 1402 } 1403 1404 if (src.reladdr) { 1405 src_reg temp = src_reg(this, glsl_type::int_type); 1406 1407 emit(ADD(dst_reg(temp), *src.reladdr, index_reg)); 1408 1409 index_reg = temp; 1410 } 1411 1412 src.reladdr = ralloc(mem_ctx, src_reg); 1413 memcpy(src.reladdr, &index_reg, sizeof(index_reg)); 1414 } 1415 1416 /* If the type is smaller than a vec4, replicate the last channel out. */ 1417 if (ir->type->is_scalar() || ir->type->is_vector()) 1418 src.swizzle = swizzle_for_size(ir->type->vector_elements); 1419 else 1420 src.swizzle = BRW_SWIZZLE_NOOP; 1421 src.type = brw_type_for_base_type(ir->type); 1422 1423 this->result = src; 1424} 1425 1426void 1427vec4_visitor::visit(ir_dereference_record *ir) 1428{ 1429 unsigned int i; 1430 const glsl_type *struct_type = ir->record->type; 1431 int offset = 0; 1432 1433 ir->record->accept(this); 1434 1435 for (i = 0; i < struct_type->length; i++) { 1436 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0) 1437 break; 1438 offset += type_size(struct_type->fields.structure[i].type); 1439 } 1440 1441 /* If the type is smaller than a vec4, replicate the last channel out. */ 1442 if (ir->type->is_scalar() || ir->type->is_vector()) 1443 this->result.swizzle = swizzle_for_size(ir->type->vector_elements); 1444 else 1445 this->result.swizzle = BRW_SWIZZLE_NOOP; 1446 this->result.type = brw_type_for_base_type(ir->type); 1447 1448 this->result.reg_offset += offset; 1449} 1450 1451/** 1452 * We want to be careful in assignment setup to hit the actual storage 1453 * instead of potentially using a temporary like we might with the 1454 * ir_dereference handler. 1455 */ 1456static dst_reg 1457get_assignment_lhs(ir_dereference *ir, vec4_visitor *v) 1458{ 1459 /* The LHS must be a dereference. If the LHS is a variable indexed array 1460 * access of a vector, it must be separated into a series conditional moves 1461 * before reaching this point (see ir_vec_index_to_cond_assign). 1462 */ 1463 assert(ir->as_dereference()); 1464 ir_dereference_array *deref_array = ir->as_dereference_array(); 1465 if (deref_array) { 1466 assert(!deref_array->array->type->is_vector()); 1467 } 1468 1469 /* Use the rvalue deref handler for the most part. We'll ignore 1470 * swizzles in it and write swizzles using writemask, though. 1471 */ 1472 ir->accept(v); 1473 return dst_reg(v->result); 1474} 1475 1476void 1477vec4_visitor::emit_block_move(dst_reg *dst, src_reg *src, 1478 const struct glsl_type *type, uint32_t predicate) 1479{ 1480 if (type->base_type == GLSL_TYPE_STRUCT) { 1481 for (unsigned int i = 0; i < type->length; i++) { 1482 emit_block_move(dst, src, type->fields.structure[i].type, predicate); 1483 } 1484 return; 1485 } 1486 1487 if (type->is_array()) { 1488 for (unsigned int i = 0; i < type->length; i++) { 1489 emit_block_move(dst, src, type->fields.array, predicate); 1490 } 1491 return; 1492 } 1493 1494 if (type->is_matrix()) { 1495 const struct glsl_type *vec_type; 1496 1497 vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, 1498 type->vector_elements, 1); 1499 1500 for (int i = 0; i < type->matrix_columns; i++) { 1501 emit_block_move(dst, src, vec_type, predicate); 1502 } 1503 return; 1504 } 1505 1506 assert(type->is_scalar() || type->is_vector()); 1507 1508 dst->type = brw_type_for_base_type(type); 1509 src->type = dst->type; 1510 1511 dst->writemask = (1 << type->vector_elements) - 1; 1512 1513 /* Do we need to worry about swizzling a swizzle? */ 1514 assert(src->swizzle == BRW_SWIZZLE_NOOP 1515 || src->swizzle == swizzle_for_size(type->vector_elements)); 1516 src->swizzle = swizzle_for_size(type->vector_elements); 1517 1518 vec4_instruction *inst = emit(MOV(*dst, *src)); 1519 inst->predicate = predicate; 1520 1521 dst->reg_offset++; 1522 src->reg_offset++; 1523} 1524 1525 1526/* If the RHS processing resulted in an instruction generating a 1527 * temporary value, and it would be easy to rewrite the instruction to 1528 * generate its result right into the LHS instead, do so. This ends 1529 * up reliably removing instructions where it can be tricky to do so 1530 * later without real UD chain information. 1531 */ 1532bool 1533vec4_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir, 1534 dst_reg dst, 1535 src_reg src, 1536 vec4_instruction *pre_rhs_inst, 1537 vec4_instruction *last_rhs_inst) 1538{ 1539 /* This could be supported, but it would take more smarts. */ 1540 if (ir->condition) 1541 return false; 1542 1543 if (pre_rhs_inst == last_rhs_inst) 1544 return false; /* No instructions generated to work with. */ 1545 1546 /* Make sure the last instruction generated our source reg. */ 1547 if (src.file != GRF || 1548 src.file != last_rhs_inst->dst.file || 1549 src.reg != last_rhs_inst->dst.reg || 1550 src.reg_offset != last_rhs_inst->dst.reg_offset || 1551 src.reladdr || 1552 src.abs || 1553 src.negate || 1554 last_rhs_inst->predicate != BRW_PREDICATE_NONE) 1555 return false; 1556 1557 /* Check that that last instruction fully initialized the channels 1558 * we want to use, in the order we want to use them. We could 1559 * potentially reswizzle the operands of many instructions so that 1560 * we could handle out of order channels, but don't yet. 1561 */ 1562 1563 for (unsigned i = 0; i < 4; i++) { 1564 if (dst.writemask & (1 << i)) { 1565 if (!(last_rhs_inst->dst.writemask & (1 << i))) 1566 return false; 1567 1568 if (BRW_GET_SWZ(src.swizzle, i) != i) 1569 return false; 1570 } 1571 } 1572 1573 /* Success! Rewrite the instruction. */ 1574 last_rhs_inst->dst.file = dst.file; 1575 last_rhs_inst->dst.reg = dst.reg; 1576 last_rhs_inst->dst.reg_offset = dst.reg_offset; 1577 last_rhs_inst->dst.reladdr = dst.reladdr; 1578 last_rhs_inst->dst.writemask &= dst.writemask; 1579 1580 return true; 1581} 1582 1583void 1584vec4_visitor::visit(ir_assignment *ir) 1585{ 1586 dst_reg dst = get_assignment_lhs(ir->lhs, this); 1587 uint32_t predicate = BRW_PREDICATE_NONE; 1588 1589 if (!ir->lhs->type->is_scalar() && 1590 !ir->lhs->type->is_vector()) { 1591 ir->rhs->accept(this); 1592 src_reg src = this->result; 1593 1594 if (ir->condition) { 1595 emit_bool_to_cond_code(ir->condition, &predicate); 1596 } 1597 1598 emit_block_move(&dst, &src, ir->rhs->type, predicate); 1599 return; 1600 } 1601 1602 /* Now we're down to just a scalar/vector with writemasks. */ 1603 int i; 1604 1605 vec4_instruction *pre_rhs_inst, *last_rhs_inst; 1606 pre_rhs_inst = (vec4_instruction *)this->instructions.get_tail(); 1607 1608 ir->rhs->accept(this); 1609 1610 last_rhs_inst = (vec4_instruction *)this->instructions.get_tail(); 1611 1612 src_reg src = this->result; 1613 1614 int swizzles[4]; 1615 int first_enabled_chan = 0; 1616 int src_chan = 0; 1617 1618 assert(ir->lhs->type->is_vector() || 1619 ir->lhs->type->is_scalar()); 1620 dst.writemask = ir->write_mask; 1621 1622 for (int i = 0; i < 4; i++) { 1623 if (dst.writemask & (1 << i)) { 1624 first_enabled_chan = BRW_GET_SWZ(src.swizzle, i); 1625 break; 1626 } 1627 } 1628 1629 /* Swizzle a small RHS vector into the channels being written. 1630 * 1631 * glsl ir treats write_mask as dictating how many channels are 1632 * present on the RHS while in our instructions we need to make 1633 * those channels appear in the slots of the vec4 they're written to. 1634 */ 1635 for (int i = 0; i < 4; i++) { 1636 if (dst.writemask & (1 << i)) 1637 swizzles[i] = BRW_GET_SWZ(src.swizzle, src_chan++); 1638 else 1639 swizzles[i] = first_enabled_chan; 1640 } 1641 src.swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1], 1642 swizzles[2], swizzles[3]); 1643 1644 if (try_rewrite_rhs_to_dst(ir, dst, src, pre_rhs_inst, last_rhs_inst)) { 1645 return; 1646 } 1647 1648 if (ir->condition) { 1649 emit_bool_to_cond_code(ir->condition, &predicate); 1650 } 1651 1652 for (i = 0; i < type_size(ir->lhs->type); i++) { 1653 vec4_instruction *inst = emit(MOV(dst, src)); 1654 inst->predicate = predicate; 1655 1656 dst.reg_offset++; 1657 src.reg_offset++; 1658 } 1659} 1660 1661void 1662vec4_visitor::emit_constant_values(dst_reg *dst, ir_constant *ir) 1663{ 1664 if (ir->type->base_type == GLSL_TYPE_STRUCT) { 1665 foreach_list(node, &ir->components) { 1666 ir_constant *field_value = (ir_constant *)node; 1667 1668 emit_constant_values(dst, field_value); 1669 } 1670 return; 1671 } 1672 1673 if (ir->type->is_array()) { 1674 for (unsigned int i = 0; i < ir->type->length; i++) { 1675 emit_constant_values(dst, ir->array_elements[i]); 1676 } 1677 return; 1678 } 1679 1680 if (ir->type->is_matrix()) { 1681 for (int i = 0; i < ir->type->matrix_columns; i++) { 1682 for (int j = 0; j < ir->type->vector_elements; j++) { 1683 dst->writemask = 1 << j; 1684 dst->type = BRW_REGISTER_TYPE_F; 1685 1686 emit(MOV(*dst, 1687 src_reg(ir->value.f[i * ir->type->vector_elements + j]))); 1688 } 1689 dst->reg_offset++; 1690 } 1691 return; 1692 } 1693 1694 for (int i = 0; i < ir->type->vector_elements; i++) { 1695 dst->writemask = 1 << i; 1696 dst->type = brw_type_for_base_type(ir->type); 1697 1698 switch (ir->type->base_type) { 1699 case GLSL_TYPE_FLOAT: 1700 emit(MOV(*dst, src_reg(ir->value.f[i]))); 1701 break; 1702 case GLSL_TYPE_INT: 1703 emit(MOV(*dst, src_reg(ir->value.i[i]))); 1704 break; 1705 case GLSL_TYPE_UINT: 1706 emit(MOV(*dst, src_reg(ir->value.u[i]))); 1707 break; 1708 case GLSL_TYPE_BOOL: 1709 emit(MOV(*dst, src_reg(ir->value.b[i]))); 1710 break; 1711 default: 1712 assert(!"Non-float/uint/int/bool constant"); 1713 break; 1714 } 1715 } 1716 dst->reg_offset++; 1717} 1718 1719void 1720vec4_visitor::visit(ir_constant *ir) 1721{ 1722 dst_reg dst = dst_reg(this, ir->type); 1723 this->result = src_reg(dst); 1724 1725 emit_constant_values(&dst, ir); 1726} 1727 1728void 1729vec4_visitor::visit(ir_call *ir) 1730{ 1731 assert(!"not reached"); 1732} 1733 1734void 1735vec4_visitor::visit(ir_texture *ir) 1736{ 1737 /* FINISHME: Implement vertex texturing. 1738 * 1739 * With 0 vertex samplers available, the linker will reject 1740 * programs that do vertex texturing, but after our visitor has 1741 * run. 1742 */ 1743 this->result = src_reg(this, glsl_type::vec4_type); 1744} 1745 1746void 1747vec4_visitor::visit(ir_return *ir) 1748{ 1749 assert(!"not reached"); 1750} 1751 1752void 1753vec4_visitor::visit(ir_discard *ir) 1754{ 1755 assert(!"not reached"); 1756} 1757 1758void 1759vec4_visitor::visit(ir_if *ir) 1760{ 1761 /* Don't point the annotation at the if statement, because then it plus 1762 * the then and else blocks get printed. 1763 */ 1764 this->base_ir = ir->condition; 1765 1766 if (intel->gen == 6) { 1767 emit_if_gen6(ir); 1768 } else { 1769 uint32_t predicate; 1770 emit_bool_to_cond_code(ir->condition, &predicate); 1771 emit(IF(predicate)); 1772 } 1773 1774 visit_instructions(&ir->then_instructions); 1775 1776 if (!ir->else_instructions.is_empty()) { 1777 this->base_ir = ir->condition; 1778 emit(BRW_OPCODE_ELSE); 1779 1780 visit_instructions(&ir->else_instructions); 1781 } 1782 1783 this->base_ir = ir->condition; 1784 emit(BRW_OPCODE_ENDIF); 1785} 1786 1787void 1788vec4_visitor::emit_ndc_computation() 1789{ 1790 /* Get the position */ 1791 src_reg pos = src_reg(output_reg[VERT_RESULT_HPOS]); 1792 1793 /* Build ndc coords, which are (x/w, y/w, z/w, 1/w) */ 1794 dst_reg ndc = dst_reg(this, glsl_type::vec4_type); 1795 output_reg[BRW_VERT_RESULT_NDC] = ndc; 1796 1797 current_annotation = "NDC"; 1798 dst_reg ndc_w = ndc; 1799 ndc_w.writemask = WRITEMASK_W; 1800 src_reg pos_w = pos; 1801 pos_w.swizzle = BRW_SWIZZLE4(SWIZZLE_W, SWIZZLE_W, SWIZZLE_W, SWIZZLE_W); 1802 emit_math(SHADER_OPCODE_RCP, ndc_w, pos_w); 1803 1804 dst_reg ndc_xyz = ndc; 1805 ndc_xyz.writemask = WRITEMASK_XYZ; 1806 1807 emit(MUL(ndc_xyz, pos, src_reg(ndc_w))); 1808} 1809 1810void 1811vec4_visitor::emit_psiz_and_flags(struct brw_reg reg) 1812{ 1813 if (intel->gen < 6 && 1814 ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) || 1815 c->key.userclip_active || brw->has_negative_rhw_bug)) { 1816 dst_reg header1 = dst_reg(this, glsl_type::uvec4_type); 1817 dst_reg header1_w = header1; 1818 header1_w.writemask = WRITEMASK_W; 1819 GLuint i; 1820 1821 emit(MOV(header1, 0u)); 1822 1823 if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) { 1824 src_reg psiz = src_reg(output_reg[VERT_RESULT_PSIZ]); 1825 1826 current_annotation = "Point size"; 1827 emit(MUL(header1_w, psiz, src_reg((float)(1 << 11)))); 1828 emit(AND(header1_w, src_reg(header1_w), 0x7ff << 8)); 1829 } 1830 1831 current_annotation = "Clipping flags"; 1832 for (i = 0; i < c->key.nr_userclip_plane_consts; i++) { 1833 vec4_instruction *inst; 1834 1835 inst = emit(DP4(dst_null_f(), src_reg(output_reg[VERT_RESULT_HPOS]), 1836 src_reg(this->userplane[i]))); 1837 inst->conditional_mod = BRW_CONDITIONAL_L; 1838 1839 inst = emit(OR(header1_w, src_reg(header1_w), 1u << i)); 1840 inst->predicate = BRW_PREDICATE_NORMAL; 1841 } 1842 1843 /* i965 clipping workaround: 1844 * 1) Test for -ve rhw 1845 * 2) If set, 1846 * set ndc = (0,0,0,0) 1847 * set ucp[6] = 1 1848 * 1849 * Later, clipping will detect ucp[6] and ensure the primitive is 1850 * clipped against all fixed planes. 1851 */ 1852 if (brw->has_negative_rhw_bug) { 1853#if 0 1854 /* FINISHME */ 1855 brw_CMP(p, 1856 vec8(brw_null_reg()), 1857 BRW_CONDITIONAL_L, 1858 brw_swizzle1(output_reg[BRW_VERT_RESULT_NDC], 3), 1859 brw_imm_f(0)); 1860 1861 brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6)); 1862 brw_MOV(p, output_reg[BRW_VERT_RESULT_NDC], brw_imm_f(0)); 1863 brw_set_predicate_control(p, BRW_PREDICATE_NONE); 1864#endif 1865 } 1866 1867 emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), src_reg(header1))); 1868 } else if (intel->gen < 6) { 1869 emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u)); 1870 } else { 1871 emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0))); 1872 if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) { 1873 emit(MOV(brw_writemask(reg, WRITEMASK_W), 1874 src_reg(output_reg[VERT_RESULT_PSIZ]))); 1875 } 1876 } 1877} 1878 1879void 1880vec4_visitor::emit_clip_distances(struct brw_reg reg, int offset) 1881{ 1882 if (intel->gen < 6) { 1883 /* Clip distance slots are set aside in gen5, but they are not used. It 1884 * is not clear whether we actually need to set aside space for them, 1885 * but the performance cost is negligible. 1886 */ 1887 return; 1888 } 1889 1890 /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables): 1891 * 1892 * "If a linked set of shaders forming the vertex stage contains no 1893 * static write to gl_ClipVertex or gl_ClipDistance, but the 1894 * application has requested clipping against user clip planes through 1895 * the API, then the coordinate written to gl_Position is used for 1896 * comparison against the user clip planes." 1897 * 1898 * This function is only called if the shader didn't write to 1899 * gl_ClipDistance. Accordingly, we use gl_ClipVertex to perform clipping 1900 * if the user wrote to it; otherwise we use gl_Position. 1901 */ 1902 gl_vert_result clip_vertex = VERT_RESULT_CLIP_VERTEX; 1903 if (!(c->prog_data.outputs_written 1904 & BITFIELD64_BIT(VERT_RESULT_CLIP_VERTEX))) { 1905 clip_vertex = VERT_RESULT_HPOS; 1906 } 1907 1908 for (int i = 0; i + offset < c->key.nr_userclip_plane_consts && i < 4; 1909 ++i) { 1910 emit(DP4(dst_reg(brw_writemask(reg, 1 << i)), 1911 src_reg(output_reg[clip_vertex]), 1912 src_reg(this->userplane[i + offset]))); 1913 } 1914} 1915 1916void 1917vec4_visitor::emit_generic_urb_slot(dst_reg reg, int vert_result) 1918{ 1919 assert (vert_result < VERT_RESULT_MAX); 1920 reg.type = output_reg[vert_result].type; 1921 current_annotation = output_reg_annotation[vert_result]; 1922 /* Copy the register, saturating if necessary */ 1923 vec4_instruction *inst = emit(MOV(reg, 1924 src_reg(output_reg[vert_result]))); 1925 if ((vert_result == VERT_RESULT_COL0 || 1926 vert_result == VERT_RESULT_COL1 || 1927 vert_result == VERT_RESULT_BFC0 || 1928 vert_result == VERT_RESULT_BFC1) && 1929 c->key.clamp_vertex_color) { 1930 inst->saturate = true; 1931 } 1932} 1933 1934void 1935vec4_visitor::emit_urb_slot(int mrf, int vert_result) 1936{ 1937 struct brw_reg hw_reg = brw_message_reg(mrf); 1938 dst_reg reg = dst_reg(MRF, mrf); 1939 reg.type = BRW_REGISTER_TYPE_F; 1940 1941 switch (vert_result) { 1942 case VERT_RESULT_PSIZ: 1943 /* PSIZ is always in slot 0, and is coupled with other flags. */ 1944 current_annotation = "indices, point width, clip flags"; 1945 emit_psiz_and_flags(hw_reg); 1946 break; 1947 case BRW_VERT_RESULT_NDC: 1948 current_annotation = "NDC"; 1949 emit(MOV(reg, src_reg(output_reg[BRW_VERT_RESULT_NDC]))); 1950 break; 1951 case BRW_VERT_RESULT_HPOS_DUPLICATE: 1952 case VERT_RESULT_HPOS: 1953 current_annotation = "gl_Position"; 1954 emit(MOV(reg, src_reg(output_reg[VERT_RESULT_HPOS]))); 1955 break; 1956 case VERT_RESULT_CLIP_DIST0: 1957 case VERT_RESULT_CLIP_DIST1: 1958 if (this->c->key.uses_clip_distance) { 1959 emit_generic_urb_slot(reg, vert_result); 1960 } else { 1961 current_annotation = "user clip distances"; 1962 emit_clip_distances(hw_reg, (vert_result - VERT_RESULT_CLIP_DIST0) * 4); 1963 } 1964 break; 1965 case BRW_VERT_RESULT_PAD: 1966 /* No need to write to this slot */ 1967 break; 1968 default: 1969 emit_generic_urb_slot(reg, vert_result); 1970 break; 1971 } 1972} 1973 1974static int 1975align_interleaved_urb_mlen(struct brw_context *brw, int mlen) 1976{ 1977 struct intel_context *intel = &brw->intel; 1978 1979 if (intel->gen >= 6) { 1980 /* URB data written (does not include the message header reg) must 1981 * be a multiple of 256 bits, or 2 VS registers. See vol5c.5, 1982 * section 5.4.3.2.2: URB_INTERLEAVED. 1983 * 1984 * URB entries are allocated on a multiple of 1024 bits, so an 1985 * extra 128 bits written here to make the end align to 256 is 1986 * no problem. 1987 */ 1988 if ((mlen % 2) != 1) 1989 mlen++; 1990 } 1991 1992 return mlen; 1993} 1994 1995/** 1996 * Generates the VUE payload plus the 1 or 2 URB write instructions to 1997 * complete the VS thread. 1998 * 1999 * The VUE layout is documented in Volume 2a. 2000 */ 2001void 2002vec4_visitor::emit_urb_writes() 2003{ 2004 /* MRF 0 is reserved for the debugger, so start with message header 2005 * in MRF 1. 2006 */ 2007 int base_mrf = 1; 2008 int mrf = base_mrf; 2009 /* In the process of generating our URB write message contents, we 2010 * may need to unspill a register or load from an array. Those 2011 * reads would use MRFs 14-15. 2012 */ 2013 int max_usable_mrf = 13; 2014 2015 /* The following assertion verifies that max_usable_mrf causes an 2016 * even-numbered amount of URB write data, which will meet gen6's 2017 * requirements for length alignment. 2018 */ 2019 assert ((max_usable_mrf - base_mrf) % 2 == 0); 2020 2021 /* FINISHME: edgeflag */ 2022 2023 brw_compute_vue_map(&c->vue_map, intel, c->key.userclip_active, 2024 c->prog_data.outputs_written); 2025 2026 /* First mrf is the g0-based message header containing URB handles and such, 2027 * which is implied in VS_OPCODE_URB_WRITE. 2028 */ 2029 mrf++; 2030 2031 if (intel->gen < 6) { 2032 emit_ndc_computation(); 2033 } 2034 2035 /* Set up the VUE data for the first URB write */ 2036 int slot; 2037 for (slot = 0; slot < c->vue_map.num_slots; ++slot) { 2038 emit_urb_slot(mrf++, c->vue_map.slot_to_vert_result[slot]); 2039 2040 /* If this was max_usable_mrf, we can't fit anything more into this URB 2041 * WRITE. 2042 */ 2043 if (mrf > max_usable_mrf) { 2044 slot++; 2045 break; 2046 } 2047 } 2048 2049 current_annotation = "URB write"; 2050 vec4_instruction *inst = emit(VS_OPCODE_URB_WRITE); 2051 inst->base_mrf = base_mrf; 2052 inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf); 2053 inst->eot = (slot >= c->vue_map.num_slots); 2054 2055 /* Optional second URB write */ 2056 if (!inst->eot) { 2057 mrf = base_mrf + 1; 2058 2059 for (; slot < c->vue_map.num_slots; ++slot) { 2060 assert(mrf < max_usable_mrf); 2061 2062 emit_urb_slot(mrf++, c->vue_map.slot_to_vert_result[slot]); 2063 } 2064 2065 current_annotation = "URB write"; 2066 inst = emit(VS_OPCODE_URB_WRITE); 2067 inst->base_mrf = base_mrf; 2068 inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf); 2069 inst->eot = true; 2070 /* URB destination offset. In the previous write, we got MRFs 2071 * 2-13 minus the one header MRF, so 12 regs. URB offset is in 2072 * URB row increments, and each of our MRFs is half of one of 2073 * those, since we're doing interleaved writes. 2074 */ 2075 inst->offset = (max_usable_mrf - base_mrf) / 2; 2076 } 2077 2078 if (intel->gen == 6) 2079 c->prog_data.urb_entry_size = ALIGN(c->vue_map.num_slots, 8) / 8; 2080 else 2081 c->prog_data.urb_entry_size = ALIGN(c->vue_map.num_slots, 4) / 4; 2082} 2083 2084src_reg 2085vec4_visitor::get_scratch_offset(vec4_instruction *inst, 2086 src_reg *reladdr, int reg_offset) 2087{ 2088 /* Because we store the values to scratch interleaved like our 2089 * vertex data, we need to scale the vec4 index by 2. 2090 */ 2091 int message_header_scale = 2; 2092 2093 /* Pre-gen6, the message header uses byte offsets instead of vec4 2094 * (16-byte) offset units. 2095 */ 2096 if (intel->gen < 6) 2097 message_header_scale *= 16; 2098 2099 if (reladdr) { 2100 src_reg index = src_reg(this, glsl_type::int_type); 2101 2102 emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset))); 2103 emit_before(inst, MUL(dst_reg(index), 2104 index, src_reg(message_header_scale))); 2105 2106 return index; 2107 } else { 2108 return src_reg(reg_offset * message_header_scale); 2109 } 2110} 2111 2112src_reg 2113vec4_visitor::get_pull_constant_offset(vec4_instruction *inst, 2114 src_reg *reladdr, int reg_offset) 2115{ 2116 if (reladdr) { 2117 src_reg index = src_reg(this, glsl_type::int_type); 2118 2119 emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset))); 2120 2121 /* Pre-gen6, the message header uses byte offsets instead of vec4 2122 * (16-byte) offset units. 2123 */ 2124 if (intel->gen < 6) { 2125 emit_before(inst, MUL(dst_reg(index), index, src_reg(16))); 2126 } 2127 2128 return index; 2129 } else { 2130 int message_header_scale = intel->gen < 6 ? 16 : 1; 2131 return src_reg(reg_offset * message_header_scale); 2132 } 2133} 2134 2135/** 2136 * Emits an instruction before @inst to load the value named by @orig_src 2137 * from scratch space at @base_offset to @temp. 2138 */ 2139void 2140vec4_visitor::emit_scratch_read(vec4_instruction *inst, 2141 dst_reg temp, src_reg orig_src, 2142 int base_offset) 2143{ 2144 int reg_offset = base_offset + orig_src.reg_offset; 2145 src_reg index = get_scratch_offset(inst, orig_src.reladdr, reg_offset); 2146 2147 emit_before(inst, SCRATCH_READ(temp, index)); 2148} 2149 2150/** 2151 * Emits an instruction after @inst to store the value to be written 2152 * to @orig_dst to scratch space at @base_offset, from @temp. 2153 */ 2154void 2155vec4_visitor::emit_scratch_write(vec4_instruction *inst, 2156 src_reg temp, dst_reg orig_dst, 2157 int base_offset) 2158{ 2159 int reg_offset = base_offset + orig_dst.reg_offset; 2160 src_reg index = get_scratch_offset(inst, orig_dst.reladdr, reg_offset); 2161 2162 dst_reg dst = dst_reg(brw_writemask(brw_vec8_grf(0, 0), 2163 orig_dst.writemask)); 2164 vec4_instruction *write = SCRATCH_WRITE(dst, temp, index); 2165 write->predicate = inst->predicate; 2166 write->ir = inst->ir; 2167 write->annotation = inst->annotation; 2168 inst->insert_after(write); 2169} 2170 2171/** 2172 * We can't generally support array access in GRF space, because a 2173 * single instruction's destination can only span 2 contiguous 2174 * registers. So, we send all GRF arrays that get variable index 2175 * access to scratch space. 2176 */ 2177void 2178vec4_visitor::move_grf_array_access_to_scratch() 2179{ 2180 int scratch_loc[this->virtual_grf_count]; 2181 2182 for (int i = 0; i < this->virtual_grf_count; i++) { 2183 scratch_loc[i] = -1; 2184 } 2185 2186 /* First, calculate the set of virtual GRFs that need to be punted 2187 * to scratch due to having any array access on them, and where in 2188 * scratch. 2189 */ 2190 foreach_list(node, &this->instructions) { 2191 vec4_instruction *inst = (vec4_instruction *)node; 2192 2193 if (inst->dst.file == GRF && inst->dst.reladdr && 2194 scratch_loc[inst->dst.reg] == -1) { 2195 scratch_loc[inst->dst.reg] = c->last_scratch; 2196 c->last_scratch += this->virtual_grf_sizes[inst->dst.reg] * 8 * 4; 2197 } 2198 2199 for (int i = 0 ; i < 3; i++) { 2200 src_reg *src = &inst->src[i]; 2201 2202 if (src->file == GRF && src->reladdr && 2203 scratch_loc[src->reg] == -1) { 2204 scratch_loc[src->reg] = c->last_scratch; 2205 c->last_scratch += this->virtual_grf_sizes[src->reg] * 8 * 4; 2206 } 2207 } 2208 } 2209 2210 /* Now, for anything that will be accessed through scratch, rewrite 2211 * it to load/store. Note that this is a _safe list walk, because 2212 * we may generate a new scratch_write instruction after the one 2213 * we're processing. 2214 */ 2215 foreach_list_safe(node, &this->instructions) { 2216 vec4_instruction *inst = (vec4_instruction *)node; 2217 2218 /* Set up the annotation tracking for new generated instructions. */ 2219 base_ir = inst->ir; 2220 current_annotation = inst->annotation; 2221 2222 if (inst->dst.file == GRF && scratch_loc[inst->dst.reg] != -1) { 2223 src_reg temp = src_reg(this, glsl_type::vec4_type); 2224 2225 emit_scratch_write(inst, temp, inst->dst, scratch_loc[inst->dst.reg]); 2226 2227 inst->dst.file = temp.file; 2228 inst->dst.reg = temp.reg; 2229 inst->dst.reg_offset = temp.reg_offset; 2230 inst->dst.reladdr = NULL; 2231 } 2232 2233 for (int i = 0 ; i < 3; i++) { 2234 if (inst->src[i].file != GRF || scratch_loc[inst->src[i].reg] == -1) 2235 continue; 2236 2237 dst_reg temp = dst_reg(this, glsl_type::vec4_type); 2238 2239 emit_scratch_read(inst, temp, inst->src[i], 2240 scratch_loc[inst->src[i].reg]); 2241 2242 inst->src[i].file = temp.file; 2243 inst->src[i].reg = temp.reg; 2244 inst->src[i].reg_offset = temp.reg_offset; 2245 inst->src[i].reladdr = NULL; 2246 } 2247 } 2248} 2249 2250/** 2251 * Emits an instruction before @inst to load the value named by @orig_src 2252 * from the pull constant buffer (surface) at @base_offset to @temp. 2253 */ 2254void 2255vec4_visitor::emit_pull_constant_load(vec4_instruction *inst, 2256 dst_reg temp, src_reg orig_src, 2257 int base_offset) 2258{ 2259 int reg_offset = base_offset + orig_src.reg_offset; 2260 src_reg index = get_pull_constant_offset(inst, orig_src.reladdr, reg_offset); 2261 vec4_instruction *load; 2262 2263 load = new(mem_ctx) vec4_instruction(this, VS_OPCODE_PULL_CONSTANT_LOAD, 2264 temp, index); 2265 load->base_mrf = 14; 2266 load->mlen = 1; 2267 emit_before(inst, load); 2268} 2269 2270/** 2271 * Implements array access of uniforms by inserting a 2272 * PULL_CONSTANT_LOAD instruction. 2273 * 2274 * Unlike temporary GRF array access (where we don't support it due to 2275 * the difficulty of doing relative addressing on instruction 2276 * destinations), we could potentially do array access of uniforms 2277 * that were loaded in GRF space as push constants. In real-world 2278 * usage we've seen, though, the arrays being used are always larger 2279 * than we could load as push constants, so just always move all 2280 * uniform array access out to a pull constant buffer. 2281 */ 2282void 2283vec4_visitor::move_uniform_array_access_to_pull_constants() 2284{ 2285 int pull_constant_loc[this->uniforms]; 2286 2287 for (int i = 0; i < this->uniforms; i++) { 2288 pull_constant_loc[i] = -1; 2289 } 2290 2291 /* Walk through and find array access of uniforms. Put a copy of that 2292 * uniform in the pull constant buffer. 2293 * 2294 * Note that we don't move constant-indexed accesses to arrays. No 2295 * testing has been done of the performance impact of this choice. 2296 */ 2297 foreach_list_safe(node, &this->instructions) { 2298 vec4_instruction *inst = (vec4_instruction *)node; 2299 2300 for (int i = 0 ; i < 3; i++) { 2301 if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr) 2302 continue; 2303 2304 int uniform = inst->src[i].reg; 2305 2306 /* If this array isn't already present in the pull constant buffer, 2307 * add it. 2308 */ 2309 if (pull_constant_loc[uniform] == -1) { 2310 const float **values = &prog_data->param[uniform * 4]; 2311 2312 pull_constant_loc[uniform] = prog_data->nr_pull_params / 4; 2313 2314 for (int j = 0; j < uniform_size[uniform] * 4; j++) { 2315 prog_data->pull_param[prog_data->nr_pull_params++] = values[j]; 2316 } 2317 } 2318 2319 /* Set up the annotation tracking for new generated instructions. */ 2320 base_ir = inst->ir; 2321 current_annotation = inst->annotation; 2322 2323 dst_reg temp = dst_reg(this, glsl_type::vec4_type); 2324 2325 emit_pull_constant_load(inst, temp, inst->src[i], 2326 pull_constant_loc[uniform]); 2327 2328 inst->src[i].file = temp.file; 2329 inst->src[i].reg = temp.reg; 2330 inst->src[i].reg_offset = temp.reg_offset; 2331 inst->src[i].reladdr = NULL; 2332 } 2333 } 2334 2335 /* Now there are no accesses of the UNIFORM file with a reladdr, so 2336 * no need to track them as larger-than-vec4 objects. This will be 2337 * relied on in cutting out unused uniform vectors from push 2338 * constants. 2339 */ 2340 split_uniform_registers(); 2341} 2342 2343void 2344vec4_visitor::resolve_ud_negate(src_reg *reg) 2345{ 2346 if (reg->type != BRW_REGISTER_TYPE_UD || 2347 !reg->negate) 2348 return; 2349 2350 src_reg temp = src_reg(this, glsl_type::uvec4_type); 2351 emit(BRW_OPCODE_MOV, dst_reg(temp), *reg); 2352 *reg = temp; 2353} 2354 2355vec4_visitor::vec4_visitor(struct brw_vs_compile *c, 2356 struct gl_shader_program *prog, 2357 struct brw_shader *shader) 2358{ 2359 this->c = c; 2360 this->p = &c->func; 2361 this->brw = p->brw; 2362 this->intel = &brw->intel; 2363 this->ctx = &intel->ctx; 2364 this->prog = prog; 2365 this->shader = shader; 2366 2367 this->mem_ctx = ralloc_context(NULL); 2368 this->failed = false; 2369 2370 this->base_ir = NULL; 2371 this->current_annotation = NULL; 2372 2373 this->c = c; 2374 this->vp = (struct gl_vertex_program *) 2375 prog->_LinkedShaders[MESA_SHADER_VERTEX]->Program; 2376 this->prog_data = &c->prog_data; 2377 2378 this->variable_ht = hash_table_ctor(0, 2379 hash_table_pointer_hash, 2380 hash_table_pointer_compare); 2381 2382 this->virtual_grf_def = NULL; 2383 this->virtual_grf_use = NULL; 2384 this->virtual_grf_sizes = NULL; 2385 this->virtual_grf_count = 0; 2386 this->virtual_grf_reg_map = NULL; 2387 this->virtual_grf_reg_count = 0; 2388 this->virtual_grf_array_size = 0; 2389 this->live_intervals_valid = false; 2390 2391 this->uniforms = 0; 2392 2393 this->variable_ht = hash_table_ctor(0, 2394 hash_table_pointer_hash, 2395 hash_table_pointer_compare); 2396} 2397 2398vec4_visitor::~vec4_visitor() 2399{ 2400 ralloc_free(this->mem_ctx); 2401 hash_table_dtor(this->variable_ht); 2402} 2403 2404 2405void 2406vec4_visitor::fail(const char *format, ...) 2407{ 2408 va_list va; 2409 char *msg; 2410 2411 if (failed) 2412 return; 2413 2414 failed = true; 2415 2416 va_start(va, format); 2417 msg = ralloc_vasprintf(mem_ctx, format, va); 2418 va_end(va); 2419 msg = ralloc_asprintf(mem_ctx, "VS compile failed: %s\n", msg); 2420 2421 this->fail_msg = msg; 2422 2423 if (INTEL_DEBUG & DEBUG_VS) { 2424 fprintf(stderr, "%s", msg); 2425 } 2426} 2427 2428} /* namespace brw */ 2429