brw_vec4_visitor.cpp revision bba910373fc6cdca939422d94adfe58b43e41b86
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_vec4.h"
extern "C" {
#include "main/macros.h"
#include "program/prog_parameter.h"
}

namespace brw {

src_reg::src_reg(dst_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;

   int swizzles[4];
   int next_chan = 0;
   int last = 0;

   for (int i = 0; i < 4; i++) {
      if (!(reg.writemask & (1 << i)))
         continue;

      swizzles[next_chan++] = last = i;
   }

   for (; next_chan < 4; next_chan++) {
      swizzles[next_chan] = last;
   }

   this->swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
                                swizzles[2], swizzles[3]);
}

dst_reg::dst_reg(src_reg reg)
{
   init();

   this->file = reg.file;
   this->reg = reg.reg;
   this->reg_offset = reg.reg_offset;
   this->type = reg.type;
   this->writemask = WRITEMASK_XYZW;
   this->reladdr = reg.reladdr;
   this->fixed_hw_reg = reg.fixed_hw_reg;
}
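/* Illustrative note (added, not in the original source): the
 * writemask-to-swizzle conversion above packs the enabled channels in
 * order and replicates the last enabled channel into the remaining
 * slots.  For example:
 *
 *    dst_reg d;
 *    d.writemask = WRITEMASK_XZ;
 *    src_reg s = src_reg(d);   // s.swizzle == BRW_SWIZZLE4(0, 2, 2, 2)
 */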
vec4_instruction::vec4_instruction(vec4_visitor *v,
                                   enum opcode opcode, dst_reg dst,
                                   src_reg src0, src_reg src1, src_reg src2)
{
   this->opcode = opcode;
   this->dst = dst;
   this->src[0] = src0;
   this->src[1] = src1;
   this->src[2] = src2;
   this->ir = v->base_ir;
   this->annotation = v->current_annotation;
}

vec4_instruction *
vec4_visitor::emit(vec4_instruction *inst)
{
   this->instructions.push_tail(inst);

   return inst;
}

vec4_instruction *
vec4_visitor::emit_before(vec4_instruction *inst, vec4_instruction *new_inst)
{
   new_inst->ir = inst->ir;
   new_inst->annotation = inst->annotation;

   inst->insert_before(new_inst);

   return inst;
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst,
                   src_reg src0, src_reg src1, src_reg src2)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst,
                                             src0, src1, src2));
}


vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0, src1));
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0));
}

vec4_instruction *
vec4_visitor::emit(enum opcode opcode)
{
   return emit(new(mem_ctx) vec4_instruction(this, opcode, dst_reg()));
}

#define ALU1(op)                                                        \
   vec4_instruction *                                                   \
   vec4_visitor::op(dst_reg dst, src_reg src0)                          \
   {                                                                    \
      return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst,  \
                                           src0);                       \
   }

#define ALU2(op)                                                        \
   vec4_instruction *                                                   \
   vec4_visitor::op(dst_reg dst, src_reg src0, src_reg src1)            \
   {                                                                    \
      return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst,  \
                                           src0, src1);                 \
   }

ALU1(NOT)
ALU1(MOV)
ALU1(FRC)
ALU1(RNDD)
ALU1(RNDE)
ALU1(RNDZ)
ALU2(ADD)
ALU2(MUL)
ALU2(MACH)
ALU2(AND)
ALU2(OR)
ALU2(XOR)
ALU2(DP3)
ALU2(DP4)

/** Gen4 predicated IF. */
vec4_instruction *
vec4_visitor::IF(uint32_t predicate)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF);
   inst->predicate = predicate;

   return inst;
}

/** Gen6+ IF with embedded comparison. */
vec4_instruction *
vec4_visitor::IF(src_reg src0, src_reg src1, uint32_t condition)
{
   assert(intel->gen >= 6);

   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF, dst_null_d(),
                                        src0, src1);
   inst->conditional_mod = condition;

   return inst;
}

/**
 * CMP: Sets the low bit of the destination channels with the result
 * of the comparison, while the upper bits are undefined, and updates
 * the flag register with the packed 16 bits of the result.
 */
vec4_instruction *
vec4_visitor::CMP(dst_reg dst, src_reg src0, src_reg src1, uint32_t condition)
{
   vec4_instruction *inst;

   /* original gen4 does type conversion to the destination type
    * before comparison, producing garbage results for floating
    * point comparisons.
    */
   if (intel->gen == 4) {
      dst.type = src0.type;
      if (dst.file == HW_REG)
         dst.fixed_hw_reg.type = dst.type;
   }

   inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_CMP, dst, src0, src1);
   inst->conditional_mod = condition;

   return inst;
}

vec4_instruction *
vec4_visitor::SCRATCH_READ(dst_reg dst, src_reg index)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_READ,
                                        dst, index);
   inst->base_mrf = 14;
   inst->mlen = 1;

   return inst;
}

vec4_instruction *
vec4_visitor::SCRATCH_WRITE(dst_reg dst, src_reg src, src_reg index)
{
   vec4_instruction *inst;

   inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_WRITE,
                                        dst, src, index);
   inst->base_mrf = 13;
   inst->mlen = 2;

   return inst;
}

void
vec4_visitor::emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements)
{
   static enum opcode dot_opcodes[] = {
      BRW_OPCODE_DP2, BRW_OPCODE_DP3, BRW_OPCODE_DP4
   };

   emit(dot_opcodes[elements - 2], dst, src0, src1);
}

void
vec4_visitor::emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src)
{
   /* The gen6 math instruction ignores the source modifiers --
    * swizzle, abs, negate, and at least some parts of the register
    * region description.
    *
    * While it would seem that this MOV could be avoided at this point
    * in the case that the swizzle is matched up with the destination
    * writemask, note that uniform packing and register allocation
    * could rearrange our swizzle, so let's leave this matter up to
    * copy propagation later.
    */
   src_reg temp_src = src_reg(this, glsl_type::vec4_type);
   emit(MOV(dst_reg(temp_src), src));

   if (dst.writemask != WRITEMASK_XYZW) {
      /* The gen6 math instruction must be align1, so we can't do
       * writemasks.
       */
      dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type);

      emit(opcode, temp_dst, temp_src);

      emit(MOV(dst, src_reg(temp_dst)));
   } else {
      emit(opcode, dst, temp_src);
   }
}

void
vec4_visitor::emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src)
{
   vec4_instruction *inst = emit(opcode, dst, src);
   inst->base_mrf = 1;
   inst->mlen = 1;
}

void
vec4_visitor::emit_math(opcode opcode, dst_reg dst, src_reg src)
{
   switch (opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      break;
   default:
      assert(!"not reached: bad math opcode");
      return;
   }

   if (intel->gen >= 6) {
      return emit_math1_gen6(opcode, dst, src);
   } else {
      return emit_math1_gen4(opcode, dst, src);
   }
}

void
vec4_visitor::emit_math2_gen6(enum opcode opcode,
                              dst_reg dst, src_reg src0, src_reg src1)
{
   src_reg expanded;

   /* The gen6 math instruction ignores the source modifiers --
    * swizzle, abs, negate, and at least some parts of the register
    * region description.  Move the sources to temporaries to make it
    * generally work.
    */

   expanded = src_reg(this, glsl_type::vec4_type);
   emit(MOV(dst_reg(expanded), src0));
   src0 = expanded;

   expanded = src_reg(this, glsl_type::vec4_type);
   emit(MOV(dst_reg(expanded), src1));
   src1 = expanded;

   if (dst.writemask != WRITEMASK_XYZW) {
      /* The gen6 math instruction must be align1, so we can't do
       * writemasks.
       */
      dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type);

      emit(opcode, temp_dst, src0, src1);

      emit(MOV(dst, src_reg(temp_dst)));
   } else {
      emit(opcode, dst, src0, src1);
   }
}

void
vec4_visitor::emit_math2_gen4(enum opcode opcode,
                              dst_reg dst, src_reg src0, src_reg src1)
{
   vec4_instruction *inst = emit(opcode, dst, src0, src1);
   inst->base_mrf = 1;
   inst->mlen = 2;
}

void
vec4_visitor::emit_math(enum opcode opcode,
                        dst_reg dst, src_reg src0, src_reg src1)
{
   assert(opcode == SHADER_OPCODE_POW);

   if (intel->gen >= 6) {
      return emit_math2_gen6(opcode, dst, src0, src1);
   } else {
      return emit_math2_gen4(opcode, dst, src0, src1);
   }
}

void
vec4_visitor::visit_instructions(const exec_list *list)
{
   foreach_list(node, list) {
      ir_instruction *ir = (ir_instruction *)node;

      base_ir = ir;
      ir->accept(this);
   }
}


static int
type_size(const struct glsl_type *type)
{
   unsigned int i;
   int size;

   switch (type->base_type) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      if (type->is_matrix()) {
         return type->matrix_columns;
      } else {
         /* Regardless of size of vector, it gets a vec4.  This is bad
          * packing for things like floats, but otherwise arrays become a
          * mess.  Hopefully a later pass over the code can pack scalars
          * down if appropriate.
          */
         return 1;
      }
   case GLSL_TYPE_ARRAY:
      assert(type->length > 0);
      return type_size(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < type->length; i++) {
         size += type_size(type->fields.structure[i].type);
      }
      return size;
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up one slot in UNIFORMS[], but they're baked in
       * at link time.
       */
      return 1;
   default:
      assert(0);
      return 0;
   }
}
int
vec4_visitor::virtual_grf_alloc(int size)
{
   if (virtual_grf_array_size <= virtual_grf_count) {
      if (virtual_grf_array_size == 0)
         virtual_grf_array_size = 16;
      else
         virtual_grf_array_size *= 2;
      virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int,
                                   virtual_grf_array_size);
   }
   virtual_grf_sizes[virtual_grf_count] = size;
   return virtual_grf_count++;
}

src_reg::src_reg(class vec4_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));

   if (type->is_array() || type->is_record()) {
      this->swizzle = BRW_SWIZZLE_NOOP;
   } else {
      this->swizzle = swizzle_for_size(type->vector_elements);
   }

   this->type = brw_type_for_base_type(type);
}

dst_reg::dst_reg(class vec4_visitor *v, const struct glsl_type *type)
{
   init();

   this->file = GRF;
   this->reg = v->virtual_grf_alloc(type_size(type));

   if (type->is_array() || type->is_record()) {
      this->writemask = WRITEMASK_XYZW;
   } else {
      this->writemask = (1 << type->vector_elements) - 1;
   }

   this->type = brw_type_for_base_type(type);
}

/* Our support for uniforms is piggy-backed on the struct
 * gl_vertex_program, because that's where the values actually
 * get stored, rather than in some global gl_shader_program uniform
 * store.
 */
int
vec4_visitor::setup_uniform_values(int loc, const glsl_type *type)
{
   unsigned int offset = 0;
   float *values = &this->vp->Base.Parameters->ParameterValues[loc][0].f;

   if (type->is_matrix()) {
      const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                                        type->vector_elements,
                                                        1);

      for (unsigned int i = 0; i < type->matrix_columns; i++) {
         offset += setup_uniform_values(loc + offset, column);
      }

      return offset;
   }

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_BOOL:
      for (unsigned int i = 0; i < type->vector_elements; i++) {
         c->prog_data.param[this->uniforms * 4 + i] = &values[i];
      }

      /* Set up pad elements to get things aligned to a vec4 boundary. */
      for (unsigned int i = type->vector_elements; i < 4; i++) {
         static float zero = 0;

         c->prog_data.param[this->uniforms * 4 + i] = &zero;
      }

      /* Track the size of this uniform vector, for future packing of
       * uniforms.
       */
      this->uniform_vector_size[this->uniforms] = type->vector_elements;
      this->uniforms++;

      return 1;

   case GLSL_TYPE_STRUCT:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset,
                                        type->fields.structure[i].type);
      }
      return offset;

   case GLSL_TYPE_ARRAY:
      for (unsigned int i = 0; i < type->length; i++) {
         offset += setup_uniform_values(loc + offset, type->fields.array);
      }
      return offset;

   case GLSL_TYPE_SAMPLER:
      /* The sampler takes up a slot, but we don't use any values from it. */
      return 1;

   default:
      assert(!"not reached");
      return 0;
   }
}
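/* Layout note (added, not in the original source): after
 * setup_uniform_values(), a vec2 uniform occupying uniform slot N has
 * param[N*4+0..1] pointing at its two floats and param[N*4+2..3]
 * pointing at the shared zero pad, so every uniform vector starts on a
 * vec4 boundary.
 */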
/* Our support for builtin uniforms is even scarier than non-builtin.
 * It sits on top of the PROG_STATE_VAR parameters that are
 * automatically updated from GL context state.
 */
void
vec4_visitor::setup_builtin_uniform_values(ir_variable *ir)
{
   const ir_state_slot *const slots = ir->state_slots;
   assert(ir->state_slots != NULL);

   for (unsigned int i = 0; i < ir->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa,
       * but we'll get the same index back here.  We can reference
       * ParameterValues directly, since unlike brw_fs.cpp, we never
       * add new state references during compile.
       */
      int index = _mesa_add_state_reference(this->vp->Base.Parameters,
                                            (gl_state_index *)slots[i].tokens);
      float *values = &this->vp->Base.Parameters->ParameterValues[index][0].f;

      this->uniform_vector_size[this->uniforms] = 0;
      /* Add each of the unique swizzled channels of the element.
       * This will end up matching the size of the glsl_type of this field.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);

         c->prog_data.param[this->uniforms * 4 + j] = &values[swiz];
         if (swiz > last_swiz)
            this->uniform_vector_size[this->uniforms]++;
         last_swiz = swiz;
      }
      this->uniforms++;
   }
}

dst_reg *
vec4_visitor::variable_storage(ir_variable *var)
{
   return (dst_reg *)hash_table_find(this->variable_ht, var);
}

void
vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate)
{
   ir_expression *expr = ir->as_expression();

   *predicate = BRW_PREDICATE_NORMAL;

   if (expr) {
      src_reg op[2];
      vec4_instruction *inst;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         inst = emit(AND(dst_null_d(), op[0], src_reg(1)));
         inst->conditional_mod = BRW_CONDITIONAL_Z;
         break;

      case ir_binop_logic_xor:
         inst = emit(XOR(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_or:
         inst = emit(OR(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_binop_logic_and:
         inst = emit(AND(dst_null_d(), op[0], op[1]));
         inst->conditional_mod = BRW_CONDITIONAL_NZ;
         break;

      case ir_unop_f2b:
         if (intel->gen >= 6) {
            emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
         } else {
            inst = emit(MOV(dst_null_f(), op[0]));
            inst->conditional_mod = BRW_CONDITIONAL_NZ;
         }
         break;

      case ir_unop_i2b:
         if (intel->gen >= 6) {
            emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         } else {
            inst = emit(MOV(dst_null_d(), op[0]));
            inst->conditional_mod = BRW_CONDITIONAL_NZ;
         }
         break;

      case ir_binop_all_equal:
         inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
         break;

      case ir_binop_any_nequal:
         inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
         *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
         break;

      case ir_unop_any:
         inst = emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
         break;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_nequal:
         emit(CMP(dst_null_d(), op[0], op[1],
                  brw_conditional_for_comparison(expr->operation)));
         break;

      default:
         assert(!"not reached");
         break;
      }
      return;
   }

   ir->accept(this);

   if (intel->gen >= 6) {
      vec4_instruction *inst = emit(AND(dst_null_d(),
                                        this->result, src_reg(1)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   } else {
      vec4_instruction *inst = emit(MOV(dst_null_d(), this->result));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;
   }
}
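/* Note (added, not in the original source): the ALIGN16_ALL4H and
 * ANY4H predicates returned above make the caller's predicated
 * instruction fire only when all four (or any one) of the flag bits
 * written by the per-channel CMP are set -- that is how a vector
 * comparison is reduced to the single boolean that all_equal,
 * any_nequal, and any need.
 */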
/**
 * Emit a gen6 IF statement with the comparison folded into the IF
 * instruction.
 */
void
vec4_visitor::emit_if_gen6(ir_if *ir)
{
   ir_expression *expr = ir->condition->as_expression();

   if (expr) {
      src_reg op[2];
      dst_reg temp;

      assert(expr->get_num_operands() <= 2);
      for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
         expr->operands[i]->accept(this);
         op[i] = this->result;
      }

      switch (expr->operation) {
      case ir_unop_logic_not:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_Z));
         return;

      case ir_binop_logic_xor:
         emit(IF(op[0], op[1], BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_logic_or:
         temp = dst_reg(this, glsl_type::bool_type);
         emit(OR(temp, op[0], op[1]));
         emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_logic_and:
         temp = dst_reg(this, glsl_type::bool_type);
         emit(AND(temp, op[0], op[1]));
         emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_unop_f2b:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_unop_i2b:
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;

      case ir_binop_greater:
      case ir_binop_gequal:
      case ir_binop_less:
      case ir_binop_lequal:
      case ir_binop_equal:
      case ir_binop_nequal:
         emit(IF(op[0], op[1],
                 brw_conditional_for_comparison(expr->operation)));
         return;

      case ir_binop_all_equal:
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         emit(IF(BRW_PREDICATE_ALIGN16_ALL4H));
         return;

      case ir_binop_any_nequal:
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));
         emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
         return;

      case ir_unop_any:
         emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         emit(IF(BRW_PREDICATE_ALIGN16_ANY4H));
         return;

      default:
         assert(!"not reached");
         emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ));
         return;
      }
      return;
   }

   ir->condition->accept(this);

   emit(IF(this->result, src_reg(0), BRW_CONDITIONAL_NZ));
}
void
vec4_visitor::visit(ir_variable *ir)
{
   dst_reg *reg = NULL;

   if (variable_storage(ir))
      return;

   switch (ir->mode) {
   case ir_var_in:
      reg = new(mem_ctx) dst_reg(ATTR, ir->location);

      /* Do GL_FIXED rescaling for GLES2.0.  Our GL_FIXED attributes
       * come in as floating point conversions of the integer values.
       */
      for (int i = ir->location; i < ir->location + type_size(ir->type); i++) {
         if (!c->key.gl_fixed_input_size[i])
            continue;

         dst_reg dst = *reg;
         dst.writemask = (1 << c->key.gl_fixed_input_size[i]) - 1;
         emit(MUL(dst, src_reg(dst), src_reg(1.0f / 65536.0f)));
      }
      break;

   case ir_var_out:
      reg = new(mem_ctx) dst_reg(this, ir->type);

      for (int i = 0; i < type_size(ir->type); i++) {
         output_reg[ir->location + i] = *reg;
         output_reg[ir->location + i].reg_offset = i;
         output_reg[ir->location + i].type = BRW_REGISTER_TYPE_F;
      }
      break;

   case ir_var_auto:
   case ir_var_temporary:
      reg = new(mem_ctx) dst_reg(this, ir->type);
      break;

   case ir_var_uniform:
      reg = new(this->mem_ctx) dst_reg(UNIFORM, this->uniforms);

      /* Track how big the whole uniform variable is, in case we need to put a
       * copy of its data into pull constants for array access.
       */
      this->uniform_size[this->uniforms] = type_size(ir->type);

      if (!strncmp(ir->name, "gl_", 3)) {
         setup_builtin_uniform_values(ir);
      } else {
         setup_uniform_values(ir->location, ir->type);
      }
      break;

   default:
      assert(!"not reached");
   }

   reg->type = brw_type_for_base_type(ir->type);
   hash_table_insert(this->variable_ht, reg, ir);
}

void
vec4_visitor::visit(ir_loop *ir)
{
   dst_reg counter;

   /* We don't want debugging output to print the whole body of the
    * loop as the annotation.
    */
   this->base_ir = NULL;

   if (ir->counter != NULL) {
      this->base_ir = ir->counter;
      ir->counter->accept(this);
      counter = *(variable_storage(ir->counter));

      if (ir->from != NULL) {
         this->base_ir = ir->from;
         ir->from->accept(this);

         emit(MOV(counter, this->result));
      }
   }

   emit(BRW_OPCODE_DO);

   if (ir->to) {
      this->base_ir = ir->to;
      ir->to->accept(this);

      emit(CMP(dst_null_d(), src_reg(counter), this->result,
               brw_conditional_for_comparison(ir->cmp)));

      vec4_instruction *inst = emit(BRW_OPCODE_BREAK);
      inst->predicate = BRW_PREDICATE_NORMAL;
   }

   visit_instructions(&ir->body_instructions);


   if (ir->increment) {
      this->base_ir = ir->increment;
      ir->increment->accept(this);
      emit(ADD(counter, src_reg(counter), this->result));
   }

   emit(BRW_OPCODE_WHILE);
}
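/* Sketch (added, not in the original source): for a counted loop,
 * visit(ir_loop) above emits roughly
 *
 *    MOV  counter, from
 *    DO
 *       CMP.cc null, counter, to       (loop-exit comparison, ir->cmp)
 *       (+f0) BREAK
 *       ...body...
 *       ADD  counter, counter, increment
 *    WHILE
 */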
void
vec4_visitor::visit(ir_loop_jump *ir)
{
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      emit(BRW_OPCODE_BREAK);
      break;
   case ir_loop_jump::jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;
   }
}


void
vec4_visitor::visit(ir_function_signature *ir)
{
   assert(0);
   (void)ir;
}

void
vec4_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all be inlined.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      sig = ir->matching_signature(&empty);

      assert(sig);

      visit_instructions(&sig->body);
   }
}

GLboolean
vec4_visitor::try_emit_sat(ir_expression *ir)
{
   ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
   if (!sat_src)
      return false;

   sat_src->accept(this);
   src_reg src = this->result;

   this->result = src_reg(this, ir->type);
   vec4_instruction *inst;
   inst = emit(MOV(dst_reg(this->result), src));
   inst->saturate = true;

   return true;
}
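/* Note (added, not in the original source): try_emit_sat() lets GLSL's
 * clamp(x, 0.0, 1.0) idiom collapse into the hardware saturate modifier
 * on a single MOV of the inner expression, instead of a separate
 * compare/select sequence per bound.
 */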
void
vec4_visitor::emit_bool_comparison(unsigned int op,
                                   dst_reg dst, src_reg src0, src_reg src1)
{
   /* original gen4 does destination conversion before comparison. */
   if (intel->gen < 5)
      dst.type = src0.type;

   emit(CMP(dst, src0, src1, brw_conditional_for_comparison(op)));

   dst.type = BRW_REGISTER_TYPE_D;
   emit(AND(dst, src_reg(dst), src_reg(0x1)));
}

void
vec4_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   src_reg op[Elements(ir->operands)];
   src_reg result_src;
   dst_reg result_dst;
   vec4_instruction *inst;

   if (try_emit_sat(ir))
      return;

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      this->result.file = BAD_FILE;
      ir->operands[operand]->accept(this);
      if (this->result.file == BAD_FILE) {
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->print();
         exit(1);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
   }

   int vector_elements = ir->operands[0]->type->vector_elements;
   if (ir->operands[1]) {
      vector_elements = MAX2(vector_elements,
                             ir->operands[1]->type->vector_elements);
   }

   this->result.file = BAD_FILE;

   /* Storage for our result.  Ideally for an assignment we'd be using
    * the actual storage for the result here, instead.
    */
   result_src = src_reg(this, ir->type);
   /* convenience for the emit functions below. */
   result_dst = dst_reg(result_src);
   /* If nothing special happens, this is the result. */
   this->result = result_src;
   /* Limit writes to the channels that will be used by result_src later.
    * This does limit this temp's use as a temporary for multi-instruction
    * sequences.
    */
   result_dst.writemask = (1 << ir->type->vector_elements) - 1;

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* Note that BRW_OPCODE_NOT is not appropriate here, since it is
       * ones complement of the whole register, not just bit 0.
       */
      emit(XOR(result_dst, op[0], src_reg(1)));
      break;
   case ir_unop_neg:
      op[0].negate = !op[0].negate;
      this->result = op[0];
      break;
   case ir_unop_abs:
      op[0].abs = true;
      op[0].negate = false;
      this->result = op[0];
      break;

   case ir_unop_sign:
      emit(MOV(result_dst, src_reg(0.0f)));

      emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_G));
      inst = emit(MOV(result_dst, src_reg(1.0f)));
      inst->predicate = BRW_PREDICATE_NORMAL;

      emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_L));
      inst = emit(MOV(result_dst, src_reg(-1.0f)));
      inst->predicate = BRW_PREDICATE_NORMAL;

      break;

   case ir_unop_rcp:
      emit_math(SHADER_OPCODE_RCP, result_dst, op[0]);
      break;

   case ir_unop_exp2:
      emit_math(SHADER_OPCODE_EXP2, result_dst, op[0]);
      break;
   case ir_unop_log2:
      emit_math(SHADER_OPCODE_LOG2, result_dst, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_sin:
   case ir_unop_sin_reduced:
      emit_math(SHADER_OPCODE_SIN, result_dst, op[0]);
      break;
   case ir_unop_cos:
   case ir_unop_cos_reduced:
      emit_math(SHADER_OPCODE_COS, result_dst, op[0]);
      break;

   case ir_unop_dFdx:
   case ir_unop_dFdy:
      assert(!"derivatives not valid in vertex shader");
      break;

   case ir_unop_noise:
      assert(!"not reached: should be handled by lower_noise");
      break;

   case ir_binop_add:
      emit(ADD(result_dst, op[0], op[1]));
      break;
   case ir_binop_sub:
      assert(!"not reached: should be handled by ir_sub_to_add_neg");
      break;

   case ir_binop_mul:
      if (ir->type->is_integer()) {
         /* For integer multiplication, the MUL uses the low 16 bits
          * of one of the operands (src0 on gen6, src1 on gen7).  The
          * MACH accumulates in the contribution of the upper 16 bits
          * of that operand.
          *
          * FINISHME: Emit just the MUL if we know an operand is small
          * enough.
          */
         struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D);

         emit(MUL(acc, op[0], op[1]));
         emit(MACH(dst_null_d(), op[0], op[1]));
         emit(MOV(result_dst, src_reg(acc)));
      } else {
         emit(MUL(result_dst, op[0], op[1]));
      }
      break;
   case ir_binop_div:
      assert(!"not reached: should be handled by ir_div_to_mul_rcp");
   case ir_binop_mod:
      assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
      break;

   case ir_binop_less:
   case ir_binop_greater:
   case ir_binop_lequal:
   case ir_binop_gequal:
   case ir_binop_equal:
   case ir_binop_nequal: {
      emit(CMP(result_dst, op[0], op[1],
               brw_conditional_for_comparison(ir->operation)));
      emit(AND(result_dst, result_src, src_reg(0x1)));
      break;
   }

   case ir_binop_all_equal:
      /* "==" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z));
         emit(MOV(result_dst, src_reg(0)));
         inst = emit(MOV(result_dst, src_reg(1)));
         inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_Z));
         emit(AND(result_dst, result_src, src_reg(0x1)));
      }
      break;
   case ir_binop_any_nequal:
      /* "!=" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ));

         emit(MOV(result_dst, src_reg(0)));
         inst = emit(MOV(result_dst, src_reg(1)));
         inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      } else {
         emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_NZ));
         emit(AND(result_dst, result_src, src_reg(0x1)));
      }
      break;

   case ir_unop_any:
      emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ));
      emit(MOV(result_dst, src_reg(0)));

      inst = emit(MOV(result_dst, src_reg(1)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;

   case ir_binop_logic_xor:
      emit(XOR(result_dst, op[0], op[1]));
      break;

   case ir_binop_logic_or:
      emit(OR(result_dst, op[0], op[1]));
      break;

   case ir_binop_logic_and:
      emit(AND(result_dst, op[0], op[1]));
      break;

   case ir_binop_dot:
      assert(ir->operands[0]->type->is_vector());
      assert(ir->operands[0]->type == ir->operands[1]->type);
      emit_dp(result_dst, op[0], op[1], ir->operands[0]->type->vector_elements);
      break;

   case ir_unop_sqrt:
      emit_math(SHADER_OPCODE_SQRT, result_dst, op[0]);
      break;
   case ir_unop_rsq:
      emit_math(SHADER_OPCODE_RSQ, result_dst, op[0]);
      break;
   case ir_unop_i2f:
   case ir_unop_i2u:
   case ir_unop_u2i:
   case ir_unop_u2f:
   case ir_unop_b2f:
   case ir_unop_b2i:
   case ir_unop_f2i:
      emit(MOV(result_dst, op[0]));
      break;
   case ir_unop_f2b:
   case ir_unop_i2b: {
      emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ));
      emit(AND(result_dst, result_src, src_reg(1)));
      break;
   }

   case ir_unop_trunc:
      emit(RNDZ(result_dst, op[0]));
      break;
   case ir_unop_ceil:
      op[0].negate = !op[0].negate;
      inst = emit(RNDD(result_dst, op[0]));
      this->result.negate = true;
      break;
   case ir_unop_floor:
      inst = emit(RNDD(result_dst, op[0]));
      break;
   case ir_unop_fract:
      inst = emit(FRC(result_dst, op[0]));
      break;
   case ir_unop_round_even:
      emit(RNDE(result_dst, op[0]));
      break;

   case ir_binop_min:
      emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_L));

      inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   case ir_binop_max:
      emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_G));

      inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case ir_binop_pow:
      emit_math(SHADER_OPCODE_POW, result_dst, op[0], op[1]);
      break;

   case ir_unop_bit_not:
      inst = emit(NOT(result_dst, op[0]));
      break;
   case ir_binop_bit_and:
      inst = emit(AND(result_dst, op[0], op[1]));
      break;
   case ir_binop_bit_xor:
      inst = emit(XOR(result_dst, op[0], op[1]));
      break;
   case ir_binop_bit_or:
      inst = emit(OR(result_dst, op[0], op[1]));
      break;

   case ir_binop_lshift:
   case ir_binop_rshift:
      assert(!"GLSL 1.30 features unsupported");
      break;

   case ir_quadop_vector:
      assert(!"not reached: should be handled by lower_quadop_vector");
      break;
   }
}


void
vec4_visitor::visit(ir_swizzle *ir)
{
   src_reg src;
   int i = 0;
   int swizzle[4];

   /* Note that this is only swizzles in expressions, not those on the left
    * hand side of an assignment, which do write masking.  See ir_assignment
    * for that.
    */

   ir->val->accept(this);
   src = this->result;
   assert(src.file != BAD_FILE);

   for (i = 0; i < ir->type->vector_elements; i++) {
      switch (i) {
      case 0:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.x);
         break;
      case 1:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.y);
         break;
      case 2:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.z);
         break;
      case 3:
         swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.w);
         break;
      }
   }
   for (; i < 4; i++) {
      /* Replicate the last channel out. */
      swizzle[i] = swizzle[ir->type->vector_elements - 1];
   }

   src.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);

   this->result = src;
}
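/* Illustrative note (added, not in the original source): the swizzle
 * composition above indexes the value's existing swizzle, so swizzling
 * an already-swizzled value composes rather than overwrites.  E.g. for
 * (v.zwxy).xy, BRW_GET_SWZ picks mask.x = 0 out of {Z, W, X, Y}, giving
 * Z, then W, and the last channel replicates: result swizzle {Z,W,W,W}.
 */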
void
vec4_visitor::visit(ir_dereference_variable *ir)
{
   const struct glsl_type *type = ir->type;
   dst_reg *reg = variable_storage(ir->var);

   if (!reg) {
      fail("Failed to find variable storage for %s\n", ir->var->name);
      this->result = src_reg(brw_null_reg());
      return;
   }

   this->result = src_reg(*reg);

   if (type->is_scalar() || type->is_vector() || type->is_matrix())
      this->result.swizzle = swizzle_for_size(type->vector_elements);
}

void
vec4_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *constant_index;
   src_reg src;
   int element_size = type_size(ir->type);

   constant_index = ir->array_index->constant_expression_value();

   ir->array->accept(this);
   src = this->result;

   if (constant_index) {
      src.reg_offset += constant_index->value.i[0] * element_size;
   } else {
      /* Variable index array dereference.  It eats the "vec4" of the
       * base of the array and an index that offsets the Mesa register
       * index.
       */
      ir->array_index->accept(this);

      src_reg index_reg;

      if (element_size == 1) {
         index_reg = this->result;
      } else {
         index_reg = src_reg(this, glsl_type::int_type);

         emit(MUL(dst_reg(index_reg), this->result, src_reg(element_size)));
      }

      if (src.reladdr) {
         src_reg temp = src_reg(this, glsl_type::int_type);

         emit(ADD(dst_reg(temp), *src.reladdr, index_reg));

         index_reg = temp;
      }

      src.reladdr = ralloc(mem_ctx, src_reg);
      memcpy(src.reladdr, &index_reg, sizeof(index_reg));
   }
   /* If the type is smaller than a vec4, replicate the last channel out. */
   if (ir->type->is_scalar() || ir->type->is_vector())
      src.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      src.swizzle = BRW_SWIZZLE_NOOP;
   src.type = brw_type_for_base_type(ir->type);

   this->result = src;
}

void
vec4_visitor::visit(ir_dereference_record *ir)
{
   unsigned int i;
   const glsl_type *struct_type = ir->record->type;
   int offset = 0;

   ir->record->accept(this);

   for (i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }

   /* If the type is smaller than a vec4, replicate the last channel out. */
   if (ir->type->is_scalar() || ir->type->is_vector())
      this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      this->result.swizzle = BRW_SWIZZLE_NOOP;
   this->result.type = brw_type_for_base_type(ir->type);

   this->result.reg_offset += offset;
}

/**
 * We want to be careful in assignment setup to hit the actual storage
 * instead of potentially using a temporary like we might with the
 * ir_dereference handler.
 */
static dst_reg
get_assignment_lhs(ir_dereference *ir, vec4_visitor *v)
{
   /* The LHS must be a dereference.  If the LHS is a variable indexed array
    * access of a vector, it must be separated into a series of conditional
    * moves before reaching this point (see ir_vec_index_to_cond_assign).
    */
   assert(ir->as_dereference());
   ir_dereference_array *deref_array = ir->as_dereference_array();
   if (deref_array) {
      assert(!deref_array->array->type->is_vector());
   }

   /* Use the rvalue deref handler for the most part.  We'll ignore
    * swizzles in it and write swizzles using writemask, though.
    */
   ir->accept(v);
   return dst_reg(v->result);
}

void
vec4_visitor::emit_block_move(dst_reg *dst, src_reg *src,
                              const struct glsl_type *type, uint32_t predicate)
{
   if (type->base_type == GLSL_TYPE_STRUCT) {
      for (unsigned int i = 0; i < type->length; i++) {
         emit_block_move(dst, src, type->fields.structure[i].type, predicate);
      }
      return;
   }

   if (type->is_array()) {
      for (unsigned int i = 0; i < type->length; i++) {
         emit_block_move(dst, src, type->fields.array, predicate);
      }
      return;
   }

   if (type->is_matrix()) {
      const struct glsl_type *vec_type;

      vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
                                         type->vector_elements, 1);

      for (int i = 0; i < type->matrix_columns; i++) {
         emit_block_move(dst, src, vec_type, predicate);
      }
      return;
   }

   assert(type->is_scalar() || type->is_vector());

   dst->type = brw_type_for_base_type(type);
   src->type = dst->type;

   dst->writemask = (1 << type->vector_elements) - 1;

   /* Do we need to worry about swizzling a swizzle? */
   assert(src->swizzle == BRW_SWIZZLE_NOOP);
   src->swizzle = swizzle_for_size(type->vector_elements);

   vec4_instruction *inst = emit(MOV(*dst, *src));
   inst->predicate = predicate;

   dst->reg_offset++;
   src->reg_offset++;
}
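/* Note (added, not in the original source): emit_block_move() above
 * decomposes an aggregate copy into one predicated vec4 MOV per slot.
 * A mat2x3 source, for instance, becomes two vec3-writemasked MOVs,
 * with reg_offset advancing one slot per column.
 */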
/* If the RHS processing resulted in an instruction generating a
 * temporary value, and it would be easy to rewrite the instruction to
 * generate its result right into the LHS instead, do so.  This ends
 * up reliably removing instructions where it can be tricky to do so
 * later without real UD chain information.
 */
bool
vec4_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir,
                                     dst_reg dst,
                                     src_reg src,
                                     vec4_instruction *pre_rhs_inst,
                                     vec4_instruction *last_rhs_inst)
{
   /* This could be supported, but it would take more smarts. */
   if (ir->condition)
      return false;

   if (pre_rhs_inst == last_rhs_inst)
      return false; /* No instructions generated to work with. */

   /* Make sure the last instruction generated our source reg. */
   if (src.file != GRF ||
       src.file != last_rhs_inst->dst.file ||
       src.reg != last_rhs_inst->dst.reg ||
       src.reg_offset != last_rhs_inst->dst.reg_offset ||
       src.reladdr ||
       src.abs ||
       src.negate ||
       last_rhs_inst->predicate != BRW_PREDICATE_NONE)
      return false;

   /* Check that that last instruction fully initialized the channels
    * we want to use, in the order we want to use them.  We could
    * potentially reswizzle the operands of many instructions so that
    * we could handle out of order channels, but don't yet.
    */
   for (int i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i)) {
         if (!(last_rhs_inst->dst.writemask & (1 << i)))
            return false;

         if (BRW_GET_SWZ(src.swizzle, i) != i)
            return false;
      }
   }

   /* Success!  Rewrite the instruction. */
   last_rhs_inst->dst.file = dst.file;
   last_rhs_inst->dst.reg = dst.reg;
   last_rhs_inst->dst.reg_offset = dst.reg_offset;
   last_rhs_inst->dst.reladdr = dst.reladdr;
   last_rhs_inst->dst.writemask &= dst.writemask;

   return true;
}

void
vec4_visitor::visit(ir_assignment *ir)
{
   dst_reg dst = get_assignment_lhs(ir->lhs, this);
   uint32_t predicate = BRW_PREDICATE_NONE;

   if (!ir->lhs->type->is_scalar() &&
       !ir->lhs->type->is_vector()) {
      ir->rhs->accept(this);
      src_reg src = this->result;

      if (ir->condition) {
         emit_bool_to_cond_code(ir->condition, &predicate);
      }

      emit_block_move(&dst, &src, ir->rhs->type, predicate);
      return;
   }

   /* Now we're down to just a scalar/vector with writemasks. */
   int i;

   vec4_instruction *pre_rhs_inst, *last_rhs_inst;
   pre_rhs_inst = (vec4_instruction *)this->instructions.get_tail();

   ir->rhs->accept(this);

   last_rhs_inst = (vec4_instruction *)this->instructions.get_tail();

   src_reg src = this->result;

   int swizzles[4];
   int first_enabled_chan = 0;
   int src_chan = 0;

   assert(ir->lhs->type->is_vector() ||
          ir->lhs->type->is_scalar());
   dst.writemask = ir->write_mask;

   for (int i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i)) {
         first_enabled_chan = BRW_GET_SWZ(src.swizzle, i);
         break;
      }
   }

   /* Swizzle a small RHS vector into the channels being written.
    *
    * glsl ir treats write_mask as dictating how many channels are
    * present on the RHS while in our instructions we need to make
    * those channels appear in the slots of the vec4 they're written to.
    */
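   /* Illustration (added, not in the original source): for "v.zw = a.xy"
    * this point is reached with write_mask ZW and a 2-channel RHS whose
    * swizzle is {X, Y, Y, Y}, so the loop below produces {Y, Y, X, Y}:
    * RHS channel X lands in slot Z, Y in slot W, and the unwritten slots
    * are padded (harmlessly, since they are masked off).
    */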
   for (int i = 0; i < 4; i++) {
      if (dst.writemask & (1 << i))
         swizzles[i] = BRW_GET_SWZ(src.swizzle, src_chan++);
      else
         swizzles[i] = first_enabled_chan;
   }
   src.swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1],
                              swizzles[2], swizzles[3]);

   if (try_rewrite_rhs_to_dst(ir, dst, src, pre_rhs_inst, last_rhs_inst)) {
      return;
   }

   if (ir->condition) {
      emit_bool_to_cond_code(ir->condition, &predicate);
   }

   for (i = 0; i < type_size(ir->lhs->type); i++) {
      vec4_instruction *inst = emit(MOV(dst, src));
      inst->predicate = predicate;

      dst.reg_offset++;
      src.reg_offset++;
   }
}

void
vec4_visitor::emit_constant_values(dst_reg *dst, ir_constant *ir)
{
   if (ir->type->base_type == GLSL_TYPE_STRUCT) {
      foreach_list(node, &ir->components) {
         ir_constant *field_value = (ir_constant *)node;

         emit_constant_values(dst, field_value);
      }
      return;
   }

   if (ir->type->is_array()) {
      for (unsigned int i = 0; i < ir->type->length; i++) {
         emit_constant_values(dst, ir->array_elements[i]);
      }
      return;
   }

   if (ir->type->is_matrix()) {
      for (int i = 0; i < ir->type->matrix_columns; i++) {
         for (int j = 0; j < ir->type->vector_elements; j++) {
            dst->writemask = 1 << j;
            dst->type = BRW_REGISTER_TYPE_F;

            emit(MOV(*dst,
                     src_reg(ir->value.f[i * ir->type->vector_elements + j])));
         }
         dst->reg_offset++;
      }
      return;
   }

   for (int i = 0; i < ir->type->vector_elements; i++) {
      dst->writemask = 1 << i;
      dst->type = brw_type_for_base_type(ir->type);

      switch (ir->type->base_type) {
      case GLSL_TYPE_FLOAT:
         emit(MOV(*dst, src_reg(ir->value.f[i])));
         break;
      case GLSL_TYPE_INT:
         emit(MOV(*dst, src_reg(ir->value.i[i])));
         break;
      case GLSL_TYPE_UINT:
         emit(MOV(*dst, src_reg(ir->value.u[i])));
         break;
      case GLSL_TYPE_BOOL:
         emit(MOV(*dst, src_reg(ir->value.b[i])));
         break;
      default:
         assert(!"Non-float/uint/int/bool constant");
         break;
      }
   }
   dst->reg_offset++;
}

void
vec4_visitor::visit(ir_constant *ir)
{
   dst_reg dst = dst_reg(this, ir->type);
   this->result = src_reg(dst);

   emit_constant_values(&dst, ir);
}

void
vec4_visitor::visit(ir_call *ir)
{
   assert(!"not reached");
}

void
vec4_visitor::visit(ir_texture *ir)
{
   /* FINISHME: Implement vertex texturing.
    *
    * With 0 vertex samplers available, the linker will reject
    * programs that do vertex texturing, but after our visitor has
    * run.
    */
}

void
vec4_visitor::visit(ir_return *ir)
{
   assert(!"not reached");
}

void
vec4_visitor::visit(ir_discard *ir)
{
   assert(!"not reached");
}
void
vec4_visitor::visit(ir_if *ir)
{
   /* Don't point the annotation at the if statement, because then it plus
    * the then and else blocks get printed.
    */
   this->base_ir = ir->condition;

   if (intel->gen == 6) {
      emit_if_gen6(ir);
   } else {
      uint32_t predicate;
      emit_bool_to_cond_code(ir->condition, &predicate);
      emit(IF(predicate));
   }

   visit_instructions(&ir->then_instructions);

   if (!ir->else_instructions.is_empty()) {
      this->base_ir = ir->condition;
      emit(BRW_OPCODE_ELSE);

      visit_instructions(&ir->else_instructions);
   }

   this->base_ir = ir->condition;
   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::emit_ndc_computation()
{
   /* Get the position */
   src_reg pos = src_reg(output_reg[VERT_RESULT_HPOS]);

   /* Build ndc coords, which are (x/w, y/w, z/w, 1/w) */
   dst_reg ndc = dst_reg(this, glsl_type::vec4_type);
   output_reg[BRW_VERT_RESULT_NDC] = ndc;

   current_annotation = "NDC";
   dst_reg ndc_w = ndc;
   ndc_w.writemask = WRITEMASK_W;
   src_reg pos_w = pos;
   pos_w.swizzle = BRW_SWIZZLE4(SWIZZLE_W, SWIZZLE_W, SWIZZLE_W, SWIZZLE_W);
   emit_math(SHADER_OPCODE_RCP, ndc_w, pos_w);

   dst_reg ndc_xyz = ndc;
   ndc_xyz.writemask = WRITEMASK_XYZ;

   emit(MUL(ndc_xyz, pos, src_reg(ndc_w)));
}

void
vec4_visitor::emit_psiz_and_flags(struct brw_reg reg)
{
   if (intel->gen < 6 &&
       ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
        c->key.nr_userclip || brw->has_negative_rhw_bug)) {
      dst_reg header1 = dst_reg(this, glsl_type::uvec4_type);
      GLuint i;

      emit(MOV(header1, 0u));

      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
         assert(!"finishme: psiz");
         src_reg psiz;

         header1.writemask = WRITEMASK_W;
         emit(MUL(header1, psiz, 1u << 11));
         emit(AND(header1, src_reg(header1), 0x7ff << 8));
      }

      for (i = 0; i < c->key.nr_userclip; i++) {
         vec4_instruction *inst;

         inst = emit(DP4(dst_null_f(), src_reg(output_reg[VERT_RESULT_HPOS]),
                         src_reg(c->userplane[i])));
         inst->conditional_mod = BRW_CONDITIONAL_L;

         inst = emit(OR(header1, src_reg(header1), 1u << i));
         inst->predicate = BRW_PREDICATE_NORMAL;
      }

      /* i965 clipping workaround:
       * 1) Test for -ve rhw
       * 2) If set,
       *      set ndc = (0,0,0,0)
       *      set ucp[6] = 1
       *
       * Later, clipping will detect ucp[6] and ensure the primitive is
       * clipped against all fixed planes.
       */
      if (brw->has_negative_rhw_bug) {
#if 0
         /* FINISHME */
         brw_CMP(p,
                 vec8(brw_null_reg()),
                 BRW_CONDITIONAL_L,
                 brw_swizzle1(output_reg[BRW_VERT_RESULT_NDC], 3),
                 brw_imm_f(0));

         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
         brw_MOV(p, output_reg[BRW_VERT_RESULT_NDC], brw_imm_f(0));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
#endif
      }

      header1.writemask = WRITEMASK_XYZW;
      emit(MOV(reg, src_reg(header1)));
   } else if (intel->gen < 6) {
      emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u));
   } else {
      emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0)));
      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
         emit(MOV(brw_writemask(reg, WRITEMASK_W),
                  src_reg(output_reg[VERT_RESULT_PSIZ])));
      }
   }
}
void
vec4_visitor::emit_clip_distances(struct brw_reg reg, int offset)
{
   if (intel->gen < 6) {
      /* Clip distance slots are set aside in gen5, but they are not used.  It
       * is not clear whether we actually need to set aside space for them,
       * but the performance cost is negligible.
       */
      return;
   }

   for (int i = 0; i + offset < c->key.nr_userclip && i < 4; ++i) {
      emit(DP4(dst_reg(brw_writemask(reg, 1 << i)),
               src_reg(output_reg[VERT_RESULT_HPOS]),
               src_reg(c->userplane[i + offset])));
   }
}

void
vec4_visitor::emit_urb_slot(int mrf, int vert_result)
{
   struct brw_reg reg = brw_message_reg(mrf);

   switch (vert_result) {
   case VERT_RESULT_PSIZ:
      /* PSIZ is always in slot 0, and is coupled with other flags. */
      current_annotation = "indices, point width, clip flags";
      emit_psiz_and_flags(reg);
      break;
   case BRW_VERT_RESULT_NDC:
      current_annotation = "NDC";
      emit(MOV(reg, src_reg(output_reg[BRW_VERT_RESULT_NDC])));
      break;
   case BRW_VERT_RESULT_HPOS_DUPLICATE:
   case VERT_RESULT_HPOS:
      current_annotation = "gl_Position";
      emit(MOV(reg, src_reg(output_reg[VERT_RESULT_HPOS])));
      break;
   case BRW_VERT_RESULT_CLIP0:
      current_annotation = "user clip distances";
      emit_clip_distances(reg, 0);
      break;
   case BRW_VERT_RESULT_CLIP1:
      current_annotation = "user clip distances";
      emit_clip_distances(reg, 4);
      break;
   case BRW_VERT_RESULT_PAD:
      /* No need to write to this slot */
      break;
   default: {
      assert (vert_result < VERT_RESULT_MAX);
      current_annotation = NULL;
      /* Copy the register, saturating if necessary */
      vec4_instruction *inst = emit(MOV(reg,
                                        src_reg(output_reg[vert_result])));
      if ((vert_result == VERT_RESULT_COL0 ||
           vert_result == VERT_RESULT_COL1 ||
           vert_result == VERT_RESULT_BFC0 ||
           vert_result == VERT_RESULT_BFC1) &&
          c->key.clamp_vertex_color) {
         inst->saturate = true;
      }
   }
      break;
   }
}

static int
align_interleaved_urb_mlen(struct brw_context *brw, int mlen)
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen >= 6) {
      /* URB data written (does not include the message header reg) must
       * be a multiple of 256 bits, or 2 VS registers.  See vol5c.5,
       * section 5.4.3.2.2: URB_INTERLEAVED.
       *
       * URB entries are allocated on a multiple of 1024 bits, so an
       * extra 128 bits written here to make the end align to 256 is
       * no problem.
       */
      if ((mlen % 2) != 1)
         mlen++;
   }

   return mlen;
}
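/* Worked example (added, not in the original source): with one header
 * MRF plus five VUE slots, mlen comes in as 6, so the URB data payload
 * would be 5 registers -- odd, and not a multiple of 256 bits when
 * interleaved.  align_interleaved_urb_mlen() bumps mlen to 7, padding
 * the data portion to an even 6 registers.
 */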
/**
 * Generates the VUE payload plus the 1 or 2 URB write instructions to
 * complete the VS thread.
 *
 * The VUE layout is documented in Volume 2a.
 */
void
vec4_visitor::emit_urb_writes()
{
   /* MRF 0 is reserved for the debugger, so start with message header
    * in MRF 1.
    */
   int base_mrf = 1;
   int mrf = base_mrf;
   /* In the process of generating our URB write message contents, we
    * may need to unspill a register or load from an array.  Those
    * reads would use MRFs 14-15.
    */
   int max_usable_mrf = 13;

   /* The following assertion verifies that max_usable_mrf causes an
    * even-numbered amount of URB write data, which will meet gen6's
    * requirements for length alignment.
    */
   assert ((max_usable_mrf - base_mrf) % 2 == 0);

   /* FINISHME: edgeflag */

   brw_compute_vue_map(&c->vue_map, intel, c->key.nr_userclip,
                       c->prog_data.outputs_written);

   /* First mrf is the g0-based message header containing URB handles and such,
    * which is implied in VS_OPCODE_URB_WRITE.
    */
   mrf++;

   if (intel->gen < 6) {
      emit_ndc_computation();
   }

   /* Set up the VUE data for the first URB write */
   int slot;
   for (slot = 0; slot < c->vue_map.num_slots; ++slot) {
      emit_urb_slot(mrf++, c->vue_map.slot_to_vert_result[slot]);

      /* If this was max_usable_mrf, we can't fit anything more into this URB
       * WRITE.
       */
      if (mrf > max_usable_mrf) {
         slot++;
         break;
      }
   }

   vec4_instruction *inst = emit(VS_OPCODE_URB_WRITE);
   inst->base_mrf = base_mrf;
   inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
   inst->eot = (slot >= c->vue_map.num_slots);

   /* Optional second URB write */
   if (!inst->eot) {
      mrf = base_mrf + 1;

      for (; slot < c->vue_map.num_slots; ++slot) {
         assert(mrf < max_usable_mrf);

         emit_urb_slot(mrf++, c->vue_map.slot_to_vert_result[slot]);
      }

      inst = emit(VS_OPCODE_URB_WRITE);
      inst->base_mrf = base_mrf;
      inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf);
      inst->eot = true;
      /* URB destination offset.  In the previous write, we got MRFs
       * 2-13 minus the one header MRF, so 12 regs.  URB offset is in
       * URB row increments, and each of our MRFs is half of one of
       * those, since we're doing interleaved writes.
       */
      inst->offset = (max_usable_mrf - base_mrf) / 2;
   }

   if (intel->gen == 6)
      c->prog_data.urb_entry_size = ALIGN(c->vue_map.num_slots, 8) / 8;
   else
      c->prog_data.urb_entry_size = ALIGN(c->vue_map.num_slots, 4) / 4;
}

src_reg
vec4_visitor::get_scratch_offset(vec4_instruction *inst,
                                 src_reg *reladdr, int reg_offset)
{
   /* Because we store the values to scratch interleaved like our
    * vertex data, we need to scale the vec4 index by 2.
    */
   int message_header_scale = 2;

   /* Pre-gen6, the message header uses byte offsets instead of vec4
    * (16-byte) offset units.
    */
   if (intel->gen < 6)
      message_header_scale *= 16;

   if (reladdr) {
      src_reg index = src_reg(this, glsl_type::int_type);

      emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset)));
      emit_before(inst, MUL(dst_reg(index),
                            index, src_reg(message_header_scale)));

      return index;
   } else {
      return src_reg(reg_offset * message_header_scale);
   }
}
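/* Worked example (added, not in the original source): in
 * get_scratch_offset() above, vec4 slot 3 of a spilled array maps to
 * scratch offset 3 * 2 = 6 on gen6+ (two interleaved registers per
 * slot), or 3 * 2 * 16 = 96 on earlier parts, where the message header
 * wants byte addressing.
 */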
src_reg
vec4_visitor::get_pull_constant_offset(vec4_instruction *inst,
                                       src_reg *reladdr, int reg_offset)
{
   if (reladdr) {
      src_reg index = src_reg(this, glsl_type::int_type);

      emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset)));

      /* Pre-gen6, the message header uses byte offsets instead of vec4
       * (16-byte) offset units.
       */
      if (intel->gen < 6) {
         emit_before(inst, MUL(dst_reg(index), index, src_reg(16)));
      }

      return index;
   } else {
      int message_header_scale = intel->gen < 6 ? 16 : 1;
      return src_reg(reg_offset * message_header_scale);
   }
}

/**
 * Emits an instruction before @inst to load the value named by @orig_src
 * from scratch space at @base_offset to @temp.
 */
void
vec4_visitor::emit_scratch_read(vec4_instruction *inst,
                                dst_reg temp, src_reg orig_src,
                                int base_offset)
{
   int reg_offset = base_offset + orig_src.reg_offset;
   src_reg index = get_scratch_offset(inst, orig_src.reladdr, reg_offset);

   emit_before(inst, SCRATCH_READ(temp, index));
}

/**
 * Emits an instruction after @inst to store the value to be written
 * to @orig_dst to scratch space at @base_offset, from @temp.
 */
void
vec4_visitor::emit_scratch_write(vec4_instruction *inst,
                                 src_reg temp, dst_reg orig_dst,
                                 int base_offset)
{
   int reg_offset = base_offset + orig_dst.reg_offset;
   src_reg index = get_scratch_offset(inst, orig_dst.reladdr, reg_offset);

   dst_reg dst = dst_reg(brw_writemask(brw_vec8_grf(0, 0),
                                       orig_dst.writemask));
   vec4_instruction *write = SCRATCH_WRITE(dst, temp, index);
   write->predicate = inst->predicate;
   write->ir = inst->ir;
   write->annotation = inst->annotation;
   inst->insert_after(write);
}

/**
 * We can't generally support array access in GRF space, because a
 * single instruction's destination can only span 2 contiguous
 * registers.  So, we send all GRF arrays that get variable index
 * access to scratch space.
 */
void
vec4_visitor::move_grf_array_access_to_scratch()
{
   int scratch_loc[this->virtual_grf_count];

   for (int i = 0; i < this->virtual_grf_count; i++) {
      scratch_loc[i] = -1;
   }

   /* First, calculate the set of virtual GRFs that need to be punted
    * to scratch due to having any array access on them, and where in
    * scratch.
    */
   foreach_list(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      if (inst->dst.file == GRF && inst->dst.reladdr &&
          scratch_loc[inst->dst.reg] == -1) {
         scratch_loc[inst->dst.reg] = c->last_scratch;
         c->last_scratch += this->virtual_grf_sizes[inst->dst.reg] * 8 * 4;
      }

      for (int i = 0; i < 3; i++) {
         src_reg *src = &inst->src[i];

         if (src->file == GRF && src->reladdr &&
             scratch_loc[src->reg] == -1) {
            scratch_loc[src->reg] = c->last_scratch;
            c->last_scratch += this->virtual_grf_sizes[src->reg] * 8 * 4;
         }
      }
   }

   /* Now, for anything that will be accessed through scratch, rewrite
    * it to load/store.  Note that this is a _safe list walk, because
    * we may generate a new scratch_write instruction after the one
    * we're processing.
    */
   foreach_list_safe(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      /* Set up the annotation tracking for newly generated instructions. */
      base_ir = inst->ir;
      current_annotation = inst->annotation;

      if (inst->dst.file == GRF && scratch_loc[inst->dst.reg] != -1) {
         src_reg temp = src_reg(this, glsl_type::vec4_type);

         emit_scratch_write(inst, temp, inst->dst, scratch_loc[inst->dst.reg]);

         inst->dst.file = temp.file;
         inst->dst.reg = temp.reg;
         inst->dst.reg_offset = temp.reg_offset;
         inst->dst.reladdr = NULL;
      }

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != GRF || scratch_loc[inst->src[i].reg] == -1)
            continue;

         dst_reg temp = dst_reg(this, glsl_type::vec4_type);

         emit_scratch_read(inst, temp, inst->src[i],
                           scratch_loc[inst->src[i].reg]);

         inst->src[i].file = temp.file;
         inst->src[i].reg = temp.reg;
         inst->src[i].reg_offset = temp.reg_offset;
         inst->src[i].reladdr = NULL;
      }
   }
}

/**
 * Emits an instruction before @inst to load the value named by @orig_src
 * from the pull constant buffer (surface) at @base_offset to @temp.
 */
void
vec4_visitor::emit_pull_constant_load(vec4_instruction *inst,
                                      dst_reg temp, src_reg orig_src,
                                      int base_offset)
{
   int reg_offset = base_offset + orig_src.reg_offset;
   src_reg index = get_pull_constant_offset(inst, orig_src.reladdr, reg_offset);
   vec4_instruction *load;

   load = new(mem_ctx) vec4_instruction(this, VS_OPCODE_PULL_CONSTANT_LOAD,
                                        temp, index);
   load->base_mrf = 14;
   load->mlen = 1;
   emit_before(inst, load);
}
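
/* Worked example (informative only): for a pull-constant element at vec4
 * offset 2 with no reladdr, get_pull_constant_offset() above yields byte
 * offset 2 * 16 = 32 into the constant surface on pre-gen6, and vec4 offset
 * 2 on gen6+, so the PULL_CONSTANT_LOAD emitted here fetches the same
 * 16-byte slot on either generation.
 */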

/**
 * Implements array access of uniforms by inserting a
 * PULL_CONSTANT_LOAD instruction.
 *
 * Unlike temporary GRF array access (where we don't support it due to
 * the difficulty of doing relative addressing on instruction
 * destinations), we could potentially do array access of uniforms
 * that were loaded in GRF space as push constants.  In real-world
 * usage we've seen, though, the arrays being used are always larger
 * than we could load as push constants, so just always move all
 * uniform array access out to a pull constant buffer.
 */
void
vec4_visitor::move_uniform_array_access_to_pull_constants()
{
   int pull_constant_loc[this->uniforms];

   for (int i = 0; i < this->uniforms; i++) {
      pull_constant_loc[i] = -1;
   }

   /* Walk through and find array access of uniforms.  Put a copy of that
    * uniform in the pull constant buffer.
    *
    * Note that we don't move constant-indexed accesses to arrays.  No
    * testing has been done of the performance impact of this choice.
    */
   foreach_list_safe(node, &this->instructions) {
      vec4_instruction *inst = (vec4_instruction *)node;

      for (int i = 0; i < 3; i++) {
         if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr)
            continue;

         int uniform = inst->src[i].reg;

         /* If this array isn't already present in the pull constant buffer,
          * add it.
          */
         if (pull_constant_loc[uniform] == -1) {
            const float **values = &prog_data->param[uniform * 4];

            pull_constant_loc[uniform] = prog_data->nr_pull_params;

            for (int j = 0; j < uniform_size[uniform] * 4; j++) {
               prog_data->pull_param[prog_data->nr_pull_params++] = values[j];
            }
         }

         /* Set up the annotation tracking for newly generated instructions. */
         base_ir = inst->ir;
         current_annotation = inst->annotation;

         dst_reg temp = dst_reg(this, glsl_type::vec4_type);

         emit_pull_constant_load(inst, temp, inst->src[i],
                                 pull_constant_loc[uniform]);

         inst->src[i].file = temp.file;
         inst->src[i].reg = temp.reg;
         inst->src[i].reg_offset = temp.reg_offset;
         inst->src[i].reladdr = NULL;
      }
   }

   /* Now there are no accesses of the UNIFORM file with a reladdr, so
    * no need to track them as larger-than-vec4 objects.  This will be
    * relied on in cutting out unused uniform vectors from push
    * constants.
    */
   split_uniform_registers();
}

vec4_visitor::vec4_visitor(struct brw_vs_compile *c,
                           struct gl_shader_program *prog,
                           struct brw_shader *shader)
{
   this->c = c;
   this->p = &c->func;
   this->brw = p->brw;
   this->intel = &brw->intel;
   this->ctx = &intel->ctx;
   this->prog = prog;
   this->shader = shader;

   this->mem_ctx = ralloc_context(NULL);
   this->failed = false;

   this->base_ir = NULL;
   this->current_annotation = NULL;

   this->vp = prog->VertexProgram;
   this->prog_data = &c->prog_data;

   this->variable_ht = hash_table_ctor(0,
                                       hash_table_pointer_hash,
                                       hash_table_pointer_compare);

   this->virtual_grf_def = NULL;
   this->virtual_grf_use = NULL;
   this->virtual_grf_sizes = NULL;
   this->virtual_grf_count = 0;
   this->virtual_grf_array_size = 0;
   this->live_intervals_valid = false;

   this->uniforms = 0;
}

vec4_visitor::~vec4_visitor()
{
   ralloc_free(this->mem_ctx);
   hash_table_dtor(this->variable_ht);
}

void
vec4_visitor::fail(const char *format, ...)
{
   va_list va;
   char *msg;

   if (failed)
      return;

   failed = true;

   va_start(va, format);
   msg = ralloc_vasprintf(mem_ctx, format, va);
   va_end(va);
   msg = ralloc_asprintf(mem_ctx, "VS compile failed: %s\n", msg);

   this->fail_msg = msg;

   if (INTEL_DEBUG & DEBUG_VS) {
      fprintf(stderr, "%s", msg);
   }
}

} /* namespace brw */