brw_vec4_visitor.cpp revision d912669034eb7bf5c162358a7a574ec7a4c963c7
1/* 2 * Copyright © 2011 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24#include "brw_vec4.h" 25extern "C" { 26#include "main/macros.h" 27#include "program/prog_parameter.h" 28} 29 30namespace brw { 31 32src_reg::src_reg(dst_reg reg) 33{ 34 init(); 35 36 this->file = reg.file; 37 this->reg = reg.reg; 38 this->reg_offset = reg.reg_offset; 39 this->type = reg.type; 40 this->reladdr = reg.reladdr; 41 this->fixed_hw_reg = reg.fixed_hw_reg; 42 43 int swizzles[4]; 44 int next_chan = 0; 45 int last = 0; 46 47 for (int i = 0; i < 4; i++) { 48 if (!(reg.writemask & (1 << i))) 49 continue; 50 51 swizzles[next_chan++] = last = i; 52 } 53 54 for (; next_chan < 4; next_chan++) { 55 swizzles[next_chan] = last; 56 } 57 58 this->swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1], 59 swizzles[2], swizzles[3]); 60} 61 62dst_reg::dst_reg(src_reg reg) 63{ 64 init(); 65 66 this->file = reg.file; 67 this->reg = reg.reg; 68 this->reg_offset = reg.reg_offset; 69 this->type = reg.type; 70 this->writemask = WRITEMASK_XYZW; 71 this->reladdr = reg.reladdr; 72 this->fixed_hw_reg = reg.fixed_hw_reg; 73} 74 75vec4_instruction::vec4_instruction(vec4_visitor *v, 76 enum opcode opcode, dst_reg dst, 77 src_reg src0, src_reg src1, src_reg src2) 78{ 79 this->opcode = opcode; 80 this->dst = dst; 81 this->src[0] = src0; 82 this->src[1] = src1; 83 this->src[2] = src2; 84 this->ir = v->base_ir; 85 this->annotation = v->current_annotation; 86} 87 88vec4_instruction * 89vec4_visitor::emit(vec4_instruction *inst) 90{ 91 this->instructions.push_tail(inst); 92 93 return inst; 94} 95 96vec4_instruction * 97vec4_visitor::emit_before(vec4_instruction *inst, vec4_instruction *new_inst) 98{ 99 new_inst->ir = inst->ir; 100 new_inst->annotation = inst->annotation; 101 102 inst->insert_before(new_inst); 103 104 return inst; 105} 106 107vec4_instruction * 108vec4_visitor::emit(enum opcode opcode, dst_reg dst, 109 src_reg src0, src_reg src1, src_reg src2) 110{ 111 return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, 112 src0, src1, src2)); 113} 114 115 116vec4_instruction * 117vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1) 118{ 119 return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0, src1)); 120} 121 122vec4_instruction * 123vec4_visitor::emit(enum opcode opcode, dst_reg dst, src_reg src0) 124{ 125 return emit(new(mem_ctx) vec4_instruction(this, opcode, dst, src0)); 126} 127 128vec4_instruction * 129vec4_visitor::emit(enum opcode opcode) 130{ 131 return emit(new(mem_ctx) vec4_instruction(this, opcode, dst_reg())); 132} 133 134#define ALU1(op) \ 135 vec4_instruction * \ 136 vec4_visitor::op(dst_reg dst, src_reg src0) \ 137 { \ 138 return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst, \ 139 src0); \ 140 } 141 142#define ALU2(op) \ 143 vec4_instruction * \ 144 vec4_visitor::op(dst_reg dst, src_reg src0, src_reg src1) \ 145 { \ 146 return new(mem_ctx) vec4_instruction(this, BRW_OPCODE_##op, dst, \ 147 src0, src1); \ 148 } 149 150ALU1(NOT) 151ALU1(MOV) 152ALU1(FRC) 153ALU1(RNDD) 154ALU1(RNDE) 155ALU1(RNDZ) 156ALU2(ADD) 157ALU2(MUL) 158ALU2(MACH) 159ALU2(AND) 160ALU2(OR) 161ALU2(XOR) 162ALU2(DP3) 163ALU2(DP4) 164 165/** Gen4 predicated IF. */ 166vec4_instruction * 167vec4_visitor::IF(uint32_t predicate) 168{ 169 vec4_instruction *inst; 170 171 inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF); 172 inst->predicate = predicate; 173 174 return inst; 175} 176 177/** Gen6+ IF with embedded comparison. */ 178vec4_instruction * 179vec4_visitor::IF(src_reg src0, src_reg src1, uint32_t condition) 180{ 181 assert(intel->gen >= 6); 182 183 vec4_instruction *inst; 184 185 inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_IF, dst_null_d(), 186 src0, src1); 187 inst->conditional_mod = condition; 188 189 return inst; 190} 191 192/** 193 * CMP: Sets the low bit of the destination channels with the result 194 * of the comparison, while the upper bits are undefined, and updates 195 * the flag register with the packed 16 bits of the result. 196 */ 197vec4_instruction * 198vec4_visitor::CMP(dst_reg dst, src_reg src0, src_reg src1, uint32_t condition) 199{ 200 vec4_instruction *inst; 201 202 /* original gen4 does type conversion to the destination type 203 * before before comparison, producing garbage results for floating 204 * point comparisons. 205 */ 206 if (intel->gen == 4) { 207 dst.type = src0.type; 208 if (dst.file == HW_REG) 209 dst.fixed_hw_reg.type = dst.type; 210 } 211 212 inst = new(mem_ctx) vec4_instruction(this, BRW_OPCODE_CMP, dst, src0, src1); 213 inst->conditional_mod = condition; 214 215 return inst; 216} 217 218vec4_instruction * 219vec4_visitor::SCRATCH_READ(dst_reg dst, src_reg index) 220{ 221 vec4_instruction *inst; 222 223 inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_READ, 224 dst, index); 225 inst->base_mrf = 14; 226 inst->mlen = 1; 227 228 return inst; 229} 230 231vec4_instruction * 232vec4_visitor::SCRATCH_WRITE(dst_reg dst, src_reg src, src_reg index) 233{ 234 vec4_instruction *inst; 235 236 inst = new(mem_ctx) vec4_instruction(this, VS_OPCODE_SCRATCH_WRITE, 237 dst, src, index); 238 inst->base_mrf = 13; 239 inst->mlen = 2; 240 241 return inst; 242} 243 244void 245vec4_visitor::emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements) 246{ 247 static enum opcode dot_opcodes[] = { 248 BRW_OPCODE_DP2, BRW_OPCODE_DP3, BRW_OPCODE_DP4 249 }; 250 251 emit(dot_opcodes[elements - 2], dst, src0, src1); 252} 253 254void 255vec4_visitor::emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src) 256{ 257 /* The gen6 math instruction ignores the source modifiers -- 258 * swizzle, abs, negate, and at least some parts of the register 259 * region description. 260 * 261 * While it would seem that this MOV could be avoided at this point 262 * in the case that the swizzle is matched up with the destination 263 * writemask, note that uniform packing and register allocation 264 * could rearrange our swizzle, so let's leave this matter up to 265 * copy propagation later. 266 */ 267 src_reg temp_src = src_reg(this, glsl_type::vec4_type); 268 emit(MOV(dst_reg(temp_src), src)); 269 270 if (dst.writemask != WRITEMASK_XYZW) { 271 /* The gen6 math instruction must be align1, so we can't do 272 * writemasks. 273 */ 274 dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type); 275 276 emit(opcode, temp_dst, temp_src); 277 278 emit(MOV(dst, src_reg(temp_dst))); 279 } else { 280 emit(opcode, dst, temp_src); 281 } 282} 283 284void 285vec4_visitor::emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src) 286{ 287 vec4_instruction *inst = emit(opcode, dst, src); 288 inst->base_mrf = 1; 289 inst->mlen = 1; 290} 291 292void 293vec4_visitor::emit_math(opcode opcode, dst_reg dst, src_reg src) 294{ 295 switch (opcode) { 296 case SHADER_OPCODE_RCP: 297 case SHADER_OPCODE_RSQ: 298 case SHADER_OPCODE_SQRT: 299 case SHADER_OPCODE_EXP2: 300 case SHADER_OPCODE_LOG2: 301 case SHADER_OPCODE_SIN: 302 case SHADER_OPCODE_COS: 303 break; 304 default: 305 assert(!"not reached: bad math opcode"); 306 return; 307 } 308 309 if (intel->gen >= 6) { 310 return emit_math1_gen6(opcode, dst, src); 311 } else { 312 return emit_math1_gen4(opcode, dst, src); 313 } 314} 315 316void 317vec4_visitor::emit_math2_gen6(enum opcode opcode, 318 dst_reg dst, src_reg src0, src_reg src1) 319{ 320 src_reg expanded; 321 322 /* The gen6 math instruction ignores the source modifiers -- 323 * swizzle, abs, negate, and at least some parts of the register 324 * region description. Move the sources to temporaries to make it 325 * generally work. 326 */ 327 328 expanded = src_reg(this, glsl_type::vec4_type); 329 expanded.type = src0.type; 330 emit(MOV(dst_reg(expanded), src0)); 331 src0 = expanded; 332 333 expanded = src_reg(this, glsl_type::vec4_type); 334 expanded.type = src1.type; 335 emit(MOV(dst_reg(expanded), src1)); 336 src1 = expanded; 337 338 if (dst.writemask != WRITEMASK_XYZW) { 339 /* The gen6 math instruction must be align1, so we can't do 340 * writemasks. 341 */ 342 dst_reg temp_dst = dst_reg(this, glsl_type::vec4_type); 343 temp_dst.type = dst.type; 344 345 emit(opcode, temp_dst, src0, src1); 346 347 emit(MOV(dst, src_reg(temp_dst))); 348 } else { 349 emit(opcode, dst, src0, src1); 350 } 351} 352 353void 354vec4_visitor::emit_math2_gen4(enum opcode opcode, 355 dst_reg dst, src_reg src0, src_reg src1) 356{ 357 vec4_instruction *inst = emit(opcode, dst, src0, src1); 358 inst->base_mrf = 1; 359 inst->mlen = 2; 360} 361 362void 363vec4_visitor::emit_math(enum opcode opcode, 364 dst_reg dst, src_reg src0, src_reg src1) 365{ 366 switch (opcode) { 367 case SHADER_OPCODE_POW: 368 case SHADER_OPCODE_INT_QUOTIENT: 369 case SHADER_OPCODE_INT_REMAINDER: 370 break; 371 default: 372 assert(!"not reached: unsupported binary math opcode"); 373 return; 374 } 375 376 if (intel->gen >= 6) { 377 return emit_math2_gen6(opcode, dst, src0, src1); 378 } else { 379 return emit_math2_gen4(opcode, dst, src0, src1); 380 } 381} 382 383void 384vec4_visitor::visit_instructions(const exec_list *list) 385{ 386 foreach_list(node, list) { 387 ir_instruction *ir = (ir_instruction *)node; 388 389 base_ir = ir; 390 ir->accept(this); 391 } 392} 393 394 395static int 396type_size(const struct glsl_type *type) 397{ 398 unsigned int i; 399 int size; 400 401 switch (type->base_type) { 402 case GLSL_TYPE_UINT: 403 case GLSL_TYPE_INT: 404 case GLSL_TYPE_FLOAT: 405 case GLSL_TYPE_BOOL: 406 if (type->is_matrix()) { 407 return type->matrix_columns; 408 } else { 409 /* Regardless of size of vector, it gets a vec4. This is bad 410 * packing for things like floats, but otherwise arrays become a 411 * mess. Hopefully a later pass over the code can pack scalars 412 * down if appropriate. 413 */ 414 return 1; 415 } 416 case GLSL_TYPE_ARRAY: 417 assert(type->length > 0); 418 return type_size(type->fields.array) * type->length; 419 case GLSL_TYPE_STRUCT: 420 size = 0; 421 for (i = 0; i < type->length; i++) { 422 size += type_size(type->fields.structure[i].type); 423 } 424 return size; 425 case GLSL_TYPE_SAMPLER: 426 /* Samplers take up one slot in UNIFORMS[], but they're baked in 427 * at link time. 428 */ 429 return 1; 430 default: 431 assert(0); 432 return 0; 433 } 434} 435 436int 437vec4_visitor::virtual_grf_alloc(int size) 438{ 439 if (virtual_grf_array_size <= virtual_grf_count) { 440 if (virtual_grf_array_size == 0) 441 virtual_grf_array_size = 16; 442 else 443 virtual_grf_array_size *= 2; 444 virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int, 445 virtual_grf_array_size); 446 virtual_grf_reg_map = reralloc(mem_ctx, virtual_grf_reg_map, int, 447 virtual_grf_array_size); 448 } 449 virtual_grf_reg_map[virtual_grf_count] = virtual_grf_reg_count; 450 virtual_grf_reg_count += size; 451 virtual_grf_sizes[virtual_grf_count] = size; 452 return virtual_grf_count++; 453} 454 455src_reg::src_reg(class vec4_visitor *v, const struct glsl_type *type) 456{ 457 init(); 458 459 this->file = GRF; 460 this->reg = v->virtual_grf_alloc(type_size(type)); 461 462 if (type->is_array() || type->is_record()) { 463 this->swizzle = BRW_SWIZZLE_NOOP; 464 } else { 465 this->swizzle = swizzle_for_size(type->vector_elements); 466 } 467 468 this->type = brw_type_for_base_type(type); 469} 470 471dst_reg::dst_reg(class vec4_visitor *v, const struct glsl_type *type) 472{ 473 init(); 474 475 this->file = GRF; 476 this->reg = v->virtual_grf_alloc(type_size(type)); 477 478 if (type->is_array() || type->is_record()) { 479 this->writemask = WRITEMASK_XYZW; 480 } else { 481 this->writemask = (1 << type->vector_elements) - 1; 482 } 483 484 this->type = brw_type_for_base_type(type); 485} 486 487/* Our support for uniforms is piggy-backed on the struct 488 * gl_fragment_program, because that's where the values actually 489 * get stored, rather than in some global gl_shader_program uniform 490 * store. 491 */ 492int 493vec4_visitor::setup_uniform_values(int loc, const glsl_type *type) 494{ 495 unsigned int offset = 0; 496 float *values = &this->vp->Base.Parameters->ParameterValues[loc][0].f; 497 498 if (type->is_matrix()) { 499 const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT, 500 type->vector_elements, 501 1); 502 503 for (unsigned int i = 0; i < type->matrix_columns; i++) { 504 offset += setup_uniform_values(loc + offset, column); 505 } 506 507 return offset; 508 } 509 510 switch (type->base_type) { 511 case GLSL_TYPE_FLOAT: 512 case GLSL_TYPE_UINT: 513 case GLSL_TYPE_INT: 514 case GLSL_TYPE_BOOL: 515 for (unsigned int i = 0; i < type->vector_elements; i++) { 516 c->prog_data.param[this->uniforms * 4 + i] = &values[i]; 517 } 518 519 /* Set up pad elements to get things aligned to a vec4 boundary. */ 520 for (unsigned int i = type->vector_elements; i < 4; i++) { 521 static float zero = 0; 522 523 c->prog_data.param[this->uniforms * 4 + i] = &zero; 524 } 525 526 /* Track the size of this uniform vector, for future packing of 527 * uniforms. 528 */ 529 this->uniform_vector_size[this->uniforms] = type->vector_elements; 530 this->uniforms++; 531 532 return 1; 533 534 case GLSL_TYPE_STRUCT: 535 for (unsigned int i = 0; i < type->length; i++) { 536 offset += setup_uniform_values(loc + offset, 537 type->fields.structure[i].type); 538 } 539 return offset; 540 541 case GLSL_TYPE_ARRAY: 542 for (unsigned int i = 0; i < type->length; i++) { 543 offset += setup_uniform_values(loc + offset, type->fields.array); 544 } 545 return offset; 546 547 case GLSL_TYPE_SAMPLER: 548 /* The sampler takes up a slot, but we don't use any values from it. */ 549 return 1; 550 551 default: 552 assert(!"not reached"); 553 return 0; 554 } 555} 556 557void 558vec4_visitor::setup_uniform_clipplane_values() 559{ 560 gl_clip_plane *clip_planes = brw_select_clip_planes(ctx); 561 562 int compacted_clipplane_index = 0; 563 for (int i = 0; i < MAX_CLIP_PLANES; ++i) { 564 if (ctx->Transform.ClipPlanesEnabled & (1 << i)) { 565 this->uniform_vector_size[this->uniforms] = 4; 566 this->userplane[compacted_clipplane_index] = dst_reg(UNIFORM, this->uniforms); 567 this->userplane[compacted_clipplane_index].type = BRW_REGISTER_TYPE_F; 568 for (int j = 0; j < 4; ++j) { 569 c->prog_data.param[this->uniforms * 4 + j] = &clip_planes[i][j]; 570 } 571 ++compacted_clipplane_index; 572 ++this->uniforms; 573 } 574 } 575} 576 577/* Our support for builtin uniforms is even scarier than non-builtin. 578 * It sits on top of the PROG_STATE_VAR parameters that are 579 * automatically updated from GL context state. 580 */ 581void 582vec4_visitor::setup_builtin_uniform_values(ir_variable *ir) 583{ 584 const ir_state_slot *const slots = ir->state_slots; 585 assert(ir->state_slots != NULL); 586 587 for (unsigned int i = 0; i < ir->num_state_slots; i++) { 588 /* This state reference has already been setup by ir_to_mesa, 589 * but we'll get the same index back here. We can reference 590 * ParameterValues directly, since unlike brw_fs.cpp, we never 591 * add new state references during compile. 592 */ 593 int index = _mesa_add_state_reference(this->vp->Base.Parameters, 594 (gl_state_index *)slots[i].tokens); 595 float *values = &this->vp->Base.Parameters->ParameterValues[index][0].f; 596 597 this->uniform_vector_size[this->uniforms] = 0; 598 /* Add each of the unique swizzled channels of the element. 599 * This will end up matching the size of the glsl_type of this field. 600 */ 601 int last_swiz = -1; 602 for (unsigned int j = 0; j < 4; j++) { 603 int swiz = GET_SWZ(slots[i].swizzle, j); 604 last_swiz = swiz; 605 606 c->prog_data.param[this->uniforms * 4 + j] = &values[swiz]; 607 if (swiz <= last_swiz) 608 this->uniform_vector_size[this->uniforms]++; 609 } 610 this->uniforms++; 611 } 612} 613 614dst_reg * 615vec4_visitor::variable_storage(ir_variable *var) 616{ 617 return (dst_reg *)hash_table_find(this->variable_ht, var); 618} 619 620void 621vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate) 622{ 623 ir_expression *expr = ir->as_expression(); 624 625 *predicate = BRW_PREDICATE_NORMAL; 626 627 if (expr) { 628 src_reg op[2]; 629 vec4_instruction *inst; 630 631 assert(expr->get_num_operands() <= 2); 632 for (unsigned int i = 0; i < expr->get_num_operands(); i++) { 633 expr->operands[i]->accept(this); 634 op[i] = this->result; 635 } 636 637 switch (expr->operation) { 638 case ir_unop_logic_not: 639 inst = emit(AND(dst_null_d(), op[0], src_reg(1))); 640 inst->conditional_mod = BRW_CONDITIONAL_Z; 641 break; 642 643 case ir_binop_logic_xor: 644 inst = emit(XOR(dst_null_d(), op[0], op[1])); 645 inst->conditional_mod = BRW_CONDITIONAL_NZ; 646 break; 647 648 case ir_binop_logic_or: 649 inst = emit(OR(dst_null_d(), op[0], op[1])); 650 inst->conditional_mod = BRW_CONDITIONAL_NZ; 651 break; 652 653 case ir_binop_logic_and: 654 inst = emit(AND(dst_null_d(), op[0], op[1])); 655 inst->conditional_mod = BRW_CONDITIONAL_NZ; 656 break; 657 658 case ir_unop_f2b: 659 if (intel->gen >= 6) { 660 emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ)); 661 } else { 662 inst = emit(MOV(dst_null_f(), op[0])); 663 inst->conditional_mod = BRW_CONDITIONAL_NZ; 664 } 665 break; 666 667 case ir_unop_i2b: 668 if (intel->gen >= 6) { 669 emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 670 } else { 671 inst = emit(MOV(dst_null_d(), op[0])); 672 inst->conditional_mod = BRW_CONDITIONAL_NZ; 673 } 674 break; 675 676 case ir_binop_all_equal: 677 inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z)); 678 *predicate = BRW_PREDICATE_ALIGN16_ALL4H; 679 break; 680 681 case ir_binop_any_nequal: 682 inst = emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ)); 683 *predicate = BRW_PREDICATE_ALIGN16_ANY4H; 684 break; 685 686 case ir_unop_any: 687 inst = emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 688 *predicate = BRW_PREDICATE_ALIGN16_ANY4H; 689 break; 690 691 case ir_binop_greater: 692 case ir_binop_gequal: 693 case ir_binop_less: 694 case ir_binop_lequal: 695 case ir_binop_equal: 696 case ir_binop_nequal: 697 emit(CMP(dst_null_d(), op[0], op[1], 698 brw_conditional_for_comparison(expr->operation))); 699 break; 700 701 default: 702 assert(!"not reached"); 703 break; 704 } 705 return; 706 } 707 708 ir->accept(this); 709 710 if (intel->gen >= 6) { 711 vec4_instruction *inst = emit(AND(dst_null_d(), 712 this->result, src_reg(1))); 713 inst->conditional_mod = BRW_CONDITIONAL_NZ; 714 } else { 715 vec4_instruction *inst = emit(MOV(dst_null_d(), this->result)); 716 inst->conditional_mod = BRW_CONDITIONAL_NZ; 717 } 718} 719 720/** 721 * Emit a gen6 IF statement with the comparison folded into the IF 722 * instruction. 723 */ 724void 725vec4_visitor::emit_if_gen6(ir_if *ir) 726{ 727 ir_expression *expr = ir->condition->as_expression(); 728 729 if (expr) { 730 src_reg op[2]; 731 dst_reg temp; 732 733 assert(expr->get_num_operands() <= 2); 734 for (unsigned int i = 0; i < expr->get_num_operands(); i++) { 735 expr->operands[i]->accept(this); 736 op[i] = this->result; 737 } 738 739 switch (expr->operation) { 740 case ir_unop_logic_not: 741 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_Z)); 742 return; 743 744 case ir_binop_logic_xor: 745 emit(IF(op[0], op[1], BRW_CONDITIONAL_NZ)); 746 return; 747 748 case ir_binop_logic_or: 749 temp = dst_reg(this, glsl_type::bool_type); 750 emit(OR(temp, op[0], op[1])); 751 emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ)); 752 return; 753 754 case ir_binop_logic_and: 755 temp = dst_reg(this, glsl_type::bool_type); 756 emit(AND(temp, op[0], op[1])); 757 emit(IF(src_reg(temp), src_reg(0), BRW_CONDITIONAL_NZ)); 758 return; 759 760 case ir_unop_f2b: 761 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 762 return; 763 764 case ir_unop_i2b: 765 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 766 return; 767 768 case ir_binop_greater: 769 case ir_binop_gequal: 770 case ir_binop_less: 771 case ir_binop_lequal: 772 case ir_binop_equal: 773 case ir_binop_nequal: 774 emit(IF(op[0], op[1], 775 brw_conditional_for_comparison(expr->operation))); 776 return; 777 778 case ir_binop_all_equal: 779 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z)); 780 emit(IF(BRW_PREDICATE_ALIGN16_ALL4H)); 781 return; 782 783 case ir_binop_any_nequal: 784 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ)); 785 emit(IF(BRW_PREDICATE_ALIGN16_ANY4H)); 786 return; 787 788 case ir_unop_any: 789 emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 790 emit(IF(BRW_PREDICATE_ALIGN16_ANY4H)); 791 return; 792 793 default: 794 assert(!"not reached"); 795 emit(IF(op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 796 return; 797 } 798 return; 799 } 800 801 ir->condition->accept(this); 802 803 emit(IF(this->result, src_reg(0), BRW_CONDITIONAL_NZ)); 804} 805 806void 807vec4_visitor::visit(ir_variable *ir) 808{ 809 dst_reg *reg = NULL; 810 811 if (variable_storage(ir)) 812 return; 813 814 switch (ir->mode) { 815 case ir_var_in: 816 reg = new(mem_ctx) dst_reg(ATTR, ir->location); 817 818 /* Do GL_FIXED rescaling for GLES2.0. Our GL_FIXED attributes 819 * come in as floating point conversions of the integer values. 820 */ 821 for (int i = ir->location; i < ir->location + type_size(ir->type); i++) { 822 if (!c->key.gl_fixed_input_size[i]) 823 continue; 824 825 dst_reg dst = *reg; 826 dst.writemask = (1 << c->key.gl_fixed_input_size[i]) - 1; 827 emit(MUL(dst, src_reg(dst), src_reg(1.0f / 65536.0f))); 828 } 829 break; 830 831 case ir_var_out: 832 reg = new(mem_ctx) dst_reg(this, ir->type); 833 834 for (int i = 0; i < type_size(ir->type); i++) { 835 output_reg[ir->location + i] = *reg; 836 output_reg[ir->location + i].reg_offset = i; 837 output_reg[ir->location + i].type = BRW_REGISTER_TYPE_F; 838 output_reg_annotation[ir->location + i] = ir->name; 839 } 840 break; 841 842 case ir_var_auto: 843 case ir_var_temporary: 844 reg = new(mem_ctx) dst_reg(this, ir->type); 845 break; 846 847 case ir_var_uniform: 848 reg = new(this->mem_ctx) dst_reg(UNIFORM, this->uniforms); 849 850 /* Track how big the whole uniform variable is, in case we need to put a 851 * copy of its data into pull constants for array access. 852 */ 853 this->uniform_size[this->uniforms] = type_size(ir->type); 854 855 if (!strncmp(ir->name, "gl_", 3)) { 856 setup_builtin_uniform_values(ir); 857 } else { 858 setup_uniform_values(ir->location, ir->type); 859 } 860 break; 861 862 default: 863 assert(!"not reached"); 864 } 865 866 reg->type = brw_type_for_base_type(ir->type); 867 hash_table_insert(this->variable_ht, reg, ir); 868} 869 870void 871vec4_visitor::visit(ir_loop *ir) 872{ 873 dst_reg counter; 874 875 /* We don't want debugging output to print the whole body of the 876 * loop as the annotation. 877 */ 878 this->base_ir = NULL; 879 880 if (ir->counter != NULL) { 881 this->base_ir = ir->counter; 882 ir->counter->accept(this); 883 counter = *(variable_storage(ir->counter)); 884 885 if (ir->from != NULL) { 886 this->base_ir = ir->from; 887 ir->from->accept(this); 888 889 emit(MOV(counter, this->result)); 890 } 891 } 892 893 emit(BRW_OPCODE_DO); 894 895 if (ir->to) { 896 this->base_ir = ir->to; 897 ir->to->accept(this); 898 899 emit(CMP(dst_null_d(), src_reg(counter), this->result, 900 brw_conditional_for_comparison(ir->cmp))); 901 902 vec4_instruction *inst = emit(BRW_OPCODE_BREAK); 903 inst->predicate = BRW_PREDICATE_NORMAL; 904 } 905 906 visit_instructions(&ir->body_instructions); 907 908 909 if (ir->increment) { 910 this->base_ir = ir->increment; 911 ir->increment->accept(this); 912 emit(ADD(counter, src_reg(counter), this->result)); 913 } 914 915 emit(BRW_OPCODE_WHILE); 916} 917 918void 919vec4_visitor::visit(ir_loop_jump *ir) 920{ 921 switch (ir->mode) { 922 case ir_loop_jump::jump_break: 923 emit(BRW_OPCODE_BREAK); 924 break; 925 case ir_loop_jump::jump_continue: 926 emit(BRW_OPCODE_CONTINUE); 927 break; 928 } 929} 930 931 932void 933vec4_visitor::visit(ir_function_signature *ir) 934{ 935 assert(0); 936 (void)ir; 937} 938 939void 940vec4_visitor::visit(ir_function *ir) 941{ 942 /* Ignore function bodies other than main() -- we shouldn't see calls to 943 * them since they should all be inlined. 944 */ 945 if (strcmp(ir->name, "main") == 0) { 946 const ir_function_signature *sig; 947 exec_list empty; 948 949 sig = ir->matching_signature(&empty); 950 951 assert(sig); 952 953 visit_instructions(&sig->body); 954 } 955} 956 957GLboolean 958vec4_visitor::try_emit_sat(ir_expression *ir) 959{ 960 ir_rvalue *sat_src = ir->as_rvalue_to_saturate(); 961 if (!sat_src) 962 return false; 963 964 sat_src->accept(this); 965 src_reg src = this->result; 966 967 this->result = src_reg(this, ir->type); 968 vec4_instruction *inst; 969 inst = emit(MOV(dst_reg(this->result), src)); 970 inst->saturate = true; 971 972 return true; 973} 974 975void 976vec4_visitor::emit_bool_comparison(unsigned int op, 977 dst_reg dst, src_reg src0, src_reg src1) 978{ 979 /* original gen4 does destination conversion before comparison. */ 980 if (intel->gen < 5) 981 dst.type = src0.type; 982 983 emit(CMP(dst, src0, src1, brw_conditional_for_comparison(op))); 984 985 dst.type = BRW_REGISTER_TYPE_D; 986 emit(AND(dst, src_reg(dst), src_reg(0x1))); 987} 988 989void 990vec4_visitor::visit(ir_expression *ir) 991{ 992 unsigned int operand; 993 src_reg op[Elements(ir->operands)]; 994 src_reg result_src; 995 dst_reg result_dst; 996 vec4_instruction *inst; 997 998 if (try_emit_sat(ir)) 999 return; 1000 1001 for (operand = 0; operand < ir->get_num_operands(); operand++) { 1002 this->result.file = BAD_FILE; 1003 ir->operands[operand]->accept(this); 1004 if (this->result.file == BAD_FILE) { 1005 printf("Failed to get tree for expression operand:\n"); 1006 ir->operands[operand]->print(); 1007 exit(1); 1008 } 1009 op[operand] = this->result; 1010 1011 /* Matrix expression operands should have been broken down to vector 1012 * operations already. 1013 */ 1014 assert(!ir->operands[operand]->type->is_matrix()); 1015 } 1016 1017 int vector_elements = ir->operands[0]->type->vector_elements; 1018 if (ir->operands[1]) { 1019 vector_elements = MAX2(vector_elements, 1020 ir->operands[1]->type->vector_elements); 1021 } 1022 1023 this->result.file = BAD_FILE; 1024 1025 /* Storage for our result. Ideally for an assignment we'd be using 1026 * the actual storage for the result here, instead. 1027 */ 1028 result_src = src_reg(this, ir->type); 1029 /* convenience for the emit functions below. */ 1030 result_dst = dst_reg(result_src); 1031 /* If nothing special happens, this is the result. */ 1032 this->result = result_src; 1033 /* Limit writes to the channels that will be used by result_src later. 1034 * This does limit this temp's use as a temporary for multi-instruction 1035 * sequences. 1036 */ 1037 result_dst.writemask = (1 << ir->type->vector_elements) - 1; 1038 1039 switch (ir->operation) { 1040 case ir_unop_logic_not: 1041 /* Note that BRW_OPCODE_NOT is not appropriate here, since it is 1042 * ones complement of the whole register, not just bit 0. 1043 */ 1044 emit(XOR(result_dst, op[0], src_reg(1))); 1045 break; 1046 case ir_unop_neg: 1047 op[0].negate = !op[0].negate; 1048 this->result = op[0]; 1049 break; 1050 case ir_unop_abs: 1051 op[0].abs = true; 1052 op[0].negate = false; 1053 this->result = op[0]; 1054 break; 1055 1056 case ir_unop_sign: 1057 emit(MOV(result_dst, src_reg(0.0f))); 1058 1059 emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_G)); 1060 inst = emit(MOV(result_dst, src_reg(1.0f))); 1061 inst->predicate = BRW_PREDICATE_NORMAL; 1062 1063 emit(CMP(dst_null_d(), op[0], src_reg(0.0f), BRW_CONDITIONAL_L)); 1064 inst = emit(MOV(result_dst, src_reg(-1.0f))); 1065 inst->predicate = BRW_PREDICATE_NORMAL; 1066 1067 break; 1068 1069 case ir_unop_rcp: 1070 emit_math(SHADER_OPCODE_RCP, result_dst, op[0]); 1071 break; 1072 1073 case ir_unop_exp2: 1074 emit_math(SHADER_OPCODE_EXP2, result_dst, op[0]); 1075 break; 1076 case ir_unop_log2: 1077 emit_math(SHADER_OPCODE_LOG2, result_dst, op[0]); 1078 break; 1079 case ir_unop_exp: 1080 case ir_unop_log: 1081 assert(!"not reached: should be handled by ir_explog_to_explog2"); 1082 break; 1083 case ir_unop_sin: 1084 case ir_unop_sin_reduced: 1085 emit_math(SHADER_OPCODE_SIN, result_dst, op[0]); 1086 break; 1087 case ir_unop_cos: 1088 case ir_unop_cos_reduced: 1089 emit_math(SHADER_OPCODE_COS, result_dst, op[0]); 1090 break; 1091 1092 case ir_unop_dFdx: 1093 case ir_unop_dFdy: 1094 assert(!"derivatives not valid in vertex shader"); 1095 break; 1096 1097 case ir_unop_noise: 1098 assert(!"not reached: should be handled by lower_noise"); 1099 break; 1100 1101 case ir_binop_add: 1102 emit(ADD(result_dst, op[0], op[1])); 1103 break; 1104 case ir_binop_sub: 1105 assert(!"not reached: should be handled by ir_sub_to_add_neg"); 1106 break; 1107 1108 case ir_binop_mul: 1109 if (ir->type->is_integer()) { 1110 /* For integer multiplication, the MUL uses the low 16 bits 1111 * of one of the operands (src0 on gen6, src1 on gen7). The 1112 * MACH accumulates in the contribution of the upper 16 bits 1113 * of that operand. 1114 * 1115 * FINISHME: Emit just the MUL if we know an operand is small 1116 * enough. 1117 */ 1118 struct brw_reg acc = retype(brw_acc_reg(), BRW_REGISTER_TYPE_D); 1119 1120 emit(MUL(acc, op[0], op[1])); 1121 emit(MACH(dst_null_d(), op[0], op[1])); 1122 emit(MOV(result_dst, src_reg(acc))); 1123 } else { 1124 emit(MUL(result_dst, op[0], op[1])); 1125 } 1126 break; 1127 case ir_binop_div: 1128 /* Floating point should be lowered by DIV_TO_MUL_RCP in the compiler. */ 1129 assert(ir->type->is_integer()); 1130 emit_math(SHADER_OPCODE_INT_QUOTIENT, result_dst, op[0], op[1]); 1131 break; 1132 case ir_binop_mod: 1133 /* Floating point should be lowered by MOD_TO_FRACT in the compiler. */ 1134 assert(ir->type->is_integer()); 1135 emit_math(SHADER_OPCODE_INT_REMAINDER, result_dst, op[0], op[1]); 1136 break; 1137 1138 case ir_binop_less: 1139 case ir_binop_greater: 1140 case ir_binop_lequal: 1141 case ir_binop_gequal: 1142 case ir_binop_equal: 1143 case ir_binop_nequal: { 1144 emit(CMP(result_dst, op[0], op[1], 1145 brw_conditional_for_comparison(ir->operation))); 1146 emit(AND(result_dst, result_src, src_reg(0x1))); 1147 break; 1148 } 1149 1150 case ir_binop_all_equal: 1151 /* "==" operator producing a scalar boolean. */ 1152 if (ir->operands[0]->type->is_vector() || 1153 ir->operands[1]->type->is_vector()) { 1154 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_Z)); 1155 emit(MOV(result_dst, src_reg(0))); 1156 inst = emit(MOV(result_dst, src_reg(1))); 1157 inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H; 1158 } else { 1159 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_Z)); 1160 emit(AND(result_dst, result_src, src_reg(0x1))); 1161 } 1162 break; 1163 case ir_binop_any_nequal: 1164 /* "!=" operator producing a scalar boolean. */ 1165 if (ir->operands[0]->type->is_vector() || 1166 ir->operands[1]->type->is_vector()) { 1167 emit(CMP(dst_null_d(), op[0], op[1], BRW_CONDITIONAL_NZ)); 1168 1169 emit(MOV(result_dst, src_reg(0))); 1170 inst = emit(MOV(result_dst, src_reg(1))); 1171 inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H; 1172 } else { 1173 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_NZ)); 1174 emit(AND(result_dst, result_src, src_reg(0x1))); 1175 } 1176 break; 1177 1178 case ir_unop_any: 1179 emit(CMP(dst_null_d(), op[0], src_reg(0), BRW_CONDITIONAL_NZ)); 1180 emit(MOV(result_dst, src_reg(0))); 1181 1182 inst = emit(MOV(result_dst, src_reg(1))); 1183 inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H; 1184 break; 1185 1186 case ir_binop_logic_xor: 1187 emit(XOR(result_dst, op[0], op[1])); 1188 break; 1189 1190 case ir_binop_logic_or: 1191 emit(OR(result_dst, op[0], op[1])); 1192 break; 1193 1194 case ir_binop_logic_and: 1195 emit(AND(result_dst, op[0], op[1])); 1196 break; 1197 1198 case ir_binop_dot: 1199 assert(ir->operands[0]->type->is_vector()); 1200 assert(ir->operands[0]->type == ir->operands[1]->type); 1201 emit_dp(result_dst, op[0], op[1], ir->operands[0]->type->vector_elements); 1202 break; 1203 1204 case ir_unop_sqrt: 1205 emit_math(SHADER_OPCODE_SQRT, result_dst, op[0]); 1206 break; 1207 case ir_unop_rsq: 1208 emit_math(SHADER_OPCODE_RSQ, result_dst, op[0]); 1209 break; 1210 case ir_unop_i2f: 1211 case ir_unop_i2u: 1212 case ir_unop_u2i: 1213 case ir_unop_u2f: 1214 case ir_unop_b2f: 1215 case ir_unop_b2i: 1216 case ir_unop_f2i: 1217 emit(MOV(result_dst, op[0])); 1218 break; 1219 case ir_unop_f2b: 1220 case ir_unop_i2b: { 1221 emit(CMP(result_dst, op[0], src_reg(0.0f), BRW_CONDITIONAL_NZ)); 1222 emit(AND(result_dst, result_src, src_reg(1))); 1223 break; 1224 } 1225 1226 case ir_unop_trunc: 1227 emit(RNDZ(result_dst, op[0])); 1228 break; 1229 case ir_unop_ceil: 1230 op[0].negate = !op[0].negate; 1231 inst = emit(RNDD(result_dst, op[0])); 1232 this->result.negate = true; 1233 break; 1234 case ir_unop_floor: 1235 inst = emit(RNDD(result_dst, op[0])); 1236 break; 1237 case ir_unop_fract: 1238 inst = emit(FRC(result_dst, op[0])); 1239 break; 1240 case ir_unop_round_even: 1241 emit(RNDE(result_dst, op[0])); 1242 break; 1243 1244 case ir_binop_min: 1245 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_L)); 1246 1247 inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]); 1248 inst->predicate = BRW_PREDICATE_NORMAL; 1249 break; 1250 case ir_binop_max: 1251 emit(CMP(result_dst, op[0], op[1], BRW_CONDITIONAL_G)); 1252 1253 inst = emit(BRW_OPCODE_SEL, result_dst, op[0], op[1]); 1254 inst->predicate = BRW_PREDICATE_NORMAL; 1255 break; 1256 1257 case ir_binop_pow: 1258 emit_math(SHADER_OPCODE_POW, result_dst, op[0], op[1]); 1259 break; 1260 1261 case ir_unop_bit_not: 1262 inst = emit(NOT(result_dst, op[0])); 1263 break; 1264 case ir_binop_bit_and: 1265 inst = emit(AND(result_dst, op[0], op[1])); 1266 break; 1267 case ir_binop_bit_xor: 1268 inst = emit(XOR(result_dst, op[0], op[1])); 1269 break; 1270 case ir_binop_bit_or: 1271 inst = emit(OR(result_dst, op[0], op[1])); 1272 break; 1273 1274 case ir_binop_lshift: 1275 inst = emit(BRW_OPCODE_SHL, result_dst, op[0], op[1]); 1276 break; 1277 1278 case ir_binop_rshift: 1279 if (ir->type->base_type == GLSL_TYPE_INT) 1280 inst = emit(BRW_OPCODE_ASR, result_dst, op[0], op[1]); 1281 else 1282 inst = emit(BRW_OPCODE_SHR, result_dst, op[0], op[1]); 1283 break; 1284 1285 case ir_quadop_vector: 1286 assert(!"not reached: should be handled by lower_quadop_vector"); 1287 break; 1288 } 1289} 1290 1291 1292void 1293vec4_visitor::visit(ir_swizzle *ir) 1294{ 1295 src_reg src; 1296 int i = 0; 1297 int swizzle[4]; 1298 1299 /* Note that this is only swizzles in expressions, not those on the left 1300 * hand side of an assignment, which do write masking. See ir_assignment 1301 * for that. 1302 */ 1303 1304 ir->val->accept(this); 1305 src = this->result; 1306 assert(src.file != BAD_FILE); 1307 1308 for (i = 0; i < ir->type->vector_elements; i++) { 1309 switch (i) { 1310 case 0: 1311 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.x); 1312 break; 1313 case 1: 1314 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.y); 1315 break; 1316 case 2: 1317 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.z); 1318 break; 1319 case 3: 1320 swizzle[i] = BRW_GET_SWZ(src.swizzle, ir->mask.w); 1321 break; 1322 } 1323 } 1324 for (; i < 4; i++) { 1325 /* Replicate the last channel out. */ 1326 swizzle[i] = swizzle[ir->type->vector_elements - 1]; 1327 } 1328 1329 src.swizzle = BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]); 1330 1331 this->result = src; 1332} 1333 1334void 1335vec4_visitor::visit(ir_dereference_variable *ir) 1336{ 1337 const struct glsl_type *type = ir->type; 1338 dst_reg *reg = variable_storage(ir->var); 1339 1340 if (!reg) { 1341 fail("Failed to find variable storage for %s\n", ir->var->name); 1342 this->result = src_reg(brw_null_reg()); 1343 return; 1344 } 1345 1346 this->result = src_reg(*reg); 1347 1348 if (type->is_scalar() || type->is_vector() || type->is_matrix()) 1349 this->result.swizzle = swizzle_for_size(type->vector_elements); 1350} 1351 1352void 1353vec4_visitor::visit(ir_dereference_array *ir) 1354{ 1355 ir_constant *constant_index; 1356 src_reg src; 1357 int element_size = type_size(ir->type); 1358 1359 constant_index = ir->array_index->constant_expression_value(); 1360 1361 ir->array->accept(this); 1362 src = this->result; 1363 1364 if (constant_index) { 1365 src.reg_offset += constant_index->value.i[0] * element_size; 1366 } else { 1367 /* Variable index array dereference. It eats the "vec4" of the 1368 * base of the array and an index that offsets the Mesa register 1369 * index. 1370 */ 1371 ir->array_index->accept(this); 1372 1373 src_reg index_reg; 1374 1375 if (element_size == 1) { 1376 index_reg = this->result; 1377 } else { 1378 index_reg = src_reg(this, glsl_type::int_type); 1379 1380 emit(MUL(dst_reg(index_reg), this->result, src_reg(element_size))); 1381 } 1382 1383 if (src.reladdr) { 1384 src_reg temp = src_reg(this, glsl_type::int_type); 1385 1386 emit(ADD(dst_reg(temp), *src.reladdr, index_reg)); 1387 1388 index_reg = temp; 1389 } 1390 1391 src.reladdr = ralloc(mem_ctx, src_reg); 1392 memcpy(src.reladdr, &index_reg, sizeof(index_reg)); 1393 } 1394 1395 /* If the type is smaller than a vec4, replicate the last channel out. */ 1396 if (ir->type->is_scalar() || ir->type->is_vector()) 1397 src.swizzle = swizzle_for_size(ir->type->vector_elements); 1398 else 1399 src.swizzle = BRW_SWIZZLE_NOOP; 1400 src.type = brw_type_for_base_type(ir->type); 1401 1402 this->result = src; 1403} 1404 1405void 1406vec4_visitor::visit(ir_dereference_record *ir) 1407{ 1408 unsigned int i; 1409 const glsl_type *struct_type = ir->record->type; 1410 int offset = 0; 1411 1412 ir->record->accept(this); 1413 1414 for (i = 0; i < struct_type->length; i++) { 1415 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0) 1416 break; 1417 offset += type_size(struct_type->fields.structure[i].type); 1418 } 1419 1420 /* If the type is smaller than a vec4, replicate the last channel out. */ 1421 if (ir->type->is_scalar() || ir->type->is_vector()) 1422 this->result.swizzle = swizzle_for_size(ir->type->vector_elements); 1423 else 1424 this->result.swizzle = BRW_SWIZZLE_NOOP; 1425 this->result.type = brw_type_for_base_type(ir->type); 1426 1427 this->result.reg_offset += offset; 1428} 1429 1430/** 1431 * We want to be careful in assignment setup to hit the actual storage 1432 * instead of potentially using a temporary like we might with the 1433 * ir_dereference handler. 1434 */ 1435static dst_reg 1436get_assignment_lhs(ir_dereference *ir, vec4_visitor *v) 1437{ 1438 /* The LHS must be a dereference. If the LHS is a variable indexed array 1439 * access of a vector, it must be separated into a series conditional moves 1440 * before reaching this point (see ir_vec_index_to_cond_assign). 1441 */ 1442 assert(ir->as_dereference()); 1443 ir_dereference_array *deref_array = ir->as_dereference_array(); 1444 if (deref_array) { 1445 assert(!deref_array->array->type->is_vector()); 1446 } 1447 1448 /* Use the rvalue deref handler for the most part. We'll ignore 1449 * swizzles in it and write swizzles using writemask, though. 1450 */ 1451 ir->accept(v); 1452 return dst_reg(v->result); 1453} 1454 1455void 1456vec4_visitor::emit_block_move(dst_reg *dst, src_reg *src, 1457 const struct glsl_type *type, uint32_t predicate) 1458{ 1459 if (type->base_type == GLSL_TYPE_STRUCT) { 1460 for (unsigned int i = 0; i < type->length; i++) { 1461 emit_block_move(dst, src, type->fields.structure[i].type, predicate); 1462 } 1463 return; 1464 } 1465 1466 if (type->is_array()) { 1467 for (unsigned int i = 0; i < type->length; i++) { 1468 emit_block_move(dst, src, type->fields.array, predicate); 1469 } 1470 return; 1471 } 1472 1473 if (type->is_matrix()) { 1474 const struct glsl_type *vec_type; 1475 1476 vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, 1477 type->vector_elements, 1); 1478 1479 for (int i = 0; i < type->matrix_columns; i++) { 1480 emit_block_move(dst, src, vec_type, predicate); 1481 } 1482 return; 1483 } 1484 1485 assert(type->is_scalar() || type->is_vector()); 1486 1487 dst->type = brw_type_for_base_type(type); 1488 src->type = dst->type; 1489 1490 dst->writemask = (1 << type->vector_elements) - 1; 1491 1492 /* Do we need to worry about swizzling a swizzle? */ 1493 assert(src->swizzle == BRW_SWIZZLE_NOOP 1494 || src->swizzle == swizzle_for_size(type->vector_elements)); 1495 src->swizzle = swizzle_for_size(type->vector_elements); 1496 1497 vec4_instruction *inst = emit(MOV(*dst, *src)); 1498 inst->predicate = predicate; 1499 1500 dst->reg_offset++; 1501 src->reg_offset++; 1502} 1503 1504 1505/* If the RHS processing resulted in an instruction generating a 1506 * temporary value, and it would be easy to rewrite the instruction to 1507 * generate its result right into the LHS instead, do so. This ends 1508 * up reliably removing instructions where it can be tricky to do so 1509 * later without real UD chain information. 1510 */ 1511bool 1512vec4_visitor::try_rewrite_rhs_to_dst(ir_assignment *ir, 1513 dst_reg dst, 1514 src_reg src, 1515 vec4_instruction *pre_rhs_inst, 1516 vec4_instruction *last_rhs_inst) 1517{ 1518 /* This could be supported, but it would take more smarts. */ 1519 if (ir->condition) 1520 return false; 1521 1522 if (pre_rhs_inst == last_rhs_inst) 1523 return false; /* No instructions generated to work with. */ 1524 1525 /* Make sure the last instruction generated our source reg. */ 1526 if (src.file != GRF || 1527 src.file != last_rhs_inst->dst.file || 1528 src.reg != last_rhs_inst->dst.reg || 1529 src.reg_offset != last_rhs_inst->dst.reg_offset || 1530 src.reladdr || 1531 src.abs || 1532 src.negate || 1533 last_rhs_inst->predicate != BRW_PREDICATE_NONE) 1534 return false; 1535 1536 /* Check that that last instruction fully initialized the channels 1537 * we want to use, in the order we want to use them. We could 1538 * potentially reswizzle the operands of many instructions so that 1539 * we could handle out of order channels, but don't yet. 1540 */ 1541 for (int i = 0; i < 4; i++) { 1542 if (dst.writemask & (1 << i)) { 1543 if (!(last_rhs_inst->dst.writemask & (1 << i))) 1544 return false; 1545 1546 if (BRW_GET_SWZ(src.swizzle, i) != i) 1547 return false; 1548 } 1549 } 1550 1551 /* Success! Rewrite the instruction. */ 1552 last_rhs_inst->dst.file = dst.file; 1553 last_rhs_inst->dst.reg = dst.reg; 1554 last_rhs_inst->dst.reg_offset = dst.reg_offset; 1555 last_rhs_inst->dst.reladdr = dst.reladdr; 1556 last_rhs_inst->dst.writemask &= dst.writemask; 1557 1558 return true; 1559} 1560 1561void 1562vec4_visitor::visit(ir_assignment *ir) 1563{ 1564 dst_reg dst = get_assignment_lhs(ir->lhs, this); 1565 uint32_t predicate = BRW_PREDICATE_NONE; 1566 1567 if (!ir->lhs->type->is_scalar() && 1568 !ir->lhs->type->is_vector()) { 1569 ir->rhs->accept(this); 1570 src_reg src = this->result; 1571 1572 if (ir->condition) { 1573 emit_bool_to_cond_code(ir->condition, &predicate); 1574 } 1575 1576 emit_block_move(&dst, &src, ir->rhs->type, predicate); 1577 return; 1578 } 1579 1580 /* Now we're down to just a scalar/vector with writemasks. */ 1581 int i; 1582 1583 vec4_instruction *pre_rhs_inst, *last_rhs_inst; 1584 pre_rhs_inst = (vec4_instruction *)this->instructions.get_tail(); 1585 1586 ir->rhs->accept(this); 1587 1588 last_rhs_inst = (vec4_instruction *)this->instructions.get_tail(); 1589 1590 src_reg src = this->result; 1591 1592 int swizzles[4]; 1593 int first_enabled_chan = 0; 1594 int src_chan = 0; 1595 1596 assert(ir->lhs->type->is_vector() || 1597 ir->lhs->type->is_scalar()); 1598 dst.writemask = ir->write_mask; 1599 1600 for (int i = 0; i < 4; i++) { 1601 if (dst.writemask & (1 << i)) { 1602 first_enabled_chan = BRW_GET_SWZ(src.swizzle, i); 1603 break; 1604 } 1605 } 1606 1607 /* Swizzle a small RHS vector into the channels being written. 1608 * 1609 * glsl ir treats write_mask as dictating how many channels are 1610 * present on the RHS while in our instructions we need to make 1611 * those channels appear in the slots of the vec4 they're written to. 1612 */ 1613 for (int i = 0; i < 4; i++) { 1614 if (dst.writemask & (1 << i)) 1615 swizzles[i] = BRW_GET_SWZ(src.swizzle, src_chan++); 1616 else 1617 swizzles[i] = first_enabled_chan; 1618 } 1619 src.swizzle = BRW_SWIZZLE4(swizzles[0], swizzles[1], 1620 swizzles[2], swizzles[3]); 1621 1622 if (try_rewrite_rhs_to_dst(ir, dst, src, pre_rhs_inst, last_rhs_inst)) { 1623 return; 1624 } 1625 1626 if (ir->condition) { 1627 emit_bool_to_cond_code(ir->condition, &predicate); 1628 } 1629 1630 for (i = 0; i < type_size(ir->lhs->type); i++) { 1631 vec4_instruction *inst = emit(MOV(dst, src)); 1632 inst->predicate = predicate; 1633 1634 dst.reg_offset++; 1635 src.reg_offset++; 1636 } 1637} 1638 1639void 1640vec4_visitor::emit_constant_values(dst_reg *dst, ir_constant *ir) 1641{ 1642 if (ir->type->base_type == GLSL_TYPE_STRUCT) { 1643 foreach_list(node, &ir->components) { 1644 ir_constant *field_value = (ir_constant *)node; 1645 1646 emit_constant_values(dst, field_value); 1647 } 1648 return; 1649 } 1650 1651 if (ir->type->is_array()) { 1652 for (unsigned int i = 0; i < ir->type->length; i++) { 1653 emit_constant_values(dst, ir->array_elements[i]); 1654 } 1655 return; 1656 } 1657 1658 if (ir->type->is_matrix()) { 1659 for (int i = 0; i < ir->type->matrix_columns; i++) { 1660 for (int j = 0; j < ir->type->vector_elements; j++) { 1661 dst->writemask = 1 << j; 1662 dst->type = BRW_REGISTER_TYPE_F; 1663 1664 emit(MOV(*dst, 1665 src_reg(ir->value.f[i * ir->type->vector_elements + j]))); 1666 } 1667 dst->reg_offset++; 1668 } 1669 return; 1670 } 1671 1672 for (int i = 0; i < ir->type->vector_elements; i++) { 1673 dst->writemask = 1 << i; 1674 dst->type = brw_type_for_base_type(ir->type); 1675 1676 switch (ir->type->base_type) { 1677 case GLSL_TYPE_FLOAT: 1678 emit(MOV(*dst, src_reg(ir->value.f[i]))); 1679 break; 1680 case GLSL_TYPE_INT: 1681 emit(MOV(*dst, src_reg(ir->value.i[i]))); 1682 break; 1683 case GLSL_TYPE_UINT: 1684 emit(MOV(*dst, src_reg(ir->value.u[i]))); 1685 break; 1686 case GLSL_TYPE_BOOL: 1687 emit(MOV(*dst, src_reg(ir->value.b[i]))); 1688 break; 1689 default: 1690 assert(!"Non-float/uint/int/bool constant"); 1691 break; 1692 } 1693 } 1694 dst->reg_offset++; 1695} 1696 1697void 1698vec4_visitor::visit(ir_constant *ir) 1699{ 1700 dst_reg dst = dst_reg(this, ir->type); 1701 this->result = src_reg(dst); 1702 1703 emit_constant_values(&dst, ir); 1704} 1705 1706void 1707vec4_visitor::visit(ir_call *ir) 1708{ 1709 assert(!"not reached"); 1710} 1711 1712void 1713vec4_visitor::visit(ir_texture *ir) 1714{ 1715 /* FINISHME: Implement vertex texturing. 1716 * 1717 * With 0 vertex samplers available, the linker will reject 1718 * programs that do vertex texturing, but after our visitor has 1719 * run. 1720 */ 1721 this->result = src_reg(this, glsl_type::vec4_type); 1722} 1723 1724void 1725vec4_visitor::visit(ir_return *ir) 1726{ 1727 assert(!"not reached"); 1728} 1729 1730void 1731vec4_visitor::visit(ir_discard *ir) 1732{ 1733 assert(!"not reached"); 1734} 1735 1736void 1737vec4_visitor::visit(ir_if *ir) 1738{ 1739 /* Don't point the annotation at the if statement, because then it plus 1740 * the then and else blocks get printed. 1741 */ 1742 this->base_ir = ir->condition; 1743 1744 if (intel->gen == 6) { 1745 emit_if_gen6(ir); 1746 } else { 1747 uint32_t predicate; 1748 emit_bool_to_cond_code(ir->condition, &predicate); 1749 emit(IF(predicate)); 1750 } 1751 1752 visit_instructions(&ir->then_instructions); 1753 1754 if (!ir->else_instructions.is_empty()) { 1755 this->base_ir = ir->condition; 1756 emit(BRW_OPCODE_ELSE); 1757 1758 visit_instructions(&ir->else_instructions); 1759 } 1760 1761 this->base_ir = ir->condition; 1762 emit(BRW_OPCODE_ENDIF); 1763} 1764 1765void 1766vec4_visitor::emit_ndc_computation() 1767{ 1768 /* Get the position */ 1769 src_reg pos = src_reg(output_reg[VERT_RESULT_HPOS]); 1770 1771 /* Build ndc coords, which are (x/w, y/w, z/w, 1/w) */ 1772 dst_reg ndc = dst_reg(this, glsl_type::vec4_type); 1773 output_reg[BRW_VERT_RESULT_NDC] = ndc; 1774 1775 current_annotation = "NDC"; 1776 dst_reg ndc_w = ndc; 1777 ndc_w.writemask = WRITEMASK_W; 1778 src_reg pos_w = pos; 1779 pos_w.swizzle = BRW_SWIZZLE4(SWIZZLE_W, SWIZZLE_W, SWIZZLE_W, SWIZZLE_W); 1780 emit_math(SHADER_OPCODE_RCP, ndc_w, pos_w); 1781 1782 dst_reg ndc_xyz = ndc; 1783 ndc_xyz.writemask = WRITEMASK_XYZ; 1784 1785 emit(MUL(ndc_xyz, pos, src_reg(ndc_w))); 1786} 1787 1788void 1789vec4_visitor::emit_psiz_and_flags(struct brw_reg reg) 1790{ 1791 if (intel->gen < 6 && 1792 ((c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) || 1793 c->key.nr_userclip || brw->has_negative_rhw_bug)) { 1794 dst_reg header1 = dst_reg(this, glsl_type::uvec4_type); 1795 dst_reg header1_w = header1; 1796 header1_w.writemask = WRITEMASK_W; 1797 GLuint i; 1798 1799 emit(MOV(header1, 0u)); 1800 1801 if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) { 1802 src_reg psiz = src_reg(output_reg[VERT_RESULT_PSIZ]); 1803 1804 current_annotation = "Point size"; 1805 emit(MUL(header1_w, psiz, src_reg((float)(1 << 11)))); 1806 emit(AND(header1_w, src_reg(header1_w), 0x7ff << 8)); 1807 } 1808 1809 current_annotation = "Clipping flags"; 1810 for (i = 0; i < c->key.nr_userclip; i++) { 1811 vec4_instruction *inst; 1812 1813 inst = emit(DP4(dst_null_f(), src_reg(output_reg[VERT_RESULT_HPOS]), 1814 src_reg(this->userplane[i]))); 1815 inst->conditional_mod = BRW_CONDITIONAL_L; 1816 1817 inst = emit(OR(header1_w, src_reg(header1_w), 1u << i)); 1818 inst->predicate = BRW_PREDICATE_NORMAL; 1819 } 1820 1821 /* i965 clipping workaround: 1822 * 1) Test for -ve rhw 1823 * 2) If set, 1824 * set ndc = (0,0,0,0) 1825 * set ucp[6] = 1 1826 * 1827 * Later, clipping will detect ucp[6] and ensure the primitive is 1828 * clipped against all fixed planes. 1829 */ 1830 if (brw->has_negative_rhw_bug) { 1831#if 0 1832 /* FINISHME */ 1833 brw_CMP(p, 1834 vec8(brw_null_reg()), 1835 BRW_CONDITIONAL_L, 1836 brw_swizzle1(output_reg[BRW_VERT_RESULT_NDC], 3), 1837 brw_imm_f(0)); 1838 1839 brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6)); 1840 brw_MOV(p, output_reg[BRW_VERT_RESULT_NDC], brw_imm_f(0)); 1841 brw_set_predicate_control(p, BRW_PREDICATE_NONE); 1842#endif 1843 } 1844 1845 emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), src_reg(header1))); 1846 } else if (intel->gen < 6) { 1847 emit(MOV(retype(reg, BRW_REGISTER_TYPE_UD), 0u)); 1848 } else { 1849 emit(MOV(retype(reg, BRW_REGISTER_TYPE_D), src_reg(0))); 1850 if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) { 1851 emit(MOV(brw_writemask(reg, WRITEMASK_W), 1852 src_reg(output_reg[VERT_RESULT_PSIZ]))); 1853 } 1854 } 1855} 1856 1857void 1858vec4_visitor::emit_clip_distances(struct brw_reg reg, int offset) 1859{ 1860 if (intel->gen < 6) { 1861 /* Clip distance slots are set aside in gen5, but they are not used. It 1862 * is not clear whether we actually need to set aside space for them, 1863 * but the performance cost is negligible. 1864 */ 1865 return; 1866 } 1867 1868 /* From the GLSL 1.30 spec, section 7.1 (Vertex Shader Special Variables): 1869 * 1870 * "If a linked set of shaders forming the vertex stage contains no 1871 * static write to gl_ClipVertex or gl_ClipDistance, but the 1872 * application has requested clipping against user clip planes through 1873 * the API, then the coordinate written to gl_Position is used for 1874 * comparison against the user clip planes." 1875 * 1876 * This function is only called if the shader didn't write to 1877 * gl_ClipDistance. Accordingly, we use gl_ClipVertex to perform clipping 1878 * if the user wrote to it; otherwise we use gl_Position. 1879 */ 1880 gl_vert_result clip_vertex = VERT_RESULT_CLIP_VERTEX; 1881 if (!(c->prog_data.outputs_written 1882 & BITFIELD64_BIT(VERT_RESULT_CLIP_VERTEX))) { 1883 clip_vertex = VERT_RESULT_HPOS; 1884 } 1885 1886 for (int i = 0; i + offset < c->key.nr_userclip && i < 4; ++i) { 1887 emit(DP4(dst_reg(brw_writemask(reg, 1 << i)), 1888 src_reg(output_reg[clip_vertex]), 1889 src_reg(this->userplane[i + offset]))); 1890 } 1891} 1892 1893void 1894vec4_visitor::emit_generic_urb_slot(dst_reg reg, int vert_result) 1895{ 1896 assert (vert_result < VERT_RESULT_MAX); 1897 current_annotation = output_reg_annotation[vert_result]; 1898 /* Copy the register, saturating if necessary */ 1899 vec4_instruction *inst = emit(MOV(reg, 1900 src_reg(output_reg[vert_result]))); 1901 if ((vert_result == VERT_RESULT_COL0 || 1902 vert_result == VERT_RESULT_COL1 || 1903 vert_result == VERT_RESULT_BFC0 || 1904 vert_result == VERT_RESULT_BFC1) && 1905 c->key.clamp_vertex_color) { 1906 inst->saturate = true; 1907 } 1908} 1909 1910void 1911vec4_visitor::emit_urb_slot(int mrf, int vert_result) 1912{ 1913 struct brw_reg hw_reg = brw_message_reg(mrf); 1914 dst_reg reg = dst_reg(MRF, mrf); 1915 reg.type = BRW_REGISTER_TYPE_F; 1916 1917 switch (vert_result) { 1918 case VERT_RESULT_PSIZ: 1919 /* PSIZ is always in slot 0, and is coupled with other flags. */ 1920 current_annotation = "indices, point width, clip flags"; 1921 emit_psiz_and_flags(hw_reg); 1922 break; 1923 case BRW_VERT_RESULT_NDC: 1924 current_annotation = "NDC"; 1925 emit(MOV(reg, src_reg(output_reg[BRW_VERT_RESULT_NDC]))); 1926 break; 1927 case BRW_VERT_RESULT_HPOS_DUPLICATE: 1928 case VERT_RESULT_HPOS: 1929 current_annotation = "gl_Position"; 1930 emit(MOV(reg, src_reg(output_reg[VERT_RESULT_HPOS]))); 1931 break; 1932 case VERT_RESULT_CLIP_DIST0: 1933 case VERT_RESULT_CLIP_DIST1: 1934 if (this->c->key.uses_clip_distance) { 1935 emit_generic_urb_slot(reg, vert_result); 1936 } else { 1937 current_annotation = "user clip distances"; 1938 emit_clip_distances(hw_reg, (vert_result - VERT_RESULT_CLIP_DIST0) * 4); 1939 } 1940 break; 1941 case BRW_VERT_RESULT_PAD: 1942 /* No need to write to this slot */ 1943 break; 1944 default: 1945 emit_generic_urb_slot(reg, vert_result); 1946 break; 1947 } 1948} 1949 1950static int 1951align_interleaved_urb_mlen(struct brw_context *brw, int mlen) 1952{ 1953 struct intel_context *intel = &brw->intel; 1954 1955 if (intel->gen >= 6) { 1956 /* URB data written (does not include the message header reg) must 1957 * be a multiple of 256 bits, or 2 VS registers. See vol5c.5, 1958 * section 5.4.3.2.2: URB_INTERLEAVED. 1959 * 1960 * URB entries are allocated on a multiple of 1024 bits, so an 1961 * extra 128 bits written here to make the end align to 256 is 1962 * no problem. 1963 */ 1964 if ((mlen % 2) != 1) 1965 mlen++; 1966 } 1967 1968 return mlen; 1969} 1970 1971/** 1972 * Generates the VUE payload plus the 1 or 2 URB write instructions to 1973 * complete the VS thread. 1974 * 1975 * The VUE layout is documented in Volume 2a. 1976 */ 1977void 1978vec4_visitor::emit_urb_writes() 1979{ 1980 /* MRF 0 is reserved for the debugger, so start with message header 1981 * in MRF 1. 1982 */ 1983 int base_mrf = 1; 1984 int mrf = base_mrf; 1985 /* In the process of generating our URB write message contents, we 1986 * may need to unspill a register or load from an array. Those 1987 * reads would use MRFs 14-15. 1988 */ 1989 int max_usable_mrf = 13; 1990 1991 /* The following assertion verifies that max_usable_mrf causes an 1992 * even-numbered amount of URB write data, which will meet gen6's 1993 * requirements for length alignment. 1994 */ 1995 assert ((max_usable_mrf - base_mrf) % 2 == 0); 1996 1997 /* FINISHME: edgeflag */ 1998 1999 brw_compute_vue_map(&c->vue_map, intel, c->key.nr_userclip, 2000 c->prog_data.outputs_written); 2001 2002 /* First mrf is the g0-based message header containing URB handles and such, 2003 * which is implied in VS_OPCODE_URB_WRITE. 2004 */ 2005 mrf++; 2006 2007 if (intel->gen < 6) { 2008 emit_ndc_computation(); 2009 } 2010 2011 /* Set up the VUE data for the first URB write */ 2012 int slot; 2013 for (slot = 0; slot < c->vue_map.num_slots; ++slot) { 2014 emit_urb_slot(mrf++, c->vue_map.slot_to_vert_result[slot]); 2015 2016 /* If this was max_usable_mrf, we can't fit anything more into this URB 2017 * WRITE. 2018 */ 2019 if (mrf > max_usable_mrf) { 2020 slot++; 2021 break; 2022 } 2023 } 2024 2025 current_annotation = "URB write"; 2026 vec4_instruction *inst = emit(VS_OPCODE_URB_WRITE); 2027 inst->base_mrf = base_mrf; 2028 inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf); 2029 inst->eot = (slot >= c->vue_map.num_slots); 2030 2031 /* Optional second URB write */ 2032 if (!inst->eot) { 2033 mrf = base_mrf + 1; 2034 2035 for (; slot < c->vue_map.num_slots; ++slot) { 2036 assert(mrf < max_usable_mrf); 2037 2038 emit_urb_slot(mrf++, c->vue_map.slot_to_vert_result[slot]); 2039 } 2040 2041 current_annotation = "URB write"; 2042 inst = emit(VS_OPCODE_URB_WRITE); 2043 inst->base_mrf = base_mrf; 2044 inst->mlen = align_interleaved_urb_mlen(brw, mrf - base_mrf); 2045 inst->eot = true; 2046 /* URB destination offset. In the previous write, we got MRFs 2047 * 2-13 minus the one header MRF, so 12 regs. URB offset is in 2048 * URB row increments, and each of our MRFs is half of one of 2049 * those, since we're doing interleaved writes. 2050 */ 2051 inst->offset = (max_usable_mrf - base_mrf) / 2; 2052 } 2053 2054 if (intel->gen == 6) 2055 c->prog_data.urb_entry_size = ALIGN(c->vue_map.num_slots, 8) / 8; 2056 else 2057 c->prog_data.urb_entry_size = ALIGN(c->vue_map.num_slots, 4) / 4; 2058} 2059 2060src_reg 2061vec4_visitor::get_scratch_offset(vec4_instruction *inst, 2062 src_reg *reladdr, int reg_offset) 2063{ 2064 /* Because we store the values to scratch interleaved like our 2065 * vertex data, we need to scale the vec4 index by 2. 2066 */ 2067 int message_header_scale = 2; 2068 2069 /* Pre-gen6, the message header uses byte offsets instead of vec4 2070 * (16-byte) offset units. 2071 */ 2072 if (intel->gen < 6) 2073 message_header_scale *= 16; 2074 2075 if (reladdr) { 2076 src_reg index = src_reg(this, glsl_type::int_type); 2077 2078 emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset))); 2079 emit_before(inst, MUL(dst_reg(index), 2080 index, src_reg(message_header_scale))); 2081 2082 return index; 2083 } else { 2084 return src_reg(reg_offset * message_header_scale); 2085 } 2086} 2087 2088src_reg 2089vec4_visitor::get_pull_constant_offset(vec4_instruction *inst, 2090 src_reg *reladdr, int reg_offset) 2091{ 2092 if (reladdr) { 2093 src_reg index = src_reg(this, glsl_type::int_type); 2094 2095 emit_before(inst, ADD(dst_reg(index), *reladdr, src_reg(reg_offset))); 2096 2097 /* Pre-gen6, the message header uses byte offsets instead of vec4 2098 * (16-byte) offset units. 2099 */ 2100 if (intel->gen < 6) { 2101 emit_before(inst, MUL(dst_reg(index), index, src_reg(16))); 2102 } 2103 2104 return index; 2105 } else { 2106 int message_header_scale = intel->gen < 6 ? 16 : 1; 2107 return src_reg(reg_offset * message_header_scale); 2108 } 2109} 2110 2111/** 2112 * Emits an instruction before @inst to load the value named by @orig_src 2113 * from scratch space at @base_offset to @temp. 2114 */ 2115void 2116vec4_visitor::emit_scratch_read(vec4_instruction *inst, 2117 dst_reg temp, src_reg orig_src, 2118 int base_offset) 2119{ 2120 int reg_offset = base_offset + orig_src.reg_offset; 2121 src_reg index = get_scratch_offset(inst, orig_src.reladdr, reg_offset); 2122 2123 emit_before(inst, SCRATCH_READ(temp, index)); 2124} 2125 2126/** 2127 * Emits an instruction after @inst to store the value to be written 2128 * to @orig_dst to scratch space at @base_offset, from @temp. 2129 */ 2130void 2131vec4_visitor::emit_scratch_write(vec4_instruction *inst, 2132 src_reg temp, dst_reg orig_dst, 2133 int base_offset) 2134{ 2135 int reg_offset = base_offset + orig_dst.reg_offset; 2136 src_reg index = get_scratch_offset(inst, orig_dst.reladdr, reg_offset); 2137 2138 dst_reg dst = dst_reg(brw_writemask(brw_vec8_grf(0, 0), 2139 orig_dst.writemask)); 2140 vec4_instruction *write = SCRATCH_WRITE(dst, temp, index); 2141 write->predicate = inst->predicate; 2142 write->ir = inst->ir; 2143 write->annotation = inst->annotation; 2144 inst->insert_after(write); 2145} 2146 2147/** 2148 * We can't generally support array access in GRF space, because a 2149 * single instruction's destination can only span 2 contiguous 2150 * registers. So, we send all GRF arrays that get variable index 2151 * access to scratch space. 2152 */ 2153void 2154vec4_visitor::move_grf_array_access_to_scratch() 2155{ 2156 int scratch_loc[this->virtual_grf_count]; 2157 2158 for (int i = 0; i < this->virtual_grf_count; i++) { 2159 scratch_loc[i] = -1; 2160 } 2161 2162 /* First, calculate the set of virtual GRFs that need to be punted 2163 * to scratch due to having any array access on them, and where in 2164 * scratch. 2165 */ 2166 foreach_list(node, &this->instructions) { 2167 vec4_instruction *inst = (vec4_instruction *)node; 2168 2169 if (inst->dst.file == GRF && inst->dst.reladdr && 2170 scratch_loc[inst->dst.reg] == -1) { 2171 scratch_loc[inst->dst.reg] = c->last_scratch; 2172 c->last_scratch += this->virtual_grf_sizes[inst->dst.reg] * 8 * 4; 2173 } 2174 2175 for (int i = 0 ; i < 3; i++) { 2176 src_reg *src = &inst->src[i]; 2177 2178 if (src->file == GRF && src->reladdr && 2179 scratch_loc[src->reg] == -1) { 2180 scratch_loc[src->reg] = c->last_scratch; 2181 c->last_scratch += this->virtual_grf_sizes[src->reg] * 8 * 4; 2182 } 2183 } 2184 } 2185 2186 /* Now, for anything that will be accessed through scratch, rewrite 2187 * it to load/store. Note that this is a _safe list walk, because 2188 * we may generate a new scratch_write instruction after the one 2189 * we're processing. 2190 */ 2191 foreach_list_safe(node, &this->instructions) { 2192 vec4_instruction *inst = (vec4_instruction *)node; 2193 2194 /* Set up the annotation tracking for new generated instructions. */ 2195 base_ir = inst->ir; 2196 current_annotation = inst->annotation; 2197 2198 if (inst->dst.file == GRF && scratch_loc[inst->dst.reg] != -1) { 2199 src_reg temp = src_reg(this, glsl_type::vec4_type); 2200 2201 emit_scratch_write(inst, temp, inst->dst, scratch_loc[inst->dst.reg]); 2202 2203 inst->dst.file = temp.file; 2204 inst->dst.reg = temp.reg; 2205 inst->dst.reg_offset = temp.reg_offset; 2206 inst->dst.reladdr = NULL; 2207 } 2208 2209 for (int i = 0 ; i < 3; i++) { 2210 if (inst->src[i].file != GRF || scratch_loc[inst->src[i].reg] == -1) 2211 continue; 2212 2213 dst_reg temp = dst_reg(this, glsl_type::vec4_type); 2214 2215 emit_scratch_read(inst, temp, inst->src[i], 2216 scratch_loc[inst->src[i].reg]); 2217 2218 inst->src[i].file = temp.file; 2219 inst->src[i].reg = temp.reg; 2220 inst->src[i].reg_offset = temp.reg_offset; 2221 inst->src[i].reladdr = NULL; 2222 } 2223 } 2224} 2225 2226/** 2227 * Emits an instruction before @inst to load the value named by @orig_src 2228 * from the pull constant buffer (surface) at @base_offset to @temp. 2229 */ 2230void 2231vec4_visitor::emit_pull_constant_load(vec4_instruction *inst, 2232 dst_reg temp, src_reg orig_src, 2233 int base_offset) 2234{ 2235 int reg_offset = base_offset + orig_src.reg_offset; 2236 src_reg index = get_pull_constant_offset(inst, orig_src.reladdr, reg_offset); 2237 vec4_instruction *load; 2238 2239 load = new(mem_ctx) vec4_instruction(this, VS_OPCODE_PULL_CONSTANT_LOAD, 2240 temp, index); 2241 load->base_mrf = 14; 2242 load->mlen = 1; 2243 emit_before(inst, load); 2244} 2245 2246/** 2247 * Implements array access of uniforms by inserting a 2248 * PULL_CONSTANT_LOAD instruction. 2249 * 2250 * Unlike temporary GRF array access (where we don't support it due to 2251 * the difficulty of doing relative addressing on instruction 2252 * destinations), we could potentially do array access of uniforms 2253 * that were loaded in GRF space as push constants. In real-world 2254 * usage we've seen, though, the arrays being used are always larger 2255 * than we could load as push constants, so just always move all 2256 * uniform array access out to a pull constant buffer. 2257 */ 2258void 2259vec4_visitor::move_uniform_array_access_to_pull_constants() 2260{ 2261 int pull_constant_loc[this->uniforms]; 2262 2263 for (int i = 0; i < this->uniforms; i++) { 2264 pull_constant_loc[i] = -1; 2265 } 2266 2267 /* Walk through and find array access of uniforms. Put a copy of that 2268 * uniform in the pull constant buffer. 2269 * 2270 * Note that we don't move constant-indexed accesses to arrays. No 2271 * testing has been done of the performance impact of this choice. 2272 */ 2273 foreach_list_safe(node, &this->instructions) { 2274 vec4_instruction *inst = (vec4_instruction *)node; 2275 2276 for (int i = 0 ; i < 3; i++) { 2277 if (inst->src[i].file != UNIFORM || !inst->src[i].reladdr) 2278 continue; 2279 2280 int uniform = inst->src[i].reg; 2281 2282 /* If this array isn't already present in the pull constant buffer, 2283 * add it. 2284 */ 2285 if (pull_constant_loc[uniform] == -1) { 2286 const float **values = &prog_data->param[uniform * 4]; 2287 2288 pull_constant_loc[uniform] = prog_data->nr_pull_params / 4; 2289 2290 for (int j = 0; j < uniform_size[uniform] * 4; j++) { 2291 prog_data->pull_param[prog_data->nr_pull_params++] = values[j]; 2292 } 2293 } 2294 2295 /* Set up the annotation tracking for new generated instructions. */ 2296 base_ir = inst->ir; 2297 current_annotation = inst->annotation; 2298 2299 dst_reg temp = dst_reg(this, glsl_type::vec4_type); 2300 2301 emit_pull_constant_load(inst, temp, inst->src[i], 2302 pull_constant_loc[uniform]); 2303 2304 inst->src[i].file = temp.file; 2305 inst->src[i].reg = temp.reg; 2306 inst->src[i].reg_offset = temp.reg_offset; 2307 inst->src[i].reladdr = NULL; 2308 } 2309 } 2310 2311 /* Now there are no accesses of the UNIFORM file with a reladdr, so 2312 * no need to track them as larger-than-vec4 objects. This will be 2313 * relied on in cutting out unused uniform vectors from push 2314 * constants. 2315 */ 2316 split_uniform_registers(); 2317} 2318 2319vec4_visitor::vec4_visitor(struct brw_vs_compile *c, 2320 struct gl_shader_program *prog, 2321 struct brw_shader *shader) 2322{ 2323 this->c = c; 2324 this->p = &c->func; 2325 this->brw = p->brw; 2326 this->intel = &brw->intel; 2327 this->ctx = &intel->ctx; 2328 this->prog = prog; 2329 this->shader = shader; 2330 2331 this->mem_ctx = ralloc_context(NULL); 2332 this->failed = false; 2333 2334 this->base_ir = NULL; 2335 this->current_annotation = NULL; 2336 2337 this->c = c; 2338 this->vp = prog->VertexProgram; 2339 this->prog_data = &c->prog_data; 2340 2341 this->variable_ht = hash_table_ctor(0, 2342 hash_table_pointer_hash, 2343 hash_table_pointer_compare); 2344 2345 this->virtual_grf_def = NULL; 2346 this->virtual_grf_use = NULL; 2347 this->virtual_grf_sizes = NULL; 2348 this->virtual_grf_count = 0; 2349 this->virtual_grf_reg_map = NULL; 2350 this->virtual_grf_reg_count = 0; 2351 this->virtual_grf_array_size = 0; 2352 this->live_intervals_valid = false; 2353 2354 this->uniforms = 0; 2355 2356 this->variable_ht = hash_table_ctor(0, 2357 hash_table_pointer_hash, 2358 hash_table_pointer_compare); 2359} 2360 2361vec4_visitor::~vec4_visitor() 2362{ 2363 ralloc_free(this->mem_ctx); 2364 hash_table_dtor(this->variable_ht); 2365} 2366 2367 2368void 2369vec4_visitor::fail(const char *format, ...) 2370{ 2371 va_list va; 2372 char *msg; 2373 2374 if (failed) 2375 return; 2376 2377 failed = true; 2378 2379 va_start(va, format); 2380 msg = ralloc_vasprintf(mem_ctx, format, va); 2381 va_end(va); 2382 msg = ralloc_asprintf(mem_ctx, "VS compile failed: %s\n", msg); 2383 2384 this->fail_msg = msg; 2385 2386 if (INTEL_DEBUG & DEBUG_VS) { 2387 fprintf(stderr, "%s", msg); 2388 } 2389} 2390 2391} /* namespace brw */ 2392