st_glsl_to_tgsi.cpp revision 41472f7809dcff114223b8fadc5b97baff6060a9
1/* 2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved. 3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved. 4 * Copyright © 2010 Intel Corporation 5 * Copyright © 2011 Bryan Cain 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the "Software"), 9 * to deal in the Software without restriction, including without limitation 10 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * and/or sell copies of the Software, and to permit persons to whom the 12 * Software is furnished to do so, subject to the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the next 15 * paragraph) shall be included in all copies or substantial portions of the 16 * Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 * DEALINGS IN THE SOFTWARE. 25 */ 26 27/** 28 * \file glsl_to_tgsi.cpp 29 * 30 * Translate GLSL IR to TGSI. 
31 */ 32 33#include <stdio.h> 34#include "main/compiler.h" 35#include "ir.h" 36#include "ir_visitor.h" 37#include "ir_print_visitor.h" 38#include "ir_expression_flattening.h" 39#include "glsl_types.h" 40#include "glsl_parser_extras.h" 41#include "../glsl/program.h" 42#include "ir_optimization.h" 43#include "ast.h" 44 45extern "C" { 46#include "main/mtypes.h" 47#include "main/shaderapi.h" 48#include "main/shaderobj.h" 49#include "main/uniforms.h" 50#include "program/hash_table.h" 51#include "program/prog_instruction.h" 52#include "program/prog_optimize.h" 53#include "program/prog_print.h" 54#include "program/program.h" 55#include "program/prog_uniform.h" 56#include "program/prog_parameter.h" 57#include "program/sampler.h" 58 59#include "pipe/p_compiler.h" 60#include "pipe/p_context.h" 61#include "pipe/p_screen.h" 62#include "pipe/p_shader_tokens.h" 63#include "pipe/p_state.h" 64#include "util/u_math.h" 65#include "tgsi/tgsi_ureg.h" 66#include "tgsi/tgsi_info.h" 67#include "st_context.h" 68#include "st_program.h" 69#include "st_glsl_to_tgsi.h" 70#include "st_mesa_to_tgsi.h" 71} 72 73#define PROGRAM_ANY_CONST ((1 << PROGRAM_LOCAL_PARAM) | \ 74 (1 << PROGRAM_ENV_PARAM) | \ 75 (1 << PROGRAM_STATE_VAR) | \ 76 (1 << PROGRAM_NAMED_PARAM) | \ 77 (1 << PROGRAM_CONSTANT) | \ 78 (1 << PROGRAM_UNIFORM)) 79 80class st_src_reg; 81class st_dst_reg; 82 83static int swizzle_for_size(int size); 84 85/** 86 * This struct is a corresponding struct to TGSI ureg_src. 87 */ 88class st_src_reg { 89public: 90 st_src_reg(gl_register_file file, int index, const glsl_type *type) 91 { 92 this->file = file; 93 this->index = index; 94 if (type && (type->is_scalar() || type->is_vector() || type->is_matrix())) 95 this->swizzle = swizzle_for_size(type->vector_elements); 96 else 97 this->swizzle = SWIZZLE_XYZW; 98 this->negate = 0; 99 this->type = type ? 
type->base_type : GLSL_TYPE_ERROR; 100 this->reladdr = NULL; 101 } 102 103 st_src_reg(gl_register_file file, int index, int type) 104 { 105 this->type = type; 106 this->file = file; 107 this->index = index; 108 this->swizzle = SWIZZLE_XYZW; 109 this->negate = 0; 110 this->reladdr = NULL; 111 } 112 113 st_src_reg() 114 { 115 this->type = GLSL_TYPE_ERROR; 116 this->file = PROGRAM_UNDEFINED; 117 this->index = 0; 118 this->swizzle = 0; 119 this->negate = 0; 120 this->reladdr = NULL; 121 } 122 123 explicit st_src_reg(st_dst_reg reg); 124 125 gl_register_file file; /**< PROGRAM_* from Mesa */ 126 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */ 127 GLuint swizzle; /**< SWIZZLE_XYZWONEZERO swizzles from Mesa. */ 128 int negate; /**< NEGATE_XYZW mask from mesa */ 129 int type; /** GLSL_TYPE_* from GLSL IR (enum glsl_base_type) */ 130 /** Register index should be offset by the integer in this reg. */ 131 st_src_reg *reladdr; 132}; 133 134class st_dst_reg { 135public: 136 st_dst_reg(gl_register_file file, int writemask, int type) 137 { 138 this->file = file; 139 this->index = 0; 140 this->writemask = writemask; 141 this->cond_mask = COND_TR; 142 this->reladdr = NULL; 143 this->type = type; 144 } 145 146 st_dst_reg() 147 { 148 this->type = GLSL_TYPE_ERROR; 149 this->file = PROGRAM_UNDEFINED; 150 this->index = 0; 151 this->writemask = 0; 152 this->cond_mask = COND_TR; 153 this->reladdr = NULL; 154 } 155 156 explicit st_dst_reg(st_src_reg reg); 157 158 gl_register_file file; /**< PROGRAM_* from Mesa */ 159 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */ 160 int writemask; /**< Bitfield of WRITEMASK_[XYZW] */ 161 GLuint cond_mask:4; 162 int type; /** GLSL_TYPE_* from GLSL IR (enum glsl_base_type) */ 163 /** Register index should be offset by the integer in this reg. 
*/ 164 st_src_reg *reladdr; 165}; 166 167st_src_reg::st_src_reg(st_dst_reg reg) 168{ 169 this->type = reg.type; 170 this->file = reg.file; 171 this->index = reg.index; 172 this->swizzle = SWIZZLE_XYZW; 173 this->negate = 0; 174 this->reladdr = NULL; 175} 176 177st_dst_reg::st_dst_reg(st_src_reg reg) 178{ 179 this->type = reg.type; 180 this->file = reg.file; 181 this->index = reg.index; 182 this->writemask = WRITEMASK_XYZW; 183 this->cond_mask = COND_TR; 184 this->reladdr = reg.reladdr; 185} 186 187class glsl_to_tgsi_instruction : public exec_node { 188public: 189 /* Callers of this ralloc-based new need not call delete. It's 190 * easier to just ralloc_free 'ctx' (or any of its ancestors). */ 191 static void* operator new(size_t size, void *ctx) 192 { 193 void *node; 194 195 node = rzalloc_size(ctx, size); 196 assert(node != NULL); 197 198 return node; 199 } 200 201 unsigned op; 202 st_dst_reg dst; 203 st_src_reg src[3]; 204 /** Pointer to the ir source this tree came from for debugging */ 205 ir_instruction *ir; 206 GLboolean cond_update; 207 bool saturate; 208 int sampler; /**< sampler index */ 209 int tex_target; /**< One of TEXTURE_*_INDEX */ 210 GLboolean tex_shadow; 211 int dead_mask; /**< Used in dead code elimination */ 212 213 class function_entry *function; /* Set on TGSI_OPCODE_CAL or TGSI_OPCODE_BGNSUB */ 214}; 215 216class variable_storage : public exec_node { 217public: 218 variable_storage(ir_variable *var, gl_register_file file, int index) 219 : file(file), index(index), var(var) 220 { 221 /* empty */ 222 } 223 224 gl_register_file file; 225 int index; 226 ir_variable *var; /* variable that maps to this, if any */ 227}; 228 229class function_entry : public exec_node { 230public: 231 ir_function_signature *sig; 232 233 /** 234 * identifier of this function signature used by the program. 235 * 236 * At the point that Mesa instructions for function calls are 237 * generated, we don't know the address of the first instruction of 238 * the function body. 
So we make the BranchTarget that is called a 239 * small integer and rewrite them during set_branchtargets(). 240 */ 241 int sig_id; 242 243 /** 244 * Pointer to first instruction of the function body. 245 * 246 * Set during function body emits after main() is processed. 247 */ 248 glsl_to_tgsi_instruction *bgn_inst; 249 250 /** 251 * Index of the first instruction of the function body in actual 252 * Mesa IR. 253 * 254 * Set after convertion from glsl_to_tgsi_instruction to prog_instruction. 255 */ 256 int inst; 257 258 /** Storage for the return value. */ 259 st_src_reg return_reg; 260}; 261 262class glsl_to_tgsi_visitor : public ir_visitor { 263public: 264 glsl_to_tgsi_visitor(); 265 ~glsl_to_tgsi_visitor(); 266 267 function_entry *current_function; 268 269 struct gl_context *ctx; 270 struct gl_program *prog; 271 struct gl_shader_program *shader_program; 272 struct gl_shader_compiler_options *options; 273 274 int next_temp; 275 276 int num_address_regs; 277 int samplers_used; 278 bool indirect_addr_temps; 279 bool indirect_addr_consts; 280 281 int glsl_version; 282 283 variable_storage *find_variable_storage(ir_variable *var); 284 285 function_entry *get_function_signature(ir_function_signature *sig); 286 287 st_src_reg get_temp(const glsl_type *type); 288 void reladdr_to_temp(ir_instruction *ir, st_src_reg *reg, int *num_reladdr); 289 290 st_src_reg st_src_reg_for_float(float val); 291 st_src_reg st_src_reg_for_int(int val); 292 st_src_reg st_src_reg_for_type(int type, int val); 293 294 /** 295 * \name Visit methods 296 * 297 * As typical for the visitor pattern, there must be one \c visit method for 298 * each concrete subclass of \c ir_instruction. Virtual base classes within 299 * the hierarchy should not have \c visit methods. 
300 */ 301 /*@{*/ 302 virtual void visit(ir_variable *); 303 virtual void visit(ir_loop *); 304 virtual void visit(ir_loop_jump *); 305 virtual void visit(ir_function_signature *); 306 virtual void visit(ir_function *); 307 virtual void visit(ir_expression *); 308 virtual void visit(ir_swizzle *); 309 virtual void visit(ir_dereference_variable *); 310 virtual void visit(ir_dereference_array *); 311 virtual void visit(ir_dereference_record *); 312 virtual void visit(ir_assignment *); 313 virtual void visit(ir_constant *); 314 virtual void visit(ir_call *); 315 virtual void visit(ir_return *); 316 virtual void visit(ir_discard *); 317 virtual void visit(ir_texture *); 318 virtual void visit(ir_if *); 319 /*@}*/ 320 321 st_src_reg result; 322 323 /** List of variable_storage */ 324 exec_list variables; 325 326 /** List of function_entry */ 327 exec_list function_signatures; 328 int next_signature_id; 329 330 /** List of glsl_to_tgsi_instruction */ 331 exec_list instructions; 332 333 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op); 334 335 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op, 336 st_dst_reg dst, st_src_reg src0); 337 338 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op, 339 st_dst_reg dst, st_src_reg src0, st_src_reg src1); 340 341 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op, 342 st_dst_reg dst, 343 st_src_reg src0, st_src_reg src1, st_src_reg src2); 344 345 unsigned get_opcode(ir_instruction *ir, unsigned op, 346 st_dst_reg dst, 347 st_src_reg src0, st_src_reg src1); 348 349 /** 350 * Emit the correct dot-product instruction for the type of arguments 351 */ 352 void emit_dp(ir_instruction *ir, 353 st_dst_reg dst, 354 st_src_reg src0, 355 st_src_reg src1, 356 unsigned elements); 357 358 void emit_scalar(ir_instruction *ir, unsigned op, 359 st_dst_reg dst, st_src_reg src0); 360 361 void emit_scalar(ir_instruction *ir, unsigned op, 362 st_dst_reg dst, st_src_reg src0, st_src_reg src1); 363 364 
void emit_arl(ir_instruction *ir, st_dst_reg dst, st_src_reg src0); 365 366 void emit_scs(ir_instruction *ir, unsigned op, 367 st_dst_reg dst, const st_src_reg &src); 368 369 GLboolean try_emit_mad(ir_expression *ir, 370 int mul_operand); 371 GLboolean try_emit_sat(ir_expression *ir); 372 373 void emit_swz(ir_expression *ir); 374 375 bool process_move_condition(ir_rvalue *ir); 376 377 void remove_output_reads(gl_register_file type); 378 void simplify_cmp(void); 379 380 void rename_temp_register(int index, int new_index); 381 int get_first_temp_read(int index); 382 int get_first_temp_write(int index); 383 int get_last_temp_read(int index); 384 int get_last_temp_write(int index); 385 386 void copy_propagate(void); 387 void eliminate_dead_code(void); 388 int eliminate_dead_code_advanced(void); 389 void merge_registers(void); 390 void renumber_registers(void); 391 392 void *mem_ctx; 393}; 394 395static st_src_reg undef_src = st_src_reg(PROGRAM_UNDEFINED, 0, GLSL_TYPE_ERROR); 396 397static st_dst_reg undef_dst = st_dst_reg(PROGRAM_UNDEFINED, SWIZZLE_NOOP, GLSL_TYPE_ERROR); 398 399static st_dst_reg address_reg = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X, GLSL_TYPE_FLOAT); 400 401static void 402fail_link(struct gl_shader_program *prog, const char *fmt, ...) PRINTFLIKE(2, 3); 403 404static void 405fail_link(struct gl_shader_program *prog, const char *fmt, ...) 
406{ 407 va_list args; 408 va_start(args, fmt); 409 ralloc_vasprintf_append(&prog->InfoLog, fmt, args); 410 va_end(args); 411 412 prog->LinkStatus = GL_FALSE; 413} 414 415static int 416swizzle_for_size(int size) 417{ 418 int size_swizzles[4] = { 419 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X), 420 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y), 421 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z), 422 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W), 423 }; 424 425 assert((size >= 1) && (size <= 4)); 426 return size_swizzles[size - 1]; 427} 428 429static bool 430is_tex_instruction(unsigned opcode) 431{ 432 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode); 433 return info->is_tex; 434} 435 436static unsigned 437num_inst_dst_regs(unsigned opcode) 438{ 439 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode); 440 return info->num_dst; 441} 442 443static unsigned 444num_inst_src_regs(unsigned opcode) 445{ 446 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode); 447 return info->is_tex ? info->num_src - 1 : info->num_src; 448} 449 450glsl_to_tgsi_instruction * 451glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op, 452 st_dst_reg dst, 453 st_src_reg src0, st_src_reg src1, st_src_reg src2) 454{ 455 glsl_to_tgsi_instruction *inst = new(mem_ctx) glsl_to_tgsi_instruction(); 456 int num_reladdr = 0, i; 457 458 op = get_opcode(ir, op, dst, src0, src1); 459 460 /* If we have to do relative addressing, we want to load the ARL 461 * reg directly for one of the regs, and preload the other reladdr 462 * sources into temps. 
463 */ 464 num_reladdr += dst.reladdr != NULL; 465 num_reladdr += src0.reladdr != NULL; 466 num_reladdr += src1.reladdr != NULL; 467 num_reladdr += src2.reladdr != NULL; 468 469 reladdr_to_temp(ir, &src2, &num_reladdr); 470 reladdr_to_temp(ir, &src1, &num_reladdr); 471 reladdr_to_temp(ir, &src0, &num_reladdr); 472 473 if (dst.reladdr) { 474 emit_arl(ir, address_reg, *dst.reladdr); 475 num_reladdr--; 476 } 477 assert(num_reladdr == 0); 478 479 inst->op = op; 480 inst->dst = dst; 481 inst->src[0] = src0; 482 inst->src[1] = src1; 483 inst->src[2] = src2; 484 inst->ir = ir; 485 inst->dead_mask = 0; 486 487 inst->function = NULL; 488 489 if (op == TGSI_OPCODE_ARL) 490 this->num_address_regs = 1; 491 492 /* Update indirect addressing status used by TGSI */ 493 if (dst.reladdr) { 494 switch(dst.file) { 495 case PROGRAM_TEMPORARY: 496 this->indirect_addr_temps = true; 497 break; 498 case PROGRAM_LOCAL_PARAM: 499 case PROGRAM_ENV_PARAM: 500 case PROGRAM_STATE_VAR: 501 case PROGRAM_NAMED_PARAM: 502 case PROGRAM_CONSTANT: 503 case PROGRAM_UNIFORM: 504 this->indirect_addr_consts = true; 505 break; 506 default: 507 break; 508 } 509 } 510 else { 511 for (i=0; i<3; i++) { 512 if(inst->src[i].reladdr) { 513 switch(inst->src[i].file) { 514 case PROGRAM_TEMPORARY: 515 this->indirect_addr_temps = true; 516 break; 517 case PROGRAM_LOCAL_PARAM: 518 case PROGRAM_ENV_PARAM: 519 case PROGRAM_STATE_VAR: 520 case PROGRAM_NAMED_PARAM: 521 case PROGRAM_CONSTANT: 522 case PROGRAM_UNIFORM: 523 this->indirect_addr_consts = true; 524 break; 525 default: 526 break; 527 } 528 } 529 } 530 } 531 532 this->instructions.push_tail(inst); 533 534 return inst; 535} 536 537 538glsl_to_tgsi_instruction * 539glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op, 540 st_dst_reg dst, st_src_reg src0, st_src_reg src1) 541{ 542 return emit(ir, op, dst, src0, src1, undef_src); 543} 544 545glsl_to_tgsi_instruction * 546glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op, 547 st_dst_reg dst, st_src_reg 
src0) 548{ 549 assert(dst.writemask != 0); 550 return emit(ir, op, dst, src0, undef_src, undef_src); 551} 552 553glsl_to_tgsi_instruction * 554glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op) 555{ 556 return emit(ir, op, undef_dst, undef_src, undef_src, undef_src); 557} 558 559/** 560 * Determines whether to use an integer, unsigned integer, or float opcode 561 * based on the operands and input opcode, then emits the result. 562 * 563 * TODO: type checking for remaining TGSI opcodes 564 */ 565unsigned 566glsl_to_tgsi_visitor::get_opcode(ir_instruction *ir, unsigned op, 567 st_dst_reg dst, 568 st_src_reg src0, st_src_reg src1) 569{ 570 int type = GLSL_TYPE_FLOAT; 571 572 if (src0.type == GLSL_TYPE_FLOAT || src1.type == GLSL_TYPE_FLOAT) 573 type = GLSL_TYPE_FLOAT; 574 else if (glsl_version >= 130) 575 type = src0.type; 576 577#define case4(c, f, i, u) \ 578 case TGSI_OPCODE_##c: \ 579 if (type == GLSL_TYPE_INT) op = TGSI_OPCODE_##i; \ 580 else if (type == GLSL_TYPE_UINT) op = TGSI_OPCODE_##u; \ 581 else op = TGSI_OPCODE_##f; \ 582 break; 583#define case3(f, i, u) case4(f, f, i, u) 584#define case2fi(f, i) case4(f, f, i, i) 585#define case2iu(i, u) case4(i, LAST, i, u) 586 587 switch(op) { 588 case2fi(ADD, UADD); 589 case2fi(MUL, UMUL); 590 case2fi(MAD, UMAD); 591 case3(DIV, IDIV, UDIV); 592 case3(MAX, IMAX, UMAX); 593 case3(MIN, IMIN, UMIN); 594 case2iu(MOD, UMOD); 595 596 case2fi(SEQ, USEQ); 597 case2fi(SNE, USNE); 598 case3(SGE, ISGE, USGE); 599 case3(SLT, ISLT, USLT); 600 601 case2iu(SHL, SHL); 602 case2iu(ISHR, USHR); 603 case2iu(NOT, NOT); 604 case2iu(AND, AND); 605 case2iu(OR, OR); 606 case2iu(XOR, XOR); 607 608 default: break; 609 } 610 611 assert(op != TGSI_OPCODE_LAST); 612 return op; 613} 614 615void 616glsl_to_tgsi_visitor::emit_dp(ir_instruction *ir, 617 st_dst_reg dst, st_src_reg src0, st_src_reg src1, 618 unsigned elements) 619{ 620 static const unsigned dot_opcodes[] = { 621 TGSI_OPCODE_DP2, TGSI_OPCODE_DP3, TGSI_OPCODE_DP4 622 }; 623 624 
emit(ir, dot_opcodes[elements - 2], dst, src0, src1); 625} 626 627/** 628 * Emits TGSI scalar opcodes to produce unique answers across channels. 629 * 630 * Some TGSI opcodes are scalar-only, like ARB_fp/vp. The src X 631 * channel determines the result across all channels. So to do a vec4 632 * of this operation, we want to emit a scalar per source channel used 633 * to produce dest channels. 634 */ 635void 636glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op, 637 st_dst_reg dst, 638 st_src_reg orig_src0, st_src_reg orig_src1) 639{ 640 int i, j; 641 int done_mask = ~dst.writemask; 642 643 /* TGSI RCP is a scalar operation splatting results to all channels, 644 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our 645 * dst channels. 646 */ 647 for (i = 0; i < 4; i++) { 648 GLuint this_mask = (1 << i); 649 glsl_to_tgsi_instruction *inst; 650 st_src_reg src0 = orig_src0; 651 st_src_reg src1 = orig_src1; 652 653 if (done_mask & this_mask) 654 continue; 655 656 GLuint src0_swiz = GET_SWZ(src0.swizzle, i); 657 GLuint src1_swiz = GET_SWZ(src1.swizzle, i); 658 for (j = i + 1; j < 4; j++) { 659 /* If there is another enabled component in the destination that is 660 * derived from the same inputs, generate its value on this pass as 661 * well. 
662 */ 663 if (!(done_mask & (1 << j)) && 664 GET_SWZ(src0.swizzle, j) == src0_swiz && 665 GET_SWZ(src1.swizzle, j) == src1_swiz) { 666 this_mask |= (1 << j); 667 } 668 } 669 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz, 670 src0_swiz, src0_swiz); 671 src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz, 672 src1_swiz, src1_swiz); 673 674 inst = emit(ir, op, dst, src0, src1); 675 inst->dst.writemask = this_mask; 676 done_mask |= this_mask; 677 } 678} 679 680void 681glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op, 682 st_dst_reg dst, st_src_reg src0) 683{ 684 st_src_reg undef = undef_src; 685 686 undef.swizzle = SWIZZLE_XXXX; 687 688 emit_scalar(ir, op, dst, src0, undef); 689} 690 691void 692glsl_to_tgsi_visitor::emit_arl(ir_instruction *ir, 693 st_dst_reg dst, st_src_reg src0) 694{ 695 st_src_reg tmp = get_temp(glsl_type::float_type); 696 697 if (src0.type == GLSL_TYPE_INT) 698 emit(ir, TGSI_OPCODE_I2F, st_dst_reg(tmp), src0); 699 else if (src0.type == GLSL_TYPE_UINT) 700 emit(ir, TGSI_OPCODE_U2F, st_dst_reg(tmp), src0); 701 else 702 tmp = src0; 703 704 emit(ir, TGSI_OPCODE_ARL, dst, tmp); 705} 706 707/** 708 * Emit an TGSI_OPCODE_SCS instruction 709 * 710 * The \c SCS opcode functions a bit differently than the other TGSI opcodes. 711 * Instead of splatting its result across all four components of the 712 * destination, it writes one value to the \c x component and another value to 713 * the \c y component. 714 * 715 * \param ir IR instruction being processed 716 * \param op Either \c TGSI_OPCODE_SIN or \c TGSI_OPCODE_COS depending 717 * on which value is desired. 718 * \param dst Destination register 719 * \param src Source register 720 */ 721void 722glsl_to_tgsi_visitor::emit_scs(ir_instruction *ir, unsigned op, 723 st_dst_reg dst, 724 const st_src_reg &src) 725{ 726 /* Vertex programs cannot use the SCS opcode. 
727 */ 728 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB) { 729 emit_scalar(ir, op, dst, src); 730 return; 731 } 732 733 const unsigned component = (op == TGSI_OPCODE_SIN) ? 0 : 1; 734 const unsigned scs_mask = (1U << component); 735 int done_mask = ~dst.writemask; 736 st_src_reg tmp; 737 738 assert(op == TGSI_OPCODE_SIN || op == TGSI_OPCODE_COS); 739 740 /* If there are compnents in the destination that differ from the component 741 * that will be written by the SCS instrution, we'll need a temporary. 742 */ 743 if (scs_mask != unsigned(dst.writemask)) { 744 tmp = get_temp(glsl_type::vec4_type); 745 } 746 747 for (unsigned i = 0; i < 4; i++) { 748 unsigned this_mask = (1U << i); 749 st_src_reg src0 = src; 750 751 if ((done_mask & this_mask) != 0) 752 continue; 753 754 /* The source swizzle specified which component of the source generates 755 * sine / cosine for the current component in the destination. The SCS 756 * instruction requires that this value be swizzle to the X component. 757 * Replace the current swizzle with a swizzle that puts the source in 758 * the X component. 759 */ 760 unsigned src0_swiz = GET_SWZ(src.swizzle, i); 761 762 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz, 763 src0_swiz, src0_swiz); 764 for (unsigned j = i + 1; j < 4; j++) { 765 /* If there is another enabled component in the destination that is 766 * derived from the same inputs, generate its value on this pass as 767 * well. 768 */ 769 if (!(done_mask & (1 << j)) && 770 GET_SWZ(src0.swizzle, j) == src0_swiz) { 771 this_mask |= (1 << j); 772 } 773 } 774 775 if (this_mask != scs_mask) { 776 glsl_to_tgsi_instruction *inst; 777 st_dst_reg tmp_dst = st_dst_reg(tmp); 778 779 /* Emit the SCS instruction. 780 */ 781 inst = emit(ir, TGSI_OPCODE_SCS, tmp_dst, src0); 782 inst->dst.writemask = scs_mask; 783 784 /* Move the result of the SCS instruction to the desired location in 785 * the destination. 
786 */ 787 tmp.swizzle = MAKE_SWIZZLE4(component, component, 788 component, component); 789 inst = emit(ir, TGSI_OPCODE_SCS, dst, tmp); 790 inst->dst.writemask = this_mask; 791 } else { 792 /* Emit the SCS instruction to write directly to the destination. 793 */ 794 glsl_to_tgsi_instruction *inst = emit(ir, TGSI_OPCODE_SCS, dst, src0); 795 inst->dst.writemask = scs_mask; 796 } 797 798 done_mask |= this_mask; 799 } 800} 801 802struct st_src_reg 803glsl_to_tgsi_visitor::st_src_reg_for_float(float val) 804{ 805 st_src_reg src(PROGRAM_CONSTANT, -1, GLSL_TYPE_FLOAT); 806 union gl_constant_value uval; 807 808 uval.f = val; 809 src.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters, 810 &uval, 1, GL_FLOAT, &src.swizzle); 811 812 return src; 813} 814 815struct st_src_reg 816glsl_to_tgsi_visitor::st_src_reg_for_int(int val) 817{ 818 st_src_reg src(PROGRAM_CONSTANT, -1, GLSL_TYPE_INT); 819 union gl_constant_value uval; 820 821 assert(glsl_version >= 130); 822 823 uval.i = val; 824 src.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters, 825 &uval, 1, GL_INT, &src.swizzle); 826 827 return src; 828} 829 830struct st_src_reg 831glsl_to_tgsi_visitor::st_src_reg_for_type(int type, int val) 832{ 833 if (glsl_version >= 130) 834 return type == GLSL_TYPE_FLOAT ? st_src_reg_for_float(val) : 835 st_src_reg_for_int(val); 836 else 837 return st_src_reg_for_float(val); 838} 839 840static int 841type_size(const struct glsl_type *type) 842{ 843 unsigned int i; 844 int size; 845 846 switch (type->base_type) { 847 case GLSL_TYPE_UINT: 848 case GLSL_TYPE_INT: 849 case GLSL_TYPE_FLOAT: 850 case GLSL_TYPE_BOOL: 851 if (type->is_matrix()) { 852 return type->matrix_columns; 853 } else { 854 /* Regardless of size of vector, it gets a vec4. This is bad 855 * packing for things like floats, but otherwise arrays become a 856 * mess. Hopefully a later pass over the code can pack scalars 857 * down if appropriate. 
858 */ 859 return 1; 860 } 861 case GLSL_TYPE_ARRAY: 862 assert(type->length > 0); 863 return type_size(type->fields.array) * type->length; 864 case GLSL_TYPE_STRUCT: 865 size = 0; 866 for (i = 0; i < type->length; i++) { 867 size += type_size(type->fields.structure[i].type); 868 } 869 return size; 870 case GLSL_TYPE_SAMPLER: 871 /* Samplers take up one slot in UNIFORMS[], but they're baked in 872 * at link time. 873 */ 874 return 1; 875 default: 876 assert(0); 877 return 0; 878 } 879} 880 881/** 882 * In the initial pass of codegen, we assign temporary numbers to 883 * intermediate results. (not SSA -- variable assignments will reuse 884 * storage). 885 */ 886st_src_reg 887glsl_to_tgsi_visitor::get_temp(const glsl_type *type) 888{ 889 st_src_reg src; 890 int swizzle[4]; 891 int i; 892 893 src.type = glsl_version >= 130 ? type->base_type : GLSL_TYPE_FLOAT; 894 src.file = PROGRAM_TEMPORARY; 895 src.index = next_temp; 896 src.reladdr = NULL; 897 next_temp += type_size(type); 898 899 if (type->is_array() || type->is_record()) { 900 src.swizzle = SWIZZLE_NOOP; 901 } else { 902 for (i = 0; i < type->vector_elements; i++) 903 swizzle[i] = i; 904 for (; i < 4; i++) 905 swizzle[i] = type->vector_elements - 1; 906 src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], 907 swizzle[2], swizzle[3]); 908 } 909 src.negate = 0; 910 911 return src; 912} 913 914variable_storage * 915glsl_to_tgsi_visitor::find_variable_storage(ir_variable *var) 916{ 917 918 variable_storage *entry; 919 920 foreach_iter(exec_list_iterator, iter, this->variables) { 921 entry = (variable_storage *)iter.get(); 922 923 if (entry->var == var) 924 return entry; 925 } 926 927 return NULL; 928} 929 930void 931glsl_to_tgsi_visitor::visit(ir_variable *ir) 932{ 933 if (strcmp(ir->name, "gl_FragCoord") == 0) { 934 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog; 935 936 fp->OriginUpperLeft = ir->origin_upper_left; 937 fp->PixelCenterInteger = ir->pixel_center_integer; 938 939 } else if 
(strcmp(ir->name, "gl_FragDepth") == 0) { 940 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog; 941 switch (ir->depth_layout) { 942 case ir_depth_layout_none: 943 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE; 944 break; 945 case ir_depth_layout_any: 946 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY; 947 break; 948 case ir_depth_layout_greater: 949 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER; 950 break; 951 case ir_depth_layout_less: 952 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS; 953 break; 954 case ir_depth_layout_unchanged: 955 fp->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED; 956 break; 957 default: 958 assert(0); 959 break; 960 } 961 } 962 963 if (ir->mode == ir_var_uniform && strncmp(ir->name, "gl_", 3) == 0) { 964 unsigned int i; 965 const ir_state_slot *const slots = ir->state_slots; 966 assert(ir->state_slots != NULL); 967 968 /* Check if this statevar's setup in the STATE file exactly 969 * matches how we'll want to reference it as a 970 * struct/array/whatever. If not, then we need to move it into 971 * temporary storage and hope that it'll get copy-propagated 972 * out. 973 */ 974 for (i = 0; i < ir->num_state_slots; i++) { 975 if (slots[i].swizzle != SWIZZLE_XYZW) { 976 break; 977 } 978 } 979 980 struct variable_storage *storage; 981 st_dst_reg dst; 982 if (i == ir->num_state_slots) { 983 /* We'll set the index later. */ 984 storage = new(mem_ctx) variable_storage(ir, PROGRAM_STATE_VAR, -1); 985 this->variables.push_tail(storage); 986 987 dst = undef_dst; 988 } else { 989 /* The variable_storage constructor allocates slots based on the size 990 * of the type. However, this had better match the number of state 991 * elements that we're going to copy into the new temporary. 
992 */ 993 assert((int) ir->num_state_slots == type_size(ir->type)); 994 995 storage = new(mem_ctx) variable_storage(ir, PROGRAM_TEMPORARY, 996 this->next_temp); 997 this->variables.push_tail(storage); 998 this->next_temp += type_size(ir->type); 999 1000 dst = st_dst_reg(st_src_reg(PROGRAM_TEMPORARY, storage->index, 1001 glsl_version >= 130 ? ir->type->base_type : GLSL_TYPE_FLOAT)); 1002 } 1003 1004 1005 for (unsigned int i = 0; i < ir->num_state_slots; i++) { 1006 int index = _mesa_add_state_reference(this->prog->Parameters, 1007 (gl_state_index *)slots[i].tokens); 1008 1009 if (storage->file == PROGRAM_STATE_VAR) { 1010 if (storage->index == -1) { 1011 storage->index = index; 1012 } else { 1013 assert(index == storage->index + (int)i); 1014 } 1015 } else { 1016 st_src_reg src(PROGRAM_STATE_VAR, index, 1017 glsl_version >= 130 ? ir->type->base_type : GLSL_TYPE_FLOAT); 1018 src.swizzle = slots[i].swizzle; 1019 emit(ir, TGSI_OPCODE_MOV, dst, src); 1020 /* even a float takes up a whole vec4 reg in a struct/array. 
*/ 1021 dst.index++; 1022 } 1023 } 1024 1025 if (storage->file == PROGRAM_TEMPORARY && 1026 dst.index != storage->index + (int) ir->num_state_slots) { 1027 fail_link(this->shader_program, 1028 "failed to load builtin uniform `%s' (%d/%d regs loaded)\n", 1029 ir->name, dst.index - storage->index, 1030 type_size(ir->type)); 1031 } 1032 } 1033} 1034 1035void 1036glsl_to_tgsi_visitor::visit(ir_loop *ir) 1037{ 1038 ir_dereference_variable *counter = NULL; 1039 1040 if (ir->counter != NULL) 1041 counter = new(ir) ir_dereference_variable(ir->counter); 1042 1043 if (ir->from != NULL) { 1044 assert(ir->counter != NULL); 1045 1046 ir_assignment *a = new(ir) ir_assignment(counter, ir->from, NULL); 1047 1048 a->accept(this); 1049 delete a; 1050 } 1051 1052 emit(NULL, TGSI_OPCODE_BGNLOOP); 1053 1054 if (ir->to) { 1055 ir_expression *e = 1056 new(ir) ir_expression(ir->cmp, glsl_type::bool_type, 1057 counter, ir->to); 1058 ir_if *if_stmt = new(ir) ir_if(e); 1059 1060 ir_loop_jump *brk = new(ir) ir_loop_jump(ir_loop_jump::jump_break); 1061 1062 if_stmt->then_instructions.push_tail(brk); 1063 1064 if_stmt->accept(this); 1065 1066 delete if_stmt; 1067 delete e; 1068 delete brk; 1069 } 1070 1071 visit_exec_list(&ir->body_instructions, this); 1072 1073 if (ir->increment) { 1074 ir_expression *e = 1075 new(ir) ir_expression(ir_binop_add, counter->type, 1076 counter, ir->increment); 1077 1078 ir_assignment *a = new(ir) ir_assignment(counter, e, NULL); 1079 1080 a->accept(this); 1081 delete a; 1082 delete e; 1083 } 1084 1085 emit(NULL, TGSI_OPCODE_ENDLOOP); 1086} 1087 1088void 1089glsl_to_tgsi_visitor::visit(ir_loop_jump *ir) 1090{ 1091 switch (ir->mode) { 1092 case ir_loop_jump::jump_break: 1093 emit(NULL, TGSI_OPCODE_BRK); 1094 break; 1095 case ir_loop_jump::jump_continue: 1096 emit(NULL, TGSI_OPCODE_CONT); 1097 break; 1098 } 1099} 1100 1101 1102void 1103glsl_to_tgsi_visitor::visit(ir_function_signature *ir) 1104{ 1105 assert(0); 1106 (void)ir; 1107} 1108 1109void 
1110glsl_to_tgsi_visitor::visit(ir_function *ir) 1111{ 1112 /* Ignore function bodies other than main() -- we shouldn't see calls to 1113 * them since they should all be inlined before we get to glsl_to_tgsi. 1114 */ 1115 if (strcmp(ir->name, "main") == 0) { 1116 const ir_function_signature *sig; 1117 exec_list empty; 1118 1119 sig = ir->matching_signature(&empty); 1120 1121 assert(sig); 1122 1123 foreach_iter(exec_list_iterator, iter, sig->body) { 1124 ir_instruction *ir = (ir_instruction *)iter.get(); 1125 1126 ir->accept(this); 1127 } 1128 } 1129} 1130 1131GLboolean 1132glsl_to_tgsi_visitor::try_emit_mad(ir_expression *ir, int mul_operand) 1133{ 1134 int nonmul_operand = 1 - mul_operand; 1135 st_src_reg a, b, c; 1136 1137 ir_expression *expr = ir->operands[mul_operand]->as_expression(); 1138 if (!expr || expr->operation != ir_binop_mul) 1139 return false; 1140 1141 expr->operands[0]->accept(this); 1142 a = this->result; 1143 expr->operands[1]->accept(this); 1144 b = this->result; 1145 ir->operands[nonmul_operand]->accept(this); 1146 c = this->result; 1147 1148 this->result = get_temp(ir->type); 1149 emit(ir, TGSI_OPCODE_MAD, st_dst_reg(this->result), a, b, c); 1150 1151 return true; 1152} 1153 1154GLboolean 1155glsl_to_tgsi_visitor::try_emit_sat(ir_expression *ir) 1156{ 1157 /* Saturates were only introduced to vertex programs in 1158 * NV_vertex_program3, so don't give them to drivers in the VP. 
    */
   if (this->prog->Target == GL_VERTEX_PROGRAM_ARB)
      return false;

   ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
   if (!sat_src)
      return false;

   sat_src->accept(this);
   st_src_reg src = this->result;

   this->result = get_temp(ir->type);
   glsl_to_tgsi_instruction *inst;
   inst = emit(ir, TGSI_OPCODE_MOV, st_dst_reg(this->result), src);
   inst->saturate = true;

   return true;
}

/**
 * Load a relatively-addressed source into the address register and, when
 * more than one source of the instruction uses relative addressing,
 * resolve this one into a plain temporary (there is only one address
 * register to go around).
 *
 * \param num_reladdr  in/out count of remaining reladdr sources; the last
 *                     one keeps its reladdr and uses the ARL result.
 */
void
glsl_to_tgsi_visitor::reladdr_to_temp(ir_instruction *ir,
        			      st_src_reg *reg, int *num_reladdr)
{
   if (!reg->reladdr)
      return;

   emit_arl(ir, address_reg, *reg->reladdr);

   if (*num_reladdr != 1) {
      st_src_reg temp = get_temp(glsl_type::vec4_type);

      emit(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), *reg);
      *reg = temp;
   }

   (*num_reladdr)--;
}

/**
 * Translate an expression tree: evaluate each operand into a source
 * register, then emit the TGSI opcode(s) for the operation into a fresh
 * temporary (see the big switch below).
 */
void
glsl_to_tgsi_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   st_src_reg op[Elements(ir->operands)];
   st_src_reg result_src;
   st_dst_reg result_dst;

   /* Quick peephole: Emit MAD(a, b, c) instead of ADD(MUL(a, b), c)
    */
   if (ir->operation == ir_binop_add) {
      if (try_emit_mad(ir, 1))
         return;
      if (try_emit_mad(ir, 0))
         return;
   }
   if (try_emit_sat(ir))
      return;

   if (ir->operation == ir_quadop_vector)
      assert(!"ir_quadop_vector should have been lowered");

   for (operand = 0; operand < ir->get_num_operands(); operand++) {
      this->result.file = PROGRAM_UNDEFINED;
      ir->operands[operand]->accept(this);
      /* An operand that produced no register is a translator bug; dump
       * the offending subtree and bail.
       */
      if (this->result.file == PROGRAM_UNDEFINED) {
         ir_print_visitor v;
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->accept(&v);
         exit(1);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
   }

   int vector_elements = ir->operands[0]->type->vector_elements;
   if (ir->operands[1]) {
      vector_elements = MAX2(vector_elements,
                             ir->operands[1]->type->vector_elements);
   }

   this->result.file = PROGRAM_UNDEFINED;

   /* Storage for our result.  Ideally for an assignment we'd be using
    * the actual storage for the result here, instead.
    */
   result_src = get_temp(ir->type);
   /* convenience for the emit functions below. */
   result_dst = st_dst_reg(result_src);
   /* Limit writes to the channels that will be used by result_src later.
    * This does limit this temp's use as a temporary for multi-instruction
    * sequences.
    */
   result_dst.writemask = (1 << ir->type->vector_elements) - 1;

   switch (ir->operation) {
   case ir_unop_logic_not:
      /* bools are 0/1 in the chosen register type, so !a == (a == 0). */
      emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], st_src_reg_for_type(result_dst.type, 0));
      break;
   case ir_unop_neg:
      assert(result_dst.type == GLSL_TYPE_FLOAT || result_dst.type == GLSL_TYPE_INT);
      if (result_dst.type == GLSL_TYPE_INT)
         emit(ir, TGSI_OPCODE_INEG, result_dst, op[0]);
      else {
         /* Float negate is free via the source negate modifier; no
          * instruction is emitted.
          */
         op[0].negate = ~op[0].negate;
         result_src = op[0];
      }
      break;
   case ir_unop_abs:
      assert(result_dst.type == GLSL_TYPE_FLOAT);
      emit(ir, TGSI_OPCODE_ABS, result_dst, op[0]);
      break;
   case ir_unop_sign:
      emit(ir, TGSI_OPCODE_SSG, result_dst, op[0]);
      break;
   case ir_unop_rcp:
      emit_scalar(ir, TGSI_OPCODE_RCP, result_dst, op[0]);
      break;

   case ir_unop_exp2:
      emit_scalar(ir, TGSI_OPCODE_EX2, result_dst, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_log2:
      emit_scalar(ir, TGSI_OPCODE_LG2, result_dst, op[0]);
      break;
   case ir_unop_sin:
      emit_scalar(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
      break;
   case ir_unop_cos:
      emit_scalar(ir, TGSI_OPCODE_COS, result_dst, op[0]);
      break;
   case ir_unop_sin_reduced:
      emit_scs(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
      break;
   case ir_unop_cos_reduced:
      emit_scs(ir, TGSI_OPCODE_COS, result_dst, op[0]);
      break;

   case ir_unop_dFdx:
      emit(ir, TGSI_OPCODE_DDX, result_dst, op[0]);
      break;
   case ir_unop_dFdy:
      /* dFdy is emitted as -DDY(-x); the negates cancel for the common
       * Y-axis convention.
       */
      op[0].negate = ~op[0].negate;
      emit(ir, TGSI_OPCODE_DDY, result_dst, op[0]);
      break;

   case ir_unop_noise: {
      /* At some point, a motivated person could add a better
       * implementation of noise.  Currently not even the nvidia
       * binary drivers do anything more than this.  In any case, the
       * place to do this is in the GL state tracker, not the poor
       * driver.
       */
      emit(ir, TGSI_OPCODE_MOV, result_dst, st_src_reg_for_float(0.5));
      break;
   }

   case ir_binop_add:
      emit(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
      break;
   case ir_binop_sub:
      emit(ir, TGSI_OPCODE_SUB, result_dst, op[0], op[1]);
      break;

   case ir_binop_mul:
      emit(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
      break;
   case ir_binop_div:
      if (result_dst.type == GLSL_TYPE_FLOAT)
         assert(!"not reached: should be handled by ir_div_to_mul_rcp");
      else
         emit(ir, TGSI_OPCODE_DIV, result_dst, op[0], op[1]);
      break;
   case ir_binop_mod:
      if (result_dst.type == GLSL_TYPE_FLOAT)
         assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
      else
         emit(ir, TGSI_OPCODE_MOD, result_dst, op[0], op[1]);
      break;

   case ir_binop_less:
      emit(ir, TGSI_OPCODE_SLT, result_dst, op[0], op[1]);
      break;
   case ir_binop_greater:
      emit(ir, TGSI_OPCODE_SGT, result_dst, op[0], op[1]);
      break;
   case ir_binop_lequal:
      emit(ir, TGSI_OPCODE_SLE, result_dst, op[0], op[1]);
      break;
   case ir_binop_gequal:
      emit(ir, TGSI_OPCODE_SGE, result_dst, op[0], op[1]);
      break;
   case ir_binop_equal:
      emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
      break;
   case ir_binop_nequal:
      emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
      break;
   case ir_binop_all_equal:
      /* "==" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         /* Per-channel SNE, then dot the result with itself: zero iff
          * every channel compared equal.
          */
         st_src_reg temp = get_temp(glsl_version >= 130 ?
            glsl_type::get_instance(ir->operands[0]->type->base_type, 4, 1) :
            glsl_type::vec4_type);
         assert(ir->operands[0]->type->base_type == GLSL_TYPE_FLOAT);
         emit(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
         emit_dp(ir, result_dst, temp, temp, vector_elements);
         emit(ir, TGSI_OPCODE_SEQ, result_dst, result_src, st_src_reg_for_float(0.0));
      } else {
         emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
      }
      break;
   case ir_binop_any_nequal:
      /* "!=" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         st_src_reg temp = get_temp(glsl_version >= 130 ?
            glsl_type::get_instance(ir->operands[0]->type->base_type, 4, 1) :
            glsl_type::vec4_type);
         assert(ir->operands[0]->type->base_type == GLSL_TYPE_FLOAT);
         emit(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);
         emit_dp(ir, result_dst, temp, temp, vector_elements);
         emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
      } else {
         emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
      }
      break;

   case ir_unop_any:
      assert(ir->operands[0]->type->is_vector());
      emit_dp(ir, result_dst, op[0], op[0],
              ir->operands[0]->type->vector_elements);
      emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
      break;

   case ir_binop_logic_xor:
      /* bools are 0.0/1.0 here, so inequality is exactly xor. */
      emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
      break;

   case ir_binop_logic_or:
      /* This could be a saturated add and skip the SNE. */
      emit(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
      emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_float(0.0));
      break;

   case ir_binop_logic_and:
      /* the bool args are stored as float 0.0 or 1.0, so "mul" gives us "and". */
      emit(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
      break;

   case ir_binop_dot:
      assert(ir->operands[0]->type->is_vector());
      assert(ir->operands[0]->type == ir->operands[1]->type);
      emit_dp(ir, result_dst, op[0], op[1],
              ir->operands[0]->type->vector_elements);
      break;

   case ir_unop_sqrt:
      /* sqrt(x) = x * rsq(x). */
      emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
      emit(ir, TGSI_OPCODE_MUL, result_dst, result_src, op[0]);
      /* For incoming channels <= 0, set the result to 0.
*/ 1428 op[0].negate = ~op[0].negate; 1429 emit(ir, TGSI_OPCODE_CMP, result_dst, 1430 op[0], result_src, st_src_reg_for_float(0.0)); 1431 break; 1432 case ir_unop_rsq: 1433 emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]); 1434 break; 1435 case ir_unop_i2f: 1436 case ir_unop_b2f: 1437 if (glsl_version >= 130) { 1438 emit(ir, TGSI_OPCODE_I2F, result_dst, op[0]); 1439 break; 1440 } 1441 case ir_unop_b2i: 1442 /* Booleans are stored as integers (or floats in GLSL 1.20 and lower). */ 1443 result_src = op[0]; 1444 break; 1445 case ir_unop_f2i: 1446 if (glsl_version >= 130) 1447 emit(ir, TGSI_OPCODE_F2I, result_dst, op[0]); 1448 else 1449 emit(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]); 1450 break; 1451 case ir_unop_f2b: 1452 case ir_unop_i2b: 1453 emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], 1454 st_src_reg_for_type(result_dst.type, 0)); 1455 break; 1456 case ir_unop_trunc: 1457 emit(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]); 1458 break; 1459 case ir_unop_ceil: 1460 op[0].negate = ~op[0].negate; 1461 emit(ir, TGSI_OPCODE_FLR, result_dst, op[0]); 1462 result_src.negate = ~result_src.negate; 1463 break; 1464 case ir_unop_floor: 1465 emit(ir, TGSI_OPCODE_FLR, result_dst, op[0]); 1466 break; 1467 case ir_unop_fract: 1468 emit(ir, TGSI_OPCODE_FRC, result_dst, op[0]); 1469 break; 1470 1471 case ir_binop_min: 1472 emit(ir, TGSI_OPCODE_MIN, result_dst, op[0], op[1]); 1473 break; 1474 case ir_binop_max: 1475 emit(ir, TGSI_OPCODE_MAX, result_dst, op[0], op[1]); 1476 break; 1477 case ir_binop_pow: 1478 emit_scalar(ir, TGSI_OPCODE_POW, result_dst, op[0], op[1]); 1479 break; 1480 1481 case ir_unop_bit_not: 1482 if (glsl_version >= 130) { 1483 emit(ir, TGSI_OPCODE_NOT, result_dst, op[0]); 1484 break; 1485 } 1486 case ir_unop_u2f: 1487 if (glsl_version >= 130) { 1488 emit(ir, TGSI_OPCODE_U2F, result_dst, op[0]); 1489 break; 1490 } 1491 case ir_binop_lshift: 1492 if (glsl_version >= 130) { 1493 emit(ir, TGSI_OPCODE_SHL, result_dst, op[0]); 1494 break; 1495 } 1496 case 
ir_binop_rshift: 1497 if (glsl_version >= 130) { 1498 emit(ir, TGSI_OPCODE_ISHR, result_dst, op[0]); 1499 break; 1500 } 1501 case ir_binop_bit_and: 1502 if (glsl_version >= 130) { 1503 emit(ir, TGSI_OPCODE_AND, result_dst, op[0]); 1504 break; 1505 } 1506 case ir_binop_bit_xor: 1507 if (glsl_version >= 130) { 1508 emit(ir, TGSI_OPCODE_XOR, result_dst, op[0]); 1509 break; 1510 } 1511 case ir_binop_bit_or: 1512 if (glsl_version >= 130) { 1513 emit(ir, TGSI_OPCODE_OR, result_dst, op[0]); 1514 break; 1515 } 1516 case ir_unop_round_even: 1517 assert(!"GLSL 1.30 features unsupported"); 1518 break; 1519 1520 case ir_quadop_vector: 1521 /* This operation should have already been handled. 1522 */ 1523 assert(!"Should not get here."); 1524 break; 1525 } 1526 1527 this->result = result_src; 1528} 1529 1530 1531void 1532glsl_to_tgsi_visitor::visit(ir_swizzle *ir) 1533{ 1534 st_src_reg src; 1535 int i; 1536 int swizzle[4]; 1537 1538 /* Note that this is only swizzles in expressions, not those on the left 1539 * hand side of an assignment, which do write masking. See ir_assignment 1540 * for that. 1541 */ 1542 1543 ir->val->accept(this); 1544 src = this->result; 1545 assert(src.file != PROGRAM_UNDEFINED); 1546 1547 for (i = 0; i < 4; i++) { 1548 if (i < ir->type->vector_elements) { 1549 switch (i) { 1550 case 0: 1551 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.x); 1552 break; 1553 case 1: 1554 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.y); 1555 break; 1556 case 2: 1557 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.z); 1558 break; 1559 case 3: 1560 swizzle[i] = GET_SWZ(src.swizzle, ir->mask.w); 1561 break; 1562 } 1563 } else { 1564 /* If the type is smaller than a vec4, replicate the last 1565 * channel out. 
1566 */ 1567 swizzle[i] = swizzle[ir->type->vector_elements - 1]; 1568 } 1569 } 1570 1571 src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]); 1572 1573 this->result = src; 1574} 1575 1576void 1577glsl_to_tgsi_visitor::visit(ir_dereference_variable *ir) 1578{ 1579 variable_storage *entry = find_variable_storage(ir->var); 1580 ir_variable *var = ir->var; 1581 1582 if (!entry) { 1583 switch (var->mode) { 1584 case ir_var_uniform: 1585 entry = new(mem_ctx) variable_storage(var, PROGRAM_UNIFORM, 1586 var->location); 1587 this->variables.push_tail(entry); 1588 break; 1589 case ir_var_in: 1590 case ir_var_inout: 1591 /* The linker assigns locations for varyings and attributes, 1592 * including deprecated builtins (like gl_Color), user-assign 1593 * generic attributes (glBindVertexLocation), and 1594 * user-defined varyings. 1595 * 1596 * FINISHME: We would hit this path for function arguments. Fix! 1597 */ 1598 assert(var->location != -1); 1599 entry = new(mem_ctx) variable_storage(var, 1600 PROGRAM_INPUT, 1601 var->location); 1602 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB && 1603 var->location >= VERT_ATTRIB_GENERIC0) { 1604 _mesa_add_attribute(this->prog->Attributes, 1605 var->name, 1606 _mesa_sizeof_glsl_type(var->type->gl_type), 1607 var->type->gl_type, 1608 var->location - VERT_ATTRIB_GENERIC0); 1609 } 1610 break; 1611 case ir_var_out: 1612 assert(var->location != -1); 1613 entry = new(mem_ctx) variable_storage(var, 1614 PROGRAM_OUTPUT, 1615 var->location); 1616 break; 1617 case ir_var_system_value: 1618 entry = new(mem_ctx) variable_storage(var, 1619 PROGRAM_SYSTEM_VALUE, 1620 var->location); 1621 break; 1622 case ir_var_auto: 1623 case ir_var_temporary: 1624 entry = new(mem_ctx) variable_storage(var, PROGRAM_TEMPORARY, 1625 this->next_temp); 1626 this->variables.push_tail(entry); 1627 1628 next_temp += type_size(var->type); 1629 break; 1630 } 1631 1632 if (!entry) { 1633 printf("Failed to make storage for %s\n", var->name); 1634 
exit(1); 1635 } 1636 } 1637 1638 this->result = st_src_reg(entry->file, entry->index, var->type); 1639 if (glsl_version <= 120) 1640 this->result.type = GLSL_TYPE_FLOAT; 1641} 1642 1643void 1644glsl_to_tgsi_visitor::visit(ir_dereference_array *ir) 1645{ 1646 ir_constant *index; 1647 st_src_reg src; 1648 int element_size = type_size(ir->type); 1649 1650 index = ir->array_index->constant_expression_value(); 1651 1652 ir->array->accept(this); 1653 src = this->result; 1654 1655 if (index) { 1656 src.index += index->value.i[0] * element_size; 1657 } else { 1658 st_src_reg array_base = this->result; 1659 /* Variable index array dereference. It eats the "vec4" of the 1660 * base of the array and an index that offsets the Mesa register 1661 * index. 1662 */ 1663 ir->array_index->accept(this); 1664 1665 st_src_reg index_reg; 1666 1667 if (element_size == 1) { 1668 index_reg = this->result; 1669 } else { 1670 index_reg = get_temp(glsl_type::float_type); 1671 1672 emit(ir, TGSI_OPCODE_MUL, st_dst_reg(index_reg), 1673 this->result, st_src_reg_for_float(element_size)); 1674 } 1675 1676 src.reladdr = ralloc(mem_ctx, st_src_reg); 1677 memcpy(src.reladdr, &index_reg, sizeof(index_reg)); 1678 } 1679 1680 /* If the type is smaller than a vec4, replicate the last channel out. */ 1681 if (ir->type->is_scalar() || ir->type->is_vector()) 1682 src.swizzle = swizzle_for_size(ir->type->vector_elements); 1683 else 1684 src.swizzle = SWIZZLE_NOOP; 1685 1686 this->result = src; 1687} 1688 1689void 1690glsl_to_tgsi_visitor::visit(ir_dereference_record *ir) 1691{ 1692 unsigned int i; 1693 const glsl_type *struct_type = ir->record->type; 1694 int offset = 0; 1695 1696 ir->record->accept(this); 1697 1698 for (i = 0; i < struct_type->length; i++) { 1699 if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0) 1700 break; 1701 offset += type_size(struct_type->fields.structure[i].type); 1702 } 1703 1704 /* If the type is smaller than a vec4, replicate the last channel out. 
*/ 1705 if (ir->type->is_scalar() || ir->type->is_vector()) 1706 this->result.swizzle = swizzle_for_size(ir->type->vector_elements); 1707 else 1708 this->result.swizzle = SWIZZLE_NOOP; 1709 1710 this->result.index += offset; 1711} 1712 1713/** 1714 * We want to be careful in assignment setup to hit the actual storage 1715 * instead of potentially using a temporary like we might with the 1716 * ir_dereference handler. 1717 */ 1718static st_dst_reg 1719get_assignment_lhs(ir_dereference *ir, glsl_to_tgsi_visitor *v) 1720{ 1721 /* The LHS must be a dereference. If the LHS is a variable indexed array 1722 * access of a vector, it must be separated into a series conditional moves 1723 * before reaching this point (see ir_vec_index_to_cond_assign). 1724 */ 1725 assert(ir->as_dereference()); 1726 ir_dereference_array *deref_array = ir->as_dereference_array(); 1727 if (deref_array) { 1728 assert(!deref_array->array->type->is_vector()); 1729 } 1730 1731 /* Use the rvalue deref handler for the most part. We'll ignore 1732 * swizzles in it and write swizzles using writemask, though. 1733 */ 1734 ir->accept(v); 1735 return st_dst_reg(v->result); 1736} 1737 1738/** 1739 * Process the condition of a conditional assignment 1740 * 1741 * Examines the condition of a conditional assignment to generate the optimal 1742 * first operand of a \c CMP instruction. If the condition is a relational 1743 * operator with 0 (e.g., \c ir_binop_less), the value being compared will be 1744 * used as the source for the \c CMP instruction. Otherwise the comparison 1745 * is processed to a boolean result, and the boolean result is used as the 1746 * operand to the CMP instruction. 
 */
bool
glsl_to_tgsi_visitor::process_move_condition(ir_rvalue *ir)
{
   ir_rvalue *src_ir = ir;
   bool negate = true;
   bool switch_order = false;

   ir_expression *const expr = ir->as_expression();
   if ((expr != NULL) && (expr->get_num_operands() == 2)) {
      bool zero_on_left = false;

      if (expr->operands[0]->is_zero()) {
         src_ir = expr->operands[1];
         zero_on_left = true;
      } else if (expr->operands[1]->is_zero()) {
         src_ir = expr->operands[0];
         zero_on_left = false;
      }

      /*      a is -  0  +            -  0  +
       * (a <  0)  T  F  F  ( a < 0)  T  F  F
       * (0 <  a)  F  F  T  (-a < 0)  F  F  T
       * (a <= 0)  T  T  F  (-a < 0)  F  F  T  (swap order of other operands)
       * (0 <= a)  F  T  T  ( a < 0)  T  F  F  (swap order of other operands)
       * (a >  0)  F  F  T  (-a < 0)  F  F  T
       * (0 >  a)  T  F  F  ( a < 0)  T  F  F
       * (a >= 0)  F  T  T  ( a < 0)  T  F  F  (swap order of other operands)
       * (0 >= a)  T  T  F  (-a < 0)  F  F  T  (swap order of other operands)
       *
       * Note that exchanging the order of 0 and 'a' in the comparison simply
       * means that the value of 'a' should be negated.
       */
      if (src_ir != ir) {
         switch (expr->operation) {
         case ir_binop_less:
            switch_order = false;
            negate = zero_on_left;
            break;

         case ir_binop_greater:
            switch_order = false;
            negate = !zero_on_left;
            break;

         case ir_binop_lequal:
            switch_order = true;
            negate = !zero_on_left;
            break;

         case ir_binop_gequal:
            switch_order = true;
            negate = zero_on_left;
            break;

         default:
            /* This isn't the right kind of comparison afterall, so make sure
             * the whole condition is visited.
             */
            src_ir = ir;
            break;
         }
      }
   }

   src_ir->accept(this);

   /* We use the TGSI_OPCODE_CMP (a < 0 ? b : c) for conditional moves, and the
    * condition we produced is 0.0 or 1.0.  By flipping the sign, we can
    * choose which value TGSI_OPCODE_CMP produces without an extra instruction
    * computing the condition.
    */
   if (negate)
      this->result.negate = ~this->result.negate;

   return switch_order;
}

/**
 * Translate an assignment: evaluate the RHS, resolve the LHS to actual
 * storage, fix up the writemask/RHS swizzle for sub-vec4 writes, and emit
 * one MOV per register (or one CMP per register when the assignment is
 * conditional).
 */
void
glsl_to_tgsi_visitor::visit(ir_assignment *ir)
{
   st_dst_reg l;
   st_src_reg r;
   int i;

   ir->rhs->accept(this);
   r = this->result;

   l = get_assignment_lhs(ir->lhs, this);

   /* FINISHME: This should really set to the correct maximal writemask for each
    * FINISHME: component written (in the loops below).  This case can only
    * FINISHME: occur for matrices, arrays, and structures.
    */
   if (ir->write_mask == 0) {
      assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector());
      l.writemask = WRITEMASK_XYZW;
   } else if (ir->lhs->type->is_scalar()) {
      /* FINISHME: This hack makes writing to gl_FragDepth, which lives in the
       * FINISHME: W component of fragment shader output zero, work correctly.
       */
      l.writemask = WRITEMASK_XYZW;
   } else {
      int swizzles[4];
      int first_enabled_chan = 0;
      int rhs_chan = 0;

      assert(ir->lhs->type->is_vector());
      l.writemask = ir->write_mask;

      for (int i = 0; i < 4; i++) {
         if (l.writemask & (1 << i)) {
            first_enabled_chan = GET_SWZ(r.swizzle, i);
            break;
         }
      }

      /* Swizzle a small RHS vector into the channels being written.
       *
       * glsl ir treats write_mask as dictating how many channels are
       * present on the RHS while Mesa IR treats write_mask as just
       * showing which channels of the vec4 RHS get written.
       */
      for (int i = 0; i < 4; i++) {
         if (l.writemask & (1 << i))
            swizzles[i] = GET_SWZ(r.swizzle, rhs_chan++);
         else
            swizzles[i] = first_enabled_chan;
      }
      r.swizzle = MAKE_SWIZZLE4(swizzles[0], swizzles[1],
                                swizzles[2], swizzles[3]);
   }

   assert(l.file != PROGRAM_UNDEFINED);
   assert(r.file != PROGRAM_UNDEFINED);

   if (ir->condition) {
      const bool switch_order = this->process_move_condition(ir->condition);
      st_src_reg condition = this->result;

      /* Conditional move: CMP writes r or the old LHS value depending on
       * the sign of the condition (see process_move_condition).
       */
      for (i = 0; i < type_size(ir->lhs->type); i++) {
         st_src_reg l_src = st_src_reg(l);
         l_src.swizzle = swizzle_for_size(ir->lhs->type->vector_elements);

         if (switch_order) {
            emit(ir, TGSI_OPCODE_CMP, l, condition, l_src, r);
         } else {
            emit(ir, TGSI_OPCODE_CMP, l, condition, r, l_src);
         }

         l.index++;
         r.index++;
      }
   } else {
      for (i = 0; i < type_size(ir->lhs->type); i++) {
         emit(ir, TGSI_OPCODE_MOV, l, r);
         l.index++;
         r.index++;
      }
   }
}


/**
 * Translate a constant.
 *
 * Scalar/vector constants become entries in the program's parameter list;
 * struct, array and matrix constants are assembled into a temporary with
 * one MOV per constituent vec4, since the parameter list only takes four
 * values at a time.
 */
void
glsl_to_tgsi_visitor::visit(ir_constant *ir)
{
   st_src_reg src;
   GLfloat stack_vals[4] = { 0 };
   gl_constant_value *values = (gl_constant_value *) stack_vals;
   GLenum gl_type = GL_NONE;
   unsigned int i;

   /* Unfortunately, 4 floats is all we can get into
    * _mesa_add_unnamed_constant.  So, make a temp to store an
    * aggregate constant and move each constant value into it.  If we
    * get lucky, copy propagation will eliminate the extra moves.
    */
   if (ir->type->base_type == GLSL_TYPE_STRUCT) {
      st_src_reg temp_base = get_temp(ir->type);
      st_dst_reg temp = st_dst_reg(temp_base);

      foreach_iter(exec_list_iterator, iter, ir->components) {
         ir_constant *field_value = (ir_constant *)iter.get();
         int size = type_size(field_value->type);

         assert(size > 0);

         field_value->accept(this);
         src = this->result;

         for (i = 0; i < (unsigned int)size; i++) {
            emit(ir, TGSI_OPCODE_MOV, temp, src);

            src.index++;
            temp.index++;
         }
      }
      this->result = temp_base;
      return;
   }

   if (ir->type->is_array()) {
      st_src_reg temp_base = get_temp(ir->type);
      st_dst_reg temp = st_dst_reg(temp_base);
      int size = type_size(ir->type->fields.array);

      assert(size > 0);

      for (i = 0; i < ir->type->length; i++) {
         ir->array_elements[i]->accept(this);
         src = this->result;
         for (int j = 0; j < size; j++) {
            emit(ir, TGSI_OPCODE_MOV, temp, src);

            src.index++;
            temp.index++;
         }
      }
      this->result = temp_base;
      return;
   }

   if (ir->type->is_matrix()) {
      st_src_reg mat = get_temp(ir->type);
      st_dst_reg mat_column = st_dst_reg(mat);

      /* One parameter-list constant and one MOV per matrix column. */
      for (i = 0; i < ir->type->matrix_columns; i++) {
         assert(ir->type->base_type == GLSL_TYPE_FLOAT);
         values = (gl_constant_value *) &ir->value.f[i * ir->type->vector_elements];

         src = st_src_reg(PROGRAM_CONSTANT, -1, ir->type->base_type);
         src.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
        					      values,
        					      ir->type->vector_elements,
        					      GL_FLOAT,
        					      &src.swizzle);
         emit(ir, TGSI_OPCODE_MOV, mat_column, src);

         mat_column.index++;
      }

      this->result = mat;
      return;
   }

   /* Scalar/vector constant.  Before GLSL 1.30 everything is uploaded as
    * float; from 1.30 on the native integer/bool representation is kept.
    */
   src.file = PROGRAM_CONSTANT;
   switch (ir->type->base_type) {
   case GLSL_TYPE_FLOAT:
      gl_type = GL_FLOAT;
      for (i = 0; i < ir->type->vector_elements; i++) {
         values[i].f = ir->value.f[i];
      }
      break;
   case GLSL_TYPE_UINT:
      gl_type = glsl_version >= 130 ? GL_UNSIGNED_INT : GL_FLOAT;
      for (i = 0; i < ir->type->vector_elements; i++) {
         if (glsl_version >= 130)
            values[i].u = ir->value.u[i];
         else
            values[i].f = ir->value.u[i];
      }
      break;
   case GLSL_TYPE_INT:
      gl_type = glsl_version >= 130 ? GL_INT : GL_FLOAT;
      for (i = 0; i < ir->type->vector_elements; i++) {
         if (glsl_version >= 130)
            values[i].i = ir->value.i[i];
         else
            values[i].f = ir->value.i[i];
      }
      break;
   case GLSL_TYPE_BOOL:
      gl_type = glsl_version >= 130 ? GL_BOOL : GL_FLOAT;
      for (i = 0; i < ir->type->vector_elements; i++) {
         if (glsl_version >= 130)
            values[i].b = ir->value.b[i];
         else
            values[i].f = ir->value.b[i];
      }
      break;
   default:
      assert(!"Non-float/uint/int/bool constant");
   }

   this->result = st_src_reg(PROGRAM_CONSTANT, -1, ir->type);
   this->result.index = _mesa_add_typed_unnamed_constant(this->prog->Parameters,
                                                         values, ir->type->vector_elements, gl_type,
                                                         &this->result.swizzle);
}

/**
 * Look up (or lazily create) the function_entry for a called signature,
 * allocating temporary storage for its parameters and, for non-void
 * functions, its return value.
 */
function_entry *
glsl_to_tgsi_visitor::get_function_signature(ir_function_signature *sig)
{
   function_entry *entry;

   foreach_iter(exec_list_iterator, iter, this->function_signatures) {
      entry = (function_entry *)iter.get();

      if (entry->sig == sig)
         return entry;
   }

   entry = ralloc(mem_ctx, function_entry);
   entry->sig = sig;
   entry->sig_id = this->next_signature_id++;
   entry->bgn_inst = NULL;

   /* Allocate storage for all the parameters.
    */
   foreach_iter(exec_list_iterator, iter, sig->parameters) {
      ir_variable *param = (ir_variable *)iter.get();
      variable_storage *storage;

      storage = find_variable_storage(param);
      assert(!storage);

      storage = new(mem_ctx) variable_storage(param, PROGRAM_TEMPORARY,
                                              this->next_temp);
      this->variables.push_tail(storage);

      this->next_temp += type_size(param->type);
   }

   if (!sig->return_type->is_void()) {
      entry->return_reg = get_temp(sig->return_type);
   } else {
      entry->return_reg = undef_src;
   }

   this->function_signatures.push_tail(entry);
   return entry;
}

/**
 * Translate a function call using a subroutine-call convention:
 * copy in/inout actuals into the callee's parameter temporaries, emit
 * CAL, copy out/inout parameter temporaries back to the actuals, and
 * expose the callee's return register as this->result.
 */
void
glsl_to_tgsi_visitor::visit(ir_call *ir)
{
   glsl_to_tgsi_instruction *call_inst;
   ir_function_signature *sig = ir->get_callee();
   function_entry *entry = get_function_signature(sig);
   int i;

   /* Process in parameters. */
   exec_list_iterator sig_iter = sig->parameters.iterator();
   foreach_iter(exec_list_iterator, iter, *ir) {
      /* Walk actual arguments and formal parameters in lockstep. */
      ir_rvalue *param_rval = (ir_rvalue *)iter.get();
      ir_variable *param = (ir_variable *)sig_iter.get();

      if (param->mode == ir_var_in ||
          param->mode == ir_var_inout) {
         variable_storage *storage = find_variable_storage(param);
         assert(storage);

         param_rval->accept(this);
         st_src_reg r = this->result;

         st_dst_reg l;
         l.file = storage->file;
         l.index = storage->index;
         l.reladdr = NULL;
         l.writemask = WRITEMASK_XYZW;
         l.cond_mask = COND_TR;

         for (i = 0; i < type_size(param->type); i++) {
            emit(ir, TGSI_OPCODE_MOV, l, r);
            l.index++;
            r.index++;
         }
      }

      sig_iter.next();
   }
   assert(!sig_iter.has_next());

   /* Emit call instruction */
   call_inst = emit(ir, TGSI_OPCODE_CAL);
   call_inst->function = entry;

   /* Process out parameters.
    */
   sig_iter = sig->parameters.iterator();
   foreach_iter(exec_list_iterator, iter, *ir) {
      ir_rvalue *param_rval = (ir_rvalue *)iter.get();
      ir_variable *param = (ir_variable *)sig_iter.get();

      if (param->mode == ir_var_out ||
          param->mode == ir_var_inout) {
         variable_storage *storage = find_variable_storage(param);
         assert(storage);

         st_src_reg r;
         r.file = storage->file;
         r.index = storage->index;
         r.reladdr = NULL;
         r.swizzle = SWIZZLE_NOOP;
         r.negate = 0;

         param_rval->accept(this);
         st_dst_reg l = st_dst_reg(this->result);

         for (i = 0; i < type_size(param->type); i++) {
            emit(ir, TGSI_OPCODE_MOV, l, r);
            l.index++;
            r.index++;
         }
      }

      sig_iter.next();
   }
   assert(!sig_iter.has_next());

   /* Process return value. */
   this->result = entry->return_reg;
}

/**
 * Translate a texture lookup: build the coordinate in a temp (folding in
 * projection, the shadow comparator and LOD/bias/gradient info as the
 * chosen TEX opcode requires), emit the sample instruction, and record
 * the sampler unit and texture target on it.
 */
void
glsl_to_tgsi_visitor::visit(ir_texture *ir)
{
   st_src_reg result_src, coord, lod_info, projector, dx, dy;
   st_dst_reg result_dst, coord_dst;
   glsl_to_tgsi_instruction *inst = NULL;
   unsigned opcode = TGSI_OPCODE_NOP;

   ir->coordinate->accept(this);

   /* Put our coords in a temp.  We'll need to modify them for shadow,
    * projection, or LOD, so the only case we'd use it as is is if
    * we're doing plain old texturing.  Mesa IR optimization should
    * handle cleaning up our mess in that case.
    */
   coord = get_temp(glsl_type::vec4_type);
   coord_dst = st_dst_reg(coord);
   emit(ir, TGSI_OPCODE_MOV, coord_dst, this->result);

   if (ir->projector) {
      ir->projector->accept(this);
      projector = this->result;
   }

   /* Storage for our result.  Ideally for an assignment we'd be using
    * the actual storage for the result here, instead.
    */
   result_src = get_temp(glsl_type::vec4_type);
   result_dst = st_dst_reg(result_src);

   /* Pick the TGSI opcode and gather any extra LOD/gradient sources. */
   switch (ir->op) {
   case ir_tex:
      opcode = TGSI_OPCODE_TEX;
      break;
   case ir_txb:
      opcode = TGSI_OPCODE_TXB;
      ir->lod_info.bias->accept(this);
      lod_info = this->result;
      break;
   case ir_txl:
      opcode = TGSI_OPCODE_TXL;
      ir->lod_info.lod->accept(this);
      lod_info = this->result;
      break;
   case ir_txd:
      opcode = TGSI_OPCODE_TXD;
      ir->lod_info.grad.dPdx->accept(this);
      dx = this->result;
      ir->lod_info.grad.dPdy->accept(this);
      dy = this->result;
      break;
   case ir_txf: /* TODO: use TGSI_OPCODE_TXF here */
      assert(!"GLSL 1.30 features unsupported");
      break;
   }

   if (ir->projector) {
      if (opcode == TGSI_OPCODE_TEX) {
         /* Slot the projector in as the last component of the coord. */
         coord_dst.writemask = WRITEMASK_W;
         emit(ir, TGSI_OPCODE_MOV, coord_dst, projector);
         coord_dst.writemask = WRITEMASK_XYZW;
         opcode = TGSI_OPCODE_TXP;
      } else {
         st_src_reg coord_w = coord;
         coord_w.swizzle = SWIZZLE_WWWW;

         /* For the other TEX opcodes there's no projective version
          * since the last slot is taken up by LOD info.  Do the
          * projective divide now.
          */
         coord_dst.writemask = WRITEMASK_W;
         emit(ir, TGSI_OPCODE_RCP, coord_dst, projector);

         /* In the case where we have to project the coordinates "by hand,"
          * the shadow comparator value must also be projected.
          */
         st_src_reg tmp_src = coord;
         if (ir->shadow_comparitor) {
            /* Slot the shadow value in as the second to last component of the
             * coord.
             */
            ir->shadow_comparitor->accept(this);

            tmp_src = get_temp(glsl_type::vec4_type);
            st_dst_reg tmp_dst = st_dst_reg(tmp_src);

            tmp_dst.writemask = WRITEMASK_Z;
            emit(ir, TGSI_OPCODE_MOV, tmp_dst, this->result);

            tmp_dst.writemask = WRITEMASK_XY;
            emit(ir, TGSI_OPCODE_MOV, tmp_dst, coord);
         }

         coord_dst.writemask = WRITEMASK_XYZ;
         emit(ir, TGSI_OPCODE_MUL, coord_dst, tmp_src, coord_w);

         coord_dst.writemask = WRITEMASK_XYZW;
         coord.swizzle = SWIZZLE_XYZW;
      }
   }

   /* If projection is done and the opcode is not TGSI_OPCODE_TXP, then the shadow
    * comparator was put in the correct place (and projected) by the code,
    * above, that handles by-hand projection.
    */
   if (ir->shadow_comparitor && (!ir->projector || opcode == TGSI_OPCODE_TXP)) {
      /* Slot the shadow value in as the second to last component of the
       * coord.
       */
      ir->shadow_comparitor->accept(this);
      coord_dst.writemask = WRITEMASK_Z;
      emit(ir, TGSI_OPCODE_MOV, coord_dst, this->result);
      coord_dst.writemask = WRITEMASK_XYZW;
   }

   if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXB) {
      /* TGSI stores LOD or LOD bias in the last channel of the coords. */
      coord_dst.writemask = WRITEMASK_W;
      emit(ir, TGSI_OPCODE_MOV, coord_dst, lod_info);
      coord_dst.writemask = WRITEMASK_XYZW;
   }

   if (opcode == TGSI_OPCODE_TXD)
      inst = emit(ir, opcode, result_dst, coord, dx, dy);
   else
      inst = emit(ir, opcode, result_dst, coord);

   if (ir->shadow_comparitor)
      inst->tex_shadow = GL_TRUE;

   inst->sampler = _mesa_get_sampler_uniform_value(ir->sampler,
        					   this->shader_program,
        					   this->prog);

   const glsl_type *sampler_type = ir->sampler->type;

   /* Map the GLSL sampler dimensionality to a Mesa texture target. */
   switch (sampler_type->sampler_dimensionality) {
   case GLSL_SAMPLER_DIM_1D:
      inst->tex_target = (sampler_type->sampler_array)
         ? TEXTURE_1D_ARRAY_INDEX : TEXTURE_1D_INDEX;
      break;
   case GLSL_SAMPLER_DIM_2D:
      inst->tex_target = (sampler_type->sampler_array)
         ? TEXTURE_2D_ARRAY_INDEX : TEXTURE_2D_INDEX;
      break;
   case GLSL_SAMPLER_DIM_3D:
      inst->tex_target = TEXTURE_3D_INDEX;
      break;
   case GLSL_SAMPLER_DIM_CUBE:
      inst->tex_target = TEXTURE_CUBE_INDEX;
      break;
   case GLSL_SAMPLER_DIM_RECT:
      inst->tex_target = TEXTURE_RECT_INDEX;
      break;
   case GLSL_SAMPLER_DIM_BUF:
      assert(!"FINISHME: Implement ARB_texture_buffer_object");
      break;
   default:
      assert(!"Should not get here.");
   }

   this->result = result_src;
}

/**
 * Translate a return: copy the value (if any) into the current function's
 * return register and emit RET.
 */
void
glsl_to_tgsi_visitor::visit(ir_return *ir)
{
   if (ir->get_value()) {
      st_dst_reg l;
      int i;

      assert(current_function);

      ir->get_value()->accept(this);
      st_src_reg r = this->result;

      l = st_dst_reg(current_function->return_reg);

      for (i = 0; i < type_size(current_function->sig->return_type); i++) {
         emit(ir, TGSI_OPCODE_MOV, l, r);
         l.index++;
         r.index++;
      }
   }

   emit(ir, TGSI_OPCODE_RET);
}

/**
 * Translate discard: conditional discards use KIL on the negated
 * condition (KIL kills channels < 0), unconditional ones use KILP.
 * Either way the fragment program is flagged as using kill.
 */
void
glsl_to_tgsi_visitor::visit(ir_discard *ir)
{
   struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog;

   if (ir->condition) {
      ir->condition->accept(this);
      this->result.negate = ~this->result.negate;
      emit(ir, TGSI_OPCODE_KIL, undef_dst, this->result);
   } else {
      emit(ir, TGSI_OPCODE_KILP);
   }

   fp->UsesKill = GL_TRUE;
}

/* Translate an if statement (continues past the end of this chunk). */
void
glsl_to_tgsi_visitor::visit(ir_if *ir)
{
   glsl_to_tgsi_instruction *cond_inst, *if_inst, *else_inst = NULL;
   glsl_to_tgsi_instruction *prev_inst;

   prev_inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();

   ir->condition->accept(this);
   assert(this->result.file != PROGRAM_UNDEFINED);

   if (this->options->EmitCondCodes) {
      cond_inst =
(glsl_to_tgsi_instruction *)this->instructions.get_tail(); 2377 2378 /* See if we actually generated any instruction for generating 2379 * the condition. If not, then cook up a move to a temp so we 2380 * have something to set cond_update on. 2381 */ 2382 if (cond_inst == prev_inst) { 2383 st_src_reg temp = get_temp(glsl_type::bool_type); 2384 cond_inst = emit(ir->condition, TGSI_OPCODE_MOV, st_dst_reg(temp), result); 2385 } 2386 cond_inst->cond_update = GL_TRUE; 2387 2388 if_inst = emit(ir->condition, TGSI_OPCODE_IF); 2389 if_inst->dst.cond_mask = COND_NE; 2390 } else { 2391 if_inst = emit(ir->condition, TGSI_OPCODE_IF, undef_dst, this->result); 2392 } 2393 2394 this->instructions.push_tail(if_inst); 2395 2396 visit_exec_list(&ir->then_instructions, this); 2397 2398 if (!ir->else_instructions.is_empty()) { 2399 else_inst = emit(ir->condition, TGSI_OPCODE_ELSE); 2400 visit_exec_list(&ir->else_instructions, this); 2401 } 2402 2403 if_inst = emit(ir->condition, TGSI_OPCODE_ENDIF); 2404} 2405 2406glsl_to_tgsi_visitor::glsl_to_tgsi_visitor() 2407{ 2408 result.file = PROGRAM_UNDEFINED; 2409 next_temp = 1; 2410 next_signature_id = 1; 2411 current_function = NULL; 2412 num_address_regs = 0; 2413 indirect_addr_temps = false; 2414 indirect_addr_consts = false; 2415 mem_ctx = ralloc_context(NULL); 2416} 2417 2418glsl_to_tgsi_visitor::~glsl_to_tgsi_visitor() 2419{ 2420 ralloc_free(mem_ctx); 2421} 2422 2423extern "C" void free_glsl_to_tgsi_visitor(glsl_to_tgsi_visitor *v) 2424{ 2425 delete v; 2426} 2427 2428 2429/** 2430 * Count resources used by the given gpu program (number of texture 2431 * samplers, etc). 
2432 */ 2433static void 2434count_resources(glsl_to_tgsi_visitor *v, gl_program *prog) 2435{ 2436 v->samplers_used = 0; 2437 2438 foreach_iter(exec_list_iterator, iter, v->instructions) { 2439 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 2440 2441 if (is_tex_instruction(inst->op)) { 2442 v->samplers_used |= 1 << inst->sampler; 2443 2444 prog->SamplerTargets[inst->sampler] = 2445 (gl_texture_index)inst->tex_target; 2446 if (inst->tex_shadow) { 2447 prog->ShadowSamplers |= 1 << inst->sampler; 2448 } 2449 } 2450 } 2451 2452 prog->SamplersUsed = v->samplers_used; 2453 _mesa_update_shader_textures_used(prog); 2454} 2455 2456 2457/** 2458 * Check if the given vertex/fragment/shader program is within the 2459 * resource limits of the context (number of texture units, etc). 2460 * If any of those checks fail, record a linker error. 2461 * 2462 * XXX more checks are needed... 2463 */ 2464static void 2465check_resources(const struct gl_context *ctx, 2466 struct gl_shader_program *shader_program, 2467 glsl_to_tgsi_visitor *prog, 2468 struct gl_program *proginfo) 2469{ 2470 switch (proginfo->Target) { 2471 case GL_VERTEX_PROGRAM_ARB: 2472 if (_mesa_bitcount(prog->samplers_used) > 2473 ctx->Const.MaxVertexTextureImageUnits) { 2474 fail_link(shader_program, "Too many vertex shader texture samplers"); 2475 } 2476 if (proginfo->Parameters->NumParameters > MAX_UNIFORMS) { 2477 fail_link(shader_program, "Too many vertex shader constants"); 2478 } 2479 break; 2480 case MESA_GEOMETRY_PROGRAM: 2481 if (_mesa_bitcount(prog->samplers_used) > 2482 ctx->Const.MaxGeometryTextureImageUnits) { 2483 fail_link(shader_program, "Too many geometry shader texture samplers"); 2484 } 2485 if (proginfo->Parameters->NumParameters > 2486 MAX_GEOMETRY_UNIFORM_COMPONENTS / 4) { 2487 fail_link(shader_program, "Too many geometry shader constants"); 2488 } 2489 break; 2490 case GL_FRAGMENT_PROGRAM_ARB: 2491 if (_mesa_bitcount(prog->samplers_used) > 2492 
ctx->Const.MaxTextureImageUnits) { 2493 fail_link(shader_program, "Too many fragment shader texture samplers"); 2494 } 2495 if (proginfo->Parameters->NumParameters > MAX_UNIFORMS) { 2496 fail_link(shader_program, "Too many fragment shader constants"); 2497 } 2498 break; 2499 default: 2500 _mesa_problem(ctx, "unexpected program type in check_resources()"); 2501 } 2502} 2503 2504 2505 2506struct uniform_sort { 2507 struct gl_uniform *u; 2508 int pos; 2509}; 2510 2511/* The shader_program->Uniforms list is almost sorted in increasing 2512 * uniform->{Frag,Vert}Pos locations, but not quite when there are 2513 * uniforms shared between targets. We need to add parameters in 2514 * increasing order for the targets. 2515 */ 2516static int 2517sort_uniforms(const void *a, const void *b) 2518{ 2519 struct uniform_sort *u1 = (struct uniform_sort *)a; 2520 struct uniform_sort *u2 = (struct uniform_sort *)b; 2521 2522 return u1->pos - u2->pos; 2523} 2524 2525/* Add the uniforms to the parameters. The linker chose locations 2526 * in our parameters lists (which weren't created yet), which the 2527 * uniforms code will use to poke values into our parameters list 2528 * when uniforms are updated. 
2529 */ 2530static void 2531add_uniforms_to_parameters_list(struct gl_shader_program *shader_program, 2532 struct gl_shader *shader, 2533 struct gl_program *prog) 2534{ 2535 unsigned int i; 2536 unsigned int next_sampler = 0, num_uniforms = 0; 2537 struct uniform_sort *sorted_uniforms; 2538 2539 sorted_uniforms = ralloc_array(NULL, struct uniform_sort, 2540 shader_program->Uniforms->NumUniforms); 2541 2542 for (i = 0; i < shader_program->Uniforms->NumUniforms; i++) { 2543 struct gl_uniform *uniform = shader_program->Uniforms->Uniforms + i; 2544 int parameter_index = -1; 2545 2546 switch (shader->Type) { 2547 case GL_VERTEX_SHADER: 2548 parameter_index = uniform->VertPos; 2549 break; 2550 case GL_FRAGMENT_SHADER: 2551 parameter_index = uniform->FragPos; 2552 break; 2553 case GL_GEOMETRY_SHADER: 2554 parameter_index = uniform->GeomPos; 2555 break; 2556 } 2557 2558 /* Only add uniforms used in our target. */ 2559 if (parameter_index != -1) { 2560 sorted_uniforms[num_uniforms].pos = parameter_index; 2561 sorted_uniforms[num_uniforms].u = uniform; 2562 num_uniforms++; 2563 } 2564 } 2565 2566 qsort(sorted_uniforms, num_uniforms, sizeof(struct uniform_sort), 2567 sort_uniforms); 2568 2569 for (i = 0; i < num_uniforms; i++) { 2570 struct gl_uniform *uniform = sorted_uniforms[i].u; 2571 int parameter_index = sorted_uniforms[i].pos; 2572 const glsl_type *type = uniform->Type; 2573 unsigned int size; 2574 2575 if (type->is_vector() || 2576 type->is_scalar()) { 2577 size = type->vector_elements; 2578 } else { 2579 size = type_size(type) * 4; 2580 } 2581 2582 gl_register_file file; 2583 if (type->is_sampler() || 2584 (type->is_array() && type->fields.array->is_sampler())) { 2585 file = PROGRAM_SAMPLER; 2586 } else { 2587 file = PROGRAM_UNIFORM; 2588 } 2589 2590 GLint index = _mesa_lookup_parameter_index(prog->Parameters, -1, 2591 uniform->Name); 2592 2593 if (index < 0) { 2594 index = _mesa_add_parameter(prog->Parameters, file, 2595 uniform->Name, size, type->gl_type, 2596 
NULL, NULL, 0x0); 2597 2598 /* Sampler uniform values are stored in prog->SamplerUnits, 2599 * and the entry in that array is selected by this index we 2600 * store in ParameterValues[]. 2601 */ 2602 if (file == PROGRAM_SAMPLER) { 2603 for (unsigned int j = 0; j < size / 4; j++) 2604 prog->Parameters->ParameterValues[index + j][0].f = next_sampler++; 2605 } 2606 2607 /* The location chosen in the Parameters list here (returned 2608 * from _mesa_add_uniform) has to match what the linker chose. 2609 */ 2610 if (index != parameter_index) { 2611 fail_link(shader_program, "Allocation of uniform `%s' to target " 2612 "failed (%d vs %d)\n", 2613 uniform->Name, index, parameter_index); 2614 } 2615 } 2616 } 2617 2618 ralloc_free(sorted_uniforms); 2619} 2620 2621static void 2622set_uniform_initializer(struct gl_context *ctx, void *mem_ctx, 2623 struct gl_shader_program *shader_program, 2624 const char *name, const glsl_type *type, 2625 ir_constant *val) 2626{ 2627 if (type->is_record()) { 2628 ir_constant *field_constant; 2629 2630 field_constant = (ir_constant *)val->components.get_head(); 2631 2632 for (unsigned int i = 0; i < type->length; i++) { 2633 const glsl_type *field_type = type->fields.structure[i].type; 2634 const char *field_name = ralloc_asprintf(mem_ctx, "%s.%s", name, 2635 type->fields.structure[i].name); 2636 set_uniform_initializer(ctx, mem_ctx, shader_program, field_name, 2637 field_type, field_constant); 2638 field_constant = (ir_constant *)field_constant->next; 2639 } 2640 return; 2641 } 2642 2643 int loc = _mesa_get_uniform_location(ctx, shader_program, name); 2644 2645 if (loc == -1) { 2646 fail_link(shader_program, 2647 "Couldn't find uniform for initializer %s\n", name); 2648 return; 2649 } 2650 2651 for (unsigned int i = 0; i < (type->is_array() ? 
type->length : 1); i++) { 2652 ir_constant *element; 2653 const glsl_type *element_type; 2654 if (type->is_array()) { 2655 element = val->array_elements[i]; 2656 element_type = type->fields.array; 2657 } else { 2658 element = val; 2659 element_type = type; 2660 } 2661 2662 void *values; 2663 2664 if (element_type->base_type == GLSL_TYPE_BOOL) { 2665 int *conv = ralloc_array(mem_ctx, int, element_type->components()); 2666 for (unsigned int j = 0; j < element_type->components(); j++) { 2667 conv[j] = element->value.b[j]; 2668 } 2669 values = (void *)conv; 2670 element_type = glsl_type::get_instance(GLSL_TYPE_INT, 2671 element_type->vector_elements, 2672 1); 2673 } else { 2674 values = &element->value; 2675 } 2676 2677 if (element_type->is_matrix()) { 2678 _mesa_uniform_matrix(ctx, shader_program, 2679 element_type->matrix_columns, 2680 element_type->vector_elements, 2681 loc, 1, GL_FALSE, (GLfloat *)values); 2682 loc += element_type->matrix_columns; 2683 } else { 2684 _mesa_uniform(ctx, shader_program, loc, element_type->matrix_columns, 2685 values, element_type->gl_type); 2686 loc += type_size(element_type); 2687 } 2688 } 2689} 2690 2691static void 2692set_uniform_initializers(struct gl_context *ctx, 2693 struct gl_shader_program *shader_program) 2694{ 2695 void *mem_ctx = NULL; 2696 2697 for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) { 2698 struct gl_shader *shader = shader_program->_LinkedShaders[i]; 2699 2700 if (shader == NULL) 2701 continue; 2702 2703 foreach_iter(exec_list_iterator, iter, *shader->ir) { 2704 ir_instruction *ir = (ir_instruction *)iter.get(); 2705 ir_variable *var = ir->as_variable(); 2706 2707 if (!var || var->mode != ir_var_uniform || !var->constant_value) 2708 continue; 2709 2710 if (!mem_ctx) 2711 mem_ctx = ralloc_context(NULL); 2712 2713 set_uniform_initializer(ctx, mem_ctx, shader_program, var->name, 2714 var->type, var->constant_value); 2715 } 2716 } 2717 2718 ralloc_free(mem_ctx); 2719} 2720 2721/* 2722 * Scan/rewrite program to 
 * remove reads of custom (output) registers.
 * The passed type has to be either PROGRAM_OUTPUT or PROGRAM_VARYING
 * (for vertex shaders).
 * In GLSL shaders, varying vars can be read and written.
 * On some hardware, trying to read an output register causes trouble.
 * So, rewrite the program to use a temporary register in this case.
 *
 * Based on _mesa_remove_output_reads from programopt.c.
 */
void
glsl_to_tgsi_visitor::remove_output_reads(gl_register_file type)
{
   GLuint i;
   GLint outputMap[VERT_RESULT_MAX];   /* output index -> temp index, or -1 */
   GLint outputTypes[VERT_RESULT_MAX]; /* valid only where outputMap[i] >= 0 */
   GLuint numVaryingReads = 0;
   GLboolean usedTemps[MAX_PROGRAM_TEMPS];
   GLuint firstTemp = 0;

   _mesa_find_used_registers(prog, PROGRAM_TEMPORARY,
                             usedTemps, MAX_PROGRAM_TEMPS);

   assert(type == PROGRAM_VARYING || type == PROGRAM_OUTPUT);
   assert(prog->Target == GL_VERTEX_PROGRAM_ARB || type != PROGRAM_VARYING);

   for (i = 0; i < VERT_RESULT_MAX; i++)
      outputMap[i] = -1;

   /* look for instructions which read from varying vars */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
      const GLuint numSrc = num_inst_src_regs(inst->op);
      GLuint j;
      for (j = 0; j < numSrc; j++) {
         if (inst->src[j].file == type) {
            /* replace the read with a temp reg */
            const GLuint var = inst->src[j].index;
            if (outputMap[var] == -1) {
               /* First read of this output: allocate a free temp for it. */
               numVaryingReads++;
               outputMap[var] = _mesa_find_free_register(usedTemps,
                                                         MAX_PROGRAM_TEMPS,
                                                         firstTemp);
               outputTypes[var] = inst->src[j].type;
               firstTemp = outputMap[var] + 1;
            }
            inst->src[j].file = PROGRAM_TEMPORARY;
            inst->src[j].index = outputMap[var];
         }
      }
   }

   if (numVaryingReads == 0)
      return; /* nothing to be done */

   /* look for instructions which write to the varying vars identified above */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
      if (inst->dst.file == type && outputMap[inst->dst.index] >= 0) {
         /* change inst to write to the temp reg, instead of the varying */
         inst->dst.file = PROGRAM_TEMPORARY;
         inst->dst.index = outputMap[inst->dst.index];
      }
   }

   /* insert new MOV instructions at the end */
   for (i = 0; i < VERT_RESULT_MAX; i++) {
      if (outputMap[i] >= 0) {
         /* MOV VAR[i], TEMP[tmp]; */
         st_src_reg src = st_src_reg(PROGRAM_TEMPORARY, outputMap[i], outputTypes[i]);
         st_dst_reg dst = st_dst_reg(type, WRITEMASK_XYZW, outputTypes[i]);
         dst.index = i;
         this->emit(NULL, TGSI_OPCODE_MOV, dst, src);
      }
   }
}

/**
 * Returns the mask of channels (bitmask of WRITEMASK_X,Y,Z,W) which
 * are read from the given src in this instruction
 */
static int
get_src_arg_mask(st_dst_reg dst, st_src_reg src)
{
   int read_mask = 0, comp;

   /* Now, given the src swizzle and the written channels, find which
    * components are actually read
    */
   for (comp = 0; comp < 4; ++comp) {
      const unsigned coord = GET_SWZ(src.swizzle, comp);
      ASSERT(coord < 4);
      if (dst.writemask & (1 << comp) && coord <= SWIZZLE_W)
         read_mask |= 1 << coord;
   }

   return read_mask;
}

/**
 * This pass replaces CMP T0, T1 T2 T0 with MOV T0, T2 when the CMP
 * instruction is the first instruction to write to register T0.  There are
 * several lowering passes done in GLSL IR (e.g. branches and
 * relative addressing) that create a large number of conditional assignments
 * that ir_to_mesa converts to CMP instructions like the one mentioned above.
 *
 * Here is why this conversion is safe:
 * CMP T0, T1 T2 T0 can be expanded to:
 * if (T1 < 0.0)
 *	MOV T0, T2;
 * else
 *	MOV T0, T0;
 *
 * If (T1 < 0.0) evaluates to true then our replacement MOV T0, T2 is the same
 * as the original program.  If (T1 < 0.0) evaluates to false, executing
 * MOV T0, T0 will store a garbage value in T0 since T0 is uninitialized.
 * Therefore, it doesn't matter that we are replacing MOV T0, T0 with MOV T0, T2
 * because any instruction that was going to read from T0 after this was going
 * to read a garbage value anyway.
 */
void
glsl_to_tgsi_visitor::simplify_cmp(void)
{
   /* Per-register bitmask of channels already written by some earlier
    * instruction in the program.
    */
   unsigned tempWrites[MAX_PROGRAM_TEMPS];
   unsigned outputWrites[MAX_PROGRAM_OUTPUTS];

   memset(tempWrites, 0, sizeof(tempWrites));
   memset(outputWrites, 0, sizeof(outputWrites));

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
      unsigned prevWriteMask = 0;

      /* Give up if we encounter relative addressing or flow control. */
      if (inst->dst.reladdr ||
          tgsi_get_opcode_info(inst->op)->is_branch ||
          inst->op == TGSI_OPCODE_BGNSUB ||
          inst->op == TGSI_OPCODE_CONT ||
          inst->op == TGSI_OPCODE_END ||
          inst->op == TGSI_OPCODE_ENDSUB ||
          inst->op == TGSI_OPCODE_RET) {
         return;
      }

      if (inst->dst.file == PROGRAM_OUTPUT) {
         assert(inst->dst.index < MAX_PROGRAM_OUTPUTS);
         prevWriteMask = outputWrites[inst->dst.index];
         outputWrites[inst->dst.index] |= inst->dst.writemask;
      } else if (inst->dst.file == PROGRAM_TEMPORARY) {
         assert(inst->dst.index < MAX_PROGRAM_TEMPS);
         prevWriteMask = tempWrites[inst->dst.index];
         tempWrites[inst->dst.index] |= inst->dst.writemask;
      }

      /* For a CMP to be considered a conditional write, the destination
       * register and source register two must be the same.
       */
      if (inst->op == TGSI_OPCODE_CMP
          && !(inst->dst.writemask & prevWriteMask)
          && inst->src[2].file == inst->dst.file
          && inst->src[2].index == inst->dst.index
          && inst->dst.writemask == get_src_arg_mask(inst->dst, inst->src[2])) {

         inst->op = TGSI_OPCODE_MOV;
         inst->src[0] = inst->src[1];
      }
   }
}

/* Replaces all references to a temporary register index with another index. */
void
glsl_to_tgsi_visitor::rename_temp_register(int index, int new_index)
{
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();
      unsigned j;

      for (j=0; j < num_inst_src_regs(inst->op); j++) {
         if (inst->src[j].file == PROGRAM_TEMPORARY &&
             inst->src[j].index == index) {
            inst->src[j].index = new_index;
         }
      }

      if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index) {
         inst->dst.index = new_index;
      }
   }
}

/**
 * Returns the index of the first instruction that reads temporary register
 * `index`, or -1 if it is never read.  If the first read happens inside a
 * loop, the index of the outermost enclosing BGNLOOP is returned instead.
 */
int
glsl_to_tgsi_visitor::get_first_temp_read(int index)
{
   int depth = 0; /* loop depth */
   int loop_start = -1; /* index of the first active BGNLOOP (if any) */
   /* NOTE(review): i is unsigned while loop_start and the return type are
    * int; the ?: below converts loop_start through unsigned — works in
    * practice but the mixed signedness is fragile.
    */
   unsigned i = 0, j;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();

      for (j=0; j < num_inst_src_regs(inst->op); j++) {
         if (inst->src[j].file == PROGRAM_TEMPORARY &&
             inst->src[j].index == index) {
            return (depth == 0) ? i : loop_start;
         }
      }

      if (inst->op == TGSI_OPCODE_BGNLOOP) {
         if(depth++ == 0)
            loop_start = i;
      } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
         if (--depth == 0)
            loop_start = -1;
      }
      assert(depth >= 0);

      i++;
   }

   return -1;
}

/**
 * Returns the index of the first instruction that writes temporary register
 * `index`, or -1 if it is never written.  A write inside a loop reports the
 * index of the outermost enclosing BGNLOOP.
 */
int
glsl_to_tgsi_visitor::get_first_temp_write(int index)
{
   int depth = 0; /* loop depth */
   int loop_start = -1; /* index of the first active BGNLOOP (if any) */
   int i = 0;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();

      if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index) {
         return (depth == 0) ? i : loop_start;
      }

      if (inst->op == TGSI_OPCODE_BGNLOOP) {
         if(depth++ == 0)
            loop_start = i;
      } else if (inst->op == TGSI_OPCODE_ENDLOOP) {
         if (--depth == 0)
            loop_start = -1;
      }
      assert(depth >= 0);

      i++;
   }

   return -1;
}

/**
 * Returns the index of the last instruction that reads temporary register
 * `index`, or -1 if it is never read.  A read inside a loop is extended to
 * the matching outermost ENDLOOP (tracked via the -2 sentinel), since the
 * loop body may execute again after the read.
 */
int
glsl_to_tgsi_visitor::get_last_temp_read(int index)
{
   int depth = 0; /* loop depth */
   int last = -1; /* index of last instruction that reads the temporary */
   unsigned i = 0, j;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();

      for (j=0; j < num_inst_src_regs(inst->op); j++) {
         if (inst->src[j].file == PROGRAM_TEMPORARY &&
             inst->src[j].index == index) {
            last = (depth == 0) ? i : -2;
         }
      }

      if (inst->op == TGSI_OPCODE_BGNLOOP)
         depth++;
      else if (inst->op == TGSI_OPCODE_ENDLOOP)
         if (--depth == 0 && last == -2)
            last = i;
      assert(depth >= 0);

      i++;
   }

   assert(last >= -1);
   return last;
}

/**
 * Returns the index of the last instruction that writes temporary register
 * `index`, or -1 if it is never written.  Writes inside a loop extend to
 * the matching outermost ENDLOOP, mirroring get_last_temp_read().
 */
int
glsl_to_tgsi_visitor::get_last_temp_write(int index)
{
   int depth = 0; /* loop depth */
   int last = -1; /* index of last instruction that writes to the temporary */
   int i = 0;

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();

      if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index)
         last = (depth == 0) ? i : -2;

      if (inst->op == TGSI_OPCODE_BGNLOOP)
         depth++;
      else if (inst->op == TGSI_OPCODE_ENDLOOP)
         if (--depth == 0 && last == -2)
            last = i;
      assert(depth >= 0);

      i++;
   }

   assert(last >= -1);
   return last;
}

/*
 * On a basic block basis, tracks available PROGRAM_TEMPORARY register
 * channels for copy propagation and updates following instructions to
 * use the original versions.
 *
 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
 * will occur.  As an example, a TXP production before this pass:
 *
 * 0: MOV TEMP[1], INPUT[4].xyyy;
 * 1: MOV TEMP[1].w, INPUT[4].wwww;
 * 2: TXP TEMP[2], TEMP[1], texture[0], 2D;
 *
 * and after:
 *
 * 0: MOV TEMP[1], INPUT[4].xyyy;
 * 1: MOV TEMP[1].w, INPUT[4].wwww;
 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
 *
 * which allows for dead code elimination on TEMP[1]'s writes.
3048 */ 3049void 3050glsl_to_tgsi_visitor::copy_propagate(void) 3051{ 3052 glsl_to_tgsi_instruction **acp = rzalloc_array(mem_ctx, 3053 glsl_to_tgsi_instruction *, 3054 this->next_temp * 4); 3055 int *acp_level = rzalloc_array(mem_ctx, int, this->next_temp * 4); 3056 int level = 0; 3057 3058 foreach_iter(exec_list_iterator, iter, this->instructions) { 3059 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3060 3061 assert(inst->dst.file != PROGRAM_TEMPORARY 3062 || inst->dst.index < this->next_temp); 3063 3064 /* First, do any copy propagation possible into the src regs. */ 3065 for (int r = 0; r < 3; r++) { 3066 glsl_to_tgsi_instruction *first = NULL; 3067 bool good = true; 3068 int acp_base = inst->src[r].index * 4; 3069 3070 if (inst->src[r].file != PROGRAM_TEMPORARY || 3071 inst->src[r].reladdr) 3072 continue; 3073 3074 /* See if we can find entries in the ACP consisting of MOVs 3075 * from the same src register for all the swizzled channels 3076 * of this src register reference. 3077 */ 3078 for (int i = 0; i < 4; i++) { 3079 int src_chan = GET_SWZ(inst->src[r].swizzle, i); 3080 glsl_to_tgsi_instruction *copy_chan = acp[acp_base + src_chan]; 3081 3082 if (!copy_chan) { 3083 good = false; 3084 break; 3085 } 3086 3087 assert(acp_level[acp_base + src_chan] <= level); 3088 3089 if (!first) { 3090 first = copy_chan; 3091 } else { 3092 if (first->src[0].file != copy_chan->src[0].file || 3093 first->src[0].index != copy_chan->src[0].index) { 3094 good = false; 3095 break; 3096 } 3097 } 3098 } 3099 3100 if (good) { 3101 /* We've now validated that we can copy-propagate to 3102 * replace this src register reference. Do it. 
3103 */ 3104 inst->src[r].file = first->src[0].file; 3105 inst->src[r].index = first->src[0].index; 3106 3107 int swizzle = 0; 3108 for (int i = 0; i < 4; i++) { 3109 int src_chan = GET_SWZ(inst->src[r].swizzle, i); 3110 glsl_to_tgsi_instruction *copy_inst = acp[acp_base + src_chan]; 3111 swizzle |= (GET_SWZ(copy_inst->src[0].swizzle, src_chan) << 3112 (3 * i)); 3113 } 3114 inst->src[r].swizzle = swizzle; 3115 } 3116 } 3117 3118 switch (inst->op) { 3119 case TGSI_OPCODE_BGNLOOP: 3120 case TGSI_OPCODE_ENDLOOP: 3121 /* End of a basic block, clear the ACP entirely. */ 3122 memset(acp, 0, sizeof(*acp) * this->next_temp * 4); 3123 break; 3124 3125 case TGSI_OPCODE_IF: 3126 ++level; 3127 break; 3128 3129 case TGSI_OPCODE_ENDIF: 3130 case TGSI_OPCODE_ELSE: 3131 /* Clear all channels written inside the block from the ACP, but 3132 * leaving those that were not touched. 3133 */ 3134 for (int r = 0; r < this->next_temp; r++) { 3135 for (int c = 0; c < 4; c++) { 3136 if (!acp[4 * r + c]) 3137 continue; 3138 3139 if (acp_level[4 * r + c] >= level) 3140 acp[4 * r + c] = NULL; 3141 } 3142 } 3143 if (inst->op == TGSI_OPCODE_ENDIF) 3144 --level; 3145 break; 3146 3147 default: 3148 /* Continuing the block, clear any written channels from 3149 * the ACP. 3150 */ 3151 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.reladdr) { 3152 /* Any temporary might be written, so no copy propagation 3153 * across this instruction. 3154 */ 3155 memset(acp, 0, sizeof(*acp) * this->next_temp * 4); 3156 } else if (inst->dst.file == PROGRAM_OUTPUT && 3157 inst->dst.reladdr) { 3158 /* Any output might be written, so no copy propagation 3159 * from outputs across this instruction. 
3160 */ 3161 for (int r = 0; r < this->next_temp; r++) { 3162 for (int c = 0; c < 4; c++) { 3163 if (!acp[4 * r + c]) 3164 continue; 3165 3166 if (acp[4 * r + c]->src[0].file == PROGRAM_OUTPUT) 3167 acp[4 * r + c] = NULL; 3168 } 3169 } 3170 } else if (inst->dst.file == PROGRAM_TEMPORARY || 3171 inst->dst.file == PROGRAM_OUTPUT) { 3172 /* Clear where it's used as dst. */ 3173 if (inst->dst.file == PROGRAM_TEMPORARY) { 3174 for (int c = 0; c < 4; c++) { 3175 if (inst->dst.writemask & (1 << c)) { 3176 acp[4 * inst->dst.index + c] = NULL; 3177 } 3178 } 3179 } 3180 3181 /* Clear where it's used as src. */ 3182 for (int r = 0; r < this->next_temp; r++) { 3183 for (int c = 0; c < 4; c++) { 3184 if (!acp[4 * r + c]) 3185 continue; 3186 3187 int src_chan = GET_SWZ(acp[4 * r + c]->src[0].swizzle, c); 3188 3189 if (acp[4 * r + c]->src[0].file == inst->dst.file && 3190 acp[4 * r + c]->src[0].index == inst->dst.index && 3191 inst->dst.writemask & (1 << src_chan)) 3192 { 3193 acp[4 * r + c] = NULL; 3194 } 3195 } 3196 } 3197 } 3198 break; 3199 } 3200 3201 /* If this is a copy, add it to the ACP. */ 3202 if (inst->op == TGSI_OPCODE_MOV && 3203 inst->dst.file == PROGRAM_TEMPORARY && 3204 !inst->dst.reladdr && 3205 !inst->saturate && 3206 !inst->src[0].reladdr && 3207 !inst->src[0].negate) { 3208 for (int i = 0; i < 4; i++) { 3209 if (inst->dst.writemask & (1 << i)) { 3210 acp[4 * inst->dst.index + i] = inst; 3211 acp_level[4 * inst->dst.index + i] = level; 3212 } 3213 } 3214 } 3215 } 3216 3217 ralloc_free(acp_level); 3218 ralloc_free(acp); 3219} 3220 3221/* 3222 * Tracks available PROGRAM_TEMPORARY registers for dead code elimination. 3223 * 3224 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass 3225 * will occur. 
 * As an example, a TXP production after copy propagation but
 * before this pass:
 *
 * 0: MOV TEMP[1], INPUT[4].xyyy;
 * 1: MOV TEMP[1].w, INPUT[4].wwww;
 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
 *
 * and after this pass:
 *
 * 0: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D;
 *
 * FIXME: assumes that all functions are inlined (no support for BGNSUB/ENDSUB)
 * FIXME: doesn't eliminate all dead code inside of loops; it steps around them
 */
void
glsl_to_tgsi_visitor::eliminate_dead_code(void)
{
   int i;

   /* For each temporary, remove every write that comes after the last
    * instruction reading it (as reported by get_last_temp_read, which
    * already extends reads inside loops to the enclosing ENDLOOP).
    * O(next_temp * instructions), quadratic but simple.
    */
   for (i=0; i < this->next_temp; i++) {
      int last_read = get_last_temp_read(i);
      int j = 0;

      foreach_iter(exec_list_iterator, iter, this->instructions) {
         glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();

         if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == i &&
             j > last_read)
         {
            /* Dead write: unlink from the list and free the instruction. */
            iter.remove();
            delete inst;
         }

         j++;
      }
   }
}

/*
 * On a basic block basis, tracks available PROGRAM_TEMPORARY registers for dead
 * code elimination.  This is less primitive than eliminate_dead_code(), as it
 * is per-channel and can detect consecutive writes without a read between them
 * as dead code.  However, there is some dead code that can be eliminated by
 * eliminate_dead_code() but not this function - for example, this function
 * cannot eliminate an instruction writing to a register that is never read and
 * is the only instruction writing to that register.
 *
 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass
 * will occur.
 */
int
glsl_to_tgsi_visitor::eliminate_dead_code_advanced(void)
{
   /* writes[4 * temp + chan] holds, per channel, the most recent write to
    * that channel that has not yet been read within the current basic
    * block.  write_level[] records the IF-nesting depth at which that
    * write happened.
    */
   glsl_to_tgsi_instruction **writes = rzalloc_array(mem_ctx,
                                                     glsl_to_tgsi_instruction *,
                                                     this->next_temp * 4);
   int *write_level = rzalloc_array(mem_ctx, int, this->next_temp * 4);
   int level = 0;    /* current IF/ENDIF nesting depth */
   int removed = 0;  /* count of deleted instructions, returned to caller */

   foreach_iter(exec_list_iterator, iter, this->instructions) {
      glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();

      assert(inst->dst.file != PROGRAM_TEMPORARY
             || inst->dst.index < this->next_temp);

      switch (inst->op) {
      case TGSI_OPCODE_BGNLOOP:
      case TGSI_OPCODE_ENDLOOP:
         /* End of a basic block, clear the write array entirely.
          * FIXME: This keeps us from killing dead code when the writes are
          * on either side of a loop, even when the register isn't touched
          * inside the loop.
          */
         memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
         break;

      case TGSI_OPCODE_IF:
         ++level;
         break;

      case TGSI_OPCODE_ENDIF:
         --level;
         break;

      case TGSI_OPCODE_ELSE:
         /* Clear all channels written inside the preceding if block from the
          * write array, but leave those that were not touched.
          *
          * FIXME: This destroys opportunities to remove dead code inside of
          * IF blocks that are followed by an ELSE block.
          */
         for (int r = 0; r < this->next_temp; r++) {
            for (int c = 0; c < 4; c++) {
               if (!writes[4 * r + c])
                  continue;

               if (write_level[4 * r + c] >= level)
                  writes[4 * r + c] = NULL;
            }
         }
         break;

      default:
         /* Continuing the block, clear any channels from the write array that
          * are read by this instruction.
          */
         for (int i = 0; i < 4; i++) {
            if (inst->src[i].file == PROGRAM_TEMPORARY && inst->src[i].reladdr){
               /* Any temporary might be read, so no dead code elimination
                * across this instruction.
                */
               memset(writes, 0, sizeof(*writes) * this->next_temp * 4);
            } else if (inst->src[i].file == PROGRAM_TEMPORARY) {
               /* Clear where it's used as src.  Collect the set of channels
                * reachable through the source swizzle first.
                */
               int src_chans = 1 << GET_SWZ(inst->src[i].swizzle, 0);
               src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 1);
               src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 2);
               src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 3);

               for (int c = 0; c < 4; c++) {
                  if (src_chans & (1 << c)) {
                     writes[4 * inst->src[i].index + c] = NULL;
                  }
               }
            }
         }
         break;
      }

      /* If this instruction writes to a temporary, add it to the write array.
       * If there is already an instruction in the write array for one or more
       * of the channels, flag that channel write as dead.
       */
      if (inst->dst.file == PROGRAM_TEMPORARY &&
          !inst->dst.reladdr &&
          !inst->saturate) {
         for (int c = 0; c < 4; c++) {
            if (inst->dst.writemask & (1 << c)) {
               if (writes[4 * inst->dst.index + c]) {
                  /* Only kill the earlier write if it happened at the same
                   * or deeper conditional level; a write from an outer
                   * level may still be live on another branch.
                   */
                  if (write_level[4 * inst->dst.index + c] < level)
                     continue;
                  else
                     writes[4 * inst->dst.index + c]->dead_mask |= (1 << c);
               }
               writes[4 * inst->dst.index + c] = inst;
               write_level[4 * inst->dst.index + c] = level;
            }
         }
      }
   }

   /* Now actually remove the instructions that are completely dead and update
    * the writemask of other instructions with dead channels.
    */
   foreach_iter(exec_list_iterator, iter, this->instructions) {
      glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get();

      if (!inst->dead_mask || !inst->dst.writemask)
         continue;
      else if (inst->dead_mask == inst->dst.writemask) {
         /* Every written channel is dead: delete the whole instruction. */
         iter.remove();
         delete inst;
         removed++;
      } else
         /* Only some channels are dead: just stop writing them. */
         inst->dst.writemask &= ~(inst->dead_mask);
   }

   ralloc_free(write_level);
   ralloc_free(writes);

   return removed;
}

/* Merges temporary registers together where possible to reduce the number of
 * registers needed to run a program.
 *
 * Produces optimal code only after copy propagation and dead code elimination
 * have been run. */
void
glsl_to_tgsi_visitor::merge_registers(void)
{
   int *last_reads = rzalloc_array(mem_ctx, int, this->next_temp);
   int *first_writes = rzalloc_array(mem_ctx, int, this->next_temp);
   int i, j;

   /* Read the indices of the last read and first write to each temp register
    * into an array so that we don't have to traverse the instruction list as
    * much. */
   for (i=0; i < this->next_temp; i++) {
      last_reads[i] = get_last_temp_read(i);
      first_writes[i] = get_first_temp_write(i);
   }

   /* Start looking for registers with non-overlapping usages that can be
    * merged together. */
   for (i=0; i < this->next_temp; i++) {
      /* Don't touch unused registers. */
      if (last_reads[i] < 0 || first_writes[i] < 0) continue;

      for (j=0; j < this->next_temp; j++) {
         /* Don't touch unused registers. */
         if (last_reads[j] < 0 || first_writes[j] < 0) continue;

         /* We can merge the two registers if the first write to j is after or
          * in the same instruction as the last read from i. Note that the
          * register at index i will always be used earlier or at the same
          * time as the register at index j.
          */
         if (first_writes[i] <= first_writes[j] &&
             last_reads[i] <= first_writes[j])
         {
            rename_temp_register(j, i); /* Replace all references to j with i.*/

            /* Update the first_writes and last_reads arrays with the new
             * values for the merged register index, and mark the newly unused
             * register index as such. */
            last_reads[i] = last_reads[j];
            first_writes[j] = -1;
            last_reads[j] = -1;
         }
      }
   }

   ralloc_free(last_reads);
   ralloc_free(first_writes);
}

/* Reassign indices to temporary registers by reusing unused indices created
 * by optimization passes. */
void
glsl_to_tgsi_visitor::renumber_registers(void)
{
   int i = 0;
   int new_index = 0;

   for (i=0; i < this->next_temp; i++) {
      /* Temporaries that are never read are left behind by the passes
       * above; compact the remaining ones into a dense index range.
       */
      if (get_first_temp_read(i) < 0) continue;
      if (i != new_index)
         rename_temp_register(i, new_index);
      new_index++;
   }

   this->next_temp = new_index;
}

/* ------------------------- TGSI conversion stuff -------------------------- */

/* A branch instruction (e.g. CAL) whose TGSI label token must be patched
 * once the TGSI instruction number of its target is known.
 */
struct label {
   unsigned branch_target;  /* Mesa instruction index of the branch target */
   unsigned token;          /* TGSI token to patch via ureg_fixup_label() */
};

/**
 * Intermediate state used during shader translation.
 */
struct st_translate {
   struct ureg_program *ureg;

   /* Translated registers; temps are declared lazily on first use. */
   struct ureg_dst temps[MAX_PROGRAM_TEMPS];
   struct ureg_src *constants;   /* CALLOC'd, one slot per Mesa parameter */
   struct ureg_dst outputs[PIPE_MAX_SHADER_OUTPUTS];
   struct ureg_src inputs[PIPE_MAX_SHADER_INPUTS];
   struct ureg_dst address[1];   /* at most one address register is used */
   struct ureg_src samplers[PIPE_MAX_SAMPLERS];
   struct ureg_src systemValues[SYSTEM_VALUE_MAX];

   /* Extra info for handling point size clamping in vertex shader */
   struct ureg_dst pointSizeResult; /**< Actual point size output register */
   struct ureg_src pointSizeConst;  /**< Point size range constant register */
   GLint pointSizeOutIndex;         /**< Temp point size output register */
   GLboolean prevInstWrotePointSize;

   const GLuint *inputMapping;
   const GLuint *outputMapping;

   /* For every instruction that contains a label (eg CALL), keep
    * details so that we can go back afterwards and emit the correct
    * tgsi instruction number for each label.
    */
   struct label *labels;
   unsigned labels_size;    /* allocated capacity of labels[] */
   unsigned labels_count;   /* number of entries in use */

   /* Keep a record of the tgsi instruction number that each mesa
    * instruction starts at, will be used to fix up labels after
    * translation.
    */
   unsigned *insn;
   unsigned insn_size;      /* allocated capacity of insn[] */
   unsigned insn_count;     /* number of entries in use */

   unsigned procType;  /**< TGSI_PROCESSOR_VERTEX/FRAGMENT */

   boolean error;      /* set on allocation failure; checked by caller */
};

/** Map Mesa's SYSTEM_VALUE_x to TGSI_SEMANTIC_x */
static unsigned mesa_sysval_to_semantic[SYSTEM_VALUE_MAX] = {
   TGSI_SEMANTIC_FACE,
   TGSI_SEMANTIC_INSTANCEID
};

/**
 * Make note of a branch to a label in the TGSI code.
 * After we've emitted all instructions, we'll go over the list
 * of labels built here and patch the TGSI code with the actual
 * location of each label.
3531 */ 3532static unsigned *get_label( struct st_translate *t, 3533 unsigned branch_target ) 3534{ 3535 unsigned i; 3536 3537 if (t->labels_count + 1 >= t->labels_size) { 3538 t->labels_size = 1 << (util_logbase2(t->labels_size) + 1); 3539 t->labels = (struct label *)realloc(t->labels, 3540 t->labels_size * sizeof t->labels[0]); 3541 if (t->labels == NULL) { 3542 static unsigned dummy; 3543 t->error = TRUE; 3544 return &dummy; 3545 } 3546 } 3547 3548 i = t->labels_count++; 3549 t->labels[i].branch_target = branch_target; 3550 return &t->labels[i].token; 3551} 3552 3553/** 3554 * Called prior to emitting the TGSI code for each Mesa instruction. 3555 * Allocate additional space for instructions if needed. 3556 * Update the insn[] array so the next Mesa instruction points to 3557 * the next TGSI instruction. 3558 */ 3559static void set_insn_start( struct st_translate *t, 3560 unsigned start ) 3561{ 3562 if (t->insn_count + 1 >= t->insn_size) { 3563 t->insn_size = 1 << (util_logbase2(t->insn_size) + 1); 3564 t->insn = (unsigned *)realloc(t->insn, t->insn_size * sizeof t->insn[0]); 3565 if (t->insn == NULL) { 3566 t->error = TRUE; 3567 return; 3568 } 3569 } 3570 3571 t->insn[t->insn_count++] = start; 3572} 3573 3574/** 3575 * Map a Mesa dst register to a TGSI ureg_dst register. 
3576 */ 3577static struct ureg_dst 3578dst_register( struct st_translate *t, 3579 gl_register_file file, 3580 GLuint index ) 3581{ 3582 switch( file ) { 3583 case PROGRAM_UNDEFINED: 3584 return ureg_dst_undef(); 3585 3586 case PROGRAM_TEMPORARY: 3587 if (ureg_dst_is_undef(t->temps[index])) 3588 t->temps[index] = ureg_DECL_temporary( t->ureg ); 3589 3590 return t->temps[index]; 3591 3592 case PROGRAM_OUTPUT: 3593 if (t->procType == TGSI_PROCESSOR_VERTEX && index == VERT_RESULT_PSIZ) 3594 t->prevInstWrotePointSize = GL_TRUE; 3595 3596 if (t->procType == TGSI_PROCESSOR_VERTEX) 3597 assert(index < VERT_RESULT_MAX); 3598 else if (t->procType == TGSI_PROCESSOR_FRAGMENT) 3599 assert(index < FRAG_RESULT_MAX); 3600 else 3601 assert(index < GEOM_RESULT_MAX); 3602 3603 assert(t->outputMapping[index] < Elements(t->outputs)); 3604 3605 return t->outputs[t->outputMapping[index]]; 3606 3607 case PROGRAM_ADDRESS: 3608 return t->address[index]; 3609 3610 default: 3611 debug_assert( 0 ); 3612 return ureg_dst_undef(); 3613 } 3614} 3615 3616/** 3617 * Map a Mesa src register to a TGSI ureg_src register. 
3618 */ 3619static struct ureg_src 3620src_register( struct st_translate *t, 3621 gl_register_file file, 3622 GLuint index ) 3623{ 3624 switch( file ) { 3625 case PROGRAM_UNDEFINED: 3626 return ureg_src_undef(); 3627 3628 case PROGRAM_TEMPORARY: 3629 assert(index >= 0); 3630 assert(index < Elements(t->temps)); 3631 if (ureg_dst_is_undef(t->temps[index])) 3632 t->temps[index] = ureg_DECL_temporary( t->ureg ); 3633 return ureg_src(t->temps[index]); 3634 3635 case PROGRAM_NAMED_PARAM: 3636 case PROGRAM_ENV_PARAM: 3637 case PROGRAM_LOCAL_PARAM: 3638 case PROGRAM_UNIFORM: 3639 assert(index >= 0); 3640 return t->constants[index]; 3641 case PROGRAM_STATE_VAR: 3642 case PROGRAM_CONSTANT: /* ie, immediate */ 3643 if (index < 0) 3644 return ureg_DECL_constant( t->ureg, 0 ); 3645 else 3646 return t->constants[index]; 3647 3648 case PROGRAM_INPUT: 3649 assert(t->inputMapping[index] < Elements(t->inputs)); 3650 return t->inputs[t->inputMapping[index]]; 3651 3652 case PROGRAM_OUTPUT: 3653 assert(t->outputMapping[index] < Elements(t->outputs)); 3654 return ureg_src(t->outputs[t->outputMapping[index]]); /* not needed? */ 3655 3656 case PROGRAM_ADDRESS: 3657 return ureg_src(t->address[index]); 3658 3659 case PROGRAM_SYSTEM_VALUE: 3660 assert(index < Elements(t->systemValues)); 3661 return t->systemValues[index]; 3662 3663 default: 3664 debug_assert( 0 ); 3665 return ureg_src_undef(); 3666 } 3667} 3668 3669/** 3670 * Create a TGSI ureg_dst register from an st_dst_reg. 
 */
static struct ureg_dst
translate_dst( struct st_translate *t,
               const st_dst_reg *dst_reg,
               boolean saturate )
{
   struct ureg_dst dst = dst_register( t,
                                       dst_reg->file,
                                       dst_reg->index );

   dst = ureg_writemask( dst,
                         dst_reg->writemask );

   if (saturate)
      dst = ureg_saturate( dst );

   /* Indirect destinations always go through the single address reg. */
   if (dst_reg->reladdr != NULL)
      dst = ureg_dst_indirect( dst, ureg_src(t->address[0]) );

   return dst;
}

/**
 * Create a TGSI ureg_src register from an st_src_reg.
 */
static struct ureg_src
translate_src( struct st_translate *t,
               const st_src_reg *src_reg )
{
   struct ureg_src src = src_register( t, src_reg->file, src_reg->index );

   src = ureg_swizzle( src,
                       GET_SWZ( src_reg->swizzle, 0 ) & 0x3,
                       GET_SWZ( src_reg->swizzle, 1 ) & 0x3,
                       GET_SWZ( src_reg->swizzle, 2 ) & 0x3,
                       GET_SWZ( src_reg->swizzle, 3 ) & 0x3);

   if ((src_reg->negate & 0xf) == NEGATE_XYZW)
      src = ureg_negate(src);

   if (src_reg->reladdr != NULL) {
      /* Normally ureg_src_indirect() would be used here, but a stupid compiler
       * bug in g++ makes ureg_src_indirect (an inline C function) erroneously
       * set the bit for src.Negate. So we have to do the operation manually
       * here to work around the compiler's problems. */
      /*src = ureg_src_indirect(src, ureg_src(t->address[0]));*/
      struct ureg_src addr = ureg_src(t->address[0]);
      src.Indirect = 1;
      src.IndirectFile = addr.File;
      src.IndirectIndex = addr.Index;
      src.IndirectSwizzle = addr.SwizzleX;

      if (src_reg->file != PROGRAM_INPUT &&
          src_reg->file != PROGRAM_OUTPUT) {
         /* If src_reg->index was negative, it was set to zero in
          * src_register(). Reassign it now. But don't do this
          * for input/output regs since they get remapped while
          * const buffers don't.
          */
         src.Index = src_reg->index;
      }
   }

   return src;
}

/**
 * Emit one glsl_to_tgsi_instruction as TGSI via the ureg interface.
 */
static void
compile_tgsi_instruction(struct st_translate *t,
                         const struct glsl_to_tgsi_instruction *inst)
{
   struct ureg_program *ureg = t->ureg;
   GLuint i;
   struct ureg_dst dst[1];
   struct ureg_src src[4];
   unsigned num_dst;
   unsigned num_src;

   num_dst = num_inst_dst_regs( inst->op );
   num_src = num_inst_src_regs( inst->op );

   if (num_dst)
      dst[0] = translate_dst( t,
                              &inst->dst,
                              inst->saturate);

   for (i = 0; i < num_src; i++)
      src[i] = translate_src( t, &inst->src[i] );

   switch( inst->op ) {
   case TGSI_OPCODE_BGNLOOP:
   case TGSI_OPCODE_CAL:
   case TGSI_OPCODE_ELSE:
   case TGSI_OPCODE_ENDLOOP:
   case TGSI_OPCODE_IF:
      debug_assert(num_dst == 0);
      /* Flow-control instructions carry a label token that is patched to
       * the real TGSI instruction number after translation (get_label()).
       */
      ureg_label_insn( ureg,
                       inst->op,
                       src, num_src,
                       get_label( t,
                                  inst->op == TGSI_OPCODE_CAL ? inst->function->sig_id : 0 ));
      return;

   case TGSI_OPCODE_TEX:
   case TGSI_OPCODE_TXB:
   case TGSI_OPCODE_TXD:
   case TGSI_OPCODE_TXL:
   case TGSI_OPCODE_TXP:
      /* Texture instructions take the sampler as an extra trailing source. */
      src[num_src++] = t->samplers[inst->sampler];
      ureg_tex_insn( ureg,
                     inst->op,
                     dst, num_dst,
                     translate_texture_target( inst->tex_target,
                                               inst->tex_shadow ),
                     src, num_src );
      return;

   case TGSI_OPCODE_SCS:
      /* SCS only defines the .xy channels of its destination. */
      dst[0] = ureg_writemask(dst[0], TGSI_WRITEMASK_XY );
      ureg_insn( ureg,
                 inst->op,
                 dst, num_dst,
                 src, num_src );
      break;

   default:
      ureg_insn( ureg,
                 inst->op,
                 dst, num_dst,
                 src, num_src );
      break;
   }
}

/**
 * Emit the TGSI instructions to adjust the WPOS pixel center convention
 * Basically, add (adjX, adjY) to the fragment position.
 */
static void
emit_adjusted_wpos( struct st_translate *t,
                    const struct gl_program *program,
                    GLfloat adjX, GLfloat adjY)
{
   struct ureg_program *ureg = t->ureg;
   struct ureg_dst wpos_temp = ureg_DECL_temporary(ureg);
   struct ureg_src wpos_input = t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]];

   /* Note that we bias X and Y and pass Z and W through unchanged.
    * The shader might also use gl_FragCoord.w and .z.
    */
   ureg_ADD(ureg, wpos_temp, wpos_input,
            ureg_imm4f(ureg, adjX, adjY, 0.0f, 0.0f));

   /* All later WPOS reads now see the adjusted value. */
   t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]] = ureg_src(wpos_temp);
}


/**
 * Emit the TGSI instructions for inverting the WPOS y coordinate.
 * This code is unavoidable because it also depends on whether
 * a FBO is bound (STATE_FB_WPOS_Y_TRANSFORM).
 */
static void
emit_wpos_inversion( struct st_translate *t,
                     const struct gl_program *program,
                     boolean invert)
{
   struct ureg_program *ureg = t->ureg;

   /* Fragment program uses fragment position input.
    * Need to replace instances of INPUT[WPOS] with temp T
    * where T = INPUT[WPOS] but with y inverted.
    */
   static const gl_state_index wposTransformState[STATE_LENGTH]
      = { STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM,
          (gl_state_index)0, (gl_state_index)0, (gl_state_index)0 };

   /* XXX: note we are modifying the incoming shader here!  Need to
    * do this before emitting the constant decls below, or this
    * will be missed:
    */
   unsigned wposTransConst = _mesa_add_state_reference(program->Parameters,
                                                       wposTransformState);

   struct ureg_src wpostrans = ureg_DECL_constant( ureg, wposTransConst );
   struct ureg_dst wpos_temp;
   struct ureg_src wpos_input = t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]];

   /* MOV wpos_temp, input[wpos]
    */
   if (wpos_input.File == TGSI_FILE_TEMPORARY)
      /* WPOS is already in a temp (e.g. from emit_adjusted_wpos); write
       * the inverted Y in place rather than copying to a new temp.
       */
      wpos_temp = ureg_dst(wpos_input);
   else {
      wpos_temp = ureg_DECL_temporary( ureg );
      ureg_MOV( ureg, wpos_temp, wpos_input );
   }

   if (invert) {
      /* MAD wpos_temp.y, wpos_input, wpostrans.xxxx, wpostrans.yyyy
       */
      ureg_MAD( ureg,
                ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y ),
                wpos_input,
                ureg_scalar(wpostrans, 0),
                ureg_scalar(wpostrans, 1));
   } else {
      /* MAD wpos_temp.y, wpos_input, wpostrans.zzzz, wpostrans.wwww
       */
      ureg_MAD( ureg,
                ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y ),
                wpos_input,
                ureg_scalar(wpostrans, 2),
                ureg_scalar(wpostrans, 3));
   }

   /* Use wpos_temp as position input from here on:
    */
   t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]] = ureg_src(wpos_temp);
}


/**
 * Emit fragment position/coordinate code.
 */
static void
emit_wpos(struct st_context *st,
          struct st_translate *t,
          const struct gl_program *program,
          struct ureg_program *ureg)
{
   const struct gl_fragment_program *fp =
      (const struct gl_fragment_program *) program;
   struct pipe_screen *pscreen = st->pipe->screen;
   boolean invert = FALSE;

   /* Reconcile the shader's requested WPOS origin with what the driver
    * supports; when they disagree, fall back to inverting Y in the shader.
    */
   if (fp->OriginUpperLeft) {
      /* Fragment shader wants origin in upper-left */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT)) {
         /* the driver supports upper-left origin */
      }
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT)) {
         /* the driver supports lower-left origin, need to invert Y */
         ureg_property_fs_coord_origin(ureg, TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
         invert = TRUE;
      }
      else
         assert(0);
   }
   else {
      /* Fragment shader wants origin in lower-left */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT))
         /* the driver supports lower-left origin */
         ureg_property_fs_coord_origin(ureg, TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT))
         /* the driver supports upper-left origin, need to invert Y */
         invert = TRUE;
      else
         assert(0);
   }

   /* Same negotiation for the pixel center convention; note the bias sign
    * depends on whether Y will be inverted below.
    */
   if (fp->PixelCenterInteger) {
      /* Fragment shader wants pixel center integer */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER))
         /* the driver supports pixel center integer */
         ureg_property_fs_coord_pixel_center(ureg, TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER))
         /* the driver supports pixel center half integer, need to bias X,Y */
         emit_adjusted_wpos(t, program, 0.5f, invert ? 0.5f : -0.5f);
      else
         assert(0);
   }
   else {
      /* Fragment shader wants pixel center half integer */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
         /* the driver supports pixel center half integer */
      }
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
         /* the driver supports pixel center integer, need to bias X,Y */
         ureg_property_fs_coord_pixel_center(ureg, TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
         emit_adjusted_wpos(t, program, 0.5f, invert ? -0.5f : 0.5f);
      }
      else
         assert(0);
   }

   /* we invert after adjustment so that we avoid the MOV to temporary,
    * and reuse the adjustment ADD instead */
   emit_wpos_inversion(t, program, invert);
}

/**
 * OpenGL's fragment gl_FrontFace input is 1 for front-facing, 0 for back.
 * TGSI uses +1 for front, -1 for back.
 * This function converts the TGSI value to the GL value. Simply clamping/
 * saturating the value to [0,1] does the job.
 */
static void
emit_face_var(struct st_translate *t)
{
   struct ureg_program *ureg = t->ureg;
   struct ureg_dst face_temp = ureg_DECL_temporary(ureg);
   struct ureg_src face_input = t->inputs[t->inputMapping[FRAG_ATTRIB_FACE]];

   /* MOV_SAT face_temp, input[face] */
   face_temp = ureg_saturate(face_temp);
   ureg_MOV(ureg, face_temp, face_input);

   /* Use face_temp as face input from here on: */
   t->inputs[t->inputMapping[FRAG_ATTRIB_FACE]] = ureg_src(face_temp);
}

/** Copy the vertex edge-flag input through to the edge-flag output. */
static void
emit_edgeflags(struct st_translate *t)
{
   struct ureg_program *ureg = t->ureg;
   struct ureg_dst edge_dst = t->outputs[t->outputMapping[VERT_RESULT_EDGE]];
   struct ureg_src edge_src = t->inputs[t->inputMapping[VERT_ATTRIB_EDGEFLAG]];

   ureg_MOV(ureg, edge_dst, edge_src);
}

/**
 * Translate intermediate IR (glsl_to_tgsi_instruction) to TGSI format.
 * \param program  the program to translate
 * \param numInputs  number of input registers used
 * \param inputMapping  maps Mesa fragment program inputs to TGSI generic
 *                      input indexes
 * \param inputSemanticName  the TGSI_SEMANTIC flag for each input
 * \param inputSemanticIndex  the semantic index (ex: which texcoord) for
 *                            each input
 * \param interpMode  the TGSI_INTERPOLATE_LINEAR/PERSP mode for each input
 * \param numOutputs  number of output registers used
 * \param outputMapping  maps Mesa fragment program outputs to TGSI
 *                       generic outputs
 * \param outputSemanticName  the TGSI_SEMANTIC flag for each output
 * \param outputSemanticIndex  the semantic index (ex: which texcoord) for
 *                             each output
 *
 * \return  PIPE_OK or PIPE_ERROR_OUT_OF_MEMORY
 */
extern "C" enum pipe_error
st_translate_program(
   struct gl_context *ctx,
   uint procType,
   struct ureg_program *ureg,
   glsl_to_tgsi_visitor *program,
   const struct gl_program *proginfo,
   GLuint numInputs,
   const GLuint inputMapping[],
   const ubyte inputSemanticName[],
   const ubyte inputSemanticIndex[],
   const GLuint interpMode[],
   GLuint numOutputs,
   const GLuint outputMapping[],
   const ubyte outputSemanticName[],
   const ubyte outputSemanticIndex[],
   boolean passthrough_edgeflags )
{
   struct st_translate translate, *t;
   unsigned i;
   enum pipe_error ret = PIPE_OK;

   assert(numInputs <= Elements(t->inputs));
   assert(numOutputs <= Elements(t->outputs));

   t = &translate;
   memset(t, 0, sizeof *t);

   t->procType = procType;
   t->inputMapping = inputMapping;
   t->outputMapping = outputMapping;
   t->ureg = ureg;
   t->pointSizeOutIndex = -1;  /* -1 means no PSIZ output to clamp */
   t->prevInstWrotePointSize = GL_FALSE;

   /*
    * Declare input attributes.
    */
   if (procType == TGSI_PROCESSOR_FRAGMENT) {
      for (i = 0; i < numInputs; i++) {
         t->inputs[i] = ureg_DECL_fs_input(ureg,
                                           inputSemanticName[i],
                                           inputSemanticIndex[i],
                                           interpMode[i]);
      }

      if (proginfo->InputsRead & FRAG_BIT_WPOS) {
         /* Must do this after setting up t->inputs, and before
          * emitting constant references, below:
          */
         emit_wpos(st_context(ctx), t, proginfo, ureg);
      }

      if (proginfo->InputsRead & FRAG_BIT_FACE)
         emit_face_var(t);

      /*
       * Declare output attributes.
       */
      for (i = 0; i < numOutputs; i++) {
         switch (outputSemanticName[i]) {
         case TGSI_SEMANTIC_POSITION:
            t->outputs[i] = ureg_DECL_output( ureg,
                                              TGSI_SEMANTIC_POSITION, /* Z / Depth */
                                              outputSemanticIndex[i] );

            t->outputs[i] = ureg_writemask( t->outputs[i],
                                            TGSI_WRITEMASK_Z );
            break;
         case TGSI_SEMANTIC_STENCIL:
            t->outputs[i] = ureg_DECL_output( ureg,
                                              TGSI_SEMANTIC_STENCIL, /* Stencil */
                                              outputSemanticIndex[i] );
            t->outputs[i] = ureg_writemask( t->outputs[i],
                                            TGSI_WRITEMASK_Y );
            break;
         case TGSI_SEMANTIC_COLOR:
            t->outputs[i] = ureg_DECL_output( ureg,
                                              TGSI_SEMANTIC_COLOR,
                                              outputSemanticIndex[i] );
            break;
         default:
            debug_assert(0);
            return PIPE_ERROR_BAD_INPUT;
         }
      }
   }
   else if (procType == TGSI_PROCESSOR_GEOMETRY) {
      for (i = 0; i < numInputs; i++) {
         t->inputs[i] = ureg_DECL_gs_input(ureg,
                                           i,
                                           inputSemanticName[i],
                                           inputSemanticIndex[i]);
      }

      for (i = 0; i < numOutputs; i++) {
         t->outputs[i] = ureg_DECL_output( ureg,
                                           outputSemanticName[i],
                                           outputSemanticIndex[i] );
      }
   }
   else {
      assert(procType == TGSI_PROCESSOR_VERTEX);

      for (i = 0; i < numInputs; i++) {
         t->inputs[i] = ureg_DECL_vs_input(ureg, i);
      }

      for (i = 0; i < numOutputs; i++) {
         t->outputs[i] = ureg_DECL_output( ureg,
                                           outputSemanticName[i],
                                           outputSemanticIndex[i] );
         if ((outputSemanticName[i] == TGSI_SEMANTIC_PSIZE) && proginfo->Id) {
            /* Writing to the point size result register requires special
             * handling to implement clamping.
             */
            static const gl_state_index pointSizeClampState[STATE_LENGTH]
               = { STATE_INTERNAL, STATE_POINT_SIZE_IMPL_CLAMP, (gl_state_index)0, (gl_state_index)0, (gl_state_index)0 };
            /* XXX: note we are modifying the incoming shader here!  Need to
             * do this before emitting the constant decls below, or this
             * will be missed.
             */
            unsigned pointSizeClampConst =
               _mesa_add_state_reference(proginfo->Parameters,
                                         pointSizeClampState);
            struct ureg_dst psizregtemp = ureg_DECL_temporary( ureg );
            t->pointSizeConst = ureg_DECL_constant( ureg, pointSizeClampConst );
            t->pointSizeResult = t->outputs[i];
            t->pointSizeOutIndex = i;
            /* Redirect PSIZ writes into a temp; the clamp code emitted in
             * the instruction loop below copies the clamped value into the
             * real output register.
             */
            t->outputs[i] = psizregtemp;
         }
      }
      if (passthrough_edgeflags)
         emit_edgeflags(t);
   }

   /* Declare address register.
    */
   if (program->num_address_regs > 0) {
      debug_assert( program->num_address_regs == 1 );
      t->address[0] = ureg_DECL_address( ureg );
   }

   /* Declare misc input registers
    */
   {
      GLbitfield sysInputs = proginfo->SystemValuesRead;
      unsigned numSys = 0;
      /* Walk the set bits, clearing each one as it is declared. */
      for (i = 0; sysInputs; i++) {
         if (sysInputs & (1 << i)) {
            unsigned semName = mesa_sysval_to_semantic[i];
            t->systemValues[i] = ureg_DECL_system_value(ureg, numSys, semName, 0);
            numSys++;
            sysInputs &= ~(1 << i);
         }
      }
   }

   if (program->indirect_addr_temps) {
      /* If temps are accessed with indirect addressing, declare temporaries
       * in sequential order. Else, we declare them on demand elsewhere.
       * (Note: the number of temporaries is equal to program->next_temp)
       */
      for (i = 0; i < (unsigned)program->next_temp; i++) {
         /* XXX use TGSI_FILE_TEMPORARY_ARRAY when it's supported by ureg */
         t->temps[i] = ureg_DECL_temporary( t->ureg );
      }
   }

   /* Emit constants and immediates. Mesa uses a single index space
    * for these, so we put all the translated regs in t->constants.
    * XXX: this entire if block depends on proginfo->Parameters from Mesa IR
    */
   if (proginfo->Parameters) {
      t->constants = (struct ureg_src *)CALLOC( proginfo->Parameters->NumParameters * sizeof t->constants[0] );
      if (t->constants == NULL) {
         ret = PIPE_ERROR_OUT_OF_MEMORY;
         goto out;
      }

      for (i = 0; i < proginfo->Parameters->NumParameters; i++) {
         switch (proginfo->Parameters->Parameters[i].Type) {
         case PROGRAM_ENV_PARAM:
         case PROGRAM_LOCAL_PARAM:
         case PROGRAM_STATE_VAR:
         case PROGRAM_NAMED_PARAM:
         case PROGRAM_UNIFORM:
            t->constants[i] = ureg_DECL_constant( ureg, i );
            break;

         /* Emit immediates only when there's no indirect addressing of
          * the const buffer.
          * FIXME: Be smarter and recognize param arrays:
          * indirect addressing is only valid within the referenced
          * array.
          */
         case PROGRAM_CONSTANT:
            if (program->indirect_addr_consts)
               t->constants[i] = ureg_DECL_constant( ureg, i );
            else
               switch(proginfo->Parameters->Parameters[i].DataType)
               {
               case GL_FLOAT:
               case GL_FLOAT_VEC2:
               case GL_FLOAT_VEC3:
               case GL_FLOAT_VEC4:
                  t->constants[i] = ureg_DECL_immediate(ureg, (float *)proginfo->Parameters->ParameterValues[i], 4);
                  break;
               case GL_INT:
               case GL_INT_VEC2:
               case GL_INT_VEC3:
               case GL_INT_VEC4:
                  t->constants[i] = ureg_DECL_immediate_int(ureg, (int *)proginfo->Parameters->ParameterValues[i], 4);
                  break;
               case GL_UNSIGNED_INT:
               case GL_UNSIGNED_INT_VEC2:
               case GL_UNSIGNED_INT_VEC3:
               case GL_UNSIGNED_INT_VEC4:
               case GL_BOOL:
               case GL_BOOL_VEC2:
               case GL_BOOL_VEC3:
               case GL_BOOL_VEC4:
                  t->constants[i] = ureg_DECL_immediate_uint(ureg, (unsigned *)proginfo->Parameters->ParameterValues[i], 4);
                  break;
               default:
                  assert(!"should not get here");
               }
            break;
         default:
            break;
         }
      }
   }

   /* texture samplers */
   for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
      if (program->samplers_used & (1 << i)) {
         t->samplers[i] = ureg_DECL_sampler( ureg, i );
      }
   }

   /* Emit each instruction in turn:
    */
   foreach_iter(exec_list_iterator, iter, program->instructions) {
      set_insn_start( t, ureg_get_instruction_number( ureg ));
      compile_tgsi_instruction( t, (glsl_to_tgsi_instruction *)iter.get() );

      if (t->prevInstWrotePointSize && proginfo->Id) {
         /* The previous instruction wrote to the (fake) vertex point size
          * result register. Now we need to clamp that value to the min/max
          * point size range, putting the result into the real point size
          * register.
          * Note that we can't do this easily at the end of program due to
          * possible early return.
          */
         set_insn_start( t, ureg_get_instruction_number( ureg ));
         ureg_MAX( t->ureg,
                   ureg_writemask(t->outputs[t->pointSizeOutIndex], WRITEMASK_X),
                   ureg_src(t->outputs[t->pointSizeOutIndex]),
                   ureg_swizzle(t->pointSizeConst, 1,1,1,1));
         ureg_MIN( t->ureg, ureg_writemask(t->pointSizeResult, WRITEMASK_X),
                   ureg_src(t->outputs[t->pointSizeOutIndex]),
                   ureg_swizzle(t->pointSizeConst, 2,2,2,2));
      }
      t->prevInstWrotePointSize = GL_FALSE;
   }

   /* Fix up all emitted labels:
    */
   for (i = 0; i < t->labels_count; i++) {
      ureg_fixup_label( ureg,
                        t->labels[i].token,
                        t->insn[t->labels[i].branch_target] );
   }

out:
   /* FREE(NULL) is a no-op, so this is safe on the early-error path. */
   FREE(t->insn);
   FREE(t->labels);
   FREE(t->constants);

   if (t->error) {
      debug_printf("%s: translate error flag set\n", __FUNCTION__);
   }

   return ret;
}
/* ----------------------------- End TGSI code ------------------------------ */

/**
 * Convert a shader's GLSL IR into a Mesa gl_program, although without
 * generating Mesa IR.
4301 */ 4302static struct gl_program * 4303get_mesa_program(struct gl_context *ctx, 4304 struct gl_shader_program *shader_program, 4305 struct gl_shader *shader) 4306{ 4307 glsl_to_tgsi_visitor* v = new glsl_to_tgsi_visitor(); 4308 struct gl_program *prog; 4309 GLenum target; 4310 const char *target_string; 4311 GLboolean progress; 4312 struct gl_shader_compiler_options *options = 4313 &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(shader->Type)]; 4314 4315 switch (shader->Type) { 4316 case GL_VERTEX_SHADER: 4317 target = GL_VERTEX_PROGRAM_ARB; 4318 target_string = "vertex"; 4319 break; 4320 case GL_FRAGMENT_SHADER: 4321 target = GL_FRAGMENT_PROGRAM_ARB; 4322 target_string = "fragment"; 4323 break; 4324 case GL_GEOMETRY_SHADER: 4325 target = GL_GEOMETRY_PROGRAM_NV; 4326 target_string = "geometry"; 4327 break; 4328 default: 4329 assert(!"should not be reached"); 4330 return NULL; 4331 } 4332 4333 validate_ir_tree(shader->ir); 4334 4335 prog = ctx->Driver.NewProgram(ctx, target, shader_program->Name); 4336 if (!prog) 4337 return NULL; 4338 prog->Parameters = _mesa_new_parameter_list(); 4339 prog->Varying = _mesa_new_parameter_list(); 4340 prog->Attributes = _mesa_new_parameter_list(); 4341 v->ctx = ctx; 4342 v->prog = prog; 4343 v->shader_program = shader_program; 4344 v->options = options; 4345 v->glsl_version = ctx->Const.GLSLVersion; 4346 4347 add_uniforms_to_parameters_list(shader_program, shader, prog); 4348 4349 /* Emit intermediate IR for main(). */ 4350 visit_exec_list(shader->ir, v); 4351 4352 /* Now emit bodies for any functions that were used. 
*/ 4353 do { 4354 progress = GL_FALSE; 4355 4356 foreach_iter(exec_list_iterator, iter, v->function_signatures) { 4357 function_entry *entry = (function_entry *)iter.get(); 4358 4359 if (!entry->bgn_inst) { 4360 v->current_function = entry; 4361 4362 entry->bgn_inst = v->emit(NULL, TGSI_OPCODE_BGNSUB); 4363 entry->bgn_inst->function = entry; 4364 4365 visit_exec_list(&entry->sig->body, v); 4366 4367 glsl_to_tgsi_instruction *last; 4368 last = (glsl_to_tgsi_instruction *)v->instructions.get_tail(); 4369 if (last->op != TGSI_OPCODE_RET) 4370 v->emit(NULL, TGSI_OPCODE_RET); 4371 4372 glsl_to_tgsi_instruction *end; 4373 end = v->emit(NULL, TGSI_OPCODE_ENDSUB); 4374 end->function = entry; 4375 4376 progress = GL_TRUE; 4377 } 4378 } 4379 } while (progress); 4380 4381#if 0 4382 /* Print out some information (for debugging purposes) used by the 4383 * optimization passes. */ 4384 for (i=0; i < v->next_temp; i++) { 4385 int fr = v->get_first_temp_read(i); 4386 int fw = v->get_first_temp_write(i); 4387 int lr = v->get_last_temp_read(i); 4388 int lw = v->get_last_temp_write(i); 4389 4390 printf("Temp %d: FR=%3d FW=%3d LR=%3d LW=%3d\n", i, fr, fw, lr, lw); 4391 assert(fw <= fr); 4392 } 4393#endif 4394 4395 /* Remove reads to output registers, and to varyings in vertex shaders. */ 4396 v->remove_output_reads(PROGRAM_OUTPUT); 4397 if (target == GL_VERTEX_PROGRAM_ARB) 4398 v->remove_output_reads(PROGRAM_VARYING); 4399 4400 /* Perform the simplify_cmp optimization, which is required by r300g. */ 4401 v->simplify_cmp(); 4402 4403 /* Perform optimizations on the instructions in the glsl_to_tgsi_visitor. 4404 * FIXME: These passes to optimize temporary registers don't work when there 4405 * is indirect addressing of the temporary register space. We need proper 4406 * array support so that we don't have to give up these passes in every 4407 * shader that uses arrays. 
4408 */ 4409 if (!v->indirect_addr_temps) { 4410 v->copy_propagate(); 4411 while (v->eliminate_dead_code_advanced()); 4412 v->eliminate_dead_code(); 4413 v->merge_registers(); 4414 v->renumber_registers(); 4415 } 4416 4417 /* Write the END instruction. */ 4418 v->emit(NULL, TGSI_OPCODE_END); 4419 4420 if (ctx->Shader.Flags & GLSL_DUMP) { 4421 printf("\n"); 4422 printf("GLSL IR for linked %s program %d:\n", target_string, 4423 shader_program->Name); 4424 _mesa_print_ir(shader->ir, NULL); 4425 printf("\n"); 4426 printf("\n"); 4427 } 4428 4429 prog->Instructions = NULL; 4430 prog->NumInstructions = 0; 4431 4432 do_set_program_inouts(shader->ir, prog); 4433 count_resources(v, prog); 4434 4435 check_resources(ctx, shader_program, v, prog); 4436 4437 _mesa_reference_program(ctx, &shader->Program, prog); 4438 4439 struct st_vertex_program *stvp; 4440 struct st_fragment_program *stfp; 4441 struct st_geometry_program *stgp; 4442 4443 switch (shader->Type) { 4444 case GL_VERTEX_SHADER: 4445 stvp = (struct st_vertex_program *)prog; 4446 stvp->glsl_to_tgsi = v; 4447 break; 4448 case GL_FRAGMENT_SHADER: 4449 stfp = (struct st_fragment_program *)prog; 4450 stfp->glsl_to_tgsi = v; 4451 break; 4452 case GL_GEOMETRY_SHADER: 4453 stgp = (struct st_geometry_program *)prog; 4454 stgp->glsl_to_tgsi = v; 4455 break; 4456 default: 4457 assert(!"should not be reached"); 4458 return NULL; 4459 } 4460 4461 return prog; 4462} 4463 4464extern "C" { 4465 4466struct gl_shader * 4467st_new_shader(struct gl_context *ctx, GLuint name, GLuint type) 4468{ 4469 struct gl_shader *shader; 4470 assert(type == GL_FRAGMENT_SHADER || type == GL_VERTEX_SHADER || 4471 type == GL_GEOMETRY_SHADER_ARB); 4472 shader = rzalloc(NULL, struct gl_shader); 4473 if (shader) { 4474 shader->Type = type; 4475 shader->Name = name; 4476 _mesa_init_shader(ctx, shader); 4477 } 4478 return shader; 4479} 4480 4481struct gl_shader_program * 4482st_new_shader_program(struct gl_context *ctx, GLuint name) 4483{ 4484 struct 
gl_shader_program *shProg; 4485 shProg = rzalloc(NULL, struct gl_shader_program); 4486 if (shProg) { 4487 shProg->Name = name; 4488 _mesa_init_shader_program(ctx, shProg); 4489 } 4490 return shProg; 4491} 4492 4493/** 4494 * Link a shader. 4495 * Called via ctx->Driver.LinkShader() 4496 * This actually involves converting GLSL IR into an intermediate TGSI-like IR 4497 * with code lowering and other optimizations. 4498 */ 4499GLboolean 4500st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog) 4501{ 4502 assert(prog->LinkStatus); 4503 4504 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) { 4505 if (prog->_LinkedShaders[i] == NULL) 4506 continue; 4507 4508 bool progress; 4509 exec_list *ir = prog->_LinkedShaders[i]->ir; 4510 const struct gl_shader_compiler_options *options = 4511 &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(prog->_LinkedShaders[i]->Type)]; 4512 4513 do { 4514 progress = false; 4515 4516 /* Lowering */ 4517 do_mat_op_to_vec(ir); 4518 lower_instructions(ir, (MOD_TO_FRACT | DIV_TO_MUL_RCP | EXP_TO_EXP2 4519 | LOG_TO_LOG2 4520 | ((options->EmitNoPow) ? POW_TO_EXP2 : 0))); 4521 4522 progress = do_lower_jumps(ir, true, true, options->EmitNoMainReturn, options->EmitNoCont, options->EmitNoLoops) || progress; 4523 4524 progress = do_common_optimization(ir, true, options->MaxUnrollIterations) || progress; 4525 4526 progress = lower_quadop_vector(ir, true) || progress; 4527 4528 if (options->EmitNoIfs) { 4529 progress = lower_discard(ir) || progress; 4530 progress = lower_if_to_cond_assign(ir) || progress; 4531 } 4532 4533 if (options->EmitNoNoise) 4534 progress = lower_noise(ir) || progress; 4535 4536 /* If there are forms of indirect addressing that the driver 4537 * cannot handle, perform the lowering pass. 
4538 */ 4539 if (options->EmitNoIndirectInput || options->EmitNoIndirectOutput 4540 || options->EmitNoIndirectTemp || options->EmitNoIndirectUniform) 4541 progress = 4542 lower_variable_index_to_cond_assign(ir, 4543 options->EmitNoIndirectInput, 4544 options->EmitNoIndirectOutput, 4545 options->EmitNoIndirectTemp, 4546 options->EmitNoIndirectUniform) 4547 || progress; 4548 4549 progress = do_vec_index_to_cond_assign(ir) || progress; 4550 } while (progress); 4551 4552 validate_ir_tree(ir); 4553 } 4554 4555 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) { 4556 struct gl_program *linked_prog; 4557 4558 if (prog->_LinkedShaders[i] == NULL) 4559 continue; 4560 4561 linked_prog = get_mesa_program(ctx, prog, prog->_LinkedShaders[i]); 4562 4563 if (linked_prog) { 4564 bool ok = true; 4565 4566 switch (prog->_LinkedShaders[i]->Type) { 4567 case GL_VERTEX_SHADER: 4568 _mesa_reference_vertprog(ctx, &prog->VertexProgram, 4569 (struct gl_vertex_program *)linked_prog); 4570 ok = ctx->Driver.ProgramStringNotify(ctx, GL_VERTEX_PROGRAM_ARB, 4571 linked_prog); 4572 break; 4573 case GL_FRAGMENT_SHADER: 4574 _mesa_reference_fragprog(ctx, &prog->FragmentProgram, 4575 (struct gl_fragment_program *)linked_prog); 4576 ok = ctx->Driver.ProgramStringNotify(ctx, GL_FRAGMENT_PROGRAM_ARB, 4577 linked_prog); 4578 break; 4579 case GL_GEOMETRY_SHADER: 4580 _mesa_reference_geomprog(ctx, &prog->GeometryProgram, 4581 (struct gl_geometry_program *)linked_prog); 4582 ok = ctx->Driver.ProgramStringNotify(ctx, GL_GEOMETRY_PROGRAM_NV, 4583 linked_prog); 4584 break; 4585 } 4586 if (!ok) { 4587 return GL_FALSE; 4588 } 4589 } 4590 4591 _mesa_reference_program(ctx, &linked_prog, NULL); 4592 } 4593 4594 return GL_TRUE; 4595} 4596 4597 4598/** 4599 * Link a GLSL shader program. Called via glLinkProgram(). 
4600 */ 4601void 4602st_glsl_link_shader(struct gl_context *ctx, struct gl_shader_program *prog) 4603{ 4604 unsigned int i; 4605 4606 _mesa_clear_shader_program_data(ctx, prog); 4607 4608 prog->LinkStatus = GL_TRUE; 4609 4610 for (i = 0; i < prog->NumShaders; i++) { 4611 if (!prog->Shaders[i]->CompileStatus) { 4612 fail_link(prog, "linking with uncompiled shader"); 4613 prog->LinkStatus = GL_FALSE; 4614 } 4615 } 4616 4617 prog->Varying = _mesa_new_parameter_list(); 4618 _mesa_reference_vertprog(ctx, &prog->VertexProgram, NULL); 4619 _mesa_reference_fragprog(ctx, &prog->FragmentProgram, NULL); 4620 _mesa_reference_geomprog(ctx, &prog->GeometryProgram, NULL); 4621 4622 if (prog->LinkStatus) { 4623 link_shaders(ctx, prog); 4624 } 4625 4626 if (prog->LinkStatus) { 4627 if (!ctx->Driver.LinkShader(ctx, prog)) { 4628 prog->LinkStatus = GL_FALSE; 4629 } 4630 } 4631 4632 set_uniform_initializers(ctx, prog); 4633 4634 if (ctx->Shader.Flags & GLSL_DUMP) { 4635 if (!prog->LinkStatus) { 4636 printf("GLSL shader program %d failed to link\n", prog->Name); 4637 } 4638 4639 if (prog->InfoLog && prog->InfoLog[0] != 0) { 4640 printf("GLSL shader program %d info log:\n", prog->Name); 4641 printf("%s\n", prog->InfoLog); 4642 } 4643 } 4644} 4645 4646} /* extern "C" */ 4647