st_glsl_to_tgsi.cpp revision fdae0eaf222f271bfbc7e71d8561eb8b90685ae5
1/* 2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved. 3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved. 4 * Copyright © 2010 Intel Corporation 5 * Copyright © 2011 Bryan Cain 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the "Software"), 9 * to deal in the Software without restriction, including without limitation 10 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * and/or sell copies of the Software, and to permit persons to whom the 12 * Software is furnished to do so, subject to the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the next 15 * paragraph) shall be included in all copies or substantial portions of the 16 * Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 24 * DEALINGS IN THE SOFTWARE. 25 */ 26 27/** 28 * \file glsl_to_tgsi.cpp 29 * 30 * Translate GLSL IR to TGSI. 
31 */ 32 33#include <stdio.h> 34#include "main/compiler.h" 35#include "ir.h" 36#include "ir_visitor.h" 37#include "ir_print_visitor.h" 38#include "ir_expression_flattening.h" 39#include "glsl_types.h" 40#include "glsl_parser_extras.h" 41#include "../glsl/program.h" 42#include "ir_optimization.h" 43#include "ast.h" 44 45#include "main/mtypes.h" 46#include "main/shaderobj.h" 47#include "program/hash_table.h" 48 49extern "C" { 50#include "main/shaderapi.h" 51#include "main/uniforms.h" 52#include "program/prog_instruction.h" 53#include "program/prog_optimize.h" 54#include "program/prog_print.h" 55#include "program/program.h" 56#include "program/prog_parameter.h" 57#include "program/sampler.h" 58 59#include "pipe/p_compiler.h" 60#include "pipe/p_context.h" 61#include "pipe/p_screen.h" 62#include "pipe/p_shader_tokens.h" 63#include "pipe/p_state.h" 64#include "util/u_math.h" 65#include "tgsi/tgsi_ureg.h" 66#include "tgsi/tgsi_info.h" 67#include "st_context.h" 68#include "st_program.h" 69#include "st_glsl_to_tgsi.h" 70#include "st_mesa_to_tgsi.h" 71} 72 73#define PROGRAM_IMMEDIATE PROGRAM_FILE_MAX 74#define PROGRAM_ANY_CONST ((1 << PROGRAM_LOCAL_PARAM) | \ 75 (1 << PROGRAM_ENV_PARAM) | \ 76 (1 << PROGRAM_STATE_VAR) | \ 77 (1 << PROGRAM_NAMED_PARAM) | \ 78 (1 << PROGRAM_CONSTANT) | \ 79 (1 << PROGRAM_UNIFORM)) 80 81/** 82 * Maximum number of temporary registers. 83 * 84 * It is too big for stack allocated arrays -- it will cause stack overflow on 85 * Windows and likely Mac OS X. 86 */ 87#define MAX_TEMPS 4096 88 89/* will be 4 for GLSL 4.00 */ 90#define MAX_GLSL_TEXTURE_OFFSET 1 91 92class st_src_reg; 93class st_dst_reg; 94 95static int swizzle_for_size(int size); 96 97/** 98 * This struct is a corresponding struct to TGSI ureg_src. 
99 */ 100class st_src_reg { 101public: 102 st_src_reg(gl_register_file file, int index, const glsl_type *type) 103 { 104 this->file = file; 105 this->index = index; 106 if (type && (type->is_scalar() || type->is_vector() || type->is_matrix())) 107 this->swizzle = swizzle_for_size(type->vector_elements); 108 else 109 this->swizzle = SWIZZLE_XYZW; 110 this->negate = 0; 111 this->type = type ? type->base_type : GLSL_TYPE_ERROR; 112 this->reladdr = NULL; 113 } 114 115 st_src_reg(gl_register_file file, int index, int type) 116 { 117 this->type = type; 118 this->file = file; 119 this->index = index; 120 this->swizzle = SWIZZLE_XYZW; 121 this->negate = 0; 122 this->reladdr = NULL; 123 } 124 125 st_src_reg() 126 { 127 this->type = GLSL_TYPE_ERROR; 128 this->file = PROGRAM_UNDEFINED; 129 this->index = 0; 130 this->swizzle = 0; 131 this->negate = 0; 132 this->reladdr = NULL; 133 } 134 135 explicit st_src_reg(st_dst_reg reg); 136 137 gl_register_file file; /**< PROGRAM_* from Mesa */ 138 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */ 139 GLuint swizzle; /**< SWIZZLE_XYZWONEZERO swizzles from Mesa. */ 140 int negate; /**< NEGATE_XYZW mask from mesa */ 141 int type; /** GLSL_TYPE_* from GLSL IR (enum glsl_base_type) */ 142 /** Register index should be offset by the integer in this reg. 
*/ 143 st_src_reg *reladdr; 144}; 145 146class st_dst_reg { 147public: 148 st_dst_reg(gl_register_file file, int writemask, int type) 149 { 150 this->file = file; 151 this->index = 0; 152 this->writemask = writemask; 153 this->cond_mask = COND_TR; 154 this->reladdr = NULL; 155 this->type = type; 156 } 157 158 st_dst_reg() 159 { 160 this->type = GLSL_TYPE_ERROR; 161 this->file = PROGRAM_UNDEFINED; 162 this->index = 0; 163 this->writemask = 0; 164 this->cond_mask = COND_TR; 165 this->reladdr = NULL; 166 } 167 168 explicit st_dst_reg(st_src_reg reg); 169 170 gl_register_file file; /**< PROGRAM_* from Mesa */ 171 int index; /**< temporary index, VERT_ATTRIB_*, FRAG_ATTRIB_*, etc. */ 172 int writemask; /**< Bitfield of WRITEMASK_[XYZW] */ 173 GLuint cond_mask:4; 174 int type; /** GLSL_TYPE_* from GLSL IR (enum glsl_base_type) */ 175 /** Register index should be offset by the integer in this reg. */ 176 st_src_reg *reladdr; 177}; 178 179st_src_reg::st_src_reg(st_dst_reg reg) 180{ 181 this->type = reg.type; 182 this->file = reg.file; 183 this->index = reg.index; 184 this->swizzle = SWIZZLE_XYZW; 185 this->negate = 0; 186 this->reladdr = reg.reladdr; 187} 188 189st_dst_reg::st_dst_reg(st_src_reg reg) 190{ 191 this->type = reg.type; 192 this->file = reg.file; 193 this->index = reg.index; 194 this->writemask = WRITEMASK_XYZW; 195 this->cond_mask = COND_TR; 196 this->reladdr = reg.reladdr; 197} 198 199class glsl_to_tgsi_instruction : public exec_node { 200public: 201 /* Callers of this ralloc-based new need not call delete. It's 202 * easier to just ralloc_free 'ctx' (or any of its ancestors). 
*/ 203 static void* operator new(size_t size, void *ctx) 204 { 205 void *node; 206 207 node = rzalloc_size(ctx, size); 208 assert(node != NULL); 209 210 return node; 211 } 212 213 unsigned op; 214 st_dst_reg dst; 215 st_src_reg src[3]; 216 /** Pointer to the ir source this tree came from for debugging */ 217 ir_instruction *ir; 218 GLboolean cond_update; 219 bool saturate; 220 int sampler; /**< sampler index */ 221 int tex_target; /**< One of TEXTURE_*_INDEX */ 222 GLboolean tex_shadow; 223 struct tgsi_texture_offset tex_offsets[MAX_GLSL_TEXTURE_OFFSET]; 224 unsigned tex_offset_num_offset; 225 int dead_mask; /**< Used in dead code elimination */ 226 227 class function_entry *function; /* Set on TGSI_OPCODE_CAL or TGSI_OPCODE_BGNSUB */ 228}; 229 230class variable_storage : public exec_node { 231public: 232 variable_storage(ir_variable *var, gl_register_file file, int index) 233 : file(file), index(index), var(var) 234 { 235 /* empty */ 236 } 237 238 gl_register_file file; 239 int index; 240 ir_variable *var; /* variable that maps to this, if any */ 241}; 242 243class immediate_storage : public exec_node { 244public: 245 immediate_storage(gl_constant_value *values, int size, int type) 246 { 247 memcpy(this->values, values, size * sizeof(gl_constant_value)); 248 this->size = size; 249 this->type = type; 250 } 251 252 gl_constant_value values[4]; 253 int size; /**< Number of components (1-4) */ 254 int type; /**< GL_FLOAT, GL_INT, GL_BOOL, or GL_UNSIGNED_INT */ 255}; 256 257class function_entry : public exec_node { 258public: 259 ir_function_signature *sig; 260 261 /** 262 * identifier of this function signature used by the program. 263 * 264 * At the point that TGSI instructions for function calls are 265 * generated, we don't know the address of the first instruction of 266 * the function body. So we make the BranchTarget that is called a 267 * small integer and rewrite them during set_branchtargets(). 
268 */ 269 int sig_id; 270 271 /** 272 * Pointer to first instruction of the function body. 273 * 274 * Set during function body emits after main() is processed. 275 */ 276 glsl_to_tgsi_instruction *bgn_inst; 277 278 /** 279 * Index of the first instruction of the function body in actual TGSI. 280 * 281 * Set after conversion from glsl_to_tgsi_instruction to TGSI. 282 */ 283 int inst; 284 285 /** Storage for the return value. */ 286 st_src_reg return_reg; 287}; 288 289class glsl_to_tgsi_visitor : public ir_visitor { 290public: 291 glsl_to_tgsi_visitor(); 292 ~glsl_to_tgsi_visitor(); 293 294 function_entry *current_function; 295 296 struct gl_context *ctx; 297 struct gl_program *prog; 298 struct gl_shader_program *shader_program; 299 struct gl_shader_compiler_options *options; 300 301 int next_temp; 302 303 int num_address_regs; 304 int samplers_used; 305 bool indirect_addr_temps; 306 bool indirect_addr_consts; 307 int num_clip_distances; 308 309 int glsl_version; 310 bool native_integers; 311 312 variable_storage *find_variable_storage(ir_variable *var); 313 314 int add_constant(gl_register_file file, gl_constant_value values[4], 315 int size, int datatype, GLuint *swizzle_out); 316 317 function_entry *get_function_signature(ir_function_signature *sig); 318 319 st_src_reg get_temp(const glsl_type *type); 320 void reladdr_to_temp(ir_instruction *ir, st_src_reg *reg, int *num_reladdr); 321 322 st_src_reg st_src_reg_for_float(float val); 323 st_src_reg st_src_reg_for_int(int val); 324 st_src_reg st_src_reg_for_type(int type, int val); 325 326 /** 327 * \name Visit methods 328 * 329 * As typical for the visitor pattern, there must be one \c visit method for 330 * each concrete subclass of \c ir_instruction. Virtual base classes within 331 * the hierarchy should not have \c visit methods. 
332 */ 333 /*@{*/ 334 virtual void visit(ir_variable *); 335 virtual void visit(ir_loop *); 336 virtual void visit(ir_loop_jump *); 337 virtual void visit(ir_function_signature *); 338 virtual void visit(ir_function *); 339 virtual void visit(ir_expression *); 340 virtual void visit(ir_swizzle *); 341 virtual void visit(ir_dereference_variable *); 342 virtual void visit(ir_dereference_array *); 343 virtual void visit(ir_dereference_record *); 344 virtual void visit(ir_assignment *); 345 virtual void visit(ir_constant *); 346 virtual void visit(ir_call *); 347 virtual void visit(ir_return *); 348 virtual void visit(ir_discard *); 349 virtual void visit(ir_texture *); 350 virtual void visit(ir_if *); 351 /*@}*/ 352 353 st_src_reg result; 354 355 /** List of variable_storage */ 356 exec_list variables; 357 358 /** List of immediate_storage */ 359 exec_list immediates; 360 unsigned num_immediates; 361 362 /** List of function_entry */ 363 exec_list function_signatures; 364 int next_signature_id; 365 366 /** List of glsl_to_tgsi_instruction */ 367 exec_list instructions; 368 369 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op); 370 371 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op, 372 st_dst_reg dst, st_src_reg src0); 373 374 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op, 375 st_dst_reg dst, st_src_reg src0, st_src_reg src1); 376 377 glsl_to_tgsi_instruction *emit(ir_instruction *ir, unsigned op, 378 st_dst_reg dst, 379 st_src_reg src0, st_src_reg src1, st_src_reg src2); 380 381 unsigned get_opcode(ir_instruction *ir, unsigned op, 382 st_dst_reg dst, 383 st_src_reg src0, st_src_reg src1); 384 385 /** 386 * Emit the correct dot-product instruction for the type of arguments 387 */ 388 glsl_to_tgsi_instruction *emit_dp(ir_instruction *ir, 389 st_dst_reg dst, 390 st_src_reg src0, 391 st_src_reg src1, 392 unsigned elements); 393 394 void emit_scalar(ir_instruction *ir, unsigned op, 395 st_dst_reg dst, st_src_reg src0); 396 
397 void emit_scalar(ir_instruction *ir, unsigned op, 398 st_dst_reg dst, st_src_reg src0, st_src_reg src1); 399 400 void try_emit_float_set(ir_instruction *ir, unsigned op, st_dst_reg dst); 401 402 void emit_arl(ir_instruction *ir, st_dst_reg dst, st_src_reg src0); 403 404 void emit_scs(ir_instruction *ir, unsigned op, 405 st_dst_reg dst, const st_src_reg &src); 406 407 bool try_emit_mad(ir_expression *ir, 408 int mul_operand); 409 bool try_emit_mad_for_and_not(ir_expression *ir, 410 int mul_operand); 411 bool try_emit_sat(ir_expression *ir); 412 413 void emit_swz(ir_expression *ir); 414 415 bool process_move_condition(ir_rvalue *ir); 416 417 void simplify_cmp(void); 418 419 void rename_temp_register(int index, int new_index); 420 int get_first_temp_read(int index); 421 int get_first_temp_write(int index); 422 int get_last_temp_read(int index); 423 int get_last_temp_write(int index); 424 425 void copy_propagate(void); 426 void eliminate_dead_code(void); 427 int eliminate_dead_code_advanced(void); 428 void merge_registers(void); 429 void renumber_registers(void); 430 431 void *mem_ctx; 432}; 433 434static st_src_reg undef_src = st_src_reg(PROGRAM_UNDEFINED, 0, GLSL_TYPE_ERROR); 435 436static st_dst_reg undef_dst = st_dst_reg(PROGRAM_UNDEFINED, SWIZZLE_NOOP, GLSL_TYPE_ERROR); 437 438static st_dst_reg address_reg = st_dst_reg(PROGRAM_ADDRESS, WRITEMASK_X, GLSL_TYPE_FLOAT); 439 440static void 441fail_link(struct gl_shader_program *prog, const char *fmt, ...) PRINTFLIKE(2, 3); 442 443static void 444fail_link(struct gl_shader_program *prog, const char *fmt, ...) 
445{ 446 va_list args; 447 va_start(args, fmt); 448 ralloc_vasprintf_append(&prog->InfoLog, fmt, args); 449 va_end(args); 450 451 prog->LinkStatus = GL_FALSE; 452} 453 454static int 455swizzle_for_size(int size) 456{ 457 int size_swizzles[4] = { 458 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X), 459 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y), 460 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z), 461 MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W), 462 }; 463 464 assert((size >= 1) && (size <= 4)); 465 return size_swizzles[size - 1]; 466} 467 468static bool 469is_tex_instruction(unsigned opcode) 470{ 471 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode); 472 return info->is_tex; 473} 474 475static unsigned 476num_inst_dst_regs(unsigned opcode) 477{ 478 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode); 479 return info->num_dst; 480} 481 482static unsigned 483num_inst_src_regs(unsigned opcode) 484{ 485 const tgsi_opcode_info* info = tgsi_get_opcode_info(opcode); 486 return info->is_tex ? info->num_src - 1 : info->num_src; 487} 488 489glsl_to_tgsi_instruction * 490glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op, 491 st_dst_reg dst, 492 st_src_reg src0, st_src_reg src1, st_src_reg src2) 493{ 494 glsl_to_tgsi_instruction *inst = new(mem_ctx) glsl_to_tgsi_instruction(); 495 int num_reladdr = 0, i; 496 497 op = get_opcode(ir, op, dst, src0, src1); 498 499 /* If we have to do relative addressing, we want to load the ARL 500 * reg directly for one of the regs, and preload the other reladdr 501 * sources into temps. 
502 */ 503 num_reladdr += dst.reladdr != NULL; 504 num_reladdr += src0.reladdr != NULL; 505 num_reladdr += src1.reladdr != NULL; 506 num_reladdr += src2.reladdr != NULL; 507 508 reladdr_to_temp(ir, &src2, &num_reladdr); 509 reladdr_to_temp(ir, &src1, &num_reladdr); 510 reladdr_to_temp(ir, &src0, &num_reladdr); 511 512 if (dst.reladdr) { 513 emit_arl(ir, address_reg, *dst.reladdr); 514 num_reladdr--; 515 } 516 assert(num_reladdr == 0); 517 518 inst->op = op; 519 inst->dst = dst; 520 inst->src[0] = src0; 521 inst->src[1] = src1; 522 inst->src[2] = src2; 523 inst->ir = ir; 524 inst->dead_mask = 0; 525 526 inst->function = NULL; 527 528 if (op == TGSI_OPCODE_ARL || op == TGSI_OPCODE_UARL) 529 this->num_address_regs = 1; 530 531 /* Update indirect addressing status used by TGSI */ 532 if (dst.reladdr) { 533 switch(dst.file) { 534 case PROGRAM_TEMPORARY: 535 this->indirect_addr_temps = true; 536 break; 537 case PROGRAM_LOCAL_PARAM: 538 case PROGRAM_ENV_PARAM: 539 case PROGRAM_STATE_VAR: 540 case PROGRAM_NAMED_PARAM: 541 case PROGRAM_CONSTANT: 542 case PROGRAM_UNIFORM: 543 this->indirect_addr_consts = true; 544 break; 545 case PROGRAM_IMMEDIATE: 546 assert(!"immediates should not have indirect addressing"); 547 break; 548 default: 549 break; 550 } 551 } 552 else { 553 for (i=0; i<3; i++) { 554 if(inst->src[i].reladdr) { 555 switch(inst->src[i].file) { 556 case PROGRAM_TEMPORARY: 557 this->indirect_addr_temps = true; 558 break; 559 case PROGRAM_LOCAL_PARAM: 560 case PROGRAM_ENV_PARAM: 561 case PROGRAM_STATE_VAR: 562 case PROGRAM_NAMED_PARAM: 563 case PROGRAM_CONSTANT: 564 case PROGRAM_UNIFORM: 565 this->indirect_addr_consts = true; 566 break; 567 case PROGRAM_IMMEDIATE: 568 assert(!"immediates should not have indirect addressing"); 569 break; 570 default: 571 break; 572 } 573 } 574 } 575 } 576 577 this->instructions.push_tail(inst); 578 579 if (native_integers) 580 try_emit_float_set(ir, op, dst); 581 582 return inst; 583} 584 585 586glsl_to_tgsi_instruction * 
587glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op, 588 st_dst_reg dst, st_src_reg src0, st_src_reg src1) 589{ 590 return emit(ir, op, dst, src0, src1, undef_src); 591} 592 593glsl_to_tgsi_instruction * 594glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op, 595 st_dst_reg dst, st_src_reg src0) 596{ 597 assert(dst.writemask != 0); 598 return emit(ir, op, dst, src0, undef_src, undef_src); 599} 600 601glsl_to_tgsi_instruction * 602glsl_to_tgsi_visitor::emit(ir_instruction *ir, unsigned op) 603{ 604 return emit(ir, op, undef_dst, undef_src, undef_src, undef_src); 605} 606 607 /** 608 * Emits the code to convert the result of float SET instructions to integers. 609 */ 610void 611glsl_to_tgsi_visitor::try_emit_float_set(ir_instruction *ir, unsigned op, 612 st_dst_reg dst) 613{ 614 if ((op == TGSI_OPCODE_SEQ || 615 op == TGSI_OPCODE_SNE || 616 op == TGSI_OPCODE_SGE || 617 op == TGSI_OPCODE_SLT)) 618 { 619 st_src_reg src = st_src_reg(dst); 620 src.negate = ~src.negate; 621 dst.type = GLSL_TYPE_FLOAT; 622 emit(ir, TGSI_OPCODE_F2I, dst, src); 623 } 624} 625 626/** 627 * Determines whether to use an integer, unsigned integer, or float opcode 628 * based on the operands and input opcode, then emits the result. 629 */ 630unsigned 631glsl_to_tgsi_visitor::get_opcode(ir_instruction *ir, unsigned op, 632 st_dst_reg dst, 633 st_src_reg src0, st_src_reg src1) 634{ 635 int type = GLSL_TYPE_FLOAT; 636 637 if (src0.type == GLSL_TYPE_FLOAT || src1.type == GLSL_TYPE_FLOAT) 638 type = GLSL_TYPE_FLOAT; 639 else if (native_integers) 640 type = src0.type == GLSL_TYPE_BOOL ? 
GLSL_TYPE_INT : src0.type; 641 642#define case4(c, f, i, u) \ 643 case TGSI_OPCODE_##c: \ 644 if (type == GLSL_TYPE_INT) op = TGSI_OPCODE_##i; \ 645 else if (type == GLSL_TYPE_UINT) op = TGSI_OPCODE_##u; \ 646 else op = TGSI_OPCODE_##f; \ 647 break; 648#define case3(f, i, u) case4(f, f, i, u) 649#define case2fi(f, i) case4(f, f, i, i) 650#define case2iu(i, u) case4(i, LAST, i, u) 651 652 switch(op) { 653 case2fi(ADD, UADD); 654 case2fi(MUL, UMUL); 655 case2fi(MAD, UMAD); 656 case3(DIV, IDIV, UDIV); 657 case3(MAX, IMAX, UMAX); 658 case3(MIN, IMIN, UMIN); 659 case2iu(MOD, UMOD); 660 661 case2fi(SEQ, USEQ); 662 case2fi(SNE, USNE); 663 case3(SGE, ISGE, USGE); 664 case3(SLT, ISLT, USLT); 665 666 case2iu(ISHR, USHR); 667 668 case2fi(SSG, ISSG); 669 case3(ABS, IABS, IABS); 670 671 default: break; 672 } 673 674 assert(op != TGSI_OPCODE_LAST); 675 return op; 676} 677 678glsl_to_tgsi_instruction * 679glsl_to_tgsi_visitor::emit_dp(ir_instruction *ir, 680 st_dst_reg dst, st_src_reg src0, st_src_reg src1, 681 unsigned elements) 682{ 683 static const unsigned dot_opcodes[] = { 684 TGSI_OPCODE_DP2, TGSI_OPCODE_DP3, TGSI_OPCODE_DP4 685 }; 686 687 return emit(ir, dot_opcodes[elements - 2], dst, src0, src1); 688} 689 690/** 691 * Emits TGSI scalar opcodes to produce unique answers across channels. 692 * 693 * Some TGSI opcodes are scalar-only, like ARB_fp/vp. The src X 694 * channel determines the result across all channels. So to do a vec4 695 * of this operation, we want to emit a scalar per source channel used 696 * to produce dest channels. 697 */ 698void 699glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op, 700 st_dst_reg dst, 701 st_src_reg orig_src0, st_src_reg orig_src1) 702{ 703 int i, j; 704 int done_mask = ~dst.writemask; 705 706 /* TGSI RCP is a scalar operation splatting results to all channels, 707 * like ARB_fp/vp. So emit as many RCPs as necessary to cover our 708 * dst channels. 
709 */ 710 for (i = 0; i < 4; i++) { 711 GLuint this_mask = (1 << i); 712 glsl_to_tgsi_instruction *inst; 713 st_src_reg src0 = orig_src0; 714 st_src_reg src1 = orig_src1; 715 716 if (done_mask & this_mask) 717 continue; 718 719 GLuint src0_swiz = GET_SWZ(src0.swizzle, i); 720 GLuint src1_swiz = GET_SWZ(src1.swizzle, i); 721 for (j = i + 1; j < 4; j++) { 722 /* If there is another enabled component in the destination that is 723 * derived from the same inputs, generate its value on this pass as 724 * well. 725 */ 726 if (!(done_mask & (1 << j)) && 727 GET_SWZ(src0.swizzle, j) == src0_swiz && 728 GET_SWZ(src1.swizzle, j) == src1_swiz) { 729 this_mask |= (1 << j); 730 } 731 } 732 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz, 733 src0_swiz, src0_swiz); 734 src1.swizzle = MAKE_SWIZZLE4(src1_swiz, src1_swiz, 735 src1_swiz, src1_swiz); 736 737 inst = emit(ir, op, dst, src0, src1); 738 inst->dst.writemask = this_mask; 739 done_mask |= this_mask; 740 } 741} 742 743void 744glsl_to_tgsi_visitor::emit_scalar(ir_instruction *ir, unsigned op, 745 st_dst_reg dst, st_src_reg src0) 746{ 747 st_src_reg undef = undef_src; 748 749 undef.swizzle = SWIZZLE_XXXX; 750 751 emit_scalar(ir, op, dst, src0, undef); 752} 753 754void 755glsl_to_tgsi_visitor::emit_arl(ir_instruction *ir, 756 st_dst_reg dst, st_src_reg src0) 757{ 758 int op = TGSI_OPCODE_ARL; 759 760 if (src0.type == GLSL_TYPE_INT || src0.type == GLSL_TYPE_UINT) 761 op = TGSI_OPCODE_UARL; 762 763 emit(NULL, op, dst, src0); 764} 765 766/** 767 * Emit an TGSI_OPCODE_SCS instruction 768 * 769 * The \c SCS opcode functions a bit differently than the other TGSI opcodes. 770 * Instead of splatting its result across all four components of the 771 * destination, it writes one value to the \c x component and another value to 772 * the \c y component. 773 * 774 * \param ir IR instruction being processed 775 * \param op Either \c TGSI_OPCODE_SIN or \c TGSI_OPCODE_COS depending 776 * on which value is desired. 
777 * \param dst Destination register 778 * \param src Source register 779 */ 780void 781glsl_to_tgsi_visitor::emit_scs(ir_instruction *ir, unsigned op, 782 st_dst_reg dst, 783 const st_src_reg &src) 784{ 785 /* Vertex programs cannot use the SCS opcode. 786 */ 787 if (this->prog->Target == GL_VERTEX_PROGRAM_ARB) { 788 emit_scalar(ir, op, dst, src); 789 return; 790 } 791 792 const unsigned component = (op == TGSI_OPCODE_SIN) ? 0 : 1; 793 const unsigned scs_mask = (1U << component); 794 int done_mask = ~dst.writemask; 795 st_src_reg tmp; 796 797 assert(op == TGSI_OPCODE_SIN || op == TGSI_OPCODE_COS); 798 799 /* If there are compnents in the destination that differ from the component 800 * that will be written by the SCS instrution, we'll need a temporary. 801 */ 802 if (scs_mask != unsigned(dst.writemask)) { 803 tmp = get_temp(glsl_type::vec4_type); 804 } 805 806 for (unsigned i = 0; i < 4; i++) { 807 unsigned this_mask = (1U << i); 808 st_src_reg src0 = src; 809 810 if ((done_mask & this_mask) != 0) 811 continue; 812 813 /* The source swizzle specified which component of the source generates 814 * sine / cosine for the current component in the destination. The SCS 815 * instruction requires that this value be swizzle to the X component. 816 * Replace the current swizzle with a swizzle that puts the source in 817 * the X component. 818 */ 819 unsigned src0_swiz = GET_SWZ(src.swizzle, i); 820 821 src0.swizzle = MAKE_SWIZZLE4(src0_swiz, src0_swiz, 822 src0_swiz, src0_swiz); 823 for (unsigned j = i + 1; j < 4; j++) { 824 /* If there is another enabled component in the destination that is 825 * derived from the same inputs, generate its value on this pass as 826 * well. 827 */ 828 if (!(done_mask & (1 << j)) && 829 GET_SWZ(src0.swizzle, j) == src0_swiz) { 830 this_mask |= (1 << j); 831 } 832 } 833 834 if (this_mask != scs_mask) { 835 glsl_to_tgsi_instruction *inst; 836 st_dst_reg tmp_dst = st_dst_reg(tmp); 837 838 /* Emit the SCS instruction. 
839 */ 840 inst = emit(ir, TGSI_OPCODE_SCS, tmp_dst, src0); 841 inst->dst.writemask = scs_mask; 842 843 /* Move the result of the SCS instruction to the desired location in 844 * the destination. 845 */ 846 tmp.swizzle = MAKE_SWIZZLE4(component, component, 847 component, component); 848 inst = emit(ir, TGSI_OPCODE_SCS, dst, tmp); 849 inst->dst.writemask = this_mask; 850 } else { 851 /* Emit the SCS instruction to write directly to the destination. 852 */ 853 glsl_to_tgsi_instruction *inst = emit(ir, TGSI_OPCODE_SCS, dst, src0); 854 inst->dst.writemask = scs_mask; 855 } 856 857 done_mask |= this_mask; 858 } 859} 860 861int 862glsl_to_tgsi_visitor::add_constant(gl_register_file file, 863 gl_constant_value values[4], int size, int datatype, 864 GLuint *swizzle_out) 865{ 866 if (file == PROGRAM_CONSTANT) { 867 return _mesa_add_typed_unnamed_constant(this->prog->Parameters, values, 868 size, datatype, swizzle_out); 869 } else { 870 int index = 0; 871 immediate_storage *entry; 872 assert(file == PROGRAM_IMMEDIATE); 873 874 /* Search immediate storage to see if we already have an identical 875 * immediate that we can use instead of adding a duplicate entry. 876 */ 877 foreach_iter(exec_list_iterator, iter, this->immediates) { 878 entry = (immediate_storage *)iter.get(); 879 880 if (entry->size == size && 881 entry->type == datatype && 882 !memcmp(entry->values, values, size * sizeof(gl_constant_value))) { 883 return index; 884 } 885 index++; 886 } 887 888 /* Add this immediate to the list. 
*/ 889 entry = new(mem_ctx) immediate_storage(values, size, datatype); 890 this->immediates.push_tail(entry); 891 this->num_immediates++; 892 return index; 893 } 894} 895 896st_src_reg 897glsl_to_tgsi_visitor::st_src_reg_for_float(float val) 898{ 899 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_FLOAT); 900 union gl_constant_value uval; 901 902 uval.f = val; 903 src.index = add_constant(src.file, &uval, 1, GL_FLOAT, &src.swizzle); 904 905 return src; 906} 907 908st_src_reg 909glsl_to_tgsi_visitor::st_src_reg_for_int(int val) 910{ 911 st_src_reg src(PROGRAM_IMMEDIATE, -1, GLSL_TYPE_INT); 912 union gl_constant_value uval; 913 914 assert(native_integers); 915 916 uval.i = val; 917 src.index = add_constant(src.file, &uval, 1, GL_INT, &src.swizzle); 918 919 return src; 920} 921 922st_src_reg 923glsl_to_tgsi_visitor::st_src_reg_for_type(int type, int val) 924{ 925 if (native_integers) 926 return type == GLSL_TYPE_FLOAT ? st_src_reg_for_float(val) : 927 st_src_reg_for_int(val); 928 else 929 return st_src_reg_for_float(val); 930} 931 932static int 933type_size(const struct glsl_type *type) 934{ 935 unsigned int i; 936 int size; 937 938 switch (type->base_type) { 939 case GLSL_TYPE_UINT: 940 case GLSL_TYPE_INT: 941 case GLSL_TYPE_FLOAT: 942 case GLSL_TYPE_BOOL: 943 if (type->is_matrix()) { 944 return type->matrix_columns; 945 } else { 946 /* Regardless of size of vector, it gets a vec4. This is bad 947 * packing for things like floats, but otherwise arrays become a 948 * mess. Hopefully a later pass over the code can pack scalars 949 * down if appropriate. 950 */ 951 return 1; 952 } 953 case GLSL_TYPE_ARRAY: 954 assert(type->length > 0); 955 return type_size(type->fields.array) * type->length; 956 case GLSL_TYPE_STRUCT: 957 size = 0; 958 for (i = 0; i < type->length; i++) { 959 size += type_size(type->fields.structure[i].type); 960 } 961 return size; 962 case GLSL_TYPE_SAMPLER: 963 /* Samplers take up one slot in UNIFORMS[], but they're baked in 964 * at link time. 
965 */ 966 return 1; 967 default: 968 assert(0); 969 return 0; 970 } 971} 972 973/** 974 * In the initial pass of codegen, we assign temporary numbers to 975 * intermediate results. (not SSA -- variable assignments will reuse 976 * storage). 977 */ 978st_src_reg 979glsl_to_tgsi_visitor::get_temp(const glsl_type *type) 980{ 981 st_src_reg src; 982 983 src.type = native_integers ? type->base_type : GLSL_TYPE_FLOAT; 984 src.file = PROGRAM_TEMPORARY; 985 src.index = next_temp; 986 src.reladdr = NULL; 987 next_temp += type_size(type); 988 989 if (type->is_array() || type->is_record()) { 990 src.swizzle = SWIZZLE_NOOP; 991 } else { 992 src.swizzle = swizzle_for_size(type->vector_elements); 993 } 994 src.negate = 0; 995 996 return src; 997} 998 999variable_storage * 1000glsl_to_tgsi_visitor::find_variable_storage(ir_variable *var) 1001{ 1002 1003 variable_storage *entry; 1004 1005 foreach_iter(exec_list_iterator, iter, this->variables) { 1006 entry = (variable_storage *)iter.get(); 1007 1008 if (entry->var == var) 1009 return entry; 1010 } 1011 1012 return NULL; 1013} 1014 1015void 1016glsl_to_tgsi_visitor::visit(ir_variable *ir) 1017{ 1018 if (strcmp(ir->name, "gl_FragCoord") == 0) { 1019 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog; 1020 1021 fp->OriginUpperLeft = ir->origin_upper_left; 1022 fp->PixelCenterInteger = ir->pixel_center_integer; 1023 } 1024 1025 if (ir->mode == ir_var_uniform && strncmp(ir->name, "gl_", 3) == 0) { 1026 unsigned int i; 1027 const ir_state_slot *const slots = ir->state_slots; 1028 assert(ir->state_slots != NULL); 1029 1030 /* Check if this statevar's setup in the STATE file exactly 1031 * matches how we'll want to reference it as a 1032 * struct/array/whatever. If not, then we need to move it into 1033 * temporary storage and hope that it'll get copy-propagated 1034 * out. 
1035 */ 1036 for (i = 0; i < ir->num_state_slots; i++) { 1037 if (slots[i].swizzle != SWIZZLE_XYZW) { 1038 break; 1039 } 1040 } 1041 1042 variable_storage *storage; 1043 st_dst_reg dst; 1044 if (i == ir->num_state_slots) { 1045 /* We'll set the index later. */ 1046 storage = new(mem_ctx) variable_storage(ir, PROGRAM_STATE_VAR, -1); 1047 this->variables.push_tail(storage); 1048 1049 dst = undef_dst; 1050 } else { 1051 /* The variable_storage constructor allocates slots based on the size 1052 * of the type. However, this had better match the number of state 1053 * elements that we're going to copy into the new temporary. 1054 */ 1055 assert((int) ir->num_state_slots == type_size(ir->type)); 1056 1057 storage = new(mem_ctx) variable_storage(ir, PROGRAM_TEMPORARY, 1058 this->next_temp); 1059 this->variables.push_tail(storage); 1060 this->next_temp += type_size(ir->type); 1061 1062 dst = st_dst_reg(st_src_reg(PROGRAM_TEMPORARY, storage->index, 1063 native_integers ? ir->type->base_type : GLSL_TYPE_FLOAT)); 1064 } 1065 1066 1067 for (unsigned int i = 0; i < ir->num_state_slots; i++) { 1068 int index = _mesa_add_state_reference(this->prog->Parameters, 1069 (gl_state_index *)slots[i].tokens); 1070 1071 if (storage->file == PROGRAM_STATE_VAR) { 1072 if (storage->index == -1) { 1073 storage->index = index; 1074 } else { 1075 assert(index == storage->index + (int)i); 1076 } 1077 } else { 1078 st_src_reg src(PROGRAM_STATE_VAR, index, 1079 native_integers ? ir->type->base_type : GLSL_TYPE_FLOAT); 1080 src.swizzle = slots[i].swizzle; 1081 emit(ir, TGSI_OPCODE_MOV, dst, src); 1082 /* even a float takes up a whole vec4 reg in a struct/array. 
*/ 1083 dst.index++; 1084 } 1085 } 1086 1087 if (storage->file == PROGRAM_TEMPORARY && 1088 dst.index != storage->index + (int) ir->num_state_slots) { 1089 fail_link(this->shader_program, 1090 "failed to load builtin uniform `%s' (%d/%d regs loaded)\n", 1091 ir->name, dst.index - storage->index, 1092 type_size(ir->type)); 1093 } 1094 } 1095} 1096 1097void 1098glsl_to_tgsi_visitor::visit(ir_loop *ir) 1099{ 1100 ir_dereference_variable *counter = NULL; 1101 1102 if (ir->counter != NULL) 1103 counter = new(ir) ir_dereference_variable(ir->counter); 1104 1105 if (ir->from != NULL) { 1106 assert(ir->counter != NULL); 1107 1108 ir_assignment *a = new(ir) ir_assignment(counter, ir->from, NULL); 1109 1110 a->accept(this); 1111 delete a; 1112 } 1113 1114 emit(NULL, TGSI_OPCODE_BGNLOOP); 1115 1116 if (ir->to) { 1117 ir_expression *e = 1118 new(ir) ir_expression(ir->cmp, glsl_type::bool_type, 1119 counter, ir->to); 1120 ir_if *if_stmt = new(ir) ir_if(e); 1121 1122 ir_loop_jump *brk = new(ir) ir_loop_jump(ir_loop_jump::jump_break); 1123 1124 if_stmt->then_instructions.push_tail(brk); 1125 1126 if_stmt->accept(this); 1127 1128 delete if_stmt; 1129 delete e; 1130 delete brk; 1131 } 1132 1133 visit_exec_list(&ir->body_instructions, this); 1134 1135 if (ir->increment) { 1136 ir_expression *e = 1137 new(ir) ir_expression(ir_binop_add, counter->type, 1138 counter, ir->increment); 1139 1140 ir_assignment *a = new(ir) ir_assignment(counter, e, NULL); 1141 1142 a->accept(this); 1143 delete a; 1144 delete e; 1145 } 1146 1147 emit(NULL, TGSI_OPCODE_ENDLOOP); 1148} 1149 1150void 1151glsl_to_tgsi_visitor::visit(ir_loop_jump *ir) 1152{ 1153 switch (ir->mode) { 1154 case ir_loop_jump::jump_break: 1155 emit(NULL, TGSI_OPCODE_BRK); 1156 break; 1157 case ir_loop_jump::jump_continue: 1158 emit(NULL, TGSI_OPCODE_CONT); 1159 break; 1160 } 1161} 1162 1163 1164void 1165glsl_to_tgsi_visitor::visit(ir_function_signature *ir) 1166{ 1167 assert(0); 1168 (void)ir; 1169} 1170 1171void 
glsl_to_tgsi_visitor::visit(ir_function *ir)
{
   /* Ignore function bodies other than main() -- we shouldn't see calls to
    * them since they should all be inlined before we get to glsl_to_tgsi.
    */
   if (strcmp(ir->name, "main") == 0) {
      const ir_function_signature *sig;
      exec_list empty;

      /* main() takes no parameters, so the empty actual-parameter list
       * matches its only signature.
       */
      sig = ir->matching_signature(&empty);

      assert(sig);

      foreach_iter(exec_list_iterator, iter, sig->body) {
         ir_instruction *ir = (ir_instruction *)iter.get();

         ir->accept(this);
      }
   }
}

/**
 * Try to emit a single MAD for ADD(MUL(a, b), c).
 *
 * \param ir           the ir_binop_add expression being visited
 * \param mul_operand  which operand of \c ir (0 or 1) to test for being
 *                     the multiply
 * \return true if a MAD was emitted and this->result holds its value;
 *         false if that operand is not a multiply (nothing emitted).
 */
bool
glsl_to_tgsi_visitor::try_emit_mad(ir_expression *ir, int mul_operand)
{
   int nonmul_operand = 1 - mul_operand;
   st_src_reg a, b, c;
   st_dst_reg result_dst;

   ir_expression *expr = ir->operands[mul_operand]->as_expression();
   if (!expr || expr->operation != ir_binop_mul)
      return false;

   /* Evaluate the three MAD sources; each accept() leaves its register
    * in this->result.
    */
   expr->operands[0]->accept(this);
   a = this->result;
   expr->operands[1]->accept(this);
   b = this->result;
   ir->operands[nonmul_operand]->accept(this);
   c = this->result;

   this->result = get_temp(ir->type);
   result_dst = st_dst_reg(this->result);
   result_dst.writemask = (1 << ir->type->vector_elements) - 1;
   emit(ir, TGSI_OPCODE_MAD, result_dst, a, b, c);

   return true;
}

/**
 * Emit MAD(a, -b, a) instead of AND(a, NOT(b))
 *
 * The logic values are 1.0 for true and 0.0 for false.  Logical-and is
 * implemented using multiplication, and logical-or is implemented using
 * addition.  Logical-not can be implemented as (true - x), or (1.0 - x).
 * As result, the logical expression (a & !b) can be rewritten as:
 *
 *     - a * !b
 *     - a * (1 - b)
 *     - (a * 1) - (a * b)
 *     - a + -(a * b)
 *     - a + (a * -b)
 *
 * This final expression can be implemented as a single MAD(a, -b, a)
 * instruction.
 */
bool
glsl_to_tgsi_visitor::try_emit_mad_for_and_not(ir_expression *ir, int try_operand)
{
   const int other_operand = 1 - try_operand;
   st_src_reg a, b;

   /* The rewrite only applies when the tried operand is a logical-not. */
   ir_expression *expr = ir->operands[try_operand]->as_expression();
   if (!expr || expr->operation != ir_unop_logic_not)
      return false;

   ir->operands[other_operand]->accept(this);
   a = this->result;
   expr->operands[0]->accept(this);
   b = this->result;

   /* Toggle the per-channel negate bits: !b becomes -b in the MAD below. */
   b.negate = ~b.negate;

   this->result = get_temp(ir->type);
   emit(ir, TGSI_OPCODE_MAD, st_dst_reg(this->result), a, b, a);

   return true;
}

/**
 * Try to fold a saturate() wrapped around this expression into the
 * instruction that produces the value, avoiding a separate clamped MOV.
 *
 * \return true if the saturate was handled and this->result is set.
 */
bool
glsl_to_tgsi_visitor::try_emit_sat(ir_expression *ir)
{
   /* Saturates were only introduced to vertex programs in
    * NV_vertex_program3, so don't give them to drivers in the VP.
    */
   if (this->prog->Target == GL_VERTEX_PROGRAM_ARB)
      return false;

   ir_rvalue *sat_src = ir->as_rvalue_to_saturate();
   if (!sat_src)
      return false;

   sat_src->accept(this);
   st_src_reg src = this->result;

   /* If we generated an expression instruction into a temporary in
    * processing the saturate's operand, apply the saturate to that
    * instruction.  Otherwise, generate a MOV to do the saturate.
    *
    * Note that we have to be careful to only do this optimization if
    * the instruction in question was what generated src->result.  For
    * example, ir_dereference_array might generate a MUL instruction
    * to create the reladdr, and return us a src reg using that
    * reladdr.  That MUL result is not the value we're trying to
    * saturate.
    */
   ir_expression *sat_src_expr = sat_src->as_expression();
   if (sat_src_expr && (sat_src_expr->operation == ir_binop_mul ||
                        sat_src_expr->operation == ir_binop_add ||
                        sat_src_expr->operation == ir_binop_dot)) {
      /* The list tail is the instruction that just produced the operand;
       * saturate its destination for free.
       */
      glsl_to_tgsi_instruction *new_inst;
      new_inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail();
      new_inst->saturate = true;
   } else {
      /* Fall back to a saturating MOV into a fresh temporary. */
      this->result = get_temp(ir->type);
      st_dst_reg result_dst = st_dst_reg(this->result);
      result_dst.writemask = (1 << ir->type->vector_elements) - 1;
      glsl_to_tgsi_instruction *inst;
      inst = emit(ir, TGSI_OPCODE_MOV, result_dst, src);
      inst->saturate = true;
   }

   return true;
}

/**
 * Resolve relative addressing on *reg: load the address register with the
 * reladdr value and, while more than one reladdr operand remains
 * (*num_reladdr > 1), copy the value through a temporary so that only the
 * final operand actually reads through the address register.
 */
void
glsl_to_tgsi_visitor::reladdr_to_temp(ir_instruction *ir,
                                      st_src_reg *reg, int *num_reladdr)
{
   if (!reg->reladdr)
      return;

   emit_arl(ir, address_reg, *reg->reladdr);

   if (*num_reladdr != 1) {
      st_src_reg temp = get_temp(glsl_type::vec4_type);

      emit(ir, TGSI_OPCODE_MOV, st_dst_reg(temp), *reg);
      *reg = temp;
   }

   (*num_reladdr)--;
}

void
glsl_to_tgsi_visitor::visit(ir_expression *ir)
{
   unsigned int operand;
   st_src_reg op[Elements(ir->operands)];
   st_src_reg result_src;
   st_dst_reg result_dst;

   /* Quick peephole: Emit MAD(a, b, c) instead of ADD(MUL(a, b), c)
    */
   if (ir->operation == ir_binop_add) {
      if (try_emit_mad(ir, 1))
         return;
      if (try_emit_mad(ir, 0))
         return;
   }

   /* Quick peephole: Emit MAD(a, -b, a) instead of AND(a, NOT(b))
    */
   if (ir->operation == ir_binop_logic_and) {
      if (try_emit_mad_for_and_not(ir, 1))
         return;
      if (try_emit_mad_for_and_not(ir, 0))
         return;
   }

   if (try_emit_sat(ir))
      return;

   if (ir->operation == ir_quadop_vector)
      assert(!"ir_quadop_vector should have been lowered");

   for (operand = 0; operand <
        ir->get_num_operands(); operand++) {
      this->result.file = PROGRAM_UNDEFINED;
      ir->operands[operand]->accept(this);
      if (this->result.file == PROGRAM_UNDEFINED) {
         ir_print_visitor v;
         printf("Failed to get tree for expression operand:\n");
         ir->operands[operand]->accept(&v);
         exit(1);
      }
      op[operand] = this->result;

      /* Matrix expression operands should have been broken down to vector
       * operations already.
       */
      assert(!ir->operands[operand]->type->is_matrix());
   }

   int vector_elements = ir->operands[0]->type->vector_elements;
   if (ir->operands[1]) {
      vector_elements = MAX2(vector_elements,
                             ir->operands[1]->type->vector_elements);
   }

   this->result.file = PROGRAM_UNDEFINED;

   /* Storage for our result.  Ideally for an assignment we'd be using
    * the actual storage for the result here, instead.
    */
   result_src = get_temp(ir->type);
   /* convenience for the emit functions below. */
   result_dst = st_dst_reg(result_src);
   /* Limit writes to the channels that will be used by result_src later.
    * This does limit this temp's use as a temporary for multi-instruction
    * sequences.
    */
   result_dst.writemask = (1 << ir->type->vector_elements) - 1;

   switch (ir->operation) {
   case ir_unop_logic_not:
      if (result_dst.type != GLSL_TYPE_FLOAT)
         emit(ir, TGSI_OPCODE_NOT, result_dst, op[0]);
      else {
         /* Previously 'SEQ dst, src, 0.0' was used for this.  However, many
          * older GPUs implement SEQ using multiple instructions (i915 uses two
          * SGE instructions and a MUL instruction).  Since our logic values are
          * 0.0 and 1.0, 1-x also implements !x.
          */
         op[0].negate = ~op[0].negate;
         emit(ir, TGSI_OPCODE_ADD, result_dst, op[0], st_src_reg_for_float(1.0));
      }
      break;
   case ir_unop_neg:
      if (result_dst.type == GLSL_TYPE_INT || result_dst.type == GLSL_TYPE_UINT)
         emit(ir, TGSI_OPCODE_INEG, result_dst, op[0]);
      else {
         /* Float negation is free: just flip the source negate bits. */
         op[0].negate = ~op[0].negate;
         result_src = op[0];
      }
      break;
   case ir_unop_abs:
      emit(ir, TGSI_OPCODE_ABS, result_dst, op[0]);
      break;
   case ir_unop_sign:
      emit(ir, TGSI_OPCODE_SSG, result_dst, op[0]);
      break;
   case ir_unop_rcp:
      emit_scalar(ir, TGSI_OPCODE_RCP, result_dst, op[0]);
      break;

   case ir_unop_exp2:
      emit_scalar(ir, TGSI_OPCODE_EX2, result_dst, op[0]);
      break;
   case ir_unop_exp:
   case ir_unop_log:
      assert(!"not reached: should be handled by ir_explog_to_explog2");
      break;
   case ir_unop_log2:
      emit_scalar(ir, TGSI_OPCODE_LG2, result_dst, op[0]);
      break;
   case ir_unop_sin:
      emit_scalar(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
      break;
   case ir_unop_cos:
      emit_scalar(ir, TGSI_OPCODE_COS, result_dst, op[0]);
      break;
   case ir_unop_sin_reduced:
      emit_scs(ir, TGSI_OPCODE_SIN, result_dst, op[0]);
      break;
   case ir_unop_cos_reduced:
      emit_scs(ir, TGSI_OPCODE_COS, result_dst, op[0]);
      break;

   case ir_unop_dFdx:
      emit(ir, TGSI_OPCODE_DDX, result_dst, op[0]);
      break;
   case ir_unop_dFdy:
      op[0].negate = ~op[0].negate;
      emit(ir, TGSI_OPCODE_DDY, result_dst, op[0]);
      break;

   case ir_unop_noise: {
      /* At some point, a motivated person could add a better
       * implementation of noise.  Currently not even the nvidia
       * binary drivers do anything more than this.  In any case, the
       * place to do this is in the GL state tracker, not the poor
       * driver.
       */
      emit(ir, TGSI_OPCODE_MOV, result_dst, st_src_reg_for_float(0.5));
      break;
   }

   case ir_binop_add:
      emit(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
      break;
   case ir_binop_sub:
      emit(ir, TGSI_OPCODE_SUB, result_dst, op[0], op[1]);
      break;

   case ir_binop_mul:
      emit(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
      break;
   case ir_binop_div:
      if (result_dst.type == GLSL_TYPE_FLOAT)
         assert(!"not reached: should be handled by ir_div_to_mul_rcp");
      else
         emit(ir, TGSI_OPCODE_DIV, result_dst, op[0], op[1]);
      break;
   case ir_binop_mod:
      if (result_dst.type == GLSL_TYPE_FLOAT)
         assert(!"ir_binop_mod should have been converted to b * fract(a/b)");
      else
         emit(ir, TGSI_OPCODE_MOD, result_dst, op[0], op[1]);
      break;

   /* Relational operators: SLT/SGE cover all four orderings by swapping or
    * reordering source operands.
    */
   case ir_binop_less:
      emit(ir, TGSI_OPCODE_SLT, result_dst, op[0], op[1]);
      break;
   case ir_binop_greater:
      emit(ir, TGSI_OPCODE_SLT, result_dst, op[1], op[0]);
      break;
   case ir_binop_lequal:
      emit(ir, TGSI_OPCODE_SGE, result_dst, op[1], op[0]);
      break;
   case ir_binop_gequal:
      emit(ir, TGSI_OPCODE_SGE, result_dst, op[0], op[1]);
      break;
   case ir_binop_equal:
      emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
      break;
   case ir_binop_nequal:
      emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
      break;
   case ir_binop_all_equal:
      /* "==" operator producing a scalar boolean. */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         st_src_reg temp = get_temp(native_integers ?
            glsl_type::get_instance(ir->operands[0]->type->base_type, 4, 1) :
            glsl_type::vec4_type);

         if (native_integers) {
            st_dst_reg temp_dst = st_dst_reg(temp);
            st_src_reg temp1 = st_src_reg(temp), temp2 = st_src_reg(temp);

            emit(ir, TGSI_OPCODE_SEQ, st_dst_reg(temp), op[0], op[1]);

            /* Emit 1-3 AND operations to combine the SEQ results. */
            switch (ir->operands[0]->type->vector_elements) {
            case 2:
               break;
            case 3:
               temp_dst.writemask = WRITEMASK_Y;
               temp1.swizzle = SWIZZLE_YYYY;
               temp2.swizzle = SWIZZLE_ZZZZ;
               emit(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
               break;
            case 4:
               temp_dst.writemask = WRITEMASK_X;
               temp1.swizzle = SWIZZLE_XXXX;
               temp2.swizzle = SWIZZLE_YYYY;
               emit(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
               temp_dst.writemask = WRITEMASK_Y;
               temp1.swizzle = SWIZZLE_ZZZZ;
               temp2.swizzle = SWIZZLE_WWWW;
               emit(ir, TGSI_OPCODE_AND, temp_dst, temp1, temp2);
            }

            /* Final AND of the x (pairwise) and y (upper-half) results. */
            temp1.swizzle = SWIZZLE_XXXX;
            temp2.swizzle = SWIZZLE_YYYY;
            emit(ir, TGSI_OPCODE_AND, result_dst, temp1, temp2);
         } else {
            emit(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);

            /* After the dot-product, the value will be an integer on the
             * range [0,4].  Zero becomes 1.0, and positive values become zero.
             */
            emit_dp(ir, result_dst, temp, temp, vector_elements);

            /* Negating the result of the dot-product gives values on the range
             * [-4, 0].  Zero becomes 1.0, and negative values become zero.
             * This is achieved using SGE.
             */
            st_src_reg sge_src = result_src;
            sge_src.negate = ~sge_src.negate;
            emit(ir, TGSI_OPCODE_SGE, result_dst, sge_src, st_src_reg_for_float(0.0));
         }
      } else {
         emit(ir, TGSI_OPCODE_SEQ, result_dst, op[0], op[1]);
      }
      break;
   case ir_binop_any_nequal:
      /* "!=" operator producing a scalar boolean.
       */
      if (ir->operands[0]->type->is_vector() ||
          ir->operands[1]->type->is_vector()) {
         st_src_reg temp = get_temp(native_integers ?
            glsl_type::get_instance(ir->operands[0]->type->base_type, 4, 1) :
            glsl_type::vec4_type);
         emit(ir, TGSI_OPCODE_SNE, st_dst_reg(temp), op[0], op[1]);

         if (native_integers) {
            st_dst_reg temp_dst = st_dst_reg(temp);
            st_src_reg temp1 = st_src_reg(temp), temp2 = st_src_reg(temp);

            /* Emit 1-3 OR operations to combine the SNE results. */
            switch (ir->operands[0]->type->vector_elements) {
            case 2:
               break;
            case 3:
               temp_dst.writemask = WRITEMASK_Y;
               temp1.swizzle = SWIZZLE_YYYY;
               temp2.swizzle = SWIZZLE_ZZZZ;
               emit(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
               break;
            case 4:
               temp_dst.writemask = WRITEMASK_X;
               temp1.swizzle = SWIZZLE_XXXX;
               temp2.swizzle = SWIZZLE_YYYY;
               emit(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
               temp_dst.writemask = WRITEMASK_Y;
               temp1.swizzle = SWIZZLE_ZZZZ;
               temp2.swizzle = SWIZZLE_WWWW;
               emit(ir, TGSI_OPCODE_OR, temp_dst, temp1, temp2);
            }

            /* Final OR of the x (pairwise) and y (upper-half) results. */
            temp1.swizzle = SWIZZLE_XXXX;
            temp2.swizzle = SWIZZLE_YYYY;
            emit(ir, TGSI_OPCODE_OR, result_dst, temp1, temp2);
         } else {
            /* After the dot-product, the value will be an integer on the
             * range [0,4].  Zero stays zero, and positive values become 1.0.
             */
            glsl_to_tgsi_instruction *const dp =
               emit_dp(ir, result_dst, temp, temp, vector_elements);
            if (this->prog->Target == GL_FRAGMENT_PROGRAM_ARB) {
               /* The clamping to [0,1] can be done for free in the fragment
                * shader with a saturate.
                */
               dp->saturate = true;
            } else {
               /* Negating the result of the dot-product gives values on the range
                * [-4, 0].  Zero stays zero, and negative values become 1.0.  This
                * is achieved using SLT.
                */
               st_src_reg slt_src = result_src;
               slt_src.negate = ~slt_src.negate;
               emit(ir, TGSI_OPCODE_SLT, result_dst, slt_src, st_src_reg_for_float(0.0));
            }
         }
      } else {
         emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
      }
      break;

   case ir_unop_any: {
      assert(ir->operands[0]->type->is_vector());

      /* After the dot-product, the value will be an integer on the
       * range [0,4].  Zero stays zero, and positive values become 1.0.
       */
      glsl_to_tgsi_instruction *const dp =
         emit_dp(ir, result_dst, op[0], op[0],
                 ir->operands[0]->type->vector_elements);
      if (this->prog->Target == GL_FRAGMENT_PROGRAM_ARB &&
          result_dst.type == GLSL_TYPE_FLOAT) {
         /* The clamping to [0,1] can be done for free in the fragment
          * shader with a saturate.
          */
         dp->saturate = true;
      } else if (result_dst.type == GLSL_TYPE_FLOAT) {
         /* Negating the result of the dot-product gives values on the range
          * [-4, 0].  Zero stays zero, and negative values become 1.0.  This
          * is achieved using SLT.
          */
         st_src_reg slt_src = result_src;
         slt_src.negate = ~slt_src.negate;
         emit(ir, TGSI_OPCODE_SLT, result_dst, slt_src, st_src_reg_for_float(0.0));
      }
      else {
         /* Use SNE 0 if integers are being used as boolean values. */
         emit(ir, TGSI_OPCODE_SNE, result_dst, result_src, st_src_reg_for_int(0));
      }
      break;
   }

   case ir_binop_logic_xor:
      if (native_integers)
         emit(ir, TGSI_OPCODE_XOR, result_dst, op[0], op[1]);
      else
         /* Float booleans are 0.0/1.0, so "not equal" implements xor. */
         emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], op[1]);
      break;

   case ir_binop_logic_or: {
      if (native_integers) {
         /* If integers are used as booleans, we can use an actual "or"
          * instruction.
          */
         assert(native_integers);
         emit(ir, TGSI_OPCODE_OR, result_dst, op[0], op[1]);
      } else {
         /* After the addition, the value will be an integer on the
          * range [0,2].  Zero stays zero, and positive values become 1.0.
          */
         glsl_to_tgsi_instruction *add =
            emit(ir, TGSI_OPCODE_ADD, result_dst, op[0], op[1]);
         if (this->prog->Target == GL_FRAGMENT_PROGRAM_ARB) {
            /* The clamping to [0,1] can be done for free in the fragment
             * shader with a saturate if floats are being used as boolean values.
             */
            add->saturate = true;
         } else {
            /* Negating the result of the addition gives values on the range
             * [-2, 0].  Zero stays zero, and negative values become 1.0.  This
             * is achieved using SLT.
             */
            st_src_reg slt_src = result_src;
            slt_src.negate = ~slt_src.negate;
            emit(ir, TGSI_OPCODE_SLT, result_dst, slt_src, st_src_reg_for_float(0.0));
         }
      }
      break;
   }

   case ir_binop_logic_and:
      /* If native integers are disabled, the bool args are stored as float 0.0
       * or 1.0, so "mul" gives us "and".  If they're enabled, just use the
       * actual AND opcode.
       */
      if (native_integers)
         emit(ir, TGSI_OPCODE_AND, result_dst, op[0], op[1]);
      else
         emit(ir, TGSI_OPCODE_MUL, result_dst, op[0], op[1]);
      break;

   case ir_binop_dot:
      assert(ir->operands[0]->type->is_vector());
      assert(ir->operands[0]->type == ir->operands[1]->type);
      emit_dp(ir, result_dst, op[0], op[1],
              ir->operands[0]->type->vector_elements);
      break;

   case ir_unop_sqrt:
      /* sqrt(x) = x * rsq(x). */
      emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
      emit(ir, TGSI_OPCODE_MUL, result_dst, result_src, op[0]);
      /* For incoming channels <= 0, set the result to 0.
       */
      op[0].negate = ~op[0].negate;
      emit(ir, TGSI_OPCODE_CMP, result_dst,
           op[0], result_src, st_src_reg_for_float(0.0));
      break;
   case ir_unop_rsq:
      emit_scalar(ir, TGSI_OPCODE_RSQ, result_dst, op[0]);
      break;
   case ir_unop_i2f:
      if (native_integers) {
         emit(ir, TGSI_OPCODE_I2F, result_dst, op[0]);
         break;
      }
      /* fallthrough to next case otherwise */
   case ir_unop_b2f:
      if (native_integers) {
         /* Native bool is ~0/0; AND with 1.0's bit pattern yields 1.0/0.0. */
         emit(ir, TGSI_OPCODE_AND, result_dst, op[0], st_src_reg_for_float(1.0));
         break;
      }
      /* fallthrough to next case otherwise */
   case ir_unop_i2u:
   case ir_unop_u2i:
      /* Converting between signed and unsigned integers is a no-op. */
      result_src = op[0];
      break;
   case ir_unop_b2i:
      if (native_integers) {
         /* Booleans are stored as integers using ~0 for true and 0 for false.
          * GLSL requires that int(bool) return 1 for true and 0 for false.
          * This conversion is done with AND, but it could be done with NEG.
          */
         emit(ir, TGSI_OPCODE_AND, result_dst, op[0], st_src_reg_for_int(1));
      } else {
         /* Booleans and integers are both stored as floats when native
          * integers are disabled.
          */
         result_src = op[0];
      }
      break;
   case ir_unop_f2i:
      if (native_integers)
         emit(ir, TGSI_OPCODE_F2I, result_dst, op[0]);
      else
         emit(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
      break;
   case ir_unop_f2b:
      emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], st_src_reg_for_float(0.0));
      break;
   case ir_unop_i2b:
      if (native_integers)
         emit(ir, TGSI_OPCODE_INEG, result_dst, op[0]);
      else
         emit(ir, TGSI_OPCODE_SNE, result_dst, op[0], st_src_reg_for_float(0.0));
      break;
   case ir_unop_trunc:
      emit(ir, TGSI_OPCODE_TRUNC, result_dst, op[0]);
      break;
   case ir_unop_ceil:
      /* ceil(x) = -floor(-x), expressed through the negate bits. */
      op[0].negate = ~op[0].negate;
      emit(ir, TGSI_OPCODE_FLR, result_dst, op[0]);
      result_src.negate = ~result_src.negate;
      break;
   case ir_unop_floor:
      emit(ir, TGSI_OPCODE_FLR, result_dst, op[0]);
      break;
   case ir_unop_round_even:
      emit(ir, TGSI_OPCODE_ROUND, result_dst, op[0]);
      break;
   case ir_unop_fract:
      emit(ir, TGSI_OPCODE_FRC, result_dst, op[0]);
      break;

   case ir_binop_min:
      emit(ir, TGSI_OPCODE_MIN, result_dst, op[0], op[1]);
      break;
   case ir_binop_max:
      emit(ir, TGSI_OPCODE_MAX, result_dst, op[0], op[1]);
      break;
   case ir_binop_pow:
      emit_scalar(ir, TGSI_OPCODE_POW, result_dst, op[0], op[1]);
      break;

   /* The following GLSL 1.30 cases each emit their opcode only when native
    * integers are available; otherwise they deliberately fall through to
    * the "GLSL 1.30 features unsupported" assertion after the last case.
    */
   case ir_unop_bit_not:
      if (native_integers) {
         emit(ir, TGSI_OPCODE_NOT, result_dst, op[0]);
         break;
      }
      /* fallthrough */
   case ir_unop_u2f:
      if (native_integers) {
         emit(ir, TGSI_OPCODE_U2F, result_dst, op[0]);
         break;
      }
      /* fallthrough */
   case ir_binop_lshift:
      if (native_integers) {
         emit(ir, TGSI_OPCODE_SHL, result_dst, op[0], op[1]);
         break;
      }
      /* fallthrough */
   case ir_binop_rshift:
      if (native_integers) {
         emit(ir, TGSI_OPCODE_ISHR, result_dst, op[0], op[1]);
         break;
      }
      /* fallthrough */
   case ir_binop_bit_and:
      if (native_integers) {
         emit(ir, TGSI_OPCODE_AND, result_dst, op[0], op[1]);
         break;
      }
      /* fallthrough */
   case ir_binop_bit_xor:
      if (native_integers) {
         emit(ir, TGSI_OPCODE_XOR, result_dst, op[0], op[1]);
         break;
      }
      /* fallthrough */
   case ir_binop_bit_or:
      if (native_integers) {
         emit(ir, TGSI_OPCODE_OR, result_dst, op[0], op[1]);
         break;
      }

      assert(!"GLSL 1.30 features unsupported");
      break;

   case ir_quadop_vector:
      /* This operation should have already been handled.
       */
      assert(!"Should not get here.");
      break;
   }

   this->result = result_src;
}


void
glsl_to_tgsi_visitor::visit(ir_swizzle *ir)
{
   st_src_reg src;
   int i;
   int swizzle[4];

   /* Note that this is only swizzles in expressions, not those on the left
    * hand side of an assignment, which do write masking.  See ir_assignment
    * for that.
    */

   ir->val->accept(this);
   src = this->result;
   assert(src.file != PROGRAM_UNDEFINED);

   /* Compose the IR swizzle with whatever swizzle src already carries. */
   for (i = 0; i < 4; i++) {
      if (i < ir->type->vector_elements) {
         switch (i) {
         case 0:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.x);
            break;
         case 1:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.y);
            break;
         case 2:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.z);
            break;
         case 3:
            swizzle[i] = GET_SWZ(src.swizzle, ir->mask.w);
            break;
         }
      } else {
         /* If the type is smaller than a vec4, replicate the last
          * channel out.
          */
         swizzle[i] = swizzle[ir->type->vector_elements - 1];
      }
   }

   src.swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);

   this->result = src;
}

/**
 * Produce an st_src_reg for a variable reference, creating (and caching)
 * its variable_storage entry on first use.
 */
void
glsl_to_tgsi_visitor::visit(ir_dereference_variable *ir)
{
   variable_storage *entry = find_variable_storage(ir->var);
   ir_variable *var = ir->var;

   if (!entry) {
      /* First reference: allocate storage in the register file implied by
       * the variable's mode.
       */
      switch (var->mode) {
      case ir_var_uniform:
         entry = new(mem_ctx) variable_storage(var, PROGRAM_UNIFORM,
                                               var->location);
         this->variables.push_tail(entry);
         break;
      case ir_var_in:
      case ir_var_inout:
         /* The linker assigns locations for varyings and attributes,
          * including deprecated builtins (like gl_Color), user-assigned
          * generic attributes (glBindAttribLocation), and
          * user-defined varyings.
          *
          * FINISHME: We would hit this path for function arguments.  Fix!
          */
         assert(var->location != -1);
         entry = new(mem_ctx) variable_storage(var,
                                               PROGRAM_INPUT,
                                               var->location);
         break;
      case ir_var_out:
         assert(var->location != -1);
         entry = new(mem_ctx) variable_storage(var,
                                               PROGRAM_OUTPUT,
                                               var->location);
         break;
      case ir_var_system_value:
         entry = new(mem_ctx) variable_storage(var,
                                               PROGRAM_SYSTEM_VALUE,
                                               var->location);
         break;
      case ir_var_auto:
      case ir_var_temporary:
         entry = new(mem_ctx) variable_storage(var, PROGRAM_TEMPORARY,
                                               this->next_temp);
         this->variables.push_tail(entry);

         next_temp += type_size(var->type);
         break;
      }

      if (!entry) {
         printf("Failed to make storage for %s\n", var->name);
         exit(1);
      }
   }

   this->result = st_src_reg(entry->file, entry->index, var->type);
   if (!native_integers)
      this->result.type = GLSL_TYPE_FLOAT;
}

void
glsl_to_tgsi_visitor::visit(ir_dereference_array *ir)
{
   ir_constant *index;
   st_src_reg src;
   int element_size = type_size(ir->type);

   index = ir->array_index->constant_expression_value();

   ir->array->accept(this);
   src = this->result;

   if (index) {
      /* Constant index: fold it into the register index directly. */
      src.index += index->value.i[0] * element_size;
   } else {
      /* Variable index array dereference.  It eats the "vec4" of the
       * base of the array and an index that offsets the TGSI register
       * index.
       */
      ir->array_index->accept(this);

      st_src_reg index_reg;

      if (element_size == 1) {
         index_reg = this->result;
      } else {
         /* Scale the index by the element size (in vec4 registers). */
         index_reg = get_temp(native_integers ?
                              glsl_type::int_type : glsl_type::float_type);

         emit(ir, TGSI_OPCODE_MUL, st_dst_reg(index_reg),
              this->result, st_src_reg_for_type(index_reg.type, element_size));
      }

      /* If there was already a relative address register involved, add the
       * new and the old together to get the new offset.
       */
      if (src.reladdr != NULL) {
         st_src_reg accum_reg = get_temp(native_integers ?
                                glsl_type::int_type : glsl_type::float_type);

         emit(ir, TGSI_OPCODE_ADD, st_dst_reg(accum_reg),
              index_reg, *src.reladdr);

         index_reg = accum_reg;
      }

      src.reladdr = ralloc(mem_ctx, st_src_reg);
      memcpy(src.reladdr, &index_reg, sizeof(index_reg));
   }

   /* If the type is smaller than a vec4, replicate the last channel out.
    */
   if (ir->type->is_scalar() || ir->type->is_vector())
      src.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      src.swizzle = SWIZZLE_NOOP;

   this->result = src;
}

/**
 * Resolve a struct-field access: visit the base record, then advance the
 * register index by the accumulated size of all fields before the one
 * being referenced.
 */
void
glsl_to_tgsi_visitor::visit(ir_dereference_record *ir)
{
   unsigned int i;
   const glsl_type *struct_type = ir->record->type;
   int offset = 0;

   ir->record->accept(this);

   for (i = 0; i < struct_type->length; i++) {
      if (strcmp(struct_type->fields.structure[i].name, ir->field) == 0)
         break;
      offset += type_size(struct_type->fields.structure[i].type);
   }

   /* If the type is smaller than a vec4, replicate the last channel out. */
   if (ir->type->is_scalar() || ir->type->is_vector())
      this->result.swizzle = swizzle_for_size(ir->type->vector_elements);
   else
      this->result.swizzle = SWIZZLE_NOOP;

   this->result.index += offset;
}

/**
 * We want to be careful in assignment setup to hit the actual storage
 * instead of potentially using a temporary like we might with the
 * ir_dereference handler.
 */
static st_dst_reg
get_assignment_lhs(ir_dereference *ir, glsl_to_tgsi_visitor *v)
{
   /* The LHS must be a dereference.  If the LHS is a variable indexed array
    * access of a vector, it must be separated into a series of conditional
    * moves before reaching this point (see ir_vec_index_to_cond_assign).
    */
   assert(ir->as_dereference());
   ir_dereference_array *deref_array = ir->as_dereference_array();
   if (deref_array) {
      assert(!deref_array->array->type->is_vector());
   }

   /* Use the rvalue deref handler for the most part.  We'll ignore
    * swizzles in it and write swizzles using writemask, though.
    */
   ir->accept(v);
   return st_dst_reg(v->result);
}

/**
 * Process the condition of a conditional assignment
 *
 * Examines the condition of a conditional assignment to generate the optimal
 * first operand of a \c CMP instruction.  If the condition is a relational
 * operator with 0 (e.g., \c ir_binop_less), the value being compared will be
 * used as the source for the \c CMP instruction.  Otherwise the comparison
 * is processed to a boolean result, and the boolean result is used as the
 * operand to the CMP instruction.
 */
bool
glsl_to_tgsi_visitor::process_move_condition(ir_rvalue *ir)
{
   ir_rvalue *src_ir = ir;
   bool negate = true;
   bool switch_order = false;

   ir_expression *const expr = ir->as_expression();
   if ((expr != NULL) && (expr->get_num_operands() == 2)) {
      bool zero_on_left = false;

      /* Detect comparisons against a literal zero on either side. */
      if (expr->operands[0]->is_zero()) {
         src_ir = expr->operands[1];
         zero_on_left = true;
      } else if (expr->operands[1]->is_zero()) {
         src_ir = expr->operands[0];
         zero_on_left = false;
      }

      /*      a is -  0  +            -  0  +
       * (a <  0)    T  F  F  ( a < 0)  T  F  F
       * (0 <  a)    F  F  T  (-a < 0)  F  F  T
       * (a <= 0)    T  T  F  (-a < 0)  F  F  T  (swap order of other operands)
       * (0 <= a)    F  T  T  ( a < 0)  T  F  F  (swap order of other operands)
       * (a >  0)    F  F  T  (-a < 0)  F  F  T
       * (0 >  a)    T  F  F  ( a < 0)  T  F  F
       * (a >= 0)    F  T  T  ( a < 0)  T  F  F  (swap order of other operands)
       * (0 >= a)    T  T  F  (-a < 0)  F  F  T  (swap order of other operands)
       *
       * Note that exchanging the order of 0 and 'a' in the comparison simply
       * means that the value of 'a' should be negated.
       */
      if (src_ir != ir) {
         /* Map each comparison-with-zero onto the canonical "x < 0" form
          * per the table above.
          */
         switch (expr->operation) {
         case ir_binop_less:
            switch_order = false;
            negate = zero_on_left;
            break;

         case ir_binop_greater:
            switch_order = false;
            negate = !zero_on_left;
            break;

         case ir_binop_lequal:
            switch_order = true;
            negate = !zero_on_left;
            break;

         case ir_binop_gequal:
            switch_order = true;
            negate = zero_on_left;
            break;

         default:
            /* This isn't the right kind of comparison afterall, so make sure
             * the whole condition is visited.
             */
            src_ir = ir;
            break;
         }
      }
   }

   src_ir->accept(this);

   /* We use the TGSI_OPCODE_CMP (a < 0 ? b : c) for conditional moves, and the
    * condition we produced is 0.0 or 1.0.  By flipping the sign, we can
    * choose which value TGSI_OPCODE_CMP produces without an extra instruction
    * computing the condition.
    */
   if (negate)
      this->result.negate = ~this->result.negate;

   return switch_order;
}

void
glsl_to_tgsi_visitor::visit(ir_assignment *ir)
{
   st_dst_reg l;
   st_src_reg r;
   int i;

   ir->rhs->accept(this);
   r = this->result;

   l = get_assignment_lhs(ir->lhs, this);

   /* FINISHME: This should really set to the correct maximal writemask for each
    * FINISHME: component written (in the loops below).  This case can only
    * FINISHME: occur for matrices, arrays, and structures.
    */
   if (ir->write_mask == 0) {
      assert(!ir->lhs->type->is_scalar() && !ir->lhs->type->is_vector());
      l.writemask = WRITEMASK_XYZW;
   } else if (ir->lhs->type->is_scalar() &&
              ir->lhs->variable_referenced()->mode == ir_var_out) {
      /* FINISHME: This hack makes writing to gl_FragDepth, which lives in the
       * FINISHME: W component of fragment shader output zero, work correctly.
       */
      l.writemask = WRITEMASK_XYZW;
   } else {
      int swizzles[4];
      int first_enabled_chan = 0;
      int rhs_chan = 0;

      l.writemask = ir->write_mask;

      /* Find the RHS channel backing the first written component, to use
       * as filler for the unwritten components.
       */
      for (int i = 0; i < 4; i++) {
         if (l.writemask & (1 << i)) {
            first_enabled_chan = GET_SWZ(r.swizzle, i);
            break;
         }
      }

      /* Swizzle a small RHS vector into the channels being written.
       *
       * glsl ir treats write_mask as dictating how many channels are
       * present on the RHS while TGSI treats write_mask as just
       * showing which channels of the vec4 RHS get written.
       */
      for (int i = 0; i < 4; i++) {
         if (l.writemask & (1 << i))
            swizzles[i] = GET_SWZ(r.swizzle, rhs_chan++);
         else
            swizzles[i] = first_enabled_chan;
      }
      r.swizzle = MAKE_SWIZZLE4(swizzles[0], swizzles[1],
                                swizzles[2], swizzles[3]);
   }

   assert(l.file != PROGRAM_UNDEFINED);
   assert(r.file != PROGRAM_UNDEFINED);

   if (ir->condition) {
      const bool switch_order = this->process_move_condition(ir->condition);
      st_src_reg condition = this->result;

      /* Emit one conditional CMP per vec4 register of the LHS type. */
      for (i = 0; i < type_size(ir->lhs->type); i++) {
         st_src_reg l_src = st_src_reg(l);
         st_src_reg condition_temp = condition;
         l_src.swizzle = swizzle_for_size(ir->lhs->type->vector_elements);

         if (native_integers) {
            /* This is necessary because TGSI's CMP instruction expects the
             * condition to be a float, and we store booleans as integers.
             * If TGSI had a UCMP instruction or similar, this extra
             * instruction would not be necessary.
2223 */ 2224 condition_temp = get_temp(glsl_type::vec4_type); 2225 condition.negate = 0; 2226 emit(ir, TGSI_OPCODE_I2F, st_dst_reg(condition_temp), condition); 2227 condition_temp.swizzle = condition.swizzle; 2228 } 2229 2230 if (switch_order) { 2231 emit(ir, TGSI_OPCODE_CMP, l, condition_temp, l_src, r); 2232 } else { 2233 emit(ir, TGSI_OPCODE_CMP, l, condition_temp, r, l_src); 2234 } 2235 2236 l.index++; 2237 r.index++; 2238 } 2239 } else if (ir->rhs->as_expression() && 2240 this->instructions.get_tail() && 2241 ir->rhs == ((glsl_to_tgsi_instruction *)this->instructions.get_tail())->ir && 2242 type_size(ir->lhs->type) == 1 && 2243 l.writemask == ((glsl_to_tgsi_instruction *)this->instructions.get_tail())->dst.writemask) { 2244 /* To avoid emitting an extra MOV when assigning an expression to a 2245 * variable, emit the last instruction of the expression again, but 2246 * replace the destination register with the target of the assignment. 2247 * Dead code elimination will remove the original instruction. 2248 */ 2249 glsl_to_tgsi_instruction *inst, *new_inst; 2250 inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail(); 2251 new_inst = emit(ir, inst->op, l, inst->src[0], inst->src[1], inst->src[2]); 2252 new_inst->saturate = inst->saturate; 2253 inst->dead_mask = inst->dst.writemask; 2254 } else { 2255 for (i = 0; i < type_size(ir->lhs->type); i++) { 2256 emit(ir, TGSI_OPCODE_MOV, l, r); 2257 l.index++; 2258 r.index++; 2259 } 2260 } 2261} 2262 2263 2264void 2265glsl_to_tgsi_visitor::visit(ir_constant *ir) 2266{ 2267 st_src_reg src; 2268 GLfloat stack_vals[4] = { 0 }; 2269 gl_constant_value *values = (gl_constant_value *) stack_vals; 2270 GLenum gl_type = GL_NONE; 2271 unsigned int i; 2272 static int in_array = 0; 2273 gl_register_file file = in_array ? PROGRAM_CONSTANT : PROGRAM_IMMEDIATE; 2274 2275 /* Unfortunately, 4 floats is all we can get into 2276 * _mesa_add_typed_unnamed_constant. 
So, make a temp to store an 2277 * aggregate constant and move each constant value into it. If we 2278 * get lucky, copy propagation will eliminate the extra moves. 2279 */ 2280 if (ir->type->base_type == GLSL_TYPE_STRUCT) { 2281 st_src_reg temp_base = get_temp(ir->type); 2282 st_dst_reg temp = st_dst_reg(temp_base); 2283 2284 foreach_iter(exec_list_iterator, iter, ir->components) { 2285 ir_constant *field_value = (ir_constant *)iter.get(); 2286 int size = type_size(field_value->type); 2287 2288 assert(size > 0); 2289 2290 field_value->accept(this); 2291 src = this->result; 2292 2293 for (i = 0; i < (unsigned int)size; i++) { 2294 emit(ir, TGSI_OPCODE_MOV, temp, src); 2295 2296 src.index++; 2297 temp.index++; 2298 } 2299 } 2300 this->result = temp_base; 2301 return; 2302 } 2303 2304 if (ir->type->is_array()) { 2305 st_src_reg temp_base = get_temp(ir->type); 2306 st_dst_reg temp = st_dst_reg(temp_base); 2307 int size = type_size(ir->type->fields.array); 2308 2309 assert(size > 0); 2310 in_array++; 2311 2312 for (i = 0; i < ir->type->length; i++) { 2313 ir->array_elements[i]->accept(this); 2314 src = this->result; 2315 for (int j = 0; j < size; j++) { 2316 emit(ir, TGSI_OPCODE_MOV, temp, src); 2317 2318 src.index++; 2319 temp.index++; 2320 } 2321 } 2322 this->result = temp_base; 2323 in_array--; 2324 return; 2325 } 2326 2327 if (ir->type->is_matrix()) { 2328 st_src_reg mat = get_temp(ir->type); 2329 st_dst_reg mat_column = st_dst_reg(mat); 2330 2331 for (i = 0; i < ir->type->matrix_columns; i++) { 2332 assert(ir->type->base_type == GLSL_TYPE_FLOAT); 2333 values = (gl_constant_value *) &ir->value.f[i * ir->type->vector_elements]; 2334 2335 src = st_src_reg(file, -1, ir->type->base_type); 2336 src.index = add_constant(file, 2337 values, 2338 ir->type->vector_elements, 2339 GL_FLOAT, 2340 &src.swizzle); 2341 emit(ir, TGSI_OPCODE_MOV, mat_column, src); 2342 2343 mat_column.index++; 2344 } 2345 2346 this->result = mat; 2347 return; 2348 } 2349 2350 switch 
(ir->type->base_type) { 2351 case GLSL_TYPE_FLOAT: 2352 gl_type = GL_FLOAT; 2353 for (i = 0; i < ir->type->vector_elements; i++) { 2354 values[i].f = ir->value.f[i]; 2355 } 2356 break; 2357 case GLSL_TYPE_UINT: 2358 gl_type = native_integers ? GL_UNSIGNED_INT : GL_FLOAT; 2359 for (i = 0; i < ir->type->vector_elements; i++) { 2360 if (native_integers) 2361 values[i].u = ir->value.u[i]; 2362 else 2363 values[i].f = ir->value.u[i]; 2364 } 2365 break; 2366 case GLSL_TYPE_INT: 2367 gl_type = native_integers ? GL_INT : GL_FLOAT; 2368 for (i = 0; i < ir->type->vector_elements; i++) { 2369 if (native_integers) 2370 values[i].i = ir->value.i[i]; 2371 else 2372 values[i].f = ir->value.i[i]; 2373 } 2374 break; 2375 case GLSL_TYPE_BOOL: 2376 gl_type = native_integers ? GL_BOOL : GL_FLOAT; 2377 for (i = 0; i < ir->type->vector_elements; i++) { 2378 if (native_integers) 2379 values[i].u = ir->value.b[i] ? ~0 : 0; 2380 else 2381 values[i].f = ir->value.b[i]; 2382 } 2383 break; 2384 default: 2385 assert(!"Non-float/uint/int/bool constant"); 2386 } 2387 2388 this->result = st_src_reg(file, -1, ir->type); 2389 this->result.index = add_constant(file, 2390 values, 2391 ir->type->vector_elements, 2392 gl_type, 2393 &this->result.swizzle); 2394} 2395 2396function_entry * 2397glsl_to_tgsi_visitor::get_function_signature(ir_function_signature *sig) 2398{ 2399 function_entry *entry; 2400 2401 foreach_iter(exec_list_iterator, iter, this->function_signatures) { 2402 entry = (function_entry *)iter.get(); 2403 2404 if (entry->sig == sig) 2405 return entry; 2406 } 2407 2408 entry = ralloc(mem_ctx, function_entry); 2409 entry->sig = sig; 2410 entry->sig_id = this->next_signature_id++; 2411 entry->bgn_inst = NULL; 2412 2413 /* Allocate storage for all the parameters. 
*/ 2414 foreach_iter(exec_list_iterator, iter, sig->parameters) { 2415 ir_variable *param = (ir_variable *)iter.get(); 2416 variable_storage *storage; 2417 2418 storage = find_variable_storage(param); 2419 assert(!storage); 2420 2421 storage = new(mem_ctx) variable_storage(param, PROGRAM_TEMPORARY, 2422 this->next_temp); 2423 this->variables.push_tail(storage); 2424 2425 this->next_temp += type_size(param->type); 2426 } 2427 2428 if (!sig->return_type->is_void()) { 2429 entry->return_reg = get_temp(sig->return_type); 2430 } else { 2431 entry->return_reg = undef_src; 2432 } 2433 2434 this->function_signatures.push_tail(entry); 2435 return entry; 2436} 2437 2438void 2439glsl_to_tgsi_visitor::visit(ir_call *ir) 2440{ 2441 glsl_to_tgsi_instruction *call_inst; 2442 ir_function_signature *sig = ir->get_callee(); 2443 function_entry *entry = get_function_signature(sig); 2444 int i; 2445 2446 /* Process in parameters. */ 2447 exec_list_iterator sig_iter = sig->parameters.iterator(); 2448 foreach_iter(exec_list_iterator, iter, *ir) { 2449 ir_rvalue *param_rval = (ir_rvalue *)iter.get(); 2450 ir_variable *param = (ir_variable *)sig_iter.get(); 2451 2452 if (param->mode == ir_var_in || 2453 param->mode == ir_var_inout) { 2454 variable_storage *storage = find_variable_storage(param); 2455 assert(storage); 2456 2457 param_rval->accept(this); 2458 st_src_reg r = this->result; 2459 2460 st_dst_reg l; 2461 l.file = storage->file; 2462 l.index = storage->index; 2463 l.reladdr = NULL; 2464 l.writemask = WRITEMASK_XYZW; 2465 l.cond_mask = COND_TR; 2466 2467 for (i = 0; i < type_size(param->type); i++) { 2468 emit(ir, TGSI_OPCODE_MOV, l, r); 2469 l.index++; 2470 r.index++; 2471 } 2472 } 2473 2474 sig_iter.next(); 2475 } 2476 assert(!sig_iter.has_next()); 2477 2478 /* Emit call instruction */ 2479 call_inst = emit(ir, TGSI_OPCODE_CAL); 2480 call_inst->function = entry; 2481 2482 /* Process out parameters. 
*/ 2483 sig_iter = sig->parameters.iterator(); 2484 foreach_iter(exec_list_iterator, iter, *ir) { 2485 ir_rvalue *param_rval = (ir_rvalue *)iter.get(); 2486 ir_variable *param = (ir_variable *)sig_iter.get(); 2487 2488 if (param->mode == ir_var_out || 2489 param->mode == ir_var_inout) { 2490 variable_storage *storage = find_variable_storage(param); 2491 assert(storage); 2492 2493 st_src_reg r; 2494 r.file = storage->file; 2495 r.index = storage->index; 2496 r.reladdr = NULL; 2497 r.swizzle = SWIZZLE_NOOP; 2498 r.negate = 0; 2499 2500 param_rval->accept(this); 2501 st_dst_reg l = st_dst_reg(this->result); 2502 2503 for (i = 0; i < type_size(param->type); i++) { 2504 emit(ir, TGSI_OPCODE_MOV, l, r); 2505 l.index++; 2506 r.index++; 2507 } 2508 } 2509 2510 sig_iter.next(); 2511 } 2512 assert(!sig_iter.has_next()); 2513 2514 /* Process return value. */ 2515 this->result = entry->return_reg; 2516} 2517 2518void 2519glsl_to_tgsi_visitor::visit(ir_texture *ir) 2520{ 2521 st_src_reg result_src, coord, lod_info, projector, dx, dy, offset; 2522 st_dst_reg result_dst, coord_dst; 2523 glsl_to_tgsi_instruction *inst = NULL; 2524 unsigned opcode = TGSI_OPCODE_NOP; 2525 2526 if (ir->coordinate) { 2527 ir->coordinate->accept(this); 2528 2529 /* Put our coords in a temp. We'll need to modify them for shadow, 2530 * projection, or LOD, so the only case we'd use it as is is if 2531 * we're doing plain old texturing. The optimization passes on 2532 * glsl_to_tgsi_visitor should handle cleaning up our mess in that case. 2533 */ 2534 coord = get_temp(glsl_type::vec4_type); 2535 coord_dst = st_dst_reg(coord); 2536 emit(ir, TGSI_OPCODE_MOV, coord_dst, this->result); 2537 } 2538 2539 if (ir->projector) { 2540 ir->projector->accept(this); 2541 projector = this->result; 2542 } 2543 2544 /* Storage for our result. Ideally for an assignment we'd be using 2545 * the actual storage for the result here, instead. 
2546 */ 2547 result_src = get_temp(glsl_type::vec4_type); 2548 result_dst = st_dst_reg(result_src); 2549 2550 switch (ir->op) { 2551 case ir_tex: 2552 opcode = TGSI_OPCODE_TEX; 2553 break; 2554 case ir_txb: 2555 opcode = TGSI_OPCODE_TXB; 2556 ir->lod_info.bias->accept(this); 2557 lod_info = this->result; 2558 break; 2559 case ir_txl: 2560 opcode = TGSI_OPCODE_TXL; 2561 ir->lod_info.lod->accept(this); 2562 lod_info = this->result; 2563 break; 2564 case ir_txd: 2565 opcode = TGSI_OPCODE_TXD; 2566 ir->lod_info.grad.dPdx->accept(this); 2567 dx = this->result; 2568 ir->lod_info.grad.dPdy->accept(this); 2569 dy = this->result; 2570 break; 2571 case ir_txs: 2572 opcode = TGSI_OPCODE_TXQ; 2573 ir->lod_info.lod->accept(this); 2574 lod_info = this->result; 2575 break; 2576 case ir_txf: 2577 opcode = TGSI_OPCODE_TXF; 2578 ir->lod_info.lod->accept(this); 2579 lod_info = this->result; 2580 if (ir->offset) { 2581 ir->offset->accept(this); 2582 offset = this->result; 2583 } 2584 break; 2585 } 2586 2587 const glsl_type *sampler_type = ir->sampler->type; 2588 2589 if (ir->projector) { 2590 if (opcode == TGSI_OPCODE_TEX) { 2591 /* Slot the projector in as the last component of the coord. */ 2592 coord_dst.writemask = WRITEMASK_W; 2593 emit(ir, TGSI_OPCODE_MOV, coord_dst, projector); 2594 coord_dst.writemask = WRITEMASK_XYZW; 2595 opcode = TGSI_OPCODE_TXP; 2596 } else { 2597 st_src_reg coord_w = coord; 2598 coord_w.swizzle = SWIZZLE_WWWW; 2599 2600 /* For the other TEX opcodes there's no projective version 2601 * since the last slot is taken up by LOD info. Do the 2602 * projective divide now. 2603 */ 2604 coord_dst.writemask = WRITEMASK_W; 2605 emit(ir, TGSI_OPCODE_RCP, coord_dst, projector); 2606 2607 /* In the case where we have to project the coordinates "by hand," 2608 * the shadow comparator value must also be projected. 
2609 */ 2610 st_src_reg tmp_src = coord; 2611 if (ir->shadow_comparitor) { 2612 /* Slot the shadow value in as the second to last component of the 2613 * coord. 2614 */ 2615 ir->shadow_comparitor->accept(this); 2616 2617 tmp_src = get_temp(glsl_type::vec4_type); 2618 st_dst_reg tmp_dst = st_dst_reg(tmp_src); 2619 2620 /* Projective division not allowed for array samplers. */ 2621 assert(!sampler_type->sampler_array); 2622 2623 tmp_dst.writemask = WRITEMASK_Z; 2624 emit(ir, TGSI_OPCODE_MOV, tmp_dst, this->result); 2625 2626 tmp_dst.writemask = WRITEMASK_XY; 2627 emit(ir, TGSI_OPCODE_MOV, tmp_dst, coord); 2628 } 2629 2630 coord_dst.writemask = WRITEMASK_XYZ; 2631 emit(ir, TGSI_OPCODE_MUL, coord_dst, tmp_src, coord_w); 2632 2633 coord_dst.writemask = WRITEMASK_XYZW; 2634 coord.swizzle = SWIZZLE_XYZW; 2635 } 2636 } 2637 2638 /* If projection is done and the opcode is not TGSI_OPCODE_TXP, then the shadow 2639 * comparator was put in the correct place (and projected) by the code, 2640 * above, that handles by-hand projection. 2641 */ 2642 if (ir->shadow_comparitor && (!ir->projector || opcode == TGSI_OPCODE_TXP)) { 2643 /* Slot the shadow value in as the second to last component of the 2644 * coord. 2645 */ 2646 ir->shadow_comparitor->accept(this); 2647 2648 /* XXX This will need to be updated for cubemap array samplers. */ 2649 if ((sampler_type->sampler_dimensionality == GLSL_SAMPLER_DIM_2D && 2650 sampler_type->sampler_array) || 2651 sampler_type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE) { 2652 coord_dst.writemask = WRITEMASK_W; 2653 } else { 2654 coord_dst.writemask = WRITEMASK_Z; 2655 } 2656 2657 emit(ir, TGSI_OPCODE_MOV, coord_dst, this->result); 2658 coord_dst.writemask = WRITEMASK_XYZW; 2659 } 2660 2661 if (opcode == TGSI_OPCODE_TXL || opcode == TGSI_OPCODE_TXB || 2662 opcode == TGSI_OPCODE_TXF) { 2663 /* TGSI stores LOD or LOD bias in the last channel of the coords. 
*/ 2664 coord_dst.writemask = WRITEMASK_W; 2665 emit(ir, TGSI_OPCODE_MOV, coord_dst, lod_info); 2666 coord_dst.writemask = WRITEMASK_XYZW; 2667 } 2668 2669 if (opcode == TGSI_OPCODE_TXD) 2670 inst = emit(ir, opcode, result_dst, coord, dx, dy); 2671 else if (opcode == TGSI_OPCODE_TXQ) 2672 inst = emit(ir, opcode, result_dst, lod_info); 2673 else if (opcode == TGSI_OPCODE_TXF) { 2674 inst = emit(ir, opcode, result_dst, coord); 2675 } else 2676 inst = emit(ir, opcode, result_dst, coord); 2677 2678 if (ir->shadow_comparitor) 2679 inst->tex_shadow = GL_TRUE; 2680 2681 inst->sampler = _mesa_get_sampler_uniform_value(ir->sampler, 2682 this->shader_program, 2683 this->prog); 2684 2685 if (ir->offset) { 2686 inst->tex_offset_num_offset = 1; 2687 inst->tex_offsets[0].Index = offset.index; 2688 inst->tex_offsets[0].File = offset.file; 2689 inst->tex_offsets[0].SwizzleX = GET_SWZ(offset.swizzle, 0); 2690 inst->tex_offsets[0].SwizzleY = GET_SWZ(offset.swizzle, 1); 2691 inst->tex_offsets[0].SwizzleZ = GET_SWZ(offset.swizzle, 2); 2692 } 2693 2694 switch (sampler_type->sampler_dimensionality) { 2695 case GLSL_SAMPLER_DIM_1D: 2696 inst->tex_target = (sampler_type->sampler_array) 2697 ? TEXTURE_1D_ARRAY_INDEX : TEXTURE_1D_INDEX; 2698 break; 2699 case GLSL_SAMPLER_DIM_2D: 2700 inst->tex_target = (sampler_type->sampler_array) 2701 ? 
TEXTURE_2D_ARRAY_INDEX : TEXTURE_2D_INDEX; 2702 break; 2703 case GLSL_SAMPLER_DIM_3D: 2704 inst->tex_target = TEXTURE_3D_INDEX; 2705 break; 2706 case GLSL_SAMPLER_DIM_CUBE: 2707 inst->tex_target = TEXTURE_CUBE_INDEX; 2708 break; 2709 case GLSL_SAMPLER_DIM_RECT: 2710 inst->tex_target = TEXTURE_RECT_INDEX; 2711 break; 2712 case GLSL_SAMPLER_DIM_BUF: 2713 assert(!"FINISHME: Implement ARB_texture_buffer_object"); 2714 break; 2715 case GLSL_SAMPLER_DIM_EXTERNAL: 2716 inst->tex_target = TEXTURE_EXTERNAL_INDEX; 2717 break; 2718 default: 2719 assert(!"Should not get here."); 2720 } 2721 2722 this->result = result_src; 2723} 2724 2725void 2726glsl_to_tgsi_visitor::visit(ir_return *ir) 2727{ 2728 if (ir->get_value()) { 2729 st_dst_reg l; 2730 int i; 2731 2732 assert(current_function); 2733 2734 ir->get_value()->accept(this); 2735 st_src_reg r = this->result; 2736 2737 l = st_dst_reg(current_function->return_reg); 2738 2739 for (i = 0; i < type_size(current_function->sig->return_type); i++) { 2740 emit(ir, TGSI_OPCODE_MOV, l, r); 2741 l.index++; 2742 r.index++; 2743 } 2744 } 2745 2746 emit(ir, TGSI_OPCODE_RET); 2747} 2748 2749void 2750glsl_to_tgsi_visitor::visit(ir_discard *ir) 2751{ 2752 struct gl_fragment_program *fp = (struct gl_fragment_program *)this->prog; 2753 2754 if (ir->condition) { 2755 ir->condition->accept(this); 2756 this->result.negate = ~this->result.negate; 2757 emit(ir, TGSI_OPCODE_KIL, undef_dst, this->result); 2758 } else { 2759 emit(ir, TGSI_OPCODE_KILP); 2760 } 2761 2762 fp->UsesKill = GL_TRUE; 2763} 2764 2765void 2766glsl_to_tgsi_visitor::visit(ir_if *ir) 2767{ 2768 glsl_to_tgsi_instruction *cond_inst, *if_inst; 2769 glsl_to_tgsi_instruction *prev_inst; 2770 2771 prev_inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail(); 2772 2773 ir->condition->accept(this); 2774 assert(this->result.file != PROGRAM_UNDEFINED); 2775 2776 if (this->options->EmitCondCodes) { 2777 cond_inst = (glsl_to_tgsi_instruction *)this->instructions.get_tail(); 2778 2779 
/* See if we actually generated any instruction for generating 2780 * the condition. If not, then cook up a move to a temp so we 2781 * have something to set cond_update on. 2782 */ 2783 if (cond_inst == prev_inst) { 2784 st_src_reg temp = get_temp(glsl_type::bool_type); 2785 cond_inst = emit(ir->condition, TGSI_OPCODE_MOV, st_dst_reg(temp), result); 2786 } 2787 cond_inst->cond_update = GL_TRUE; 2788 2789 if_inst = emit(ir->condition, TGSI_OPCODE_IF); 2790 if_inst->dst.cond_mask = COND_NE; 2791 } else { 2792 if_inst = emit(ir->condition, TGSI_OPCODE_IF, undef_dst, this->result); 2793 } 2794 2795 this->instructions.push_tail(if_inst); 2796 2797 visit_exec_list(&ir->then_instructions, this); 2798 2799 if (!ir->else_instructions.is_empty()) { 2800 emit(ir->condition, TGSI_OPCODE_ELSE); 2801 visit_exec_list(&ir->else_instructions, this); 2802 } 2803 2804 if_inst = emit(ir->condition, TGSI_OPCODE_ENDIF); 2805} 2806 2807glsl_to_tgsi_visitor::glsl_to_tgsi_visitor() 2808{ 2809 result.file = PROGRAM_UNDEFINED; 2810 next_temp = 1; 2811 next_signature_id = 1; 2812 num_immediates = 0; 2813 current_function = NULL; 2814 num_address_regs = 0; 2815 indirect_addr_temps = false; 2816 indirect_addr_consts = false; 2817 mem_ctx = ralloc_context(NULL); 2818} 2819 2820glsl_to_tgsi_visitor::~glsl_to_tgsi_visitor() 2821{ 2822 ralloc_free(mem_ctx); 2823} 2824 2825extern "C" void free_glsl_to_tgsi_visitor(glsl_to_tgsi_visitor *v) 2826{ 2827 delete v; 2828} 2829 2830 2831/** 2832 * Count resources used by the given gpu program (number of texture 2833 * samplers, etc). 
2834 */ 2835static void 2836count_resources(glsl_to_tgsi_visitor *v, gl_program *prog) 2837{ 2838 v->samplers_used = 0; 2839 2840 foreach_iter(exec_list_iterator, iter, v->instructions) { 2841 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 2842 2843 if (is_tex_instruction(inst->op)) { 2844 v->samplers_used |= 1 << inst->sampler; 2845 2846 if (inst->tex_shadow) { 2847 prog->ShadowSamplers |= 1 << inst->sampler; 2848 } 2849 } 2850 } 2851 2852 prog->SamplersUsed = v->samplers_used; 2853 2854 if (v->shader_program != NULL) 2855 _mesa_update_shader_textures_used(v->shader_program, prog); 2856} 2857 2858static void 2859set_uniform_initializer(struct gl_context *ctx, void *mem_ctx, 2860 struct gl_shader_program *shader_program, 2861 const char *name, const glsl_type *type, 2862 ir_constant *val) 2863{ 2864 if (type->is_record()) { 2865 ir_constant *field_constant; 2866 2867 field_constant = (ir_constant *)val->components.get_head(); 2868 2869 for (unsigned int i = 0; i < type->length; i++) { 2870 const glsl_type *field_type = type->fields.structure[i].type; 2871 const char *field_name = ralloc_asprintf(mem_ctx, "%s.%s", name, 2872 type->fields.structure[i].name); 2873 set_uniform_initializer(ctx, mem_ctx, shader_program, field_name, 2874 field_type, field_constant); 2875 field_constant = (ir_constant *)field_constant->next; 2876 } 2877 return; 2878 } 2879 2880 int loc = _mesa_get_uniform_location(ctx, shader_program, name); 2881 2882 if (loc == -1) { 2883 fail_link(shader_program, 2884 "Couldn't find uniform for initializer %s\n", name); 2885 return; 2886 } 2887 2888 for (unsigned int i = 0; i < (type->is_array() ? 
type->length : 1); i++) { 2889 ir_constant *element; 2890 const glsl_type *element_type; 2891 if (type->is_array()) { 2892 element = val->array_elements[i]; 2893 element_type = type->fields.array; 2894 } else { 2895 element = val; 2896 element_type = type; 2897 } 2898 2899 void *values; 2900 2901 if (element_type->base_type == GLSL_TYPE_BOOL) { 2902 int *conv = ralloc_array(mem_ctx, int, element_type->components()); 2903 for (unsigned int j = 0; j < element_type->components(); j++) { 2904 conv[j] = element->value.b[j]; 2905 } 2906 values = (void *)conv; 2907 element_type = glsl_type::get_instance(GLSL_TYPE_INT, 2908 element_type->vector_elements, 2909 1); 2910 } else { 2911 values = &element->value; 2912 } 2913 2914 if (element_type->is_matrix()) { 2915 _mesa_uniform_matrix(ctx, shader_program, 2916 element_type->matrix_columns, 2917 element_type->vector_elements, 2918 loc, 1, GL_FALSE, (GLfloat *)values); 2919 } else { 2920 _mesa_uniform(ctx, shader_program, loc, element_type->matrix_columns, 2921 values, element_type->gl_type); 2922 } 2923 2924 loc++; 2925 } 2926} 2927 2928/** 2929 * Returns the mask of channels (bitmask of WRITEMASK_X,Y,Z,W) which 2930 * are read from the given src in this instruction 2931 */ 2932static int 2933get_src_arg_mask(st_dst_reg dst, st_src_reg src) 2934{ 2935 int read_mask = 0, comp; 2936 2937 /* Now, given the src swizzle and the written channels, find which 2938 * components are actually read 2939 */ 2940 for (comp = 0; comp < 4; ++comp) { 2941 const unsigned coord = GET_SWZ(src.swizzle, comp); 2942 ASSERT(coord < 4); 2943 if (dst.writemask & (1 << comp) && coord <= SWIZZLE_W) 2944 read_mask |= 1 << coord; 2945 } 2946 2947 return read_mask; 2948} 2949 2950/** 2951 * This pass replaces CMP T0, T1 T2 T0 with MOV T0, T2 when the CMP 2952 * instruction is the first instruction to write to register T0. There are 2953 * several lowering passes done in GLSL IR (e.g. 
branches and 2954 * relative addressing) that create a large number of conditional assignments 2955 * that ir_to_mesa converts to CMP instructions like the one mentioned above. 2956 * 2957 * Here is why this conversion is safe: 2958 * CMP T0, T1 T2 T0 can be expanded to: 2959 * if (T1 < 0.0) 2960 * MOV T0, T2; 2961 * else 2962 * MOV T0, T0; 2963 * 2964 * If (T1 < 0.0) evaluates to true then our replacement MOV T0, T2 is the same 2965 * as the original program. If (T1 < 0.0) evaluates to false, executing 2966 * MOV T0, T0 will store a garbage value in T0 since T0 is uninitialized. 2967 * Therefore, it doesn't matter that we are replacing MOV T0, T0 with MOV T0, T2 2968 * because any instruction that was going to read from T0 after this was going 2969 * to read a garbage value anyway. 2970 */ 2971void 2972glsl_to_tgsi_visitor::simplify_cmp(void) 2973{ 2974 unsigned *tempWrites; 2975 unsigned outputWrites[MAX_PROGRAM_OUTPUTS]; 2976 2977 tempWrites = new unsigned[MAX_TEMPS]; 2978 if (!tempWrites) { 2979 return; 2980 } 2981 memset(tempWrites, 0, sizeof(tempWrites)); 2982 memset(outputWrites, 0, sizeof(outputWrites)); 2983 2984 foreach_iter(exec_list_iterator, iter, this->instructions) { 2985 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 2986 unsigned prevWriteMask = 0; 2987 2988 /* Give up if we encounter relative addressing or flow control. 
*/ 2989 if (inst->dst.reladdr || 2990 tgsi_get_opcode_info(inst->op)->is_branch || 2991 inst->op == TGSI_OPCODE_BGNSUB || 2992 inst->op == TGSI_OPCODE_CONT || 2993 inst->op == TGSI_OPCODE_END || 2994 inst->op == TGSI_OPCODE_ENDSUB || 2995 inst->op == TGSI_OPCODE_RET) { 2996 break; 2997 } 2998 2999 if (inst->dst.file == PROGRAM_OUTPUT) { 3000 assert(inst->dst.index < MAX_PROGRAM_OUTPUTS); 3001 prevWriteMask = outputWrites[inst->dst.index]; 3002 outputWrites[inst->dst.index] |= inst->dst.writemask; 3003 } else if (inst->dst.file == PROGRAM_TEMPORARY) { 3004 assert(inst->dst.index < MAX_TEMPS); 3005 prevWriteMask = tempWrites[inst->dst.index]; 3006 tempWrites[inst->dst.index] |= inst->dst.writemask; 3007 } 3008 3009 /* For a CMP to be considered a conditional write, the destination 3010 * register and source register two must be the same. */ 3011 if (inst->op == TGSI_OPCODE_CMP 3012 && !(inst->dst.writemask & prevWriteMask) 3013 && inst->src[2].file == inst->dst.file 3014 && inst->src[2].index == inst->dst.index 3015 && inst->dst.writemask == get_src_arg_mask(inst->dst, inst->src[2])) { 3016 3017 inst->op = TGSI_OPCODE_MOV; 3018 inst->src[0] = inst->src[1]; 3019 } 3020 } 3021 3022 delete [] tempWrites; 3023} 3024 3025/* Replaces all references to a temporary register index with another index. 
*/ 3026void 3027glsl_to_tgsi_visitor::rename_temp_register(int index, int new_index) 3028{ 3029 foreach_iter(exec_list_iterator, iter, this->instructions) { 3030 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3031 unsigned j; 3032 3033 for (j=0; j < num_inst_src_regs(inst->op); j++) { 3034 if (inst->src[j].file == PROGRAM_TEMPORARY && 3035 inst->src[j].index == index) { 3036 inst->src[j].index = new_index; 3037 } 3038 } 3039 3040 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index) { 3041 inst->dst.index = new_index; 3042 } 3043 } 3044} 3045 3046int 3047glsl_to_tgsi_visitor::get_first_temp_read(int index) 3048{ 3049 int depth = 0; /* loop depth */ 3050 int loop_start = -1; /* index of the first active BGNLOOP (if any) */ 3051 unsigned i = 0, j; 3052 3053 foreach_iter(exec_list_iterator, iter, this->instructions) { 3054 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3055 3056 for (j=0; j < num_inst_src_regs(inst->op); j++) { 3057 if (inst->src[j].file == PROGRAM_TEMPORARY && 3058 inst->src[j].index == index) { 3059 return (depth == 0) ? i : loop_start; 3060 } 3061 } 3062 3063 if (inst->op == TGSI_OPCODE_BGNLOOP) { 3064 if(depth++ == 0) 3065 loop_start = i; 3066 } else if (inst->op == TGSI_OPCODE_ENDLOOP) { 3067 if (--depth == 0) 3068 loop_start = -1; 3069 } 3070 assert(depth >= 0); 3071 3072 i++; 3073 } 3074 3075 return -1; 3076} 3077 3078int 3079glsl_to_tgsi_visitor::get_first_temp_write(int index) 3080{ 3081 int depth = 0; /* loop depth */ 3082 int loop_start = -1; /* index of the first active BGNLOOP (if any) */ 3083 int i = 0; 3084 3085 foreach_iter(exec_list_iterator, iter, this->instructions) { 3086 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3087 3088 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index) { 3089 return (depth == 0) ? 
i : loop_start; 3090 } 3091 3092 if (inst->op == TGSI_OPCODE_BGNLOOP) { 3093 if(depth++ == 0) 3094 loop_start = i; 3095 } else if (inst->op == TGSI_OPCODE_ENDLOOP) { 3096 if (--depth == 0) 3097 loop_start = -1; 3098 } 3099 assert(depth >= 0); 3100 3101 i++; 3102 } 3103 3104 return -1; 3105} 3106 3107int 3108glsl_to_tgsi_visitor::get_last_temp_read(int index) 3109{ 3110 int depth = 0; /* loop depth */ 3111 int last = -1; /* index of last instruction that reads the temporary */ 3112 unsigned i = 0, j; 3113 3114 foreach_iter(exec_list_iterator, iter, this->instructions) { 3115 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3116 3117 for (j=0; j < num_inst_src_regs(inst->op); j++) { 3118 if (inst->src[j].file == PROGRAM_TEMPORARY && 3119 inst->src[j].index == index) { 3120 last = (depth == 0) ? i : -2; 3121 } 3122 } 3123 3124 if (inst->op == TGSI_OPCODE_BGNLOOP) 3125 depth++; 3126 else if (inst->op == TGSI_OPCODE_ENDLOOP) 3127 if (--depth == 0 && last == -2) 3128 last = i; 3129 assert(depth >= 0); 3130 3131 i++; 3132 } 3133 3134 assert(last >= -1); 3135 return last; 3136} 3137 3138int 3139glsl_to_tgsi_visitor::get_last_temp_write(int index) 3140{ 3141 int depth = 0; /* loop depth */ 3142 int last = -1; /* index of last instruction that writes to the temporary */ 3143 int i = 0; 3144 3145 foreach_iter(exec_list_iterator, iter, this->instructions) { 3146 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3147 3148 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == index) 3149 last = (depth == 0) ? 
i : -2; 3150 3151 if (inst->op == TGSI_OPCODE_BGNLOOP) 3152 depth++; 3153 else if (inst->op == TGSI_OPCODE_ENDLOOP) 3154 if (--depth == 0 && last == -2) 3155 last = i; 3156 assert(depth >= 0); 3157 3158 i++; 3159 } 3160 3161 assert(last >= -1); 3162 return last; 3163} 3164 3165/* 3166 * On a basic block basis, tracks available PROGRAM_TEMPORARY register 3167 * channels for copy propagation and updates following instructions to 3168 * use the original versions. 3169 * 3170 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass 3171 * will occur. As an example, a TXP production before this pass: 3172 * 3173 * 0: MOV TEMP[1], INPUT[4].xyyy; 3174 * 1: MOV TEMP[1].w, INPUT[4].wwww; 3175 * 2: TXP TEMP[2], TEMP[1], texture[0], 2D; 3176 * 3177 * and after: 3178 * 3179 * 0: MOV TEMP[1], INPUT[4].xyyy; 3180 * 1: MOV TEMP[1].w, INPUT[4].wwww; 3181 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D; 3182 * 3183 * which allows for dead code elimination on TEMP[1]'s writes. 3184 */ 3185void 3186glsl_to_tgsi_visitor::copy_propagate(void) 3187{ 3188 glsl_to_tgsi_instruction **acp = rzalloc_array(mem_ctx, 3189 glsl_to_tgsi_instruction *, 3190 this->next_temp * 4); 3191 int *acp_level = rzalloc_array(mem_ctx, int, this->next_temp * 4); 3192 int level = 0; 3193 3194 foreach_iter(exec_list_iterator, iter, this->instructions) { 3195 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3196 3197 assert(inst->dst.file != PROGRAM_TEMPORARY 3198 || inst->dst.index < this->next_temp); 3199 3200 /* First, do any copy propagation possible into the src regs. 
*/ 3201 for (int r = 0; r < 3; r++) { 3202 glsl_to_tgsi_instruction *first = NULL; 3203 bool good = true; 3204 int acp_base = inst->src[r].index * 4; 3205 3206 if (inst->src[r].file != PROGRAM_TEMPORARY || 3207 inst->src[r].reladdr) 3208 continue; 3209 3210 /* See if we can find entries in the ACP consisting of MOVs 3211 * from the same src register for all the swizzled channels 3212 * of this src register reference. 3213 */ 3214 for (int i = 0; i < 4; i++) { 3215 int src_chan = GET_SWZ(inst->src[r].swizzle, i); 3216 glsl_to_tgsi_instruction *copy_chan = acp[acp_base + src_chan]; 3217 3218 if (!copy_chan) { 3219 good = false; 3220 break; 3221 } 3222 3223 assert(acp_level[acp_base + src_chan] <= level); 3224 3225 if (!first) { 3226 first = copy_chan; 3227 } else { 3228 if (first->src[0].file != copy_chan->src[0].file || 3229 first->src[0].index != copy_chan->src[0].index) { 3230 good = false; 3231 break; 3232 } 3233 } 3234 } 3235 3236 if (good) { 3237 /* We've now validated that we can copy-propagate to 3238 * replace this src register reference. Do it. 3239 */ 3240 inst->src[r].file = first->src[0].file; 3241 inst->src[r].index = first->src[0].index; 3242 3243 int swizzle = 0; 3244 for (int i = 0; i < 4; i++) { 3245 int src_chan = GET_SWZ(inst->src[r].swizzle, i); 3246 glsl_to_tgsi_instruction *copy_inst = acp[acp_base + src_chan]; 3247 swizzle |= (GET_SWZ(copy_inst->src[0].swizzle, src_chan) << 3248 (3 * i)); 3249 } 3250 inst->src[r].swizzle = swizzle; 3251 } 3252 } 3253 3254 switch (inst->op) { 3255 case TGSI_OPCODE_BGNLOOP: 3256 case TGSI_OPCODE_ENDLOOP: 3257 /* End of a basic block, clear the ACP entirely. */ 3258 memset(acp, 0, sizeof(*acp) * this->next_temp * 4); 3259 break; 3260 3261 case TGSI_OPCODE_IF: 3262 ++level; 3263 break; 3264 3265 case TGSI_OPCODE_ENDIF: 3266 case TGSI_OPCODE_ELSE: 3267 /* Clear all channels written inside the block from the ACP, but 3268 * leaving those that were not touched. 
3269 */ 3270 for (int r = 0; r < this->next_temp; r++) { 3271 for (int c = 0; c < 4; c++) { 3272 if (!acp[4 * r + c]) 3273 continue; 3274 3275 if (acp_level[4 * r + c] >= level) 3276 acp[4 * r + c] = NULL; 3277 } 3278 } 3279 if (inst->op == TGSI_OPCODE_ENDIF) 3280 --level; 3281 break; 3282 3283 default: 3284 /* Continuing the block, clear any written channels from 3285 * the ACP. 3286 */ 3287 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.reladdr) { 3288 /* Any temporary might be written, so no copy propagation 3289 * across this instruction. 3290 */ 3291 memset(acp, 0, sizeof(*acp) * this->next_temp * 4); 3292 } else if (inst->dst.file == PROGRAM_OUTPUT && 3293 inst->dst.reladdr) { 3294 /* Any output might be written, so no copy propagation 3295 * from outputs across this instruction. 3296 */ 3297 for (int r = 0; r < this->next_temp; r++) { 3298 for (int c = 0; c < 4; c++) { 3299 if (!acp[4 * r + c]) 3300 continue; 3301 3302 if (acp[4 * r + c]->src[0].file == PROGRAM_OUTPUT) 3303 acp[4 * r + c] = NULL; 3304 } 3305 } 3306 } else if (inst->dst.file == PROGRAM_TEMPORARY || 3307 inst->dst.file == PROGRAM_OUTPUT) { 3308 /* Clear where it's used as dst. */ 3309 if (inst->dst.file == PROGRAM_TEMPORARY) { 3310 for (int c = 0; c < 4; c++) { 3311 if (inst->dst.writemask & (1 << c)) { 3312 acp[4 * inst->dst.index + c] = NULL; 3313 } 3314 } 3315 } 3316 3317 /* Clear where it's used as src. */ 3318 for (int r = 0; r < this->next_temp; r++) { 3319 for (int c = 0; c < 4; c++) { 3320 if (!acp[4 * r + c]) 3321 continue; 3322 3323 int src_chan = GET_SWZ(acp[4 * r + c]->src[0].swizzle, c); 3324 3325 if (acp[4 * r + c]->src[0].file == inst->dst.file && 3326 acp[4 * r + c]->src[0].index == inst->dst.index && 3327 inst->dst.writemask & (1 << src_chan)) 3328 { 3329 acp[4 * r + c] = NULL; 3330 } 3331 } 3332 } 3333 } 3334 break; 3335 } 3336 3337 /* If this is a copy, add it to the ACP. 
*/ 3338 if (inst->op == TGSI_OPCODE_MOV && 3339 inst->dst.file == PROGRAM_TEMPORARY && 3340 !inst->dst.reladdr && 3341 !inst->saturate && 3342 !inst->src[0].reladdr && 3343 !inst->src[0].negate) { 3344 for (int i = 0; i < 4; i++) { 3345 if (inst->dst.writemask & (1 << i)) { 3346 acp[4 * inst->dst.index + i] = inst; 3347 acp_level[4 * inst->dst.index + i] = level; 3348 } 3349 } 3350 } 3351 } 3352 3353 ralloc_free(acp_level); 3354 ralloc_free(acp); 3355} 3356 3357/* 3358 * Tracks available PROGRAM_TEMPORARY registers for dead code elimination. 3359 * 3360 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass 3361 * will occur. As an example, a TXP production after copy propagation but 3362 * before this pass: 3363 * 3364 * 0: MOV TEMP[1], INPUT[4].xyyy; 3365 * 1: MOV TEMP[1].w, INPUT[4].wwww; 3366 * 2: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D; 3367 * 3368 * and after this pass: 3369 * 3370 * 0: TXP TEMP[2], INPUT[4].xyyw, texture[0], 2D; 3371 * 3372 * FIXME: assumes that all functions are inlined (no support for BGNSUB/ENDSUB) 3373 * FIXME: doesn't eliminate all dead code inside of loops; it steps around them 3374 */ 3375void 3376glsl_to_tgsi_visitor::eliminate_dead_code(void) 3377{ 3378 int i; 3379 3380 for (i=0; i < this->next_temp; i++) { 3381 int last_read = get_last_temp_read(i); 3382 int j = 0; 3383 3384 foreach_iter(exec_list_iterator, iter, this->instructions) { 3385 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3386 3387 if (inst->dst.file == PROGRAM_TEMPORARY && inst->dst.index == i && 3388 j > last_read) 3389 { 3390 iter.remove(); 3391 delete inst; 3392 } 3393 3394 j++; 3395 } 3396 } 3397} 3398 3399/* 3400 * On a basic block basis, tracks available PROGRAM_TEMPORARY registers for dead 3401 * code elimination. This is less primitive than eliminate_dead_code(), as it 3402 * is per-channel and can detect consecutive writes without a read between them 3403 * as dead code. 
However, there is some dead code that can be eliminated by 3404 * eliminate_dead_code() but not this function - for example, this function 3405 * cannot eliminate an instruction writing to a register that is never read and 3406 * is the only instruction writing to that register. 3407 * 3408 * The glsl_to_tgsi_visitor lazily produces code assuming that this pass 3409 * will occur. 3410 */ 3411int 3412glsl_to_tgsi_visitor::eliminate_dead_code_advanced(void) 3413{ 3414 glsl_to_tgsi_instruction **writes = rzalloc_array(mem_ctx, 3415 glsl_to_tgsi_instruction *, 3416 this->next_temp * 4); 3417 int *write_level = rzalloc_array(mem_ctx, int, this->next_temp * 4); 3418 int level = 0; 3419 int removed = 0; 3420 3421 foreach_iter(exec_list_iterator, iter, this->instructions) { 3422 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3423 3424 assert(inst->dst.file != PROGRAM_TEMPORARY 3425 || inst->dst.index < this->next_temp); 3426 3427 switch (inst->op) { 3428 case TGSI_OPCODE_BGNLOOP: 3429 case TGSI_OPCODE_ENDLOOP: 3430 case TGSI_OPCODE_CONT: 3431 case TGSI_OPCODE_BRK: 3432 /* End of a basic block, clear the write array entirely. 3433 * 3434 * This keeps us from killing dead code when the writes are 3435 * on either side of a loop, even when the register isn't touched 3436 * inside the loop. However, glsl_to_tgsi_visitor doesn't seem to emit 3437 * dead code of this type, so it shouldn't make a difference as long as 3438 * the dead code elimination pass in the GLSL compiler does its job. 3439 */ 3440 memset(writes, 0, sizeof(*writes) * this->next_temp * 4); 3441 break; 3442 3443 case TGSI_OPCODE_ENDIF: 3444 case TGSI_OPCODE_ELSE: 3445 /* Promote the recorded level of all channels written inside the 3446 * preceding if or else block to the level above the if/else block. 
3447 */ 3448 for (int r = 0; r < this->next_temp; r++) { 3449 for (int c = 0; c < 4; c++) { 3450 if (!writes[4 * r + c]) 3451 continue; 3452 3453 if (write_level[4 * r + c] == level) 3454 write_level[4 * r + c] = level-1; 3455 } 3456 } 3457 3458 if(inst->op == TGSI_OPCODE_ENDIF) 3459 --level; 3460 3461 break; 3462 3463 case TGSI_OPCODE_IF: 3464 ++level; 3465 /* fallthrough to default case to mark the condition as read */ 3466 3467 default: 3468 /* Continuing the block, clear any channels from the write array that 3469 * are read by this instruction. 3470 */ 3471 for (unsigned i = 0; i < Elements(inst->src); i++) { 3472 if (inst->src[i].file == PROGRAM_TEMPORARY && inst->src[i].reladdr){ 3473 /* Any temporary might be read, so no dead code elimination 3474 * across this instruction. 3475 */ 3476 memset(writes, 0, sizeof(*writes) * this->next_temp * 4); 3477 } else if (inst->src[i].file == PROGRAM_TEMPORARY) { 3478 /* Clear where it's used as src. */ 3479 int src_chans = 1 << GET_SWZ(inst->src[i].swizzle, 0); 3480 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 1); 3481 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 2); 3482 src_chans |= 1 << GET_SWZ(inst->src[i].swizzle, 3); 3483 3484 for (int c = 0; c < 4; c++) { 3485 if (src_chans & (1 << c)) { 3486 writes[4 * inst->src[i].index + c] = NULL; 3487 } 3488 } 3489 } 3490 } 3491 break; 3492 } 3493 3494 /* If this instruction writes to a temporary, add it to the write array. 3495 * If there is already an instruction in the write array for one or more 3496 * of the channels, flag that channel write as dead. 
3497 */ 3498 if (inst->dst.file == PROGRAM_TEMPORARY && 3499 !inst->dst.reladdr && 3500 !inst->saturate) { 3501 for (int c = 0; c < 4; c++) { 3502 if (inst->dst.writemask & (1 << c)) { 3503 if (writes[4 * inst->dst.index + c]) { 3504 if (write_level[4 * inst->dst.index + c] < level) 3505 continue; 3506 else 3507 writes[4 * inst->dst.index + c]->dead_mask |= (1 << c); 3508 } 3509 writes[4 * inst->dst.index + c] = inst; 3510 write_level[4 * inst->dst.index + c] = level; 3511 } 3512 } 3513 } 3514 } 3515 3516 /* Anything still in the write array at this point is dead code. */ 3517 for (int r = 0; r < this->next_temp; r++) { 3518 for (int c = 0; c < 4; c++) { 3519 glsl_to_tgsi_instruction *inst = writes[4 * r + c]; 3520 if (inst) 3521 inst->dead_mask |= (1 << c); 3522 } 3523 } 3524 3525 /* Now actually remove the instructions that are completely dead and update 3526 * the writemask of other instructions with dead channels. 3527 */ 3528 foreach_iter(exec_list_iterator, iter, this->instructions) { 3529 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3530 3531 if (!inst->dead_mask || !inst->dst.writemask) 3532 continue; 3533 else if ((inst->dst.writemask & ~inst->dead_mask) == 0) { 3534 iter.remove(); 3535 delete inst; 3536 removed++; 3537 } else 3538 inst->dst.writemask &= ~(inst->dead_mask); 3539 } 3540 3541 ralloc_free(write_level); 3542 ralloc_free(writes); 3543 3544 return removed; 3545} 3546 3547/* Merges temporary registers together where possible to reduce the number of 3548 * registers needed to run a program. 3549 * 3550 * Produces optimal code only after copy propagation and dead code elimination 3551 * have been run. 
*/ 3552void 3553glsl_to_tgsi_visitor::merge_registers(void) 3554{ 3555 int *last_reads = rzalloc_array(mem_ctx, int, this->next_temp); 3556 int *first_writes = rzalloc_array(mem_ctx, int, this->next_temp); 3557 int i, j; 3558 3559 /* Read the indices of the last read and first write to each temp register 3560 * into an array so that we don't have to traverse the instruction list as 3561 * much. */ 3562 for (i=0; i < this->next_temp; i++) { 3563 last_reads[i] = get_last_temp_read(i); 3564 first_writes[i] = get_first_temp_write(i); 3565 } 3566 3567 /* Start looking for registers with non-overlapping usages that can be 3568 * merged together. */ 3569 for (i=0; i < this->next_temp; i++) { 3570 /* Don't touch unused registers. */ 3571 if (last_reads[i] < 0 || first_writes[i] < 0) continue; 3572 3573 for (j=0; j < this->next_temp; j++) { 3574 /* Don't touch unused registers. */ 3575 if (last_reads[j] < 0 || first_writes[j] < 0) continue; 3576 3577 /* We can merge the two registers if the first write to j is after or 3578 * in the same instruction as the last read from i. Note that the 3579 * register at index i will always be used earlier or at the same time 3580 * as the register at index j. */ 3581 if (first_writes[i] <= first_writes[j] && 3582 last_reads[i] <= first_writes[j]) 3583 { 3584 rename_temp_register(j, i); /* Replace all references to j with i.*/ 3585 3586 /* Update the first_writes and last_reads arrays with the new 3587 * values for the merged register index, and mark the newly unused 3588 * register index as such. */ 3589 last_reads[i] = last_reads[j]; 3590 first_writes[j] = -1; 3591 last_reads[j] = -1; 3592 } 3593 } 3594 } 3595 3596 ralloc_free(last_reads); 3597 ralloc_free(first_writes); 3598} 3599 3600/* Reassign indices to temporary registers by reusing unused indices created 3601 * by optimization passes. 
*/ 3602void 3603glsl_to_tgsi_visitor::renumber_registers(void) 3604{ 3605 int i = 0; 3606 int new_index = 0; 3607 3608 for (i=0; i < this->next_temp; i++) { 3609 if (get_first_temp_read(i) < 0) continue; 3610 if (i != new_index) 3611 rename_temp_register(i, new_index); 3612 new_index++; 3613 } 3614 3615 this->next_temp = new_index; 3616} 3617 3618/** 3619 * Returns a fragment program which implements the current pixel transfer ops. 3620 * Based on get_pixel_transfer_program in st_atom_pixeltransfer.c. 3621 */ 3622extern "C" void 3623get_pixel_transfer_visitor(struct st_fragment_program *fp, 3624 glsl_to_tgsi_visitor *original, 3625 int scale_and_bias, int pixel_maps) 3626{ 3627 glsl_to_tgsi_visitor *v = new glsl_to_tgsi_visitor(); 3628 struct st_context *st = st_context(original->ctx); 3629 struct gl_program *prog = &fp->Base.Base; 3630 struct gl_program_parameter_list *params = _mesa_new_parameter_list(); 3631 st_src_reg coord, src0; 3632 st_dst_reg dst0; 3633 glsl_to_tgsi_instruction *inst; 3634 3635 /* Copy attributes of the glsl_to_tgsi_visitor in the original shader. */ 3636 v->ctx = original->ctx; 3637 v->prog = prog; 3638 v->shader_program = NULL; 3639 v->glsl_version = original->glsl_version; 3640 v->native_integers = original->native_integers; 3641 v->options = original->options; 3642 v->next_temp = original->next_temp; 3643 v->num_address_regs = original->num_address_regs; 3644 v->samplers_used = prog->SamplersUsed = original->samplers_used; 3645 v->indirect_addr_temps = original->indirect_addr_temps; 3646 v->indirect_addr_consts = original->indirect_addr_consts; 3647 memcpy(&v->immediates, &original->immediates, sizeof(v->immediates)); 3648 v->num_immediates = original->num_immediates; 3649 3650 /* 3651 * Get initial pixel color from the texture. 
3652 * TEX colorTemp, fragment.texcoord[0], texture[0], 2D; 3653 */ 3654 coord = st_src_reg(PROGRAM_INPUT, FRAG_ATTRIB_TEX0, glsl_type::vec2_type); 3655 src0 = v->get_temp(glsl_type::vec4_type); 3656 dst0 = st_dst_reg(src0); 3657 inst = v->emit(NULL, TGSI_OPCODE_TEX, dst0, coord); 3658 inst->sampler = 0; 3659 inst->tex_target = TEXTURE_2D_INDEX; 3660 3661 prog->InputsRead |= FRAG_BIT_TEX0; 3662 prog->SamplersUsed |= (1 << 0); /* mark sampler 0 as used */ 3663 v->samplers_used |= (1 << 0); 3664 3665 if (scale_and_bias) { 3666 static const gl_state_index scale_state[STATE_LENGTH] = 3667 { STATE_INTERNAL, STATE_PT_SCALE, 3668 (gl_state_index) 0, (gl_state_index) 0, (gl_state_index) 0 }; 3669 static const gl_state_index bias_state[STATE_LENGTH] = 3670 { STATE_INTERNAL, STATE_PT_BIAS, 3671 (gl_state_index) 0, (gl_state_index) 0, (gl_state_index) 0 }; 3672 GLint scale_p, bias_p; 3673 st_src_reg scale, bias; 3674 3675 scale_p = _mesa_add_state_reference(params, scale_state); 3676 bias_p = _mesa_add_state_reference(params, bias_state); 3677 3678 /* MAD colorTemp, colorTemp, scale, bias; */ 3679 scale = st_src_reg(PROGRAM_STATE_VAR, scale_p, GLSL_TYPE_FLOAT); 3680 bias = st_src_reg(PROGRAM_STATE_VAR, bias_p, GLSL_TYPE_FLOAT); 3681 inst = v->emit(NULL, TGSI_OPCODE_MAD, dst0, src0, scale, bias); 3682 } 3683 3684 if (pixel_maps) { 3685 st_src_reg temp = v->get_temp(glsl_type::vec4_type); 3686 st_dst_reg temp_dst = st_dst_reg(temp); 3687 3688 assert(st->pixel_xfer.pixelmap_texture); 3689 3690 /* With a little effort, we can do four pixel map look-ups with 3691 * two TEX instructions: 3692 */ 3693 3694 /* TEX temp.rg, colorTemp.rgba, texture[1], 2D; */ 3695 temp_dst.writemask = WRITEMASK_XY; /* write R,G */ 3696 inst = v->emit(NULL, TGSI_OPCODE_TEX, temp_dst, src0); 3697 inst->sampler = 1; 3698 inst->tex_target = TEXTURE_2D_INDEX; 3699 3700 /* TEX temp.ba, colorTemp.baba, texture[1], 2D; */ 3701 src0.swizzle = MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W, SWIZZLE_Z, SWIZZLE_W); 3702 
temp_dst.writemask = WRITEMASK_ZW; /* write B,A */ 3703 inst = v->emit(NULL, TGSI_OPCODE_TEX, temp_dst, src0); 3704 inst->sampler = 1; 3705 inst->tex_target = TEXTURE_2D_INDEX; 3706 3707 prog->SamplersUsed |= (1 << 1); /* mark sampler 1 as used */ 3708 v->samplers_used |= (1 << 1); 3709 3710 /* MOV colorTemp, temp; */ 3711 inst = v->emit(NULL, TGSI_OPCODE_MOV, dst0, temp); 3712 } 3713 3714 /* Now copy the instructions from the original glsl_to_tgsi_visitor into the 3715 * new visitor. */ 3716 foreach_iter(exec_list_iterator, iter, original->instructions) { 3717 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3718 glsl_to_tgsi_instruction *newinst; 3719 st_src_reg src_regs[3]; 3720 3721 if (inst->dst.file == PROGRAM_OUTPUT) 3722 prog->OutputsWritten |= BITFIELD64_BIT(inst->dst.index); 3723 3724 for (int i=0; i<3; i++) { 3725 src_regs[i] = inst->src[i]; 3726 if (src_regs[i].file == PROGRAM_INPUT && 3727 src_regs[i].index == FRAG_ATTRIB_COL0) 3728 { 3729 src_regs[i].file = PROGRAM_TEMPORARY; 3730 src_regs[i].index = src0.index; 3731 } 3732 else if (src_regs[i].file == PROGRAM_INPUT) 3733 prog->InputsRead |= BITFIELD64_BIT(src_regs[i].index); 3734 } 3735 3736 newinst = v->emit(NULL, inst->op, inst->dst, src_regs[0], src_regs[1], src_regs[2]); 3737 newinst->tex_target = inst->tex_target; 3738 } 3739 3740 /* Make modifications to fragment program info. */ 3741 prog->Parameters = _mesa_combine_parameter_lists(params, 3742 original->prog->Parameters); 3743 _mesa_free_parameter_list(params); 3744 count_resources(v, prog); 3745 fp->glsl_to_tgsi = v; 3746} 3747 3748/** 3749 * Make fragment program for glBitmap: 3750 * Sample the texture and kill the fragment if the bit is 0. 3751 * This program will be combined with the user's fragment program. 3752 * 3753 * Based on make_bitmap_fragment_program in st_cb_bitmap.c. 
3754 */ 3755extern "C" void 3756get_bitmap_visitor(struct st_fragment_program *fp, 3757 glsl_to_tgsi_visitor *original, int samplerIndex) 3758{ 3759 glsl_to_tgsi_visitor *v = new glsl_to_tgsi_visitor(); 3760 struct st_context *st = st_context(original->ctx); 3761 struct gl_program *prog = &fp->Base.Base; 3762 st_src_reg coord, src0; 3763 st_dst_reg dst0; 3764 glsl_to_tgsi_instruction *inst; 3765 3766 /* Copy attributes of the glsl_to_tgsi_visitor in the original shader. */ 3767 v->ctx = original->ctx; 3768 v->prog = prog; 3769 v->shader_program = NULL; 3770 v->glsl_version = original->glsl_version; 3771 v->native_integers = original->native_integers; 3772 v->options = original->options; 3773 v->next_temp = original->next_temp; 3774 v->num_address_regs = original->num_address_regs; 3775 v->samplers_used = prog->SamplersUsed = original->samplers_used; 3776 v->indirect_addr_temps = original->indirect_addr_temps; 3777 v->indirect_addr_consts = original->indirect_addr_consts; 3778 memcpy(&v->immediates, &original->immediates, sizeof(v->immediates)); 3779 v->num_immediates = original->num_immediates; 3780 3781 /* TEX tmp0, fragment.texcoord[0], texture[0], 2D; */ 3782 coord = st_src_reg(PROGRAM_INPUT, FRAG_ATTRIB_TEX0, glsl_type::vec2_type); 3783 src0 = v->get_temp(glsl_type::vec4_type); 3784 dst0 = st_dst_reg(src0); 3785 inst = v->emit(NULL, TGSI_OPCODE_TEX, dst0, coord); 3786 inst->sampler = samplerIndex; 3787 inst->tex_target = TEXTURE_2D_INDEX; 3788 3789 prog->InputsRead |= FRAG_BIT_TEX0; 3790 prog->SamplersUsed |= (1 << samplerIndex); /* mark sampler as used */ 3791 v->samplers_used |= (1 << samplerIndex); 3792 3793 /* KIL if -tmp0 < 0 # texel=0 -> keep / texel=0 -> discard */ 3794 src0.negate = NEGATE_XYZW; 3795 if (st->bitmap.tex_format == PIPE_FORMAT_L8_UNORM) 3796 src0.swizzle = SWIZZLE_XXXX; 3797 inst = v->emit(NULL, TGSI_OPCODE_KIL, undef_dst, src0); 3798 3799 /* Now copy the instructions from the original glsl_to_tgsi_visitor into the 3800 * new visitor. 
*/ 3801 foreach_iter(exec_list_iterator, iter, original->instructions) { 3802 glsl_to_tgsi_instruction *inst = (glsl_to_tgsi_instruction *)iter.get(); 3803 glsl_to_tgsi_instruction *newinst; 3804 st_src_reg src_regs[3]; 3805 3806 if (inst->dst.file == PROGRAM_OUTPUT) 3807 prog->OutputsWritten |= BITFIELD64_BIT(inst->dst.index); 3808 3809 for (int i=0; i<3; i++) { 3810 src_regs[i] = inst->src[i]; 3811 if (src_regs[i].file == PROGRAM_INPUT) 3812 prog->InputsRead |= BITFIELD64_BIT(src_regs[i].index); 3813 } 3814 3815 newinst = v->emit(NULL, inst->op, inst->dst, src_regs[0], src_regs[1], src_regs[2]); 3816 newinst->tex_target = inst->tex_target; 3817 } 3818 3819 /* Make modifications to fragment program info. */ 3820 prog->Parameters = _mesa_clone_parameter_list(original->prog->Parameters); 3821 count_resources(v, prog); 3822 fp->glsl_to_tgsi = v; 3823} 3824 3825/* ------------------------- TGSI conversion stuff -------------------------- */ 3826struct label { 3827 unsigned branch_target; 3828 unsigned token; 3829}; 3830 3831/** 3832 * Intermediate state used during shader translation. 
 */
struct st_translate {
   struct ureg_program *ureg;

   /* glsl_to_tgsi register files mapped onto ureg registers; temps are
    * declared lazily by dst_register()/src_register().
    */
   struct ureg_dst temps[MAX_TEMPS];
   struct ureg_src *constants;
   struct ureg_src *immediates;
   struct ureg_dst outputs[PIPE_MAX_SHADER_OUTPUTS];
   struct ureg_src inputs[PIPE_MAX_SHADER_INPUTS];
   struct ureg_dst address[1];
   struct ureg_src samplers[PIPE_MAX_SAMPLERS];
   struct ureg_src systemValues[SYSTEM_VALUE_MAX];

   /* Extra info for handling point size clamping in vertex shader */
   struct ureg_dst pointSizeResult; /**< Actual point size output register */
   struct ureg_src pointSizeConst; /**< Point size range constant register */
   GLint pointSizeOutIndex; /**< Temp point size output register */
   GLboolean prevInstWrotePointSize;

   /* Remapping of Mesa input/output slots to packed TGSI slots. */
   const GLuint *inputMapping;
   const GLuint *outputMapping;

   /* For every instruction that contains a label (eg CALL), keep
    * details so that we can go back afterwards and emit the correct
    * tgsi instruction number for each label.
    */
   struct label *labels;
   unsigned labels_size;
   unsigned labels_count;

   /* Keep a record of the tgsi instruction number that each mesa
    * instruction starts at, will be used to fix up labels after
    * translation.
    */
   unsigned *insn;
   unsigned insn_size;
   unsigned insn_count;

   unsigned procType;  /**< TGSI_PROCESSOR_VERTEX/FRAGMENT */

   /* Set to TRUE by get_label()/set_insn_start() when a realloc fails. */
   boolean error;
};

/** Map Mesa's SYSTEM_VALUE_x to TGSI_SEMANTIC_x */
static unsigned mesa_sysval_to_semantic[SYSTEM_VALUE_MAX] = {
   TGSI_SEMANTIC_FACE,
   TGSI_SEMANTIC_VERTEXID,
   TGSI_SEMANTIC_INSTANCEID
};

/**
 * Make note of a branch to a label in the TGSI code.
 * After we've emitted all instructions, we'll go over the list
 * of labels built here and patch the TGSI code with the actual
 * location of each label.
3888 */ 3889static unsigned *get_label(struct st_translate *t, unsigned branch_target) 3890{ 3891 unsigned i; 3892 3893 if (t->labels_count + 1 >= t->labels_size) { 3894 t->labels_size = 1 << (util_logbase2(t->labels_size) + 1); 3895 t->labels = (struct label *)realloc(t->labels, 3896 t->labels_size * sizeof(struct label)); 3897 if (t->labels == NULL) { 3898 static unsigned dummy; 3899 t->error = TRUE; 3900 return &dummy; 3901 } 3902 } 3903 3904 i = t->labels_count++; 3905 t->labels[i].branch_target = branch_target; 3906 return &t->labels[i].token; 3907} 3908 3909/** 3910 * Called prior to emitting the TGSI code for each instruction. 3911 * Allocate additional space for instructions if needed. 3912 * Update the insn[] array so the next glsl_to_tgsi_instruction points to 3913 * the next TGSI instruction. 3914 */ 3915static void set_insn_start(struct st_translate *t, unsigned start) 3916{ 3917 if (t->insn_count + 1 >= t->insn_size) { 3918 t->insn_size = 1 << (util_logbase2(t->insn_size) + 1); 3919 t->insn = (unsigned *)realloc(t->insn, t->insn_size * sizeof(t->insn[0])); 3920 if (t->insn == NULL) { 3921 t->error = TRUE; 3922 return; 3923 } 3924 } 3925 3926 t->insn[t->insn_count++] = start; 3927} 3928 3929/** 3930 * Map a glsl_to_tgsi constant/immediate to a TGSI immediate. 3931 */ 3932static struct ureg_src 3933emit_immediate(struct st_translate *t, 3934 gl_constant_value values[4], 3935 int type, int size) 3936{ 3937 struct ureg_program *ureg = t->ureg; 3938 3939 switch(type) 3940 { 3941 case GL_FLOAT: 3942 return ureg_DECL_immediate(ureg, &values[0].f, size); 3943 case GL_INT: 3944 return ureg_DECL_immediate_int(ureg, &values[0].i, size); 3945 case GL_UNSIGNED_INT: 3946 case GL_BOOL: 3947 return ureg_DECL_immediate_uint(ureg, &values[0].u, size); 3948 default: 3949 assert(!"should not get here - type must be float, int, uint, or bool"); 3950 return ureg_src_undef(); 3951 } 3952} 3953 3954/** 3955 * Map a glsl_to_tgsi dst register to a TGSI ureg_dst register. 
3956 */ 3957static struct ureg_dst 3958dst_register(struct st_translate *t, 3959 gl_register_file file, 3960 GLuint index) 3961{ 3962 switch(file) { 3963 case PROGRAM_UNDEFINED: 3964 return ureg_dst_undef(); 3965 3966 case PROGRAM_TEMPORARY: 3967 if (ureg_dst_is_undef(t->temps[index])) 3968 t->temps[index] = ureg_DECL_temporary(t->ureg); 3969 3970 return t->temps[index]; 3971 3972 case PROGRAM_OUTPUT: 3973 if (t->procType == TGSI_PROCESSOR_VERTEX && index == VERT_RESULT_PSIZ) 3974 t->prevInstWrotePointSize = GL_TRUE; 3975 3976 if (t->procType == TGSI_PROCESSOR_VERTEX) 3977 assert(index < VERT_RESULT_MAX); 3978 else if (t->procType == TGSI_PROCESSOR_FRAGMENT) 3979 assert(index < FRAG_RESULT_MAX); 3980 else 3981 assert(index < GEOM_RESULT_MAX); 3982 3983 assert(t->outputMapping[index] < Elements(t->outputs)); 3984 3985 return t->outputs[t->outputMapping[index]]; 3986 3987 case PROGRAM_ADDRESS: 3988 return t->address[index]; 3989 3990 default: 3991 assert(!"unknown dst register file"); 3992 return ureg_dst_undef(); 3993 } 3994} 3995 3996/** 3997 * Map a glsl_to_tgsi src register to a TGSI ureg_src register. 
3998 */ 3999static struct ureg_src 4000src_register(struct st_translate *t, 4001 gl_register_file file, 4002 GLuint index) 4003{ 4004 switch(file) { 4005 case PROGRAM_UNDEFINED: 4006 return ureg_src_undef(); 4007 4008 case PROGRAM_TEMPORARY: 4009 assert(index >= 0); 4010 assert(index < Elements(t->temps)); 4011 if (ureg_dst_is_undef(t->temps[index])) 4012 t->temps[index] = ureg_DECL_temporary(t->ureg); 4013 return ureg_src(t->temps[index]); 4014 4015 case PROGRAM_NAMED_PARAM: 4016 case PROGRAM_ENV_PARAM: 4017 case PROGRAM_LOCAL_PARAM: 4018 case PROGRAM_UNIFORM: 4019 assert(index >= 0); 4020 return t->constants[index]; 4021 case PROGRAM_STATE_VAR: 4022 case PROGRAM_CONSTANT: /* ie, immediate */ 4023 if (index < 0) 4024 return ureg_DECL_constant(t->ureg, 0); 4025 else 4026 return t->constants[index]; 4027 4028 case PROGRAM_IMMEDIATE: 4029 return t->immediates[index]; 4030 4031 case PROGRAM_INPUT: 4032 assert(t->inputMapping[index] < Elements(t->inputs)); 4033 return t->inputs[t->inputMapping[index]]; 4034 4035 case PROGRAM_OUTPUT: 4036 assert(t->outputMapping[index] < Elements(t->outputs)); 4037 return ureg_src(t->outputs[t->outputMapping[index]]); /* not needed? */ 4038 4039 case PROGRAM_ADDRESS: 4040 return ureg_src(t->address[index]); 4041 4042 case PROGRAM_SYSTEM_VALUE: 4043 assert(index < Elements(t->systemValues)); 4044 return t->systemValues[index]; 4045 4046 default: 4047 assert(!"unknown src register file"); 4048 return ureg_src_undef(); 4049 } 4050} 4051 4052/** 4053 * Create a TGSI ureg_dst register from an st_dst_reg. 
4054 */ 4055static struct ureg_dst 4056translate_dst(struct st_translate *t, 4057 const st_dst_reg *dst_reg, 4058 bool saturate) 4059{ 4060 struct ureg_dst dst = dst_register(t, 4061 dst_reg->file, 4062 dst_reg->index); 4063 4064 dst = ureg_writemask(dst, dst_reg->writemask); 4065 4066 if (saturate) 4067 dst = ureg_saturate(dst); 4068 4069 if (dst_reg->reladdr != NULL) 4070 dst = ureg_dst_indirect(dst, ureg_src(t->address[0])); 4071 4072 return dst; 4073} 4074 4075/** 4076 * Create a TGSI ureg_src register from an st_src_reg. 4077 */ 4078static struct ureg_src 4079translate_src(struct st_translate *t, const st_src_reg *src_reg) 4080{ 4081 struct ureg_src src = src_register(t, src_reg->file, src_reg->index); 4082 4083 src = ureg_swizzle(src, 4084 GET_SWZ(src_reg->swizzle, 0) & 0x3, 4085 GET_SWZ(src_reg->swizzle, 1) & 0x3, 4086 GET_SWZ(src_reg->swizzle, 2) & 0x3, 4087 GET_SWZ(src_reg->swizzle, 3) & 0x3); 4088 4089 if ((src_reg->negate & 0xf) == NEGATE_XYZW) 4090 src = ureg_negate(src); 4091 4092 if (src_reg->reladdr != NULL) { 4093 /* Normally ureg_src_indirect() would be used here, but a stupid compiler 4094 * bug in g++ makes ureg_src_indirect (an inline C function) erroneously 4095 * set the bit for src.Negate. So we have to do the operation manually 4096 * here to work around the compiler's problems. */ 4097 /*src = ureg_src_indirect(src, ureg_src(t->address[0]));*/ 4098 struct ureg_src addr = ureg_src(t->address[0]); 4099 src.Indirect = 1; 4100 src.IndirectFile = addr.File; 4101 src.IndirectIndex = addr.Index; 4102 src.IndirectSwizzle = addr.SwizzleX; 4103 4104 if (src_reg->file != PROGRAM_INPUT && 4105 src_reg->file != PROGRAM_OUTPUT) { 4106 /* If src_reg->index was negative, it was set to zero in 4107 * src_register(). Reassign it now. But don't do this 4108 * for input/output regs since they get remapped while 4109 * const buffers don't. 
4110 */ 4111 src.Index = src_reg->index; 4112 } 4113 } 4114 4115 return src; 4116} 4117 4118static struct tgsi_texture_offset 4119translate_tex_offset(struct st_translate *t, 4120 const struct tgsi_texture_offset *in_offset) 4121{ 4122 struct tgsi_texture_offset offset; 4123 4124 assert(in_offset->File == PROGRAM_IMMEDIATE); 4125 4126 offset.File = TGSI_FILE_IMMEDIATE; 4127 offset.Index = in_offset->Index; 4128 offset.SwizzleX = in_offset->SwizzleX; 4129 offset.SwizzleY = in_offset->SwizzleY; 4130 offset.SwizzleZ = in_offset->SwizzleZ; 4131 4132 return offset; 4133} 4134 4135static void 4136compile_tgsi_instruction(struct st_translate *t, 4137 const glsl_to_tgsi_instruction *inst) 4138{ 4139 struct ureg_program *ureg = t->ureg; 4140 GLuint i; 4141 struct ureg_dst dst[1]; 4142 struct ureg_src src[4]; 4143 struct tgsi_texture_offset texoffsets[MAX_GLSL_TEXTURE_OFFSET]; 4144 4145 unsigned num_dst; 4146 unsigned num_src; 4147 4148 num_dst = num_inst_dst_regs(inst->op); 4149 num_src = num_inst_src_regs(inst->op); 4150 4151 if (num_dst) 4152 dst[0] = translate_dst(t, 4153 &inst->dst, 4154 inst->saturate); 4155 4156 for (i = 0; i < num_src; i++) 4157 src[i] = translate_src(t, &inst->src[i]); 4158 4159 switch(inst->op) { 4160 case TGSI_OPCODE_BGNLOOP: 4161 case TGSI_OPCODE_CAL: 4162 case TGSI_OPCODE_ELSE: 4163 case TGSI_OPCODE_ENDLOOP: 4164 case TGSI_OPCODE_IF: 4165 assert(num_dst == 0); 4166 ureg_label_insn(ureg, 4167 inst->op, 4168 src, num_src, 4169 get_label(t, 4170 inst->op == TGSI_OPCODE_CAL ? 
inst->function->sig_id : 0)); 4171 return; 4172 4173 case TGSI_OPCODE_TEX: 4174 case TGSI_OPCODE_TXB: 4175 case TGSI_OPCODE_TXD: 4176 case TGSI_OPCODE_TXL: 4177 case TGSI_OPCODE_TXP: 4178 case TGSI_OPCODE_TXQ: 4179 case TGSI_OPCODE_TXF: 4180 src[num_src++] = t->samplers[inst->sampler]; 4181 for (i = 0; i < inst->tex_offset_num_offset; i++) { 4182 texoffsets[i] = translate_tex_offset(t, &inst->tex_offsets[i]); 4183 } 4184 ureg_tex_insn(ureg, 4185 inst->op, 4186 dst, num_dst, 4187 st_translate_texture_target(inst->tex_target, inst->tex_shadow), 4188 texoffsets, inst->tex_offset_num_offset, 4189 src, num_src); 4190 return; 4191 4192 case TGSI_OPCODE_SCS: 4193 dst[0] = ureg_writemask(dst[0], TGSI_WRITEMASK_XY); 4194 ureg_insn(ureg, inst->op, dst, num_dst, src, num_src); 4195 break; 4196 4197 default: 4198 ureg_insn(ureg, 4199 inst->op, 4200 dst, num_dst, 4201 src, num_src); 4202 break; 4203 } 4204} 4205 4206/** 4207 * Emit the TGSI instructions for inverting and adjusting WPOS. 4208 * This code is unavoidable because it also depends on whether 4209 * a FBO is bound (STATE_FB_WPOS_Y_TRANSFORM). 4210 */ 4211static void 4212emit_wpos_adjustment( struct st_translate *t, 4213 const struct gl_program *program, 4214 boolean invert, 4215 GLfloat adjX, GLfloat adjY[2]) 4216{ 4217 struct ureg_program *ureg = t->ureg; 4218 4219 /* Fragment program uses fragment position input. 4220 * Need to replace instances of INPUT[WPOS] with temp T 4221 * where T = INPUT[WPOS] by y is inverted. 4222 */ 4223 static const gl_state_index wposTransformState[STATE_LENGTH] 4224 = { STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 4225 (gl_state_index)0, (gl_state_index)0, (gl_state_index)0 }; 4226 4227 /* XXX: note we are modifying the incoming shader here! 
Need to 4228 * do this before emitting the constant decls below, or this 4229 * will be missed: 4230 */ 4231 unsigned wposTransConst = _mesa_add_state_reference(program->Parameters, 4232 wposTransformState); 4233 4234 struct ureg_src wpostrans = ureg_DECL_constant( ureg, wposTransConst ); 4235 struct ureg_dst wpos_temp = ureg_DECL_temporary( ureg ); 4236 struct ureg_src wpos_input = t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]]; 4237 4238 /* First, apply the coordinate shift: */ 4239 if (adjX || adjY[0] || adjY[1]) { 4240 if (adjY[0] != adjY[1]) { 4241 /* Adjust the y coordinate by adjY[1] or adjY[0] respectively 4242 * depending on whether inversion is actually going to be applied 4243 * or not, which is determined by testing against the inversion 4244 * state variable used below, which will be either +1 or -1. 4245 */ 4246 struct ureg_dst adj_temp = ureg_DECL_temporary(ureg); 4247 4248 ureg_CMP(ureg, adj_temp, 4249 ureg_scalar(wpostrans, invert ? 2 : 0), 4250 ureg_imm4f(ureg, adjX, adjY[0], 0.0f, 0.0f), 4251 ureg_imm4f(ureg, adjX, adjY[1], 0.0f, 0.0f)); 4252 ureg_ADD(ureg, wpos_temp, wpos_input, ureg_src(adj_temp)); 4253 } else { 4254 ureg_ADD(ureg, wpos_temp, wpos_input, 4255 ureg_imm4f(ureg, adjX, adjY[0], 0.0f, 0.0f)); 4256 } 4257 wpos_input = ureg_src(wpos_temp); 4258 } else { 4259 /* MOV wpos_temp, input[wpos] 4260 */ 4261 ureg_MOV( ureg, wpos_temp, wpos_input ); 4262 } 4263 4264 /* Now the conditional y flip: STATE_FB_WPOS_Y_TRANSFORM.xy/zw will be 4265 * inversion/identity, or the other way around if we're drawing to an FBO. 
    */
   if (invert) {
      /* MAD wpos_temp.y, wpos_input, wpostrans.xxxx, wpostrans.yyyy */
      ureg_MAD( ureg,
                ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y ),
                wpos_input,
                ureg_scalar(wpostrans, 0),
                ureg_scalar(wpostrans, 1));
   } else {
      /* MAD wpos_temp.y, wpos_input, wpostrans.zzzz, wpostrans.wwww */
      ureg_MAD( ureg,
                ureg_writemask(wpos_temp, TGSI_WRITEMASK_Y ),
                wpos_input,
                ureg_scalar(wpostrans, 2),
                ureg_scalar(wpostrans, 3));
   }

   /* Use wpos_temp as position input from here on:
    */
   t->inputs[t->inputMapping[FRAG_ATTRIB_WPOS]] = ureg_src(wpos_temp);
}


/**
 * Emit fragment position/coordinate code.
 *
 * Queries the pipe driver's supported window-coordinate conventions
 * (origin and pixel center) and, when they differ from what the fragment
 * shader requests, computes the adjustment/inversion parameters that
 * emit_wpos_adjustment() uses to fix up the WPOS input.
 */
static void
emit_wpos(struct st_context *st,
          struct st_translate *t,
          const struct gl_program *program,
          struct ureg_program *ureg)
{
   const struct gl_fragment_program *fp =
      (const struct gl_fragment_program *) program;
   struct pipe_screen *pscreen = st->pipe->screen;
   GLfloat adjX = 0.0f;
   GLfloat adjY[2] = { 0.0f, 0.0f };
   boolean invert = FALSE;

   /* Query the pixel center conventions supported by the pipe driver and set
    * adjX, adjY to help out if it cannot handle the requested one internally.
    *
    * The bias of the y-coordinate depends on whether y-inversion takes place
    * (adjY[1]) or not (adjY[0]), which is in turn dependent on whether we are
    * drawing to an FBO (causes additional inversion), and whether the pipe
    * driver origin and the requested origin differ (the latter condition is
    * stored in the 'invert' variable).
    *
    * For height = 100 (i = integer, h = half-integer, l = lower, u = upper):
    *
    * center shift only:
    * i -> h: +0.5
    * h -> i: -0.5
    *
    * inversion only:
    * l,i -> u,i: ( 0.0 + 1.0) * -1 + 100 = 99
    * l,h -> u,h: ( 0.5 + 0.0) * -1 + 100 = 99.5
    * u,i -> l,i: (99.0 + 1.0) * -1 + 100 = 0
    * u,h -> l,h: (99.5 + 0.0) * -1 + 100 = 0.5
    *
    * inversion and center shift:
    * l,i -> u,h: ( 0.0 + 0.5) * -1 + 100 = 99.5
    * l,h -> u,i: ( 0.5 + 0.5) * -1 + 100 = 99
    * u,i -> l,h: (99.0 + 0.5) * -1 + 100 = 0.5
    * u,h -> l,i: (99.5 + 0.5) * -1 + 100 = 0
    */
   if (fp->OriginUpperLeft) {
      /* Fragment shader wants origin in upper-left */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT)) {
         /* the driver supports upper-left origin */
      }
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT)) {
         /* the driver supports lower-left origin, need to invert Y */
         ureg_property_fs_coord_origin(ureg, TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
         invert = TRUE;
      }
      else
         assert(0);
   }
   else {
      /* Fragment shader wants origin in lower-left */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT))
         /* the driver supports lower-left origin */
         ureg_property_fs_coord_origin(ureg, TGSI_FS_COORD_ORIGIN_LOWER_LEFT);
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT))
         /* the driver supports upper-left origin, need to invert Y */
         invert = TRUE;
      else
         assert(0);
   }

   if (fp->PixelCenterInteger) {
      /* Fragment shader wants pixel center integer */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
         /* the driver supports pixel center integer */
         adjY[1] = 1.0f;
         ureg_property_fs_coord_pixel_center(ureg, TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
      }
      else if (pscreen->get_param(pscreen,
                                  PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
         /* the driver supports pixel center half integer, need to bias X,Y */
         adjX = -0.5f;
         adjY[0] = -0.5f;
         adjY[1] = 0.5f;
      }
      else
         assert(0);
   }
   else {
      /* Fragment shader wants pixel center half integer */
      if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER)) {
         /* the driver supports pixel center half integer */
      }
      else if (pscreen->get_param(pscreen, PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER)) {
         /* the driver supports pixel center integer, need to bias X,Y */
         adjX = adjY[0] = adjY[1] = 0.5f;
         ureg_property_fs_coord_pixel_center(ureg, TGSI_FS_COORD_PIXEL_CENTER_INTEGER);
      }
      else
         assert(0);
   }

   /* we invert after adjustment so that we avoid the MOV to temporary,
    * and reuse the adjustment ADD instead */
   emit_wpos_adjustment(t, program, invert, adjX, adjY);
}

/**
 * OpenGL's fragment gl_FrontFace input is 1 for front-facing, 0 for back.
 * TGSI uses +1 for front, -1 for back.
 * This function converts the TGSI value to the GL value.  Simply clamping/
 * saturating the value to [0,1] does the job.
 */
static void
emit_face_var(struct st_translate *t)
{
   struct ureg_program *ureg = t->ureg;
   struct ureg_dst face_temp = ureg_DECL_temporary(ureg);
   struct ureg_src face_input = t->inputs[t->inputMapping[FRAG_ATTRIB_FACE]];

   /* MOV_SAT face_temp, input[face] */
   face_temp = ureg_saturate(face_temp);
   ureg_MOV(ureg, face_temp, face_input);

   /* Use face_temp as face input from here on: */
   t->inputs[t->inputMapping[FRAG_ATTRIB_FACE]] = ureg_src(face_temp);
}

/**
 * Copy the vertex edge flag input to the edge flag output
 * (used when edge flags must be passed through the vertex shader).
 */
static void
emit_edgeflags(struct st_translate *t)
{
   struct ureg_program *ureg = t->ureg;
   struct ureg_dst edge_dst = t->outputs[t->outputMapping[VERT_RESULT_EDGE]];
   struct ureg_src edge_src = t->inputs[t->inputMapping[VERT_ATTRIB_EDGEFLAG]];

   ureg_MOV(ureg, edge_dst, edge_src);
}

/**
 * Translate intermediate IR (glsl_to_tgsi_instruction) to TGSI format.
 * \param program  the program to translate
 * \param numInputs  number of input registers used
 * \param inputMapping  maps Mesa fragment program inputs to TGSI generic
 *                      input indexes
 * \param inputSemanticName  the TGSI_SEMANTIC flag for each input
 * \param inputSemanticIndex  the semantic index (ex: which texcoord) for
 *                            each input
 * \param interpMode  the TGSI_INTERPOLATE_LINEAR/PERSP mode for each input
 * \param numOutputs  number of output registers used
 * \param outputMapping  maps Mesa fragment program outputs to TGSI
 *                       generic outputs
 * \param outputSemanticName  the TGSI_SEMANTIC flag for each output
 * \param outputSemanticIndex  the semantic index (ex: which texcoord) for
 *                             each output
 *
 * \return  PIPE_OK, PIPE_ERROR_BAD_INPUT or PIPE_ERROR_OUT_OF_MEMORY
 */
extern "C" enum pipe_error
st_translate_program(
   struct gl_context *ctx,
   uint procType,
   struct ureg_program *ureg,
   glsl_to_tgsi_visitor *program,
   const struct gl_program *proginfo,
   GLuint numInputs,
   const GLuint inputMapping[],
   const ubyte inputSemanticName[],
   const ubyte inputSemanticIndex[],
   const GLuint interpMode[],
   GLuint numOutputs,
   const GLuint outputMapping[],
   const ubyte outputSemanticName[],
   const ubyte outputSemanticIndex[],
   boolean passthrough_edgeflags)
{
   struct st_translate *t;
   unsigned i;
   enum pipe_error ret = PIPE_OK;

   assert(numInputs <= Elements(t->inputs));
   assert(numOutputs <= Elements(t->outputs));

   t = CALLOC_STRUCT(st_translate);
   if (!t) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out;
   }

   /* NOTE(review): CALLOC_STRUCT already returns zeroed memory, so this
    * memset looks redundant — confirm before removing. */
   memset(t, 0, sizeof *t);

   t->procType = procType;
   t->inputMapping = inputMapping;
   t->outputMapping = outputMapping;
   t->ureg = ureg;
   t->pointSizeOutIndex = -1;
   t->prevInstWrotePointSize = GL_FALSE;

   if (program->shader_program) {
      for (i = 0; i < program->shader_program->NumUserUniformStorage; i++) {
         struct gl_uniform_storage *const storage =
            &program->shader_program->UniformStorage[i];

         _mesa_uniform_detach_all_driver_storage(storage);
      }
   }

   /*
    * Declare input attributes.
    */
   if (procType == TGSI_PROCESSOR_FRAGMENT) {
      for (i = 0; i < numInputs; i++) {
         t->inputs[i] = ureg_DECL_fs_input(ureg,
                                           inputSemanticName[i],
                                           inputSemanticIndex[i],
                                           interpMode[i]);
      }

      if (proginfo->InputsRead & FRAG_BIT_WPOS) {
         /* Must do this after setting up t->inputs, and before
          * emitting constant references, below:
          */
         emit_wpos(st_context(ctx), t, proginfo, ureg);
      }

      if (proginfo->InputsRead & FRAG_BIT_FACE)
         emit_face_var(t);

      /*
       * Declare output attributes.
       */
      for (i = 0; i < numOutputs; i++) {
         switch (outputSemanticName[i]) {
         case TGSI_SEMANTIC_POSITION:
            t->outputs[i] = ureg_DECL_output(ureg,
                                             TGSI_SEMANTIC_POSITION, /* Z/Depth */
                                             outputSemanticIndex[i]);
            t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Z);
            break;
         case TGSI_SEMANTIC_STENCIL:
            t->outputs[i] = ureg_DECL_output(ureg,
                                             TGSI_SEMANTIC_STENCIL, /* Stencil */
                                             outputSemanticIndex[i]);
            t->outputs[i] = ureg_writemask(t->outputs[i], TGSI_WRITEMASK_Y);
            break;
         case TGSI_SEMANTIC_COLOR:
            t->outputs[i] = ureg_DECL_output(ureg,
                                             TGSI_SEMANTIC_COLOR,
                                             outputSemanticIndex[i]);
            break;
         default:
            assert(!"fragment shader outputs must be POSITION/STENCIL/COLOR");
            ret = PIPE_ERROR_BAD_INPUT;
            goto out;
         }
      }
   }
   else if (procType == TGSI_PROCESSOR_GEOMETRY) {
      for (i = 0; i < numInputs; i++) {
         t->inputs[i] = ureg_DECL_gs_input(ureg,
                                           i,
                                           inputSemanticName[i],
                                           inputSemanticIndex[i]);
      }

      for (i = 0; i < numOutputs; i++) {
         t->outputs[i] = ureg_DECL_output(ureg,
                                          outputSemanticName[i],
                                          outputSemanticIndex[i]);
      }
   }
   else {
      assert(procType == TGSI_PROCESSOR_VERTEX);

      for (i = 0; i < numInputs; i++) {
         t->inputs[i] = ureg_DECL_vs_input(ureg, i);
      }

      for (i = 0; i < numOutputs; i++) {
         if (outputSemanticName[i] == TGSI_SEMANTIC_CLIPDIST) {
            /* Mask off clip-distance components beyond the declared array
             * size; each CLIPDIST output holds up to four distances. */
            int mask = ((1 << (program->num_clip_distances - 4*outputSemanticIndex[i])) - 1) & TGSI_WRITEMASK_XYZW;
            t->outputs[i] = ureg_DECL_output_masked(ureg,
                                                    outputSemanticName[i],
                                                    outputSemanticIndex[i],
                                                    mask);
         } else {
            t->outputs[i] = ureg_DECL_output(ureg,
                                             outputSemanticName[i],
                                             outputSemanticIndex[i]);
         }
         if ((outputSemanticName[i] == TGSI_SEMANTIC_PSIZE) && proginfo->Id) {
            /* Writing to the point size result register requires special
             * handling to implement clamping.
             */
            static const gl_state_index pointSizeClampState[STATE_LENGTH]
               = { STATE_INTERNAL, STATE_POINT_SIZE_IMPL_CLAMP, (gl_state_index)0, (gl_state_index)0, (gl_state_index)0 };
            /* XXX: note we are modifying the incoming shader here!  Need to
             * do this before emitting the constant decls below, or this
             * will be missed.
             */
            unsigned pointSizeClampConst =
               _mesa_add_state_reference(proginfo->Parameters,
                                         pointSizeClampState);
            struct ureg_dst psizregtemp = ureg_DECL_temporary(ureg);
            t->pointSizeConst = ureg_DECL_constant(ureg, pointSizeClampConst);
            t->pointSizeResult = t->outputs[i];
            t->pointSizeOutIndex = i;
            /* Redirect point-size writes into a temp; the clamped value is
             * written to the real output after each instruction below. */
            t->outputs[i] = psizregtemp;
         }
      }
      if (passthrough_edgeflags)
         emit_edgeflags(t);
   }

   /* Declare address register.
    */
   if (program->num_address_regs > 0) {
      assert(program->num_address_regs == 1);
      t->address[0] = ureg_DECL_address(ureg);
   }

   /* Declare misc input registers
    */
   {
      GLbitfield sysInputs = proginfo->SystemValuesRead;
      unsigned numSys = 0;
      for (i = 0; sysInputs; i++) {
         if (sysInputs & (1 << i)) {
            unsigned semName = mesa_sysval_to_semantic[i];
            t->systemValues[i] = ureg_DECL_system_value(ureg, numSys, semName, 0);
            numSys++;
            sysInputs &= ~(1 << i);
         }
      }
   }

   if (program->indirect_addr_temps) {
      /* If temps are accessed with indirect addressing, declare temporaries
       * in sequential order.  Else, we declare them on demand elsewhere.
       * (Note: the number of temporaries is equal to program->next_temp)
       */
      for (i = 0; i < (unsigned)program->next_temp; i++) {
         /* XXX use TGSI_FILE_TEMPORARY_ARRAY when it's supported by ureg */
         t->temps[i] = ureg_DECL_temporary(t->ureg);
      }
   }

   /* Emit constants and uniforms.  TGSI uses a single index space for these,
    * so we put all the translated regs in t->constants.
    */
   if (proginfo->Parameters) {
      t->constants = (struct ureg_src *)CALLOC(proginfo->Parameters->NumParameters * sizeof(t->constants[0]));
      if (t->constants == NULL) {
         ret = PIPE_ERROR_OUT_OF_MEMORY;
         goto out;
      }

      for (i = 0; i < proginfo->Parameters->NumParameters; i++) {
         switch (proginfo->Parameters->Parameters[i].Type) {
         case PROGRAM_ENV_PARAM:
         case PROGRAM_LOCAL_PARAM:
         case PROGRAM_STATE_VAR:
         case PROGRAM_NAMED_PARAM:
         case PROGRAM_UNIFORM:
            t->constants[i] = ureg_DECL_constant(ureg, i);
            break;

         /* Emit immediates for PROGRAM_CONSTANT only when there's no indirect
          * addressing of the const buffer.
          * FIXME: Be smarter and recognize param arrays:
          * indirect addressing is only valid within the referenced
          * array.
          */
         case PROGRAM_CONSTANT:
            if (program->indirect_addr_consts)
               t->constants[i] = ureg_DECL_constant(ureg, i);
            else
               t->constants[i] = emit_immediate(t,
                                                proginfo->Parameters->ParameterValues[i],
                                                proginfo->Parameters->Parameters[i].DataType,
                                                4);
            break;
         default:
            break;
         }
      }
   }

   /* Emit immediate values.
    */
   t->immediates = (struct ureg_src *)CALLOC(program->num_immediates * sizeof(struct ureg_src));
   if (t->immediates == NULL) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out;
   }
   i = 0;
   foreach_iter(exec_list_iterator, iter, program->immediates) {
      immediate_storage *imm = (immediate_storage *)iter.get();
      assert(i < program->num_immediates);
      t->immediates[i++] = emit_immediate(t, imm->values, imm->type, imm->size);
   }
   assert(i == program->num_immediates);

   /* texture samplers */
   for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
      if (program->samplers_used & (1 << i)) {
         t->samplers[i] = ureg_DECL_sampler(ureg, i);
      }
   }

   /* Emit each instruction in turn:
    */
   foreach_iter(exec_list_iterator, iter, program->instructions) {
      set_insn_start(t, ureg_get_instruction_number(ureg));
      compile_tgsi_instruction(t, (glsl_to_tgsi_instruction *)iter.get());

      if (t->prevInstWrotePointSize && proginfo->Id) {
         /* The previous instruction wrote to the (fake) vertex point size
          * result register.  Now we need to clamp that value to the min/max
          * point size range, putting the result into the real point size
          * register.
          * Note that we can't do this easily at the end of program due to
          * possible early return.
          */
         set_insn_start(t, ureg_get_instruction_number(ureg));
         ureg_MAX(t->ureg,
                  ureg_writemask(t->outputs[t->pointSizeOutIndex], WRITEMASK_X),
                  ureg_src(t->outputs[t->pointSizeOutIndex]),
                  ureg_swizzle(t->pointSizeConst, 1,1,1,1));
         ureg_MIN(t->ureg, ureg_writemask(t->pointSizeResult, WRITEMASK_X),
                  ureg_src(t->outputs[t->pointSizeOutIndex]),
                  ureg_swizzle(t->pointSizeConst, 2,2,2,2));
      }
      t->prevInstWrotePointSize = GL_FALSE;
   }

   /* Fix up all emitted labels:
    */
   for (i = 0; i < t->labels_count; i++) {
      ureg_fixup_label(ureg, t->labels[i].token,
                       t->insn[t->labels[i].branch_target]);
   }

   if (program->shader_program) {
      /* This has to be done last.  Any operation that can cause
       * prog->ParameterValues to get reallocated (e.g., anything that adds a
       * program constant) has to happen before creating this linkage.
       */
      for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
         if (program->shader_program->_LinkedShaders[i] == NULL)
            continue;

         _mesa_associate_uniform_storage(ctx, program->shader_program,
               program->shader_program->_LinkedShaders[i]->Program->Parameters);
      }
   }

out:
   if (t) {
      FREE(t->insn);
      FREE(t->labels);
      FREE(t->constants);
      FREE(t->immediates);

      if (t->error) {
         debug_printf("%s: translate error flag set\n", __FUNCTION__);
      }

      FREE(t);
   }

   return ret;
}
/* ----------------------------- End TGSI code ------------------------------ */

/**
 * Convert a shader's GLSL IR into a Mesa gl_program, although without
 * generating Mesa IR.
 */
static struct gl_program *
get_mesa_program(struct gl_context *ctx,
                 struct gl_shader_program *shader_program,
                 struct gl_shader *shader,
                 int num_clip_distances)
{
   glsl_to_tgsi_visitor* v = new glsl_to_tgsi_visitor();
   struct gl_program *prog;
   struct pipe_screen * screen = st_context(ctx)->pipe->screen;
   unsigned pipe_shader_type;
   GLenum target;
   const char *target_string;
   bool progress;
   struct gl_shader_compiler_options *options =
         &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(shader->Type)];

   switch (shader->Type) {
   case GL_VERTEX_SHADER:
      target = GL_VERTEX_PROGRAM_ARB;
      target_string = "vertex";
      pipe_shader_type = PIPE_SHADER_VERTEX;
      break;
   case GL_FRAGMENT_SHADER:
      target = GL_FRAGMENT_PROGRAM_ARB;
      target_string = "fragment";
      pipe_shader_type = PIPE_SHADER_FRAGMENT;
      break;
   case GL_GEOMETRY_SHADER:
      target = GL_GEOMETRY_PROGRAM_NV;
      target_string = "geometry";
      pipe_shader_type = PIPE_SHADER_GEOMETRY;
      break;
   default:
      assert(!"should not be reached");
      return NULL;
   }

   validate_ir_tree(shader->ir);

   /* NOTE(review): 'v' is not deleted on the early NULL returns below;
    * verify whether the visitor is reclaimed elsewhere or leaks here. */
   prog = ctx->Driver.NewProgram(ctx, target, shader_program->Name);
   if (!prog)
      return NULL;
   prog->Parameters = _mesa_new_parameter_list();
   v->ctx = ctx;
   v->prog = prog;
   v->shader_program = shader_program;
   v->options = options;
   v->glsl_version = ctx->Const.GLSLVersion;
   v->native_integers = ctx->Const.NativeIntegers;
   v->num_clip_distances = num_clip_distances;

   _mesa_generate_parameters_list_for_uniforms(shader_program, shader,
                                               prog->Parameters);

   if (!screen->get_shader_param(screen, pipe_shader_type,
                                 PIPE_SHADER_CAP_OUTPUT_READ)) {
      /* Remove reads to output registers, and to varyings in vertex shaders.
       */
      lower_output_reads(shader->ir);
   }


   /* Emit intermediate IR for main(). */
   visit_exec_list(shader->ir, v);

   /* Now emit bodies for any functions that were used. */
   do {
      progress = GL_FALSE;

      foreach_iter(exec_list_iterator, iter, v->function_signatures) {
         function_entry *entry = (function_entry *)iter.get();

         if (!entry->bgn_inst) {
            v->current_function = entry;

            entry->bgn_inst = v->emit(NULL, TGSI_OPCODE_BGNSUB);
            entry->bgn_inst->function = entry;

            visit_exec_list(&entry->sig->body, v);

            /* Make sure the subroutine ends with an explicit return. */
            glsl_to_tgsi_instruction *last;
            last = (glsl_to_tgsi_instruction *)v->instructions.get_tail();
            if (last->op != TGSI_OPCODE_RET)
               v->emit(NULL, TGSI_OPCODE_RET);

            glsl_to_tgsi_instruction *end;
            end = v->emit(NULL, TGSI_OPCODE_ENDSUB);
            end->function = entry;

            progress = GL_TRUE;
         }
      }
   } while (progress);

#if 0
   /* Print out some information (for debugging purposes) used by the
    * optimization passes. */
   for (i=0; i < v->next_temp; i++) {
      int fr = v->get_first_temp_read(i);
      int fw = v->get_first_temp_write(i);
      int lr = v->get_last_temp_read(i);
      int lw = v->get_last_temp_write(i);

      printf("Temp %d: FR=%3d FW=%3d LR=%3d LW=%3d\n", i, fr, fw, lr, lw);
      assert(fw <= fr);
   }
#endif

   /* Perform optimizations on the instructions in the glsl_to_tgsi_visitor. */
   v->simplify_cmp();
   v->copy_propagate();
   while (v->eliminate_dead_code_advanced());

   /* FIXME: These passes to optimize temporary registers don't work when there
    * is indirect addressing of the temporary register space.  We need proper
    * array support so that we don't have to give up these passes in every
    * shader that uses arrays.
    */
   if (!v->indirect_addr_temps) {
      v->eliminate_dead_code();
      v->merge_registers();
      v->renumber_registers();
   }

   /* Write the END instruction. */
   v->emit(NULL, TGSI_OPCODE_END);

   if (ctx->Shader.Flags & GLSL_DUMP) {
      printf("\n");
      printf("GLSL IR for linked %s program %d:\n", target_string,
             shader_program->Name);
      _mesa_print_ir(shader->ir, NULL);
      printf("\n");
      printf("\n");
      fflush(stdout);
   }

   prog->Instructions = NULL;
   prog->NumInstructions = 0;

   do_set_program_inouts(shader->ir, prog, shader->Type == GL_FRAGMENT_SHADER);
   count_resources(v, prog);

   _mesa_reference_program(ctx, &shader->Program, prog);

   /* This has to be done last.  Any operation that can cause
    * prog->ParameterValues to get reallocated (e.g., anything that adds a
    * program constant) has to happen before creating this linkage.
    */
   _mesa_associate_uniform_storage(ctx, shader_program, prog->Parameters);
   if (!shader_program->LinkStatus) {
      return NULL;
   }

   struct st_vertex_program *stvp;
   struct st_fragment_program *stfp;
   struct st_geometry_program *stgp;

   /* Hand the visitor off to the st program object so translation to TGSI
    * can be completed later (e.g. at variant compile time). */
   switch (shader->Type) {
   case GL_VERTEX_SHADER:
      stvp = (struct st_vertex_program *)prog;
      stvp->glsl_to_tgsi = v;
      break;
   case GL_FRAGMENT_SHADER:
      stfp = (struct st_fragment_program *)prog;
      stfp->glsl_to_tgsi = v;
      break;
   case GL_GEOMETRY_SHADER:
      stgp = (struct st_geometry_program *)prog;
      stgp->glsl_to_tgsi = v;
      break;
   default:
      assert(!"should not be reached");
      return NULL;
   }

   return prog;
}

/**
 * Searches through the IR for a declaration of gl_ClipDistance and returns the
 * declared size of the gl_ClipDistance array.  Returns 0 if gl_ClipDistance is
 * not declared in the IR.
 */
int get_clip_distance_size(exec_list *ir)
{
   foreach_iter (exec_list_iterator, iter, *ir) {
      ir_instruction *inst = (ir_instruction *)iter.get();
      ir_variable *var = inst->as_variable();
      if (var == NULL) continue;
      if (!strcmp(var->name, "gl_ClipDistance")) {
         return var->type->length;
      }
   }

   return 0;
}

extern "C" {

/**
 * Allocate and initialize a new gl_shader object of the given type.
 * Returns NULL on allocation failure.
 */
struct gl_shader *
st_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
{
   struct gl_shader *shader;
   assert(type == GL_FRAGMENT_SHADER || type == GL_VERTEX_SHADER ||
          type == GL_GEOMETRY_SHADER_ARB);
   shader = rzalloc(NULL, struct gl_shader);
   if (shader) {
      shader->Type = type;
      shader->Name = name;
      _mesa_init_shader(ctx, shader);
   }
   return shader;
}

/**
 * Allocate and initialize a new gl_shader_program object.
 * Returns NULL on allocation failure.
 */
struct gl_shader_program *
st_new_shader_program(struct gl_context *ctx, GLuint name)
{
   struct gl_shader_program *shProg;
   shProg = rzalloc(NULL, struct gl_shader_program);
   if (shProg) {
      shProg->Name = name;
      _mesa_init_shader_program(ctx, shProg);
   }
   return shProg;
}

/**
 * Link a shader.
 * Called via ctx->Driver.LinkShader()
 * This actually involves converting GLSL IR into an intermediate TGSI-like IR
 * with code lowering and other optimizations.
 */
GLboolean
st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
{
   int num_clip_distances[MESA_SHADER_TYPES];
   assert(prog->LinkStatus);

   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      bool progress;
      exec_list *ir = prog->_LinkedShaders[i]->ir;
      const struct gl_shader_compiler_options *options =
            &ctx->ShaderCompilerOptions[_mesa_shader_type_to_index(prog->_LinkedShaders[i]->Type)];

      /* We have to determine the length of the gl_ClipDistance array before
       * the array is lowered to two vec4s by lower_clip_distance().
       */
      num_clip_distances[i] = get_clip_distance_size(ir);

      /* Run the lowering and optimization passes to a fixed point. */
      do {
         unsigned what_to_lower = MOD_TO_FRACT | DIV_TO_MUL_RCP |
            EXP_TO_EXP2 | LOG_TO_LOG2;
         if (options->EmitNoPow)
            what_to_lower |= POW_TO_EXP2;
         if (!ctx->Const.NativeIntegers)
            what_to_lower |= INT_DIV_TO_MUL_RCP;

         progress = false;

         /* Lowering */
         do_mat_op_to_vec(ir);
         lower_instructions(ir, what_to_lower);

         progress = do_lower_jumps(ir, true, true, options->EmitNoMainReturn, options->EmitNoCont, options->EmitNoLoops) || progress;

         progress = do_common_optimization(ir, true, true,
                                           options->MaxUnrollIterations)
           || progress;

         progress = lower_quadop_vector(ir, false) || progress;
         progress = lower_clip_distance(ir) || progress;

         if (options->MaxIfDepth == 0)
            progress = lower_discard(ir) || progress;

         progress = lower_if_to_cond_assign(ir, options->MaxIfDepth) || progress;

         if (options->EmitNoNoise)
            progress = lower_noise(ir) || progress;

         /* If there are forms of indirect addressing that the driver
          * cannot handle, perform the lowering pass.
          */
         if (options->EmitNoIndirectInput || options->EmitNoIndirectOutput
             || options->EmitNoIndirectTemp || options->EmitNoIndirectUniform)
            progress =
               lower_variable_index_to_cond_assign(ir,
                                                   options->EmitNoIndirectInput,
                                                   options->EmitNoIndirectOutput,
                                                   options->EmitNoIndirectTemp,
                                                   options->EmitNoIndirectUniform)
               || progress;

         progress = do_vec_index_to_cond_assign(ir) || progress;
      } while (progress);

      validate_ir_tree(ir);
   }

   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
      struct gl_program *linked_prog;

      if (prog->_LinkedShaders[i] == NULL)
         continue;

      linked_prog = get_mesa_program(ctx, prog, prog->_LinkedShaders[i],
                                     num_clip_distances[i]);

      if (linked_prog) {
         /* Indexed by MESA_SHADER_* stage, matching _LinkedShaders[]. */
         static const GLenum targets[] = {
            GL_VERTEX_PROGRAM_ARB,
            GL_FRAGMENT_PROGRAM_ARB,
            GL_GEOMETRY_PROGRAM_NV
         };

         _mesa_reference_program(ctx, &prog->_LinkedShaders[i]->Program,
                                 linked_prog);
         if (!ctx->Driver.ProgramStringNotify(ctx, targets[i], linked_prog)) {
            /* Driver rejected the program; drop both references and fail. */
            _mesa_reference_program(ctx, &prog->_LinkedShaders[i]->Program,
                                    NULL);
            _mesa_reference_program(ctx, &linked_prog, NULL);
            return GL_FALSE;
         }
      }

      _mesa_reference_program(ctx, &linked_prog, NULL);
   }

   return GL_TRUE;
}

/**
 * Translate the linked transform feedback info from Mesa's representation
 * to the gallium pipe_stream_output_info form, applying outputMapping to
 * convert Mesa output register indexes to TGSI output slots.
 */
void
st_translate_stream_output_info(glsl_to_tgsi_visitor *glsl_to_tgsi,
                                const GLuint outputMapping[],
                                struct pipe_stream_output_info *so)
{
   unsigned i;
   struct gl_transform_feedback_info *info =
      &glsl_to_tgsi->shader_program->LinkedTransformFeedback;

   for (i = 0; i < info->NumOutputs; i++) {
      so->output[i].register_index =
         outputMapping[info->Outputs[i].OutputRegister];
      so->output[i].start_component = info->Outputs[i].ComponentOffset;
      so->output[i].num_components = info->Outputs[i].NumComponents;
      so->output[i].output_buffer = info->Outputs[i].OutputBuffer;
      so->output[i].dst_offset = info->Outputs[i].DstOffset;
   }

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      so->stride[i] = info->BufferStride[i];
   }
   so->num_outputs = info->NumOutputs;
}

} /* extern "C" */