linker.cpp revision 586b4b500fed64fb724beb3753bc190cd1c676e0
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file linker.cpp
 * GLSL linker implementation
 *
 * Given a set of shaders that are to be linked to generate a final program,
 * there are three distinct stages.
 *
 * In the first stage shaders are partitioned into groups based on the shader
 * type.  All shaders of a particular type (e.g., vertex shaders) are linked
 * together.
 *
 *   - Undefined references in each shader are resolved to definitions in
 *     another shader.
 *   - Types and qualifiers of uniforms, outputs, and global variables defined
 *     in multiple shaders with the same name are verified to be the same.
 *   - Initializers for uniforms and global variables defined
 *     in multiple shaders with the same name are verified to be the same.
41 * 42 * The result, in the terminology of the GLSL spec, is a set of shader 43 * executables for each processing unit. 44 * 45 * After the first stage is complete, a series of semantic checks are performed 46 * on each of the shader executables. 47 * 48 * - Each shader executable must define a \c main function. 49 * - Each vertex shader executable must write to \c gl_Position. 50 * - Each fragment shader executable must write to either \c gl_FragData or 51 * \c gl_FragColor. 52 * 53 * In the final stage individual shader executables are linked to create a 54 * complete exectuable. 55 * 56 * - Types of uniforms defined in multiple shader stages with the same name 57 * are verified to be the same. 58 * - Initializers for uniforms defined in multiple shader stages with the 59 * same name are verified to be the same. 60 * - Types and qualifiers of outputs defined in one stage are verified to 61 * be the same as the types and qualifiers of inputs defined with the same 62 * name in a later stage. 63 * 64 * \author Ian Romanick <ian.d.romanick@intel.com> 65 */ 66#include <cstdlib> 67#include <cstdio> 68#include <cstdarg> 69#include <climits> 70 71extern "C" { 72#include <talloc.h> 73} 74 75#include "main/core.h" 76#include "glsl_symbol_table.h" 77#include "ir.h" 78#include "program.h" 79#include "program/hash_table.h" 80#include "linker.h" 81#include "ir_optimization.h" 82 83/** 84 * Visitor that determines whether or not a variable is ever written. 
85 */ 86class find_assignment_visitor : public ir_hierarchical_visitor { 87public: 88 find_assignment_visitor(const char *name) 89 : name(name), found(false) 90 { 91 /* empty */ 92 } 93 94 virtual ir_visitor_status visit_enter(ir_assignment *ir) 95 { 96 ir_variable *const var = ir->lhs->variable_referenced(); 97 98 if (strcmp(name, var->name) == 0) { 99 found = true; 100 return visit_stop; 101 } 102 103 return visit_continue_with_parent; 104 } 105 106 virtual ir_visitor_status visit_enter(ir_call *ir) 107 { 108 exec_list_iterator sig_iter = ir->get_callee()->parameters.iterator(); 109 foreach_iter(exec_list_iterator, iter, *ir) { 110 ir_rvalue *param_rval = (ir_rvalue *)iter.get(); 111 ir_variable *sig_param = (ir_variable *)sig_iter.get(); 112 113 if (sig_param->mode == ir_var_out || 114 sig_param->mode == ir_var_inout) { 115 ir_variable *var = param_rval->variable_referenced(); 116 if (var && strcmp(name, var->name) == 0) { 117 found = true; 118 return visit_stop; 119 } 120 } 121 sig_iter.next(); 122 } 123 124 return visit_continue_with_parent; 125 } 126 127 bool variable_found() 128 { 129 return found; 130 } 131 132private: 133 const char *name; /**< Find writes to a variable with this name. */ 134 bool found; /**< Was a write to the variable found? */ 135}; 136 137 138/** 139 * Visitor that determines whether or not a variable is ever read. 140 */ 141class find_deref_visitor : public ir_hierarchical_visitor { 142public: 143 find_deref_visitor(const char *name) 144 : name(name), found(false) 145 { 146 /* empty */ 147 } 148 149 virtual ir_visitor_status visit(ir_dereference_variable *ir) 150 { 151 if (strcmp(this->name, ir->var->name) == 0) { 152 this->found = true; 153 return visit_stop; 154 } 155 156 return visit_continue; 157 } 158 159 bool variable_found() const 160 { 161 return this->found; 162 } 163 164private: 165 const char *name; /**< Find writes to a variable with this name. */ 166 bool found; /**< Was a write to the variable found? 
*/ 167}; 168 169 170void 171linker_error_printf(gl_shader_program *prog, const char *fmt, ...) 172{ 173 va_list ap; 174 175 prog->InfoLog = talloc_strdup_append(prog->InfoLog, "error: "); 176 va_start(ap, fmt); 177 prog->InfoLog = talloc_vasprintf_append(prog->InfoLog, fmt, ap); 178 va_end(ap); 179} 180 181 182void 183invalidate_variable_locations(gl_shader *sh, enum ir_variable_mode mode, 184 int generic_base) 185{ 186 foreach_list(node, sh->ir) { 187 ir_variable *const var = ((ir_instruction *) node)->as_variable(); 188 189 if ((var == NULL) || (var->mode != (unsigned) mode)) 190 continue; 191 192 /* Only assign locations for generic attributes / varyings / etc. 193 */ 194 if (var->location >= generic_base) 195 var->location = -1; 196 } 197} 198 199 200/** 201 * Determine the number of attribute slots required for a particular type 202 * 203 * This code is here because it implements the language rules of a specific 204 * GLSL version. Since it's a property of the language and not a property of 205 * types in general, it doesn't really belong in glsl_type. 206 */ 207unsigned 208count_attribute_slots(const glsl_type *t) 209{ 210 /* From page 31 (page 37 of the PDF) of the GLSL 1.50 spec: 211 * 212 * "A scalar input counts the same amount against this limit as a vec4, 213 * so applications may want to consider packing groups of four 214 * unrelated float inputs together into a vector to better utilize the 215 * capabilities of the underlying hardware. A matrix input will use up 216 * multiple locations. The number of locations used will equal the 217 * number of columns in the matrix." 218 * 219 * The spec does not explicitly say how arrays are counted. However, it 220 * should be safe to assume the total number of slots consumed by an array 221 * is the number of entries in the array multiplied by the number of slots 222 * consumed by a single element of the array. 
223 */ 224 225 if (t->is_array()) 226 return t->array_size() * count_attribute_slots(t->element_type()); 227 228 if (t->is_matrix()) 229 return t->matrix_columns; 230 231 return 1; 232} 233 234 235/** 236 * Verify that a vertex shader executable meets all semantic requirements 237 * 238 * \param shader Vertex shader executable to be verified 239 */ 240bool 241validate_vertex_shader_executable(struct gl_shader_program *prog, 242 struct gl_shader *shader) 243{ 244 if (shader == NULL) 245 return true; 246 247 find_assignment_visitor find("gl_Position"); 248 find.run(shader->ir); 249 if (!find.variable_found()) { 250 linker_error_printf(prog, 251 "vertex shader does not write to `gl_Position'\n"); 252 return false; 253 } 254 255 return true; 256} 257 258 259/** 260 * Verify that a fragment shader executable meets all semantic requirements 261 * 262 * \param shader Fragment shader executable to be verified 263 */ 264bool 265validate_fragment_shader_executable(struct gl_shader_program *prog, 266 struct gl_shader *shader) 267{ 268 if (shader == NULL) 269 return true; 270 271 find_assignment_visitor frag_color("gl_FragColor"); 272 find_assignment_visitor frag_data("gl_FragData"); 273 274 frag_color.run(shader->ir); 275 frag_data.run(shader->ir); 276 277 if (frag_color.variable_found() && frag_data.variable_found()) { 278 linker_error_printf(prog, "fragment shader writes to both " 279 "`gl_FragColor' and `gl_FragData'\n"); 280 return false; 281 } 282 283 return true; 284} 285 286 287/** 288 * Generate a string describing the mode of a variable 289 */ 290static const char * 291mode_string(const ir_variable *var) 292{ 293 switch (var->mode) { 294 case ir_var_auto: 295 return (var->read_only) ? 
"global constant" : "global variable"; 296 297 case ir_var_uniform: return "uniform"; 298 case ir_var_in: return "shader input"; 299 case ir_var_out: return "shader output"; 300 case ir_var_inout: return "shader inout"; 301 302 case ir_var_temporary: 303 default: 304 assert(!"Should not get here."); 305 return "invalid variable"; 306 } 307} 308 309 310/** 311 * Perform validation of global variables used across multiple shaders 312 */ 313bool 314cross_validate_globals(struct gl_shader_program *prog, 315 struct gl_shader **shader_list, 316 unsigned num_shaders, 317 bool uniforms_only) 318{ 319 /* Examine all of the uniforms in all of the shaders and cross validate 320 * them. 321 */ 322 glsl_symbol_table variables; 323 for (unsigned i = 0; i < num_shaders; i++) { 324 foreach_list(node, shader_list[i]->ir) { 325 ir_variable *const var = ((ir_instruction *) node)->as_variable(); 326 327 if (var == NULL) 328 continue; 329 330 if (uniforms_only && (var->mode != ir_var_uniform)) 331 continue; 332 333 /* Don't cross validate temporaries that are at global scope. These 334 * will eventually get pulled into the shaders 'main'. 335 */ 336 if (var->mode == ir_var_temporary) 337 continue; 338 339 /* If a global with this name has already been seen, verify that the 340 * new instance has the same type. In addition, if the globals have 341 * initializers, the values of the initializers must be the same. 342 */ 343 ir_variable *const existing = variables.get_variable(var->name); 344 if (existing != NULL) { 345 if (var->type != existing->type) { 346 /* Consider the types to be "the same" if both types are arrays 347 * of the same type and one of the arrays is implicitly sized. 348 * In addition, set the type of the linked variable to the 349 * explicitly sized array. 
350 */ 351 if (var->type->is_array() 352 && existing->type->is_array() 353 && (var->type->fields.array == existing->type->fields.array) 354 && ((var->type->length == 0) 355 || (existing->type->length == 0))) { 356 if (existing->type->length == 0) 357 existing->type = var->type; 358 } else { 359 linker_error_printf(prog, "%s `%s' declared as type " 360 "`%s' and type `%s'\n", 361 mode_string(var), 362 var->name, var->type->name, 363 existing->type->name); 364 return false; 365 } 366 } 367 368 /* FINISHME: Handle non-constant initializers. 369 */ 370 if (var->constant_value != NULL) { 371 if (existing->constant_value != NULL) { 372 if (!var->constant_value->has_value(existing->constant_value)) { 373 linker_error_printf(prog, "initializers for %s " 374 "`%s' have differing values\n", 375 mode_string(var), var->name); 376 return false; 377 } 378 } else 379 /* If the first-seen instance of a particular uniform did not 380 * have an initializer but a later instance does, copy the 381 * initializer to the version stored in the symbol table. 382 */ 383 /* FINISHME: This is wrong. The constant_value field should 384 * FINISHME: not be modified! Imagine a case where a shader 385 * FINISHME: without an initializer is linked in two different 386 * FINISHME: programs with shaders that have differing 387 * FINISHME: initializers. Linking with the first will 388 * FINISHME: modify the shader, and linking with the second 389 * FINISHME: will fail. 
390 */ 391 existing->constant_value = 392 var->constant_value->clone(talloc_parent(existing), NULL); 393 } 394 } else 395 variables.add_variable(var->name, var); 396 } 397 } 398 399 return true; 400} 401 402 403/** 404 * Perform validation of uniforms used across multiple shader stages 405 */ 406bool 407cross_validate_uniforms(struct gl_shader_program *prog) 408{ 409 return cross_validate_globals(prog, prog->_LinkedShaders, 410 prog->_NumLinkedShaders, true); 411} 412 413 414/** 415 * Validate that outputs from one stage match inputs of another 416 */ 417bool 418cross_validate_outputs_to_inputs(struct gl_shader_program *prog, 419 gl_shader *producer, gl_shader *consumer) 420{ 421 glsl_symbol_table parameters; 422 /* FINISHME: Figure these out dynamically. */ 423 const char *const producer_stage = "vertex"; 424 const char *const consumer_stage = "fragment"; 425 426 /* Find all shader outputs in the "producer" stage. 427 */ 428 foreach_list(node, producer->ir) { 429 ir_variable *const var = ((ir_instruction *) node)->as_variable(); 430 431 /* FINISHME: For geometry shaders, this should also look for inout 432 * FINISHME: variables. 433 */ 434 if ((var == NULL) || (var->mode != ir_var_out)) 435 continue; 436 437 parameters.add_variable(var->name, var); 438 } 439 440 441 /* Find all shader inputs in the "consumer" stage. Any variables that have 442 * matching outputs already in the symbol table must have the same type and 443 * qualifiers. 444 */ 445 foreach_list(node, consumer->ir) { 446 ir_variable *const input = ((ir_instruction *) node)->as_variable(); 447 448 /* FINISHME: For geometry shaders, this should also look for inout 449 * FINISHME: variables. 450 */ 451 if ((input == NULL) || (input->mode != ir_var_in)) 452 continue; 453 454 ir_variable *const output = parameters.get_variable(input->name); 455 if (output != NULL) { 456 /* Check that the types match between stages. 
457 */ 458 if (input->type != output->type) { 459 linker_error_printf(prog, 460 "%s shader output `%s' declared as " 461 "type `%s', but %s shader input declared " 462 "as type `%s'\n", 463 producer_stage, output->name, 464 output->type->name, 465 consumer_stage, input->type->name); 466 return false; 467 } 468 469 /* Check that all of the qualifiers match between stages. 470 */ 471 if (input->centroid != output->centroid) { 472 linker_error_printf(prog, 473 "%s shader output `%s' %s centroid qualifier, " 474 "but %s shader input %s centroid qualifier\n", 475 producer_stage, 476 output->name, 477 (output->centroid) ? "has" : "lacks", 478 consumer_stage, 479 (input->centroid) ? "has" : "lacks"); 480 return false; 481 } 482 483 if (input->invariant != output->invariant) { 484 linker_error_printf(prog, 485 "%s shader output `%s' %s invariant qualifier, " 486 "but %s shader input %s invariant qualifier\n", 487 producer_stage, 488 output->name, 489 (output->invariant) ? "has" : "lacks", 490 consumer_stage, 491 (input->invariant) ? 
"has" : "lacks"); 492 return false; 493 } 494 495 if (input->interpolation != output->interpolation) { 496 linker_error_printf(prog, 497 "%s shader output `%s' specifies %s " 498 "interpolation qualifier, " 499 "but %s shader input specifies %s " 500 "interpolation qualifier\n", 501 producer_stage, 502 output->name, 503 output->interpolation_string(), 504 consumer_stage, 505 input->interpolation_string()); 506 return false; 507 } 508 } 509 } 510 511 return true; 512} 513 514 515/** 516 * Populates a shaders symbol table with all global declarations 517 */ 518static void 519populate_symbol_table(gl_shader *sh) 520{ 521 sh->symbols = new(sh) glsl_symbol_table; 522 523 foreach_list(node, sh->ir) { 524 ir_instruction *const inst = (ir_instruction *) node; 525 ir_variable *var; 526 ir_function *func; 527 528 if ((func = inst->as_function()) != NULL) { 529 sh->symbols->add_function(func->name, func); 530 } else if ((var = inst->as_variable()) != NULL) { 531 sh->symbols->add_variable(var->name, var); 532 } 533 } 534} 535 536 537/** 538 * Remap variables referenced in an instruction tree 539 * 540 * This is used when instruction trees are cloned from one shader and placed in 541 * another. These trees will contain references to \c ir_variable nodes that 542 * do not exist in the target shader. This function finds these \c ir_variable 543 * references and replaces the references with matching variables in the target 544 * shader. 545 * 546 * If there is no matching variable in the target shader, a clone of the 547 * \c ir_variable is made and added to the target shader. The new variable is 548 * added to \b both the instruction stream and the symbol table. 549 * 550 * \param inst IR tree that is to be processed. 551 * \param symbols Symbol table containing global scope symbols in the 552 * linked shader. 553 * \param instructions Instruction stream where new variable declarations 554 * should be added. 
555 */ 556void 557remap_variables(ir_instruction *inst, struct gl_shader *target, 558 hash_table *temps) 559{ 560 class remap_visitor : public ir_hierarchical_visitor { 561 public: 562 remap_visitor(struct gl_shader *target, 563 hash_table *temps) 564 { 565 this->target = target; 566 this->symbols = target->symbols; 567 this->instructions = target->ir; 568 this->temps = temps; 569 } 570 571 virtual ir_visitor_status visit(ir_dereference_variable *ir) 572 { 573 if (ir->var->mode == ir_var_temporary) { 574 ir_variable *var = (ir_variable *) hash_table_find(temps, ir->var); 575 576 assert(var != NULL); 577 ir->var = var; 578 return visit_continue; 579 } 580 581 ir_variable *const existing = 582 this->symbols->get_variable(ir->var->name); 583 if (existing != NULL) 584 ir->var = existing; 585 else { 586 ir_variable *copy = ir->var->clone(this->target, NULL); 587 588 this->symbols->add_variable(copy->name, copy); 589 this->instructions->push_head(copy); 590 ir->var = copy; 591 } 592 593 return visit_continue; 594 } 595 596 private: 597 struct gl_shader *target; 598 glsl_symbol_table *symbols; 599 exec_list *instructions; 600 hash_table *temps; 601 }; 602 603 remap_visitor v(target, temps); 604 605 inst->accept(&v); 606} 607 608 609/** 610 * Move non-declarations from one instruction stream to another 611 * 612 * The intended usage pattern of this function is to pass the pointer to the 613 * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node 614 * pointer) for \c last and \c false for \c make_copies on the first 615 * call. Successive calls pass the return value of the previous call for 616 * \c last and \c true for \c make_copies. 
617 * 618 * \param instructions Source instruction stream 619 * \param last Instruction after which new instructions should be 620 * inserted in the target instruction stream 621 * \param make_copies Flag selecting whether instructions in \c instructions 622 * should be copied (via \c ir_instruction::clone) into the 623 * target list or moved. 624 * 625 * \return 626 * The new "last" instruction in the target instruction stream. This pointer 627 * is suitable for use as the \c last parameter of a later call to this 628 * function. 629 */ 630exec_node * 631move_non_declarations(exec_list *instructions, exec_node *last, 632 bool make_copies, gl_shader *target) 633{ 634 hash_table *temps = NULL; 635 636 if (make_copies) 637 temps = hash_table_ctor(0, hash_table_pointer_hash, 638 hash_table_pointer_compare); 639 640 foreach_list_safe(node, instructions) { 641 ir_instruction *inst = (ir_instruction *) node; 642 643 if (inst->as_function()) 644 continue; 645 646 ir_variable *var = inst->as_variable(); 647 if ((var != NULL) && (var->mode != ir_var_temporary)) 648 continue; 649 650 assert(inst->as_assignment() 651 || ((var != NULL) && (var->mode == ir_var_temporary))); 652 653 if (make_copies) { 654 inst = inst->clone(target, NULL); 655 656 if (var != NULL) 657 hash_table_insert(temps, inst, var); 658 else 659 remap_variables(inst, target, temps); 660 } else { 661 inst->remove(); 662 } 663 664 last->insert_after(inst); 665 last = inst; 666 } 667 668 if (make_copies) 669 hash_table_dtor(temps); 670 671 return last; 672} 673 674/** 675 * Get the function signature for main from a shader 676 */ 677static ir_function_signature * 678get_main_function_signature(gl_shader *sh) 679{ 680 ir_function *const f = sh->symbols->get_function("main"); 681 if (f != NULL) { 682 exec_list void_parameters; 683 684 /* Look for the 'void main()' signature and ensure that it's defined. 685 * This keeps the linker from accidentally pick a shader that just 686 * contains a prototype for main. 
687 * 688 * We don't have to check for multiple definitions of main (in multiple 689 * shaders) because that would have already been caught above. 690 */ 691 ir_function_signature *sig = f->matching_signature(&void_parameters); 692 if ((sig != NULL) && sig->is_defined) { 693 return sig; 694 } 695 } 696 697 return NULL; 698} 699 700 701/** 702 * Combine a group of shaders for a single stage to generate a linked shader 703 * 704 * \note 705 * If this function is supplied a single shader, it is cloned, and the new 706 * shader is returned. 707 */ 708static struct gl_shader * 709link_intrastage_shaders(GLcontext *ctx, 710 struct gl_shader_program *prog, 711 struct gl_shader **shader_list, 712 unsigned num_shaders) 713{ 714 /* Check that global variables defined in multiple shaders are consistent. 715 */ 716 if (!cross_validate_globals(prog, shader_list, num_shaders, false)) 717 return NULL; 718 719 /* Check that there is only a single definition of each function signature 720 * across all shaders. 721 */ 722 for (unsigned i = 0; i < (num_shaders - 1); i++) { 723 foreach_list(node, shader_list[i]->ir) { 724 ir_function *const f = ((ir_instruction *) node)->as_function(); 725 726 if (f == NULL) 727 continue; 728 729 for (unsigned j = i + 1; j < num_shaders; j++) { 730 ir_function *const other = 731 shader_list[j]->symbols->get_function(f->name); 732 733 /* If the other shader has no function (and therefore no function 734 * signatures) with the same name, skip to the next shader. 
735 */ 736 if (other == NULL) 737 continue; 738 739 foreach_iter (exec_list_iterator, iter, *f) { 740 ir_function_signature *sig = 741 (ir_function_signature *) iter.get(); 742 743 if (!sig->is_defined || sig->is_builtin) 744 continue; 745 746 ir_function_signature *other_sig = 747 other->exact_matching_signature(& sig->parameters); 748 749 if ((other_sig != NULL) && other_sig->is_defined 750 && !other_sig->is_builtin) { 751 linker_error_printf(prog, 752 "function `%s' is multiply defined", 753 f->name); 754 return NULL; 755 } 756 } 757 } 758 } 759 } 760 761 /* Find the shader that defines main, and make a clone of it. 762 * 763 * Starting with the clone, search for undefined references. If one is 764 * found, find the shader that defines it. Clone the reference and add 765 * it to the shader. Repeat until there are no undefined references or 766 * until a reference cannot be resolved. 767 */ 768 gl_shader *main = NULL; 769 for (unsigned i = 0; i < num_shaders; i++) { 770 if (get_main_function_signature(shader_list[i]) != NULL) { 771 main = shader_list[i]; 772 break; 773 } 774 } 775 776 if (main == NULL) { 777 linker_error_printf(prog, "%s shader lacks `main'\n", 778 (shader_list[0]->Type == GL_VERTEX_SHADER) 779 ? "vertex" : "fragment"); 780 return NULL; 781 } 782 783 gl_shader *const linked = ctx->Driver.NewShader(NULL, 0, main->Type); 784 linked->ir = new(linked) exec_list; 785 clone_ir_list(linked, linked->ir, main->ir); 786 787 populate_symbol_table(linked); 788 789 /* The a pointer to the main function in the final linked shader (i.e., the 790 * copy of the original shader that contained the main function). 791 */ 792 ir_function_signature *const main_sig = get_main_function_signature(linked); 793 794 /* Move any instructions other than variable declarations or function 795 * declarations into main. 
796 */ 797 exec_node *insertion_point = 798 move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false, 799 linked); 800 801 for (unsigned i = 0; i < num_shaders; i++) { 802 if (shader_list[i] == main) 803 continue; 804 805 insertion_point = move_non_declarations(shader_list[i]->ir, 806 insertion_point, true, linked); 807 } 808 809 /* Resolve initializers for global variables in the linked shader. 810 */ 811 unsigned num_linking_shaders = num_shaders; 812 for (unsigned i = 0; i < num_shaders; i++) 813 num_linking_shaders += shader_list[i]->num_builtins_to_link; 814 815 gl_shader **linking_shaders = 816 (gl_shader **) calloc(num_linking_shaders, sizeof(gl_shader *)); 817 818 memcpy(linking_shaders, shader_list, 819 sizeof(linking_shaders[0]) * num_shaders); 820 821 unsigned idx = num_shaders; 822 for (unsigned i = 0; i < num_shaders; i++) { 823 memcpy(&linking_shaders[idx], shader_list[i]->builtins_to_link, 824 sizeof(linking_shaders[0]) * shader_list[i]->num_builtins_to_link); 825 idx += shader_list[i]->num_builtins_to_link; 826 } 827 828 assert(idx == num_linking_shaders); 829 830 link_function_calls(prog, linked, linking_shaders, num_linking_shaders); 831 832 free(linking_shaders); 833 834 return linked; 835} 836 837 838struct uniform_node { 839 exec_node link; 840 struct gl_uniform *u; 841 unsigned slots; 842}; 843 844/** 845 * Update the sizes of linked shader uniform arrays to the maximum 846 * array index used. 847 * 848 * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec: 849 * 850 * If one or more elements of an array are active, 851 * GetActiveUniform will return the name of the array in name, 852 * subject to the restrictions listed above. The type of the array 853 * is returned in type. The size parameter contains the highest 854 * array element index used, plus one. The compiler or linker 855 * determines the highest index used. There will be only one 856 * active uniform reported by the GL per uniform array. 
857 858 */ 859static void 860update_array_sizes(struct gl_shader_program *prog) 861{ 862 for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) { 863 foreach_list(node, prog->_LinkedShaders[i]->ir) { 864 ir_variable *const var = ((ir_instruction *) node)->as_variable(); 865 866 if ((var == NULL) || (var->mode != ir_var_uniform && 867 var->mode != ir_var_in && 868 var->mode != ir_var_out) || 869 !var->type->is_array()) 870 continue; 871 872 unsigned int size = var->max_array_access; 873 for (unsigned j = 0; j < prog->_NumLinkedShaders; j++) { 874 foreach_list(node2, prog->_LinkedShaders[j]->ir) { 875 ir_variable *other_var = ((ir_instruction *) node2)->as_variable(); 876 if (!other_var) 877 continue; 878 879 if (strcmp(var->name, other_var->name) == 0 && 880 other_var->max_array_access > size) { 881 size = other_var->max_array_access; 882 } 883 } 884 } 885 886 if (size + 1 != var->type->fields.array->length) { 887 var->type = glsl_type::get_array_instance(var->type->fields.array, 888 size + 1); 889 /* FINISHME: We should update the types of array 890 * dereferences of this variable now. 891 */ 892 } 893 } 894 } 895} 896 897static void 898add_uniform(void *mem_ctx, exec_list *uniforms, struct hash_table *ht, 899 const char *name, const glsl_type *type, GLenum shader_type, 900 unsigned *next_shader_pos, unsigned *total_uniforms) 901{ 902 if (type->is_record()) { 903 for (unsigned int i = 0; i < type->length; i++) { 904 const glsl_type *field_type = type->fields.structure[i].type; 905 char *field_name = talloc_asprintf(mem_ctx, "%s.%s", name, 906 type->fields.structure[i].name); 907 908 add_uniform(mem_ctx, uniforms, ht, field_name, field_type, 909 shader_type, next_shader_pos, total_uniforms); 910 } 911 } else { 912 uniform_node *n = (uniform_node *) hash_table_find(ht, name); 913 unsigned int vec4_slots; 914 const glsl_type *array_elem_type = NULL; 915 916 if (type->is_array()) { 917 array_elem_type = type->fields.array; 918 /* Array of structures. 
*/ 919 if (array_elem_type->is_record()) { 920 for (unsigned int i = 0; i < type->length; i++) { 921 char *elem_name = talloc_asprintf(mem_ctx, "%s[%d]", name, i); 922 add_uniform(mem_ctx, uniforms, ht, elem_name, array_elem_type, 923 shader_type, next_shader_pos, total_uniforms); 924 } 925 return; 926 } 927 } 928 929 /* Fix the storage size of samplers at 1 vec4 each. Be sure to pad out 930 * vectors to vec4 slots. 931 */ 932 if (type->is_array()) { 933 if (array_elem_type->is_sampler()) 934 vec4_slots = type->length; 935 else 936 vec4_slots = type->length * array_elem_type->matrix_columns; 937 } else if (type->is_sampler()) { 938 vec4_slots = 1; 939 } else { 940 vec4_slots = type->matrix_columns; 941 } 942 943 if (n == NULL) { 944 n = (uniform_node *) calloc(1, sizeof(struct uniform_node)); 945 n->u = (gl_uniform *) calloc(1, sizeof(struct gl_uniform)); 946 n->slots = vec4_slots; 947 948 n->u->Name = strdup(name); 949 n->u->Type = type; 950 n->u->VertPos = -1; 951 n->u->FragPos = -1; 952 n->u->GeomPos = -1; 953 (*total_uniforms)++; 954 955 hash_table_insert(ht, n, name); 956 uniforms->push_tail(& n->link); 957 } 958 959 switch (shader_type) { 960 case GL_VERTEX_SHADER: 961 n->u->VertPos = *next_shader_pos; 962 break; 963 case GL_FRAGMENT_SHADER: 964 n->u->FragPos = *next_shader_pos; 965 break; 966 case GL_GEOMETRY_SHADER: 967 n->u->GeomPos = *next_shader_pos; 968 break; 969 } 970 971 (*next_shader_pos) += vec4_slots; 972 } 973} 974 975void 976assign_uniform_locations(struct gl_shader_program *prog) 977{ 978 /* */ 979 exec_list uniforms; 980 unsigned total_uniforms = 0; 981 hash_table *ht = hash_table_ctor(32, hash_table_string_hash, 982 hash_table_string_compare); 983 void *mem_ctx = talloc_new(NULL); 984 985 for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) { 986 unsigned next_position = 0; 987 988 foreach_list(node, prog->_LinkedShaders[i]->ir) { 989 ir_variable *const var = ((ir_instruction *) node)->as_variable(); 990 991 if ((var == NULL) || (var->mode 
!= ir_var_uniform)) 992 continue; 993 994 if (strncmp(var->name, "gl_", 3) == 0) { 995 /* At the moment, we don't allocate uniform locations for 996 * builtin uniforms. It's permitted by spec, and we'll 997 * likely switch to doing that at some point, but not yet. 998 */ 999 continue; 1000 } 1001 1002 var->location = next_position; 1003 add_uniform(mem_ctx, &uniforms, ht, var->name, var->type, 1004 prog->_LinkedShaders[i]->Type, 1005 &next_position, &total_uniforms); 1006 } 1007 } 1008 1009 talloc_free(mem_ctx); 1010 1011 gl_uniform_list *ul = (gl_uniform_list *) 1012 calloc(1, sizeof(gl_uniform_list)); 1013 1014 ul->Size = total_uniforms; 1015 ul->NumUniforms = total_uniforms; 1016 ul->Uniforms = (gl_uniform *) calloc(total_uniforms, sizeof(gl_uniform)); 1017 1018 unsigned idx = 0; 1019 uniform_node *next; 1020 for (uniform_node *node = (uniform_node *) uniforms.head 1021 ; node->link.next != NULL 1022 ; node = next) { 1023 next = (uniform_node *) node->link.next; 1024 1025 node->link.remove(); 1026 memcpy(&ul->Uniforms[idx], node->u, sizeof(gl_uniform)); 1027 idx++; 1028 1029 free(node->u); 1030 free(node); 1031 } 1032 1033 hash_table_dtor(ht); 1034 1035 prog->Uniforms = ul; 1036} 1037 1038 1039/** 1040 * Find a contiguous set of available bits in a bitmask 1041 * 1042 * \param used_mask Bits representing used (1) and unused (0) locations 1043 * \param needed_count Number of contiguous bits needed. 1044 * 1045 * \return 1046 * Base location of the available bits on success or -1 on failure. 1047 */ 1048int 1049find_available_slots(unsigned used_mask, unsigned needed_count) 1050{ 1051 unsigned needed_mask = (1 << needed_count) - 1; 1052 const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count; 1053 1054 /* The comparison to 32 is redundant, but without it GCC emits "warning: 1055 * cannot optimize possibly infinite loops" for the loop below. 
1056 */ 1057 if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32)) 1058 return -1; 1059 1060 for (int i = 0; i <= max_bit_to_test; i++) { 1061 if ((needed_mask & ~used_mask) == needed_mask) 1062 return i; 1063 1064 needed_mask <<= 1; 1065 } 1066 1067 return -1; 1068} 1069 1070 1071bool 1072assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index) 1073{ 1074 /* Mark invalid attribute locations as being used. 1075 */ 1076 unsigned used_locations = (max_attribute_index >= 32) 1077 ? ~0 : ~((1 << max_attribute_index) - 1); 1078 1079 gl_shader *const sh = prog->_LinkedShaders[0]; 1080 assert(sh->Type == GL_VERTEX_SHADER); 1081 1082 /* Operate in a total of four passes. 1083 * 1084 * 1. Invalidate the location assignments for all vertex shader inputs. 1085 * 1086 * 2. Assign locations for inputs that have user-defined (via 1087 * glBindVertexAttribLocation) locatoins. 1088 * 1089 * 3. Sort the attributes without assigned locations by number of slots 1090 * required in decreasing order. Fragmentation caused by attribute 1091 * locations assigned by the application may prevent large attributes 1092 * from having enough contiguous space. 1093 * 1094 * 4. Assign locations to any inputs without assigned locations. 1095 */ 1096 1097 invalidate_variable_locations(sh, ir_var_in, VERT_ATTRIB_GENERIC0); 1098 1099 if (prog->Attributes != NULL) { 1100 for (unsigned i = 0; i < prog->Attributes->NumParameters; i++) { 1101 ir_variable *const var = 1102 sh->symbols->get_variable(prog->Attributes->Parameters[i].Name); 1103 1104 /* Note: attributes that occupy multiple slots, such as arrays or 1105 * matrices, may appear in the attrib array multiple times. 
1106 */ 1107 if ((var == NULL) || (var->location != -1)) 1108 continue; 1109 1110 /* From page 61 of the OpenGL 4.0 spec: 1111 * 1112 * "LinkProgram will fail if the attribute bindings assigned by 1113 * BindAttribLocation do not leave not enough space to assign a 1114 * location for an active matrix attribute or an active attribute 1115 * array, both of which require multiple contiguous generic 1116 * attributes." 1117 * 1118 * Previous versions of the spec contain similar language but omit the 1119 * bit about attribute arrays. 1120 * 1121 * Page 61 of the OpenGL 4.0 spec also says: 1122 * 1123 * "It is possible for an application to bind more than one 1124 * attribute name to the same location. This is referred to as 1125 * aliasing. This will only work if only one of the aliased 1126 * attributes is active in the executable program, or if no path 1127 * through the shader consumes more than one attribute of a set 1128 * of attributes aliased to the same location. A link error can 1129 * occur if the linker determines that every path through the 1130 * shader consumes multiple aliased attributes, but 1131 * implementations are not required to generate an error in this 1132 * case." 1133 * 1134 * These two paragraphs are either somewhat contradictory, or I don't 1135 * fully understand one or both of them. 1136 */ 1137 /* FINISHME: The code as currently written does not support attribute 1138 * FINISHME: location aliasing (see comment above). 1139 */ 1140 const int attr = prog->Attributes->Parameters[i].StateIndexes[0]; 1141 const unsigned slots = count_attribute_slots(var->type); 1142 1143 /* Mask representing the contiguous slots that will be used by this 1144 * attribute. 1145 */ 1146 const unsigned use_mask = (1 << slots) - 1; 1147 1148 /* Generate a link error if the set of bits requested for this 1149 * attribute overlaps any previously allocated bits. 
1150 */ 1151 if ((~(use_mask << attr) & used_locations) != used_locations) { 1152 linker_error_printf(prog, 1153 "insufficient contiguous attribute locations " 1154 "available for vertex shader input `%s'", 1155 var->name); 1156 return false; 1157 } 1158 1159 var->location = VERT_ATTRIB_GENERIC0 + attr; 1160 used_locations |= (use_mask << attr); 1161 } 1162 } 1163 1164 /* Temporary storage for the set of attributes that need locations assigned. 1165 */ 1166 struct temp_attr { 1167 unsigned slots; 1168 ir_variable *var; 1169 1170 /* Used below in the call to qsort. */ 1171 static int compare(const void *a, const void *b) 1172 { 1173 const temp_attr *const l = (const temp_attr *) a; 1174 const temp_attr *const r = (const temp_attr *) b; 1175 1176 /* Reversed because we want a descending order sort below. */ 1177 return r->slots - l->slots; 1178 } 1179 } to_assign[16]; 1180 1181 unsigned num_attr = 0; 1182 1183 foreach_list(node, sh->ir) { 1184 ir_variable *const var = ((ir_instruction *) node)->as_variable(); 1185 1186 if ((var == NULL) || (var->mode != ir_var_in)) 1187 continue; 1188 1189 /* The location was explicitly assigned, nothing to do here. 1190 */ 1191 if (var->location != -1) 1192 continue; 1193 1194 to_assign[num_attr].slots = count_attribute_slots(var->type); 1195 to_assign[num_attr].var = var; 1196 num_attr++; 1197 } 1198 1199 /* If all of the attributes were assigned locations by the application (or 1200 * are built-in attributes with fixed locations), return early. This should 1201 * be the common case. 1202 */ 1203 if (num_attr == 0) 1204 return true; 1205 1206 qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare); 1207 1208 /* VERT_ATTRIB_GENERIC0 is a psdueo-alias for VERT_ATTRIB_POS. It can only 1209 * be explicitly assigned by via glBindAttribLocation. Mark it as reserved 1210 * to prevent it from being automatically allocated below. 
1211 */ 1212 find_deref_visitor find("gl_Vertex"); 1213 find.run(sh->ir); 1214 if (find.variable_found()) 1215 used_locations |= (1 << 0); 1216 1217 for (unsigned i = 0; i < num_attr; i++) { 1218 /* Mask representing the contiguous slots that will be used by this 1219 * attribute. 1220 */ 1221 const unsigned use_mask = (1 << to_assign[i].slots) - 1; 1222 1223 int location = find_available_slots(used_locations, to_assign[i].slots); 1224 1225 if (location < 0) { 1226 linker_error_printf(prog, 1227 "insufficient contiguous attribute locations " 1228 "available for vertex shader input `%s'", 1229 to_assign[i].var->name); 1230 return false; 1231 } 1232 1233 to_assign[i].var->location = VERT_ATTRIB_GENERIC0 + location; 1234 used_locations |= (use_mask << location); 1235 } 1236 1237 return true; 1238} 1239 1240 1241/** 1242 * Demote shader outputs that are not read to being just plain global variables 1243 */ 1244void 1245demote_unread_shader_outputs(gl_shader *sh) 1246{ 1247 foreach_list(node, sh->ir) { 1248 ir_variable *const var = ((ir_instruction *) node)->as_variable(); 1249 1250 if ((var == NULL) || (var->mode != ir_var_out)) 1251 continue; 1252 1253 /* An 'out' variable is only really a shader output if its value is read 1254 * by the following stage. 1255 */ 1256 if (var->location == -1) { 1257 var->mode = ir_var_auto; 1258 } 1259 } 1260} 1261 1262 1263void 1264assign_varying_locations(struct gl_shader_program *prog, 1265 gl_shader *producer, gl_shader *consumer) 1266{ 1267 /* FINISHME: Set dynamically when geometry shader support is added. */ 1268 unsigned output_index = VERT_RESULT_VAR0; 1269 unsigned input_index = FRAG_ATTRIB_VAR0; 1270 1271 /* Operate in a total of three passes. 1272 * 1273 * 1. Assign locations for any matching inputs and outputs. 1274 * 1275 * 2. Mark output variables in the producer that do not have locations as 1276 * not being outputs. This lets the optimizer eliminate them. 1277 * 1278 * 3. 
Mark input variables in the consumer that do not have locations as 1279 * not being inputs. This lets the optimizer eliminate them. 1280 */ 1281 1282 invalidate_variable_locations(producer, ir_var_out, VERT_RESULT_VAR0); 1283 invalidate_variable_locations(consumer, ir_var_in, FRAG_ATTRIB_VAR0); 1284 1285 foreach_list(node, producer->ir) { 1286 ir_variable *const output_var = ((ir_instruction *) node)->as_variable(); 1287 1288 if ((output_var == NULL) || (output_var->mode != ir_var_out) 1289 || (output_var->location != -1)) 1290 continue; 1291 1292 ir_variable *const input_var = 1293 consumer->symbols->get_variable(output_var->name); 1294 1295 if ((input_var == NULL) || (input_var->mode != ir_var_in)) 1296 continue; 1297 1298 assert(input_var->location == -1); 1299 1300 output_var->location = output_index; 1301 input_var->location = input_index; 1302 1303 /* FINISHME: Support for "varying" records in GLSL 1.50. */ 1304 assert(!output_var->type->is_record()); 1305 1306 if (output_var->type->is_array()) { 1307 const unsigned slots = output_var->type->length 1308 * output_var->type->fields.array->matrix_columns; 1309 1310 output_index += slots; 1311 input_index += slots; 1312 } else { 1313 const unsigned slots = output_var->type->matrix_columns; 1314 1315 output_index += slots; 1316 input_index += slots; 1317 } 1318 } 1319 1320 demote_unread_shader_outputs(producer); 1321 1322 foreach_list(node, consumer->ir) { 1323 ir_variable *const var = ((ir_instruction *) node)->as_variable(); 1324 1325 if ((var == NULL) || (var->mode != ir_var_in)) 1326 continue; 1327 1328 if (var->location == -1) { 1329 if (prog->Version <= 120) { 1330 /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec: 1331 * 1332 * Only those varying variables used (i.e. read) in 1333 * the fragment shader executable must be written to 1334 * by the vertex shader executable; declaring 1335 * superfluous varying variables in a vertex shader is 1336 * permissible. 
1337 * 1338 * We interpret this text as meaning that the VS must 1339 * write the variable for the FS to read it. See 1340 * "glsl1-varying read but not written" in piglit. 1341 */ 1342 1343 linker_error_printf(prog, "fragment shader varying %s not written " 1344 "by vertex shader\n.", var->name); 1345 prog->LinkStatus = false; 1346 } 1347 1348 /* An 'in' variable is only really a shader input if its 1349 * value is written by the previous stage. 1350 */ 1351 var->mode = ir_var_auto; 1352 } 1353 } 1354} 1355 1356 1357void 1358link_shaders(GLcontext *ctx, struct gl_shader_program *prog) 1359{ 1360 prog->LinkStatus = false; 1361 prog->Validated = false; 1362 prog->_Used = false; 1363 1364 if (prog->InfoLog != NULL) 1365 talloc_free(prog->InfoLog); 1366 1367 prog->InfoLog = talloc_strdup(NULL, ""); 1368 1369 /* Separate the shaders into groups based on their type. 1370 */ 1371 struct gl_shader **vert_shader_list; 1372 unsigned num_vert_shaders = 0; 1373 struct gl_shader **frag_shader_list; 1374 unsigned num_frag_shaders = 0; 1375 1376 vert_shader_list = (struct gl_shader **) 1377 calloc(2 * prog->NumShaders, sizeof(struct gl_shader *)); 1378 frag_shader_list = &vert_shader_list[prog->NumShaders]; 1379 1380 unsigned min_version = UINT_MAX; 1381 unsigned max_version = 0; 1382 for (unsigned i = 0; i < prog->NumShaders; i++) { 1383 min_version = MIN2(min_version, prog->Shaders[i]->Version); 1384 max_version = MAX2(max_version, prog->Shaders[i]->Version); 1385 1386 switch (prog->Shaders[i]->Type) { 1387 case GL_VERTEX_SHADER: 1388 vert_shader_list[num_vert_shaders] = prog->Shaders[i]; 1389 num_vert_shaders++; 1390 break; 1391 case GL_FRAGMENT_SHADER: 1392 frag_shader_list[num_frag_shaders] = prog->Shaders[i]; 1393 num_frag_shaders++; 1394 break; 1395 case GL_GEOMETRY_SHADER: 1396 /* FINISHME: Support geometry shaders. 
*/ 1397 assert(prog->Shaders[i]->Type != GL_GEOMETRY_SHADER); 1398 break; 1399 } 1400 } 1401 1402 /* Previous to GLSL version 1.30, different compilation units could mix and 1403 * match shading language versions. With GLSL 1.30 and later, the versions 1404 * of all shaders must match. 1405 */ 1406 assert(min_version >= 100); 1407 assert(max_version <= 130); 1408 if ((max_version >= 130 || min_version == 100) 1409 && min_version != max_version) { 1410 linker_error_printf(prog, "all shaders must use same shading " 1411 "language version\n"); 1412 goto done; 1413 } 1414 1415 prog->Version = max_version; 1416 1417 for (unsigned int i = 0; i < prog->_NumLinkedShaders; i++) { 1418 ctx->Driver.DeleteShader(ctx, prog->_LinkedShaders[i]); 1419 } 1420 1421 /* Link all shaders for a particular stage and validate the result. 1422 */ 1423 prog->_NumLinkedShaders = 0; 1424 if (num_vert_shaders > 0) { 1425 gl_shader *const sh = 1426 link_intrastage_shaders(ctx, prog, vert_shader_list, num_vert_shaders); 1427 1428 if (sh == NULL) 1429 goto done; 1430 1431 if (!validate_vertex_shader_executable(prog, sh)) 1432 goto done; 1433 1434 prog->_LinkedShaders[prog->_NumLinkedShaders] = sh; 1435 prog->_NumLinkedShaders++; 1436 } 1437 1438 if (num_frag_shaders > 0) { 1439 gl_shader *const sh = 1440 link_intrastage_shaders(ctx, prog, frag_shader_list, num_frag_shaders); 1441 1442 if (sh == NULL) 1443 goto done; 1444 1445 if (!validate_fragment_shader_executable(prog, sh)) 1446 goto done; 1447 1448 prog->_LinkedShaders[prog->_NumLinkedShaders] = sh; 1449 prog->_NumLinkedShaders++; 1450 } 1451 1452 /* Here begins the inter-stage linking phase. Some initial validation is 1453 * performed, then locations are assigned for uniforms, attributes, and 1454 * varyings. 1455 */ 1456 if (cross_validate_uniforms(prog)) { 1457 /* Validate the inputs of each stage with the output of the preceeding 1458 * stage. 
1459 */ 1460 for (unsigned i = 1; i < prog->_NumLinkedShaders; i++) { 1461 if (!cross_validate_outputs_to_inputs(prog, 1462 prog->_LinkedShaders[i - 1], 1463 prog->_LinkedShaders[i])) 1464 goto done; 1465 } 1466 1467 prog->LinkStatus = true; 1468 } 1469 1470 /* Do common optimization before assigning storage for attributes, 1471 * uniforms, and varyings. Later optimization could possibly make 1472 * some of that unused. 1473 */ 1474 for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) { 1475 while (do_common_optimization(prog->_LinkedShaders[i]->ir, true, 32)) 1476 ; 1477 } 1478 1479 update_array_sizes(prog); 1480 1481 assign_uniform_locations(prog); 1482 1483 if (prog->_NumLinkedShaders && prog->_LinkedShaders[0]->Type == GL_VERTEX_SHADER) { 1484 /* FINISHME: The value of the max_attribute_index parameter is 1485 * FINISHME: implementation dependent based on the value of 1486 * FINISHME: GL_MAX_VERTEX_ATTRIBS. GL_MAX_VERTEX_ATTRIBS must be 1487 * FINISHME: at least 16, so hardcode 16 for now. 1488 */ 1489 if (!assign_attribute_locations(prog, 16)) 1490 goto done; 1491 1492 if (prog->_NumLinkedShaders == 1) 1493 demote_unread_shader_outputs(prog->_LinkedShaders[0]); 1494 } 1495 1496 for (unsigned i = 1; i < prog->_NumLinkedShaders; i++) 1497 assign_varying_locations(prog, 1498 prog->_LinkedShaders[i - 1], 1499 prog->_LinkedShaders[i]); 1500 1501 /* FINISHME: Assign fragment shader output locations. */ 1502 1503done: 1504 free(vert_shader_list); 1505} 1506