brw_nir.c revision 45912fb908f7a1d2efbce0f1dbe81e5bc975fbe1
1/* 2 * Copyright © 2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24#include "brw_nir.h" 25#include "brw_shader.h" 26#include "compiler/glsl_types.h" 27#include "compiler/nir/nir_builder.h" 28 29static bool 30is_input(nir_intrinsic_instr *intrin) 31{ 32 return intrin->intrinsic == nir_intrinsic_load_input || 33 intrin->intrinsic == nir_intrinsic_load_per_vertex_input || 34 intrin->intrinsic == nir_intrinsic_load_interpolated_input; 35} 36 37static bool 38is_output(nir_intrinsic_instr *intrin) 39{ 40 return intrin->intrinsic == nir_intrinsic_load_output || 41 intrin->intrinsic == nir_intrinsic_load_per_vertex_output || 42 intrin->intrinsic == nir_intrinsic_store_output || 43 intrin->intrinsic == nir_intrinsic_store_per_vertex_output; 44} 45 46/** 47 * In many cases, we just add the base and offset together, so there's no 48 * reason to keep them separate. Sometimes, combining them is essential: 49 * if a shader only accesses part of a compound variable (such as a matrix 50 * or array), the variable's base may not actually exist in the VUE map. 51 * 52 * This pass adds constant offsets to instr->const_index[0], and resets 53 * the offset source to 0. Non-constant offsets remain unchanged - since 54 * we don't know what part of a compound variable is accessed, we allocate 55 * storage for the entire thing. 56 */ 57 58static bool 59add_const_offset_to_base_block(nir_block *block, nir_builder *b, 60 nir_variable_mode mode) 61{ 62 nir_foreach_instr_safe(instr, block) { 63 if (instr->type != nir_instr_type_intrinsic) 64 continue; 65 66 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr); 67 68 if ((mode == nir_var_shader_in && is_input(intrin)) || 69 (mode == nir_var_shader_out && is_output(intrin))) { 70 nir_src *offset = nir_get_io_offset_src(intrin); 71 nir_const_value *const_offset = nir_src_as_const_value(*offset); 72 73 if (const_offset) { 74 intrin->const_index[0] += const_offset->u32[0]; 75 b->cursor = nir_before_instr(&intrin->instr); 76 nir_instr_rewrite_src(&intrin->instr, offset, 77 nir_src_for_ssa(nir_imm_int(b, 0))); 78 } 79 } 80 } 81 return true; 82} 83 84static void 85add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode) 86{ 87 nir_foreach_function(f, nir) { 88 if (f->impl) { 89 nir_builder b; 90 nir_builder_init(&b, f->impl); 91 nir_foreach_block(block, f->impl) { 92 add_const_offset_to_base_block(block, &b, mode); 93 } 94 } 95 } 96} 97 98static bool 99remap_vs_attrs(nir_block *block, shader_info *nir_info) 100{ 101 nir_foreach_instr(instr, block) { 102 if (instr->type != nir_instr_type_intrinsic) 103 continue; 104 105 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr); 106 107 if (intrin->intrinsic == nir_intrinsic_load_input) { 108 /* Attributes come in a contiguous block, ordered by their 109 * gl_vert_attrib value. That means we can compute the slot 110 * number for an attribute by masking out the enabled attributes 111 * before it and counting the bits. 112 */ 113 int attr = intrin->const_index[0]; 114 int slot = _mesa_bitcount_64(nir_info->inputs_read & 115 BITFIELD64_MASK(attr)); 116 int dslot = _mesa_bitcount_64(nir_info->double_inputs_read & 117 BITFIELD64_MASK(attr)); 118 intrin->const_index[0] = 4 * (slot + dslot); 119 } 120 } 121 return true; 122} 123 124static bool 125remap_inputs_with_vue_map(nir_block *block, const struct brw_vue_map *vue_map) 126{ 127 nir_foreach_instr(instr, block) { 128 if (instr->type != nir_instr_type_intrinsic) 129 continue; 130 131 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr); 132 133 if (intrin->intrinsic == nir_intrinsic_load_input || 134 intrin->intrinsic == nir_intrinsic_load_per_vertex_input) { 135 int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]]; 136 assert(vue_slot != -1); 137 intrin->const_index[0] = vue_slot; 138 } 139 } 140 return true; 141} 142 143static bool 144remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr, 145 GLenum primitive_mode) 146{ 147 const int location = nir_intrinsic_base(intr); 148 const unsigned component = nir_intrinsic_component(intr); 149 bool out_of_bounds; 150 151 if (location == VARYING_SLOT_TESS_LEVEL_INNER) { 152 switch (primitive_mode) { 153 case GL_QUADS: 154 /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */ 155 nir_intrinsic_set_base(intr, 0); 156 nir_intrinsic_set_component(intr, 3 - component); 157 out_of_bounds = false; 158 break; 159 case GL_TRIANGLES: 160 /* gl_TessLevelInner[0] lives at DWord 4. */ 161 nir_intrinsic_set_base(intr, 1); 162 out_of_bounds = component > 0; 163 break; 164 case GL_ISOLINES: 165 out_of_bounds = true; 166 break; 167 default: 168 unreachable("Bogus tessellation domain"); 169 } 170 } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) { 171 if (primitive_mode == GL_ISOLINES) { 172 /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */ 173 nir_intrinsic_set_base(intr, 1); 174 nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr)); 175 out_of_bounds = component > 1; 176 } else { 177 /* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */ 178 nir_intrinsic_set_base(intr, 1); 179 nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr)); 180 out_of_bounds = component == 3 && primitive_mode == GL_TRIANGLES; 181 } 182 } else { 183 return false; 184 } 185 186 if (out_of_bounds) { 187 if (nir_intrinsic_infos[intr->intrinsic].has_dest) { 188 b->cursor = nir_before_instr(&intr->instr); 189 nir_ssa_def *undef = nir_ssa_undef(b, 1, 32); 190 nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(undef)); 191 } 192 nir_instr_remove(&intr->instr); 193 } 194 195 return true; 196} 197 198static bool 199remap_patch_urb_offsets(nir_block *block, nir_builder *b, 200 const struct brw_vue_map *vue_map, 201 GLenum tes_primitive_mode) 202{ 203 const bool is_passthrough_tcs = b->shader->info->name && 204 strcmp(b->shader->info->name, "passthrough") == 0; 205 206 nir_foreach_instr_safe(instr, block) { 207 if (instr->type != nir_instr_type_intrinsic) 208 continue; 209 210 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr); 211 212 gl_shader_stage stage = b->shader->stage; 213 214 if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) || 215 (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) { 216 217 if (!is_passthrough_tcs && 218 remap_tess_levels(b, intrin, tes_primitive_mode)) 219 continue; 220 221 int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]]; 222 assert(vue_slot != -1); 223 intrin->const_index[0] = vue_slot; 224 225 nir_src *vertex = nir_get_io_vertex_index_src(intrin); 226 if (vertex) { 227 nir_const_value *const_vertex = nir_src_as_const_value(*vertex); 228 if (const_vertex) { 229 intrin->const_index[0] += const_vertex->u32[0] * 230 vue_map->num_per_vertex_slots; 231 } else { 232 b->cursor = nir_before_instr(&intrin->instr); 233 234 /* Multiply by the number of per-vertex slots. */ 235 nir_ssa_def *vertex_offset = 236 nir_imul(b, 237 nir_ssa_for_src(b, *vertex, 1), 238 nir_imm_int(b, 239 vue_map->num_per_vertex_slots)); 240 241 /* Add it to the existing offset */ 242 nir_src *offset = nir_get_io_offset_src(intrin); 243 nir_ssa_def *total_offset = 244 nir_iadd(b, vertex_offset, 245 nir_ssa_for_src(b, *offset, 1)); 246 247 nir_instr_rewrite_src(&intrin->instr, offset, 248 nir_src_for_ssa(total_offset)); 249 } 250 } 251 } 252 } 253 return true; 254} 255 256void 257brw_nir_lower_vs_inputs(nir_shader *nir, 258 bool is_scalar, 259 bool use_legacy_snorm_formula, 260 const uint8_t *vs_attrib_wa_flags) 261{ 262 /* Start with the location of the variable's base. */ 263 foreach_list_typed(nir_variable, var, node, &nir->inputs) { 264 var->data.driver_location = var->data.location; 265 } 266 267 /* Now use nir_lower_io to walk dereference chains. Attribute arrays are 268 * loaded as one vec4 or dvec4 per element (or matrix column), depending on 269 * whether it is a double-precision type or not. 270 */ 271 nir_lower_io(nir, nir_var_shader_in, type_size_vs_input, 0); 272 273 /* This pass needs actual constants */ 274 nir_opt_constant_folding(nir); 275 276 add_const_offset_to_base(nir, nir_var_shader_in); 277 278 brw_nir_apply_attribute_workarounds(nir, use_legacy_snorm_formula, 279 vs_attrib_wa_flags); 280 281 if (is_scalar) { 282 /* Finally, translate VERT_ATTRIB_* values into the actual registers. */ 283 284 nir_foreach_function(function, nir) { 285 if (function->impl) { 286 nir_foreach_block(block, function->impl) { 287 remap_vs_attrs(block, nir->info); 288 } 289 } 290 } 291 } 292} 293 294void 295brw_nir_lower_vue_inputs(nir_shader *nir, bool is_scalar, 296 const struct brw_vue_map *vue_map) 297{ 298 foreach_list_typed(nir_variable, var, node, &nir->inputs) { 299 var->data.driver_location = var->data.location; 300 } 301 302 /* Inputs are stored in vec4 slots, so use type_size_vec4(). */ 303 nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0); 304 305 if (is_scalar || nir->stage != MESA_SHADER_GEOMETRY) { 306 /* This pass needs actual constants */ 307 nir_opt_constant_folding(nir); 308 309 add_const_offset_to_base(nir, nir_var_shader_in); 310 311 nir_foreach_function(function, nir) { 312 if (function->impl) { 313 nir_foreach_block(block, function->impl) { 314 remap_inputs_with_vue_map(block, vue_map); 315 } 316 } 317 } 318 } 319} 320 321void 322brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map) 323{ 324 foreach_list_typed(nir_variable, var, node, &nir->inputs) { 325 var->data.driver_location = var->data.location; 326 } 327 328 nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0); 329 330 /* This pass needs actual constants */ 331 nir_opt_constant_folding(nir); 332 333 add_const_offset_to_base(nir, nir_var_shader_in); 334 335 nir_foreach_function(function, nir) { 336 if (function->impl) { 337 nir_builder b; 338 nir_builder_init(&b, function->impl); 339 nir_foreach_block(block, function->impl) { 340 remap_patch_urb_offsets(block, &b, vue_map, 341 nir->info->tes.primitive_mode); 342 } 343 } 344 } 345} 346 347void 348brw_nir_lower_fs_inputs(nir_shader *nir, struct brw_vue_map *vue_map, 349 struct gl_program *prog, 350 const struct gen_device_info *devinfo, 351 const struct brw_wm_prog_key *key) 352{ 353 foreach_list_typed(nir_variable, var, node, &nir->inputs) { 354 var->data.driver_location = var->data.location; 355 356 /* Apply default interpolation mode. 357 * 358 * Everything defaults to smooth except for the legacy GL color 359 * built-in variables, which might be flat depending on API state. 360 */ 361 if (var->data.interpolation == INTERP_MODE_NONE) { 362 const bool flat = key->flat_shade && 363 (var->data.location == VARYING_SLOT_COL0 || 364 var->data.location == VARYING_SLOT_COL1); 365 366 var->data.interpolation = flat ? INTERP_MODE_FLAT 367 : INTERP_MODE_SMOOTH; 368 } 369 370 /* On Ironlake and below, there is only one interpolation mode. 371 * Centroid interpolation doesn't mean anything on this hardware -- 372 * there is no multisampling. 373 */ 374 if (devinfo->gen < 6) { 375 var->data.centroid = false; 376 var->data.sample = false; 377 } 378 } 379 380 if (devinfo->gen < 6) { 381 assert(prog); /* prog will be NULL when called from Vulkan */ 382 brw_setup_vue_interpolation(vue_map, nir, prog, devinfo); 383 } 384 385 nir_lower_io_options lower_io_options = 0; 386 if (key->persample_interp) 387 lower_io_options |= nir_lower_io_force_sample_interpolation; 388 389 nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options); 390 391 /* This pass needs actual constants */ 392 nir_opt_constant_folding(nir); 393 394 add_const_offset_to_base(nir, nir_var_shader_in); 395} 396 397void 398brw_nir_lower_vue_outputs(nir_shader *nir, 399 bool is_scalar) 400{ 401 nir_foreach_variable(var, &nir->outputs) { 402 var->data.driver_location = var->data.location; 403 } 404 405 nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0); 406} 407 408void 409brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map, 410 GLenum tes_primitive_mode) 411{ 412 nir_foreach_variable(var, &nir->outputs) { 413 var->data.driver_location = var->data.location; 414 } 415 416 nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0); 417 418 /* This pass needs actual constants */ 419 nir_opt_constant_folding(nir); 420 421 add_const_offset_to_base(nir, nir_var_shader_out); 422 423 nir_foreach_function(function, nir) { 424 if (function->impl) { 425 nir_builder b; 426 nir_builder_init(&b, function->impl); 427 nir_foreach_block(block, function->impl) { 428 remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode); 429 } 430 } 431 } 432} 433 434void 435brw_nir_lower_fs_outputs(nir_shader *nir) 436{ 437 nir_foreach_variable(var, &nir->outputs) { 438 var->data.driver_location = 439 SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) | 440 SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION); 441 } 442 443 nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0); 444} 445 446void 447brw_nir_lower_cs_shared(nir_shader *nir) 448{ 449 nir_assign_var_locations(&nir->shared, &nir->num_shared, 450 type_size_scalar_bytes); 451 nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes, 0); 452} 453 454#define OPT(pass, ...) ({ \ 455 bool this_progress = false; \ 456 NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \ 457 if (this_progress) \ 458 progress = true; \ 459 this_progress; \ 460}) 461 462#define OPT_V(pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__) 463 464static nir_shader * 465nir_optimize(nir_shader *nir, const struct brw_compiler *compiler, 466 bool is_scalar) 467{ 468 nir_variable_mode indirect_mask = 0; 469 if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectInput) 470 indirect_mask |= nir_var_shader_in; 471 if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectOutput) 472 indirect_mask |= nir_var_shader_out; 473 if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectTemp) 474 indirect_mask |= nir_var_local; 475 476 bool progress; 477 do { 478 progress = false; 479 OPT_V(nir_lower_vars_to_ssa); 480 OPT(nir_opt_copy_prop_vars); 481 482 if (is_scalar) { 483 OPT(nir_lower_alu_to_scalar); 484 } 485 486 OPT(nir_copy_prop); 487 488 if (is_scalar) { 489 OPT(nir_lower_phis_to_scalar); 490 } 491 492 OPT(nir_copy_prop); 493 OPT(nir_opt_dce); 494 OPT(nir_opt_cse); 495 OPT(nir_opt_peephole_select, 0); 496 OPT(nir_opt_algebraic); 497 OPT(nir_opt_constant_folding); 498 OPT(nir_opt_dead_cf); 499 if (OPT(nir_opt_trivial_continues)) { 500 /* If nir_opt_trivial_continues makes progress, then we need to clean 501 * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll 502 * to make progress. 503 */ 504 OPT(nir_copy_prop); 505 OPT(nir_opt_dce); 506 } 507 OPT(nir_opt_if); 508 if (nir->options->max_unroll_iterations != 0) { 509 OPT(nir_opt_loop_unroll, indirect_mask); 510 } 511 OPT(nir_opt_remove_phis); 512 OPT(nir_opt_undef); 513 OPT_V(nir_lower_doubles, nir_lower_drcp | 514 nir_lower_dsqrt | 515 nir_lower_drsq | 516 nir_lower_dtrunc | 517 nir_lower_dfloor | 518 nir_lower_dceil | 519 nir_lower_dfract | 520 nir_lower_dround_even | 521 nir_lower_dmod); 522 OPT_V(nir_lower_double_pack); 523 } while (progress); 524 525 return nir; 526} 527 528/* Does some simple lowering and runs the standard suite of optimizations 529 * 530 * This is intended to be called more-or-less directly after you get the 531 * shader out of GLSL or some other source. While it is geared towards i965, 532 * it is not at all generator-specific except for the is_scalar flag. Even 533 * there, it is safe to call with is_scalar = false for a shader that is 534 * intended for the FS backend as long as nir_optimize is called again with 535 * is_scalar = true to scalarize everything prior to code gen. 536 */ 537nir_shader * 538brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir) 539{ 540 const struct gen_device_info *devinfo = compiler->devinfo; 541 bool progress; /* Written by OPT and OPT_V */ 542 (void)progress; 543 544 const bool is_scalar = compiler->scalar_stage[nir->stage]; 545 546 if (nir->stage == MESA_SHADER_GEOMETRY) 547 OPT(nir_lower_gs_intrinsics); 548 549 /* See also brw_nir_trig_workarounds.py */ 550 if (compiler->precise_trig && 551 !(devinfo->gen >= 10 || devinfo->is_kabylake)) 552 OPT(brw_nir_apply_trig_workarounds); 553 554 static const nir_lower_tex_options tex_options = { 555 .lower_txp = ~0, 556 .lower_txf_offset = true, 557 .lower_rect_offset = true, 558 .lower_txd_cube_map = true, 559 }; 560 561 OPT(nir_lower_tex, &tex_options); 562 OPT(nir_normalize_cubemap_coords); 563 564 OPT(nir_lower_global_vars_to_local); 565 566 OPT(nir_split_var_copies); 567 568 nir = nir_optimize(nir, compiler, is_scalar); 569 570 if (is_scalar) { 571 OPT_V(nir_lower_load_const_to_scalar); 572 } 573 574 /* Lower a bunch of stuff */ 575 OPT_V(nir_lower_var_copies); 576 577 OPT_V(nir_lower_clip_cull_distance_arrays); 578 579 nir_variable_mode indirect_mask = 0; 580 if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectInput) 581 indirect_mask |= nir_var_shader_in; 582 if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectOutput) 583 indirect_mask |= nir_var_shader_out; 584 if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectTemp) 585 indirect_mask |= nir_var_local; 586 587 nir_lower_indirect_derefs(nir, indirect_mask); 588 589 /* Get rid of split copies */ 590 nir = nir_optimize(nir, compiler, is_scalar); 591 592 OPT(nir_remove_dead_variables, nir_var_local); 593 594 return nir; 595} 596 597/* Prepare the given shader for codegen 598 * 599 * This function is intended to be called right before going into the actual 600 * backend and is highly backend-specific. Also, once this function has been 601 * called on a shader, it will no longer be in SSA form so most optimizations 602 * will not work. 603 */ 604nir_shader * 605brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler, 606 bool is_scalar) 607{ 608 const struct gen_device_info *devinfo = compiler->devinfo; 609 bool debug_enabled = 610 (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->stage)); 611 612 bool progress; /* Written by OPT and OPT_V */ 613 (void)progress; 614 615 nir = nir_optimize(nir, compiler, is_scalar); 616 617 if (devinfo->gen >= 6) { 618 /* Try and fuse multiply-adds */ 619 OPT(brw_nir_opt_peephole_ffma); 620 } 621 622 OPT(nir_opt_algebraic_late); 623 624 OPT(nir_lower_locals_to_regs); 625 626 OPT_V(nir_lower_to_source_mods); 627 OPT(nir_copy_prop); 628 OPT(nir_opt_dce); 629 630 if (unlikely(debug_enabled)) { 631 /* Re-index SSA defs so we print more sensible numbers. */ 632 nir_foreach_function(function, nir) { 633 if (function->impl) 634 nir_index_ssa_defs(function->impl); 635 } 636 637 fprintf(stderr, "NIR (SSA form) for %s shader:\n", 638 _mesa_shader_stage_to_string(nir->stage)); 639 nir_print_shader(nir, stderr); 640 } 641 642 OPT_V(nir_convert_from_ssa, true); 643 644 if (!is_scalar) { 645 OPT_V(nir_move_vec_src_uses_to_dest); 646 OPT(nir_lower_vec_to_movs); 647 } 648 649 /* This is the last pass we run before we start emitting stuff. It 650 * determines when we need to insert boolean resolves on Gen <= 5. We 651 * run it last because it stashes data in instr->pass_flags and we don't 652 * want that to be squashed by other NIR passes. 653 */ 654 if (devinfo->gen <= 5) 655 brw_nir_analyze_boolean_resolves(nir); 656 657 nir_sweep(nir); 658 659 if (unlikely(debug_enabled)) { 660 fprintf(stderr, "NIR (final form) for %s shader:\n", 661 _mesa_shader_stage_to_string(nir->stage)); 662 nir_print_shader(nir, stderr); 663 } 664 665 return nir; 666} 667 668nir_shader * 669brw_nir_apply_sampler_key(nir_shader *nir, 670 const struct brw_compiler *compiler, 671 const struct brw_sampler_prog_key_data *key_tex, 672 bool is_scalar) 673{ 674 const struct gen_device_info *devinfo = compiler->devinfo; 675 nir_lower_tex_options tex_options = { 0 }; 676 677 /* Iron Lake and prior require lowering of all rectangle textures */ 678 if (devinfo->gen < 6) 679 tex_options.lower_rect = true; 680 681 /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */ 682 if (devinfo->gen < 8) { 683 tex_options.saturate_s = key_tex->gl_clamp_mask[0]; 684 tex_options.saturate_t = key_tex->gl_clamp_mask[1]; 685 tex_options.saturate_r = key_tex->gl_clamp_mask[2]; 686 } 687 688 /* Prior to Haswell, we have to fake texture swizzle */ 689 for (unsigned s = 0; s < MAX_SAMPLERS; s++) { 690 if (key_tex->swizzles[s] == SWIZZLE_NOOP) 691 continue; 692 693 tex_options.swizzle_result |= (1 << s); 694 for (unsigned c = 0; c < 4; c++) 695 tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c); 696 } 697 698 /* Prior to Haswell, we have to lower gradients on shadow samplers */ 699 tex_options.lower_txd_shadow = devinfo->gen < 8 && !devinfo->is_haswell; 700 701 tex_options.lower_y_uv_external = key_tex->y_uv_image_mask; 702 tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask; 703 tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask; 704 705 if (nir_lower_tex(nir, &tex_options)) { 706 nir_validate_shader(nir); 707 nir = nir_optimize(nir, compiler, is_scalar); 708 } 709 710 return nir; 711} 712 713enum brw_reg_type 714brw_type_for_nir_type(nir_alu_type type) 715{ 716 switch (type) { 717 case nir_type_uint: 718 case nir_type_uint32: 719 return BRW_REGISTER_TYPE_UD; 720 case nir_type_bool: 721 case nir_type_int: 722 case nir_type_bool32: 723 case nir_type_int32: 724 return BRW_REGISTER_TYPE_D; 725 case nir_type_float: 726 case nir_type_float32: 727 return BRW_REGISTER_TYPE_F; 728 case nir_type_float64: 729 return BRW_REGISTER_TYPE_DF; 730 case nir_type_int64: 731 case nir_type_uint64: 732 /* TODO we should only see these in moves, so for now it's ok, but when 733 * we add actual 64-bit integer support we should fix this. 734 */ 735 return BRW_REGISTER_TYPE_DF; 736 default: 737 unreachable("unknown type"); 738 } 739 740 return BRW_REGISTER_TYPE_F; 741} 742 743/* Returns the glsl_base_type corresponding to a nir_alu_type. 744 * This is used by both brw_vec4_nir and brw_fs_nir. 745 */ 746enum glsl_base_type 747brw_glsl_base_type_for_nir_type(nir_alu_type type) 748{ 749 switch (type) { 750 case nir_type_float: 751 case nir_type_float32: 752 return GLSL_TYPE_FLOAT; 753 754 case nir_type_float64: 755 return GLSL_TYPE_DOUBLE; 756 757 case nir_type_int: 758 case nir_type_int32: 759 return GLSL_TYPE_INT; 760 761 case nir_type_uint: 762 case nir_type_uint32: 763 return GLSL_TYPE_UINT; 764 765 default: 766 unreachable("bad type"); 767 } 768} 769