nir_lower_io.c revision 707ca00fce464af84212fa1fff573f5814a8c118
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

/*
 * This lowering pass converts loads and stores of input/output variables
 * into the corresponding offset-based input/output intrinsics.
 */

#include "nir.h"
#include "nir_builder.h"

struct lower_io_state {
   nir_builder builder;
   void *mem_ctx;
   int (*type_size)(const struct glsl_type *type);
   nir_variable_mode modes;
};

void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                         unsigned base_offset,
                         int (*type_size)(const struct glsl_type *))
{
   unsigned location = 0;

   /* There are 32 regular and 32 patch varyings allowed */
   int locations[64][2];
   for (unsigned i = 0; i < 64; i++) {
      for (unsigned j = 0; j < 2; j++)
         locations[i][j] = -1;
   }

   nir_foreach_variable(var, var_list) {
      /*
       * UBOs have their own address spaces, so don't count them towards the
       * number of global uniforms.
       */
      if ((var->data.mode == nir_var_uniform ||
           var->data.mode == nir_var_shader_storage) &&
          var->interface_type != NULL)
         continue;

      /* Make sure we give the same location to varyings packed with
       * ARB_enhanced_layouts.
       */
      int idx = var->data.location - base_offset;
      if (base_offset && idx >= 0) {
         assert(idx < ARRAY_SIZE(locations));

         if (locations[idx][var->data.index] == -1) {
            var->data.driver_location = location;
            locations[idx][var->data.index] = location;
            location += type_size(var->type);
         } else {
            var->data.driver_location = locations[idx][var->data.index];
         }
      } else {
         var->data.driver_location = location;
         location += type_size(var->type);
      }
   }

   *size = location;
}
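/*
 * For example (a sketch): with ARB_enhanced_layouts, two varyings packed
 * into the same slot, say "layout(location = 5, component = 0) out vec2 a"
 * and "layout(location = 5, component = 2) out vec2 b", reach
 * nir_assign_var_locations() above with the same var->data.location, so
 * both receive the same driver_location and only the first one advances
 * the running size.
 */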
/**
 * Returns true if we're processing a stage whose inputs are arrays indexed
 * by a vertex number (such as geometry shader inputs).
 */
static bool
is_per_vertex_input(struct lower_io_state *state, nir_variable *var)
{
   gl_shader_stage stage = state->builder.shader->stage;

   return var->data.mode == nir_var_shader_in && !var->data.patch &&
          (stage == MESA_SHADER_TESS_CTRL ||
           stage == MESA_SHADER_TESS_EVAL ||
           stage == MESA_SHADER_GEOMETRY);
}

static bool
is_per_vertex_output(struct lower_io_state *state, nir_variable *var)
{
   gl_shader_stage stage = state->builder.shader->stage;
   return var->data.mode == nir_var_shader_out && !var->data.patch &&
          stage == MESA_SHADER_TESS_CTRL;
}

static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_var *deref,
              nir_ssa_def **vertex_index,
              int (*type_size)(const struct glsl_type *))
{
   nir_deref *tail = &deref->deref;

   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
    * outermost array index separate.  Process the rest normally.
    */
   if (vertex_index != NULL) {
      tail = tail->child;
      assert(tail->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(tail);

      nir_ssa_def *vtx = nir_imm_int(b, deref_array->base_offset);
      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         vtx = nir_iadd(b, vtx, nir_ssa_for_src(b, deref_array->indirect, 1));
      }
      *vertex_index = vtx;
   }

   /* Just emit code and let constant-folding go to town */
   nir_ssa_def *offset = nir_imm_int(b, 0);

   while (tail->child != NULL) {
      const struct glsl_type *parent_type = tail->type;
      tail = tail->child;

      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *deref_array = nir_deref_as_array(tail);
         unsigned size = type_size(tail->type);

         offset = nir_iadd(b, offset,
                           nir_imm_int(b, size * deref_array->base_offset));

         if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
            nir_ssa_def *mul =
               nir_imul(b, nir_imm_int(b, size),
                        nir_ssa_for_src(b, deref_array->indirect, 1));

            offset = nir_iadd(b, offset, mul);
         }
      } else if (tail->deref_type == nir_deref_type_struct) {
         nir_deref_struct *deref_struct = nir_deref_as_struct(tail);

         unsigned field_offset = 0;
         for (unsigned i = 0; i < deref_struct->index; i++) {
            field_offset += type_size(glsl_get_struct_field(parent_type, i));
         }
         offset = nir_iadd(b, offset, nir_imm_int(b, field_offset));
      }
   }

   return offset;
}
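/*
 * For example (a sketch): given a deref chain like "var.f[i]", where the
 * struct fields preceding "f" total F units (as measured by the type_size
 * callback) and each element of the array "f" occupies E units,
 * get_io_offset() above builds roughly
 *
 *    offset = 0 + F + E * base + E * i
 *
 * where "base" is the constant part of the array index and "i" the
 * indirect part; constant folding later collapses the immediate terms.
 */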
static nir_intrinsic_instr *
lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
           nir_ssa_def *vertex_index, nir_ssa_def *offset)
{
   const nir_shader *nir = state->builder.shader;
   nir_variable *var = intrin->variables[0]->var;
   nir_variable_mode mode = var->data.mode;
   nir_ssa_def *barycentric = NULL;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      if (nir->stage == MESA_SHADER_FRAGMENT &&
          nir->options->use_interpolated_input_intrinsics &&
          var->data.interpolation != INTERP_MODE_FLAT) {
         assert(vertex_index == NULL);

         nir_intrinsic_op bary_op;
         if (var->data.sample)
            bary_op = nir_intrinsic_load_barycentric_sample;
         else if (var->data.centroid)
            bary_op = nir_intrinsic_load_barycentric_centroid;
         else
            bary_op = nir_intrinsic_load_barycentric_pixel;

         barycentric = nir_load_barycentric(&state->builder, bary_op,
                                            var->data.interpolation);
         op = nir_intrinsic_load_interpolated_input;
      } else {
         op = vertex_index ? nir_intrinsic_load_per_vertex_input :
                             nir_intrinsic_load_input;
      }
      break;
   case nir_var_shader_out:
      op = vertex_index ? nir_intrinsic_load_per_vertex_output :
                          nir_intrinsic_load_output;
      break;
   case nir_var_uniform:
      op = nir_intrinsic_load_uniform;
      break;
   case nir_var_shared:
      op = nir_intrinsic_load_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }

   nir_intrinsic_instr *load = nir_intrinsic_instr_create(state->mem_ctx, op);
   load->num_components = intrin->num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   if (mode == nir_var_shader_in || mode == nir_var_shader_out)
      nir_intrinsic_set_component(load, var->data.location_frac);

   if (load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_range(load, state->type_size(var->type));

   if (vertex_index) {
      load->src[0] = nir_src_for_ssa(vertex_index);
      load->src[1] = nir_src_for_ssa(offset);
   } else if (barycentric) {
      load->src[0] = nir_src_for_ssa(barycentric);
      load->src[1] = nir_src_for_ssa(offset);
   } else {
      load->src[0] = nir_src_for_ssa(offset);
   }

   return load;
}

static nir_intrinsic_instr *
lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
            nir_ssa_def *vertex_index, nir_ssa_def *offset)
{
   nir_variable *var = intrin->variables[0]->var;
   nir_variable_mode mode = var->data.mode;

   nir_intrinsic_op op;
   if (mode == nir_var_shared) {
      op = nir_intrinsic_store_shared;
   } else {
      assert(mode == nir_var_shader_out);
      op = vertex_index ? nir_intrinsic_store_per_vertex_output :
                          nir_intrinsic_store_output;
   }

   nir_intrinsic_instr *store = nir_intrinsic_instr_create(state->mem_ctx, op);
   store->num_components = intrin->num_components;

   nir_src_copy(&store->src[0], &intrin->src[0], store);

   nir_intrinsic_set_base(store, var->data.driver_location);

   if (mode == nir_var_shader_out)
      nir_intrinsic_set_component(store, var->data.location_frac);

   nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));

   if (vertex_index)
      store->src[1] = nir_src_for_ssa(vertex_index);

   store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);

   return store;
}
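/*
 * Source-layout convention used by lower_load() and lower_store() above:
 * the offset is always the last source; for per-vertex access the vertex
 * index (or, for interpolated loads, the barycentric setup) comes before
 * it, and for stores the value being written is always src[0].
 * nir_get_io_offset_src() and nir_get_io_vertex_index_src() at the bottom
 * of this file rely on exactly this layout.
 */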
static nir_intrinsic_instr *
lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
             nir_ssa_def *offset)
{
   nir_variable *var = intrin->variables[0]->var;

   assert(var->data.mode == nir_var_shared);

   nir_intrinsic_op op;
   switch (intrin->intrinsic) {
#define OP(O) case nir_intrinsic_var_##O: op = nir_intrinsic_shared_##O; break;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
#undef OP
   default:
      unreachable("Invalid atomic");
   }

   nir_intrinsic_instr *atomic =
      nir_intrinsic_instr_create(state->mem_ctx, op);

   nir_intrinsic_set_base(atomic, var->data.driver_location);

   atomic->src[0] = nir_src_for_ssa(offset);

   /* Copy the atomic's data operand(s) after the offset.  Intrinsic source
    * counts live in nir_intrinsic_infos; nir_op_infos is indexed by ALU
    * opcode and must not be indexed with an intrinsic opcode.
    */
   for (unsigned i = 0;
        i < nir_intrinsic_infos[intrin->intrinsic].num_srcs; i++) {
      nir_src_copy(&atomic->src[i + 1], &intrin->src[i], atomic);
   }

   return atomic;
}

static nir_intrinsic_instr *
lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
                     nir_ssa_def *offset)
{
   nir_variable *var = intrin->variables[0]->var;

   assert(var->data.mode == nir_var_shader_in);

   nir_intrinsic_op bary_op;
   switch (intrin->intrinsic) {
   case nir_intrinsic_interp_var_at_centroid:
      bary_op = nir_intrinsic_load_barycentric_centroid;
      break;
   case nir_intrinsic_interp_var_at_sample:
      bary_op = nir_intrinsic_load_barycentric_at_sample;
      break;
   case nir_intrinsic_interp_var_at_offset:
      bary_op = nir_intrinsic_load_barycentric_at_offset;
      break;
   default:
      unreachable("Bogus interpolateAt() intrinsic.");
   }

   nir_intrinsic_instr *bary_setup =
      nir_intrinsic_instr_create(state->mem_ctx, bary_op);

   nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
   nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);

   if (intrin->intrinsic != nir_intrinsic_interp_var_at_centroid)
      nir_src_copy(&bary_setup->src[0], &intrin->src[0], bary_setup);

   nir_builder_instr_insert(&state->builder, &bary_setup->instr);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->mem_ctx,
                                 nir_intrinsic_load_interpolated_input);
   load->num_components = intrin->num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   nir_intrinsic_set_component(load, var->data.location_frac);

   load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
   load->src[1] = nir_src_for_ssa(offset);

   return load;
}
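/*
 * For example (a sketch of the resulting NIR; exact printed syntax varies):
 * a GLSL call such as interpolateAtSample(color, s) arrives here as
 * interp_var_at_sample and leaves as a pair of intrinsics,
 *
 *    vec2 bary = load_barycentric_at_sample(s)          (interp_mode=...)
 *    vec4 val  = load_interpolated_input(bary, offset)  (base, component)
 *
 * which is the same shape lower_load() produces for ordinary interpolated
 * fragment inputs.
 */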
static bool
nir_lower_io_block(nir_block *block,
                   struct lower_io_state *state)
{
   nir_builder *b = &state->builder;
   const nir_shader_compiler_options *options = b->shader->options;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var:
      case nir_intrinsic_store_var:
      case nir_intrinsic_var_atomic_add:
      case nir_intrinsic_var_atomic_imin:
      case nir_intrinsic_var_atomic_umin:
      case nir_intrinsic_var_atomic_imax:
      case nir_intrinsic_var_atomic_umax:
      case nir_intrinsic_var_atomic_and:
      case nir_intrinsic_var_atomic_or:
      case nir_intrinsic_var_atomic_xor:
      case nir_intrinsic_var_atomic_exchange:
      case nir_intrinsic_var_atomic_comp_swap:
         /* We can lower the I/O for this NIR intrinsic */
         break;
      case nir_intrinsic_interp_var_at_centroid:
      case nir_intrinsic_interp_var_at_sample:
      case nir_intrinsic_interp_var_at_offset:
         /* We can optionally lower these to load_interpolated_input */
         if (options->use_interpolated_input_intrinsics)
            break;
         /* fallthrough */
      default:
         /* We can't lower the I/O for this NIR intrinsic, so skip it */
         continue;
      }

      nir_variable *var = intrin->variables[0]->var;
      nir_variable_mode mode = var->data.mode;

      if ((state->modes & mode) == 0)
         continue;

      if (mode != nir_var_shader_in &&
          mode != nir_var_shader_out &&
          mode != nir_var_shared &&
          mode != nir_var_uniform)
         continue;

      b->cursor = nir_before_instr(instr);

      const bool per_vertex =
         is_per_vertex_input(state, var) || is_per_vertex_output(state, var);

      nir_ssa_def *offset;
      nir_ssa_def *vertex_index = NULL;

      offset = get_io_offset(b, intrin->variables[0],
                             per_vertex ? &vertex_index : NULL,
                             state->type_size);

      nir_intrinsic_instr *replacement;

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var:
         replacement = lower_load(intrin, state, vertex_index, offset);
         break;

      case nir_intrinsic_store_var:
         replacement = lower_store(intrin, state, vertex_index, offset);
         break;

      case nir_intrinsic_var_atomic_add:
      case nir_intrinsic_var_atomic_imin:
      case nir_intrinsic_var_atomic_umin:
      case nir_intrinsic_var_atomic_imax:
      case nir_intrinsic_var_atomic_umax:
      case nir_intrinsic_var_atomic_and:
      case nir_intrinsic_var_atomic_or:
      case nir_intrinsic_var_atomic_xor:
      case nir_intrinsic_var_atomic_exchange:
      case nir_intrinsic_var_atomic_comp_swap:
         assert(vertex_index == NULL);
         replacement = lower_atomic(intrin, state, offset);
         break;

      case nir_intrinsic_interp_var_at_centroid:
      case nir_intrinsic_interp_var_at_sample:
      case nir_intrinsic_interp_var_at_offset:
         assert(vertex_index == NULL);
         replacement = lower_interpolate_at(intrin, state, offset);
         break;

      default:
         continue;
      }

      if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&replacement->instr, &replacement->dest,
                              intrin->dest.ssa.num_components,
                              intrin->dest.ssa.bit_size, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&replacement->dest.ssa));
         } else {
            nir_dest_copy(&replacement->dest, &intrin->dest, state->mem_ctx);
         }
      }

      nir_instr_insert_before(&intrin->instr, &replacement->instr);
      nir_instr_remove(&intrin->instr);
   }

   return true;
}

static void
nir_lower_io_impl(nir_function_impl *impl,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *))
{
   struct lower_io_state state;

   nir_builder_init(&state.builder, impl);
   state.mem_ctx = ralloc_parent(impl);
   state.modes = modes;
   state.type_size = type_size;

   nir_foreach_block(block, impl) {
      nir_lower_io_block(block, &state);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
}

void
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
             int (*type_size)(const struct glsl_type *))
{
   nir_foreach_function(function, shader) {
      if (function->impl)
         nir_lower_io_impl(function->impl, modes, type_size);
   }
}
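/*
 * Example usage of nir_lower_io() above (a sketch, not taken from any
 * particular driver): a backend that lays varyings out in vec4 slots might
 * pass a callback built on the glsl_count_attribute_slots() helper, and
 * must then interpret the base/offset of the resulting intrinsics with the
 * same granularity:
 *
 *    static int
 *    type_size_vec4(const struct glsl_type *type)
 *    {
 *       return glsl_count_attribute_slots(type, false);
 *    }
 *
 *    nir_lower_io(nir, nir_var_shader_in | nir_var_shader_out,
 *                 type_size_vec4);
 */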
/**
 * Return the offset source for a load/store intrinsic.
 */
nir_src *
nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_uniform:
      return &instr->src[0];
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_store_output:
      return &instr->src[1];
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[2];
   default:
      return NULL;
   }
}

/**
 * Return the vertex index source for a load/store per_vertex intrinsic.
 */
nir_src *
nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
      return &instr->src[0];
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[1];
   default:
      return NULL;
   }
}
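/*
 * Example usage (a sketch): a backend can combine nir_get_io_offset_src()
 * with nir_src_as_const_value() to detect a fully constant location:
 *
 *    nir_src *offset_src = nir_get_io_offset_src(intrin);
 *    nir_const_value *const_offset = nir_src_as_const_value(*offset_src);
 *    if (const_offset) {
 *       unsigned loc = nir_intrinsic_base(intrin) + const_offset->u32[0];
 *       ...
 *    }
 *
 * A NULL return means the intrinsic has no offset source at all.
 */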