nir_lower_io.c revision f0f466214e65aa462f80d3608296685011862714
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

/*
 * This lowering pass converts loads and stores of input/output variables
 * into the corresponding input/output intrinsics.
 */

#include "nir.h"
#include "nir_builder.h"

struct lower_io_state {
   nir_builder builder;
   void *mem_ctx;
   int (*type_size)(const struct glsl_type *type);
   nir_variable_mode modes;
};

void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                         unsigned base_offset,
                         int (*type_size)(const struct glsl_type *))
{
   unsigned location = 0;

   /* There are 32 regular and 32 patch varyings allowed */
   int locations[64][2];
   for (unsigned i = 0; i < 64; i++) {
      for (unsigned j = 0; j < 2; j++)
         locations[i][j] = -1;
   }

   nir_foreach_variable(var, var_list) {
      /*
       * UBOs and SSBOs have their own address spaces, so don't count them
       * towards the number of global uniforms.
       */
      if ((var->data.mode == nir_var_uniform ||
           var->data.mode == nir_var_shader_storage) &&
          var->interface_type != NULL)
         continue;

      /* Make sure we give the same location to varyings packed with
       * ARB_enhanced_layouts.
       */
      int idx = var->data.location - base_offset;
      if (base_offset && idx >= 0) {
         assert(idx < ARRAY_SIZE(locations));

         if (locations[idx][var->data.index] == -1) {
            var->data.driver_location = location;
            locations[idx][var->data.index] = location;
            location += type_size(var->type);
         } else {
            var->data.driver_location = locations[idx][var->data.index];
         }
      } else {
         var->data.driver_location = location;
         location += type_size(var->type);
      }
   }

   *size = location;
}
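/* Usage sketch (illustrative commentary, not part of the original file):
 * how a driver might combine nir_assign_var_locations() and nir_lower_io()
 * when compiling a shader.  The "example_*" identifiers and the scalar
 * slot-counting callback are assumptions for illustration; a real driver
 * supplies its own type_size rules (e.g. vec4 slots).
 */
#if 0
static int
example_type_size_scalar(const struct glsl_type *type)
{
   /* One slot per scalar component. */
   return glsl_get_component_slots(type);
}

static void
example_lower_io(nir_shader *shader)
{
   unsigned num_inputs, num_outputs;

   /* Assign contiguous driver_locations to every input and output... */
   nir_assign_var_locations(&shader->inputs, &num_inputs, 0,
                            example_type_size_scalar);
   nir_assign_var_locations(&shader->outputs, &num_outputs, 0,
                            example_type_size_scalar);

   /* ...then rewrite load_var/store_var on those variables into
    * load_input/store_output intrinsics.
    */
   nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
                example_type_size_scalar);
}
#endif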
/**
 * Returns true if we're processing a stage whose inputs are arrays indexed
 * by a vertex number (such as geometry shader inputs).
 */
static bool
is_per_vertex_input(struct lower_io_state *state, nir_variable *var)
{
   gl_shader_stage stage = state->builder.shader->stage;

   return var->data.mode == nir_var_shader_in && !var->data.patch &&
          (stage == MESA_SHADER_TESS_CTRL ||
           stage == MESA_SHADER_TESS_EVAL ||
           stage == MESA_SHADER_GEOMETRY);
}

static bool
is_per_vertex_output(struct lower_io_state *state, nir_variable *var)
{
   gl_shader_stage stage = state->builder.shader->stage;
   return var->data.mode == nir_var_shader_out && !var->data.patch &&
          stage == MESA_SHADER_TESS_CTRL;
}

static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_var *deref,
              nir_ssa_def **vertex_index,
              int (*type_size)(const struct glsl_type *))
{
   nir_deref *tail = &deref->deref;

   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
    * outermost array index separate.  Process the rest normally.
    */
   if (vertex_index != NULL) {
      tail = tail->child;
      assert(tail->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(tail);

      nir_ssa_def *vtx = nir_imm_int(b, deref_array->base_offset);
      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         vtx = nir_iadd(b, vtx, nir_ssa_for_src(b, deref_array->indirect, 1));
      }
      *vertex_index = vtx;
   }

   /* Just emit code and let constant-folding go to town */
   nir_ssa_def *offset = nir_imm_int(b, 0);

   while (tail->child != NULL) {
      const struct glsl_type *parent_type = tail->type;
      tail = tail->child;

      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *deref_array = nir_deref_as_array(tail);
         unsigned size = type_size(tail->type);

         offset = nir_iadd(b, offset,
                           nir_imm_int(b, size * deref_array->base_offset));

         if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
            nir_ssa_def *mul =
               nir_imul(b, nir_imm_int(b, size),
                        nir_ssa_for_src(b, deref_array->indirect, 1));

            offset = nir_iadd(b, offset, mul);
         }
      } else if (tail->deref_type == nir_deref_type_struct) {
         nir_deref_struct *deref_struct = nir_deref_as_struct(tail);

         unsigned field_offset = 0;
         for (unsigned i = 0; i < deref_struct->index; i++) {
            field_offset += type_size(glsl_get_struct_field(parent_type, i));
         }
         offset = nir_iadd(b, offset, nir_imm_int(b, field_offset));
      }
   }

   return offset;
}
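/* Worked example (added commentary, not part of the original file): given
 *
 *    struct S { vec4 a; vec4 b; };
 *    uniform S s[4];
 *
 * and a type_size() that counts vec4 slots (type_size(S) == 2,
 * type_size(vec4) == 1), lowering a load of s[i].b makes get_io_offset()
 * emit roughly
 *
 *    offset = iadd(iadd(imm(0), imul(imm(2), i)), imm(1))
 *
 * i.e. 2 * i for the array step plus 1 for field "b"; constant folding
 * later cleans up the imm(0) term, as the comment above notes.
 */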
static nir_intrinsic_instr *
lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
           nir_ssa_def *vertex_index, nir_ssa_def *offset)
{
   nir_variable *var = intrin->variables[0]->var;
   nir_variable_mode mode = var->data.mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      op = vertex_index ? nir_intrinsic_load_per_vertex_input :
                          nir_intrinsic_load_input;
      break;
   case nir_var_shader_out:
      op = vertex_index ? nir_intrinsic_load_per_vertex_output :
                          nir_intrinsic_load_output;
      break;
   case nir_var_uniform:
      op = nir_intrinsic_load_uniform;
      break;
   case nir_var_shared:
      op = nir_intrinsic_load_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }

   nir_intrinsic_instr *load = nir_intrinsic_instr_create(state->mem_ctx, op);
   load->num_components = intrin->num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   if (mode == nir_var_shader_in || mode == nir_var_shader_out)
      nir_intrinsic_set_component(load, var->data.location_frac);

   if (load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_range(load, state->type_size(var->type));

   if (vertex_index)
      load->src[0] = nir_src_for_ssa(vertex_index);

   load->src[vertex_index ? 1 : 0] = nir_src_for_ssa(offset);

   return load;
}

static nir_intrinsic_instr *
lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
            nir_ssa_def *vertex_index, nir_ssa_def *offset)
{
   nir_variable *var = intrin->variables[0]->var;
   nir_variable_mode mode = var->data.mode;

   nir_intrinsic_op op;
   if (mode == nir_var_shared) {
      op = nir_intrinsic_store_shared;
   } else {
      assert(mode == nir_var_shader_out);
      op = vertex_index ? nir_intrinsic_store_per_vertex_output :
                          nir_intrinsic_store_output;
   }

   nir_intrinsic_instr *store = nir_intrinsic_instr_create(state->mem_ctx, op);
   store->num_components = intrin->num_components;

   nir_src_copy(&store->src[0], &intrin->src[0], store);

   nir_intrinsic_set_base(store, var->data.driver_location);

   if (mode == nir_var_shader_out)
      nir_intrinsic_set_component(store, var->data.location_frac);

   nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));

   if (vertex_index)
      store->src[1] = nir_src_for_ssa(vertex_index);

   store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);

   return store;
}
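/* Source layout of the lowered intrinsics (added commentary): loads take
 * [vertex_index,] offset and stores take value, [vertex_index,] offset,
 * with vertex_index present only for the per_vertex variants.  The
 * nir_get_io_offset_src()/nir_get_io_vertex_index_src() helpers at the end
 * of this file recover these sources positionally.
 */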
static nir_intrinsic_instr *
lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
             nir_ssa_def *offset)
{
   nir_variable *var = intrin->variables[0]->var;

   assert(var->data.mode == nir_var_shared);

   nir_intrinsic_op op;
   switch (intrin->intrinsic) {
#define OP(O) case nir_intrinsic_var_##O: op = nir_intrinsic_shared_##O; break;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
#undef OP
   default:
      unreachable("Invalid atomic");
   }

   nir_intrinsic_instr *atomic =
      nir_intrinsic_instr_create(state->mem_ctx, op);

   nir_intrinsic_set_base(atomic, var->data.driver_location);

   atomic->src[0] = nir_src_for_ssa(offset);
   /* This is an intrinsic, so use nir_intrinsic_infos (not nir_op_infos,
    * which is indexed by ALU opcode) to find its source count.
    */
   for (unsigned i = 0;
        i < nir_intrinsic_infos[intrin->intrinsic].num_srcs;
        i++) {
      nir_src_copy(&atomic->src[i+1], &intrin->src[i], atomic);
   }

   return atomic;
}
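/* Added commentary: lower_atomic() maps each var_atomic_* intrinsic onto
 * its shared_atomic_* counterpart with src[0] = offset and the original
 * data sources (one for most atomics, two for comp_swap) shifted up by
 * one slot; the variable dereference is replaced by the "base" index.
 */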
static bool
nir_lower_io_block(nir_block *block,
                   struct lower_io_state *state)
{
   nir_builder *b = &state->builder;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var:
      case nir_intrinsic_store_var:
      case nir_intrinsic_var_atomic_add:
      case nir_intrinsic_var_atomic_imin:
      case nir_intrinsic_var_atomic_umin:
      case nir_intrinsic_var_atomic_imax:
      case nir_intrinsic_var_atomic_umax:
      case nir_intrinsic_var_atomic_and:
      case nir_intrinsic_var_atomic_or:
      case nir_intrinsic_var_atomic_xor:
      case nir_intrinsic_var_atomic_exchange:
      case nir_intrinsic_var_atomic_comp_swap:
         /* We can lower the IO for this NIR intrinsic */
         break;
      default:
         /* We can't lower the IO for this NIR intrinsic, so skip it */
         continue;
      }

      nir_variable *var = intrin->variables[0]->var;
      nir_variable_mode mode = var->data.mode;

      if ((state->modes & mode) == 0)
         continue;

      if (mode != nir_var_shader_in &&
          mode != nir_var_shader_out &&
          mode != nir_var_shared &&
          mode != nir_var_uniform)
         continue;

      b->cursor = nir_before_instr(instr);

      const bool per_vertex =
         is_per_vertex_input(state, var) || is_per_vertex_output(state, var);

      nir_ssa_def *offset;
      nir_ssa_def *vertex_index = NULL;

      offset = get_io_offset(b, intrin->variables[0],
                             per_vertex ? &vertex_index : NULL,
                             state->type_size);

      nir_intrinsic_instr *replacement;

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var:
         replacement = lower_load(intrin, state, vertex_index, offset);
         break;

      case nir_intrinsic_store_var:
         replacement = lower_store(intrin, state, vertex_index, offset);
         break;

      case nir_intrinsic_var_atomic_add:
      case nir_intrinsic_var_atomic_imin:
      case nir_intrinsic_var_atomic_umin:
      case nir_intrinsic_var_atomic_imax:
      case nir_intrinsic_var_atomic_umax:
      case nir_intrinsic_var_atomic_and:
      case nir_intrinsic_var_atomic_or:
      case nir_intrinsic_var_atomic_xor:
      case nir_intrinsic_var_atomic_exchange:
      case nir_intrinsic_var_atomic_comp_swap:
         assert(vertex_index == NULL);
         replacement = lower_atomic(intrin, state, offset);
         break;

      default:
         continue;
      }

      if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&replacement->instr, &replacement->dest,
                              intrin->dest.ssa.num_components,
                              intrin->dest.ssa.bit_size, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&replacement->dest.ssa));
         } else {
            nir_dest_copy(&replacement->dest, &intrin->dest, state->mem_ctx);
         }
      }

      nir_instr_insert_before(&intrin->instr, &replacement->instr);
      nir_instr_remove(&intrin->instr);
   }

   return true;
}

static void
nir_lower_io_impl(nir_function_impl *impl,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *))
{
   struct lower_io_state state;

   nir_builder_init(&state.builder, impl);
   state.mem_ctx = ralloc_parent(impl);
   state.modes = modes;
   state.type_size = type_size;

   nir_foreach_block(block, impl) {
      nir_lower_io_block(block, &state);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
}

void
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
             int (*type_size)(const struct glsl_type *))
{
   nir_foreach_function(function, shader) {
      if (function->impl)
         nir_lower_io_impl(function->impl, modes, type_size);
   }
}

/**
 * Return the offset source for a load/store intrinsic.
 */
nir_src *
nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_uniform:
      return &instr->src[0];
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
      return &instr->src[1];
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[2];
   default:
      return NULL;
   }
}

/**
 * Return the vertex index source for a load/store per_vertex intrinsic.
 */
nir_src *
nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
      return &instr->src[0];
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[1];
   default:
      return NULL;
   }
}
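/* Usage sketch (illustrative, not part of the original file): a backend can
 * use the getter above to handle direct vs. indirect addressing uniformly
 * after this pass has run.  "example_offset_is_const" is a hypothetical
 * helper; nir_src_as_const_value() is the stock NIR query it builds on.
 */
#if 0
static bool
example_offset_is_const(nir_intrinsic_instr *instr, unsigned *out_offset)
{
   nir_src *offset_src = nir_get_io_offset_src(instr);
   if (offset_src == NULL)
      return false;

   nir_const_value *cv = nir_src_as_const_value(*offset_src);
   if (cv == NULL)
      return false; /* indirect addressing: offset is a dynamic SSA value */

   /* Direct addressing: fold the constant offset into the base index. */
   *out_offset = nir_intrinsic_base(instr) + cv->u32[0];
   return true;
}
#endif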