/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "si_pipe.h"
#include "sid.h"
#include "radeon/r600_cs.h"

#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_ureg.h"
#include "util/hash_table.h"
#include "util/crc32.h"
#include "util/u_memory.h"
#include "util/u_prim.h"

/* SHADER_CACHE */

/**
 * Return the TGSI binary in a buffer. The first 4 bytes contain its size
 * as an integer.
 */
static void *si_get_tgsi_binary(struct si_shader_selector *sel)
{
        unsigned tgsi_size = tgsi_num_tokens(sel->tokens) *
                             sizeof(struct tgsi_token);
        unsigned size = 4 + tgsi_size + sizeof(sel->so);
        char *result = (char*)MALLOC(size);

        if (!result)
                return NULL;

        *((uint32_t*)result) = size;
        memcpy(result + 4, sel->tokens, tgsi_size);
        memcpy(result + 4 + tgsi_size, &sel->so, sizeof(sel->so));
        return result;
}

/** Copy "data" to "ptr" and return the next dword following the copied data. */
static uint32_t *write_data(uint32_t *ptr, const void *data, unsigned size)
{
        /* data may be NULL if size == 0 */
        if (size)
                memcpy(ptr, data, size);
        ptr += DIV_ROUND_UP(size, 4);
        return ptr;
}

/** Read data from "ptr". Return the next dword following the data. */
static uint32_t *read_data(uint32_t *ptr, void *data, unsigned size)
{
        memcpy(data, ptr, size);
        ptr += DIV_ROUND_UP(size, 4);
        return ptr;
}

/**
 * Write the size as uint followed by the data. Return the next dword
 * following the copied data.
 */
static uint32_t *write_chunk(uint32_t *ptr, const void *data, unsigned size)
{
        *ptr++ = size;
        return write_data(ptr, data, size);
}

/**
 * Read the size as uint followed by the data. Return both via parameters.
 * Return the next dword following the data.
 */
static uint32_t *read_chunk(uint32_t *ptr, void **data, unsigned *size)
{
        *size = *ptr++;
        assert(*data == NULL);
        if (!*size)
                return ptr;
        *data = malloc(*size);
        return read_data(ptr, *data, *size);
}

/**
 * Return the shader binary in a buffer. The first 4 bytes contain its size
 * as an integer.
 */
static void *si_get_shader_binary(struct si_shader *shader)
{
        /* There is always a size of data followed by the data itself.
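         *
         * For reference, the layout produced by the writes below is:
         *
         *   uint32  total size in bytes
         *   uint32  CRC32 of everything that follows
         *   config, info                      raw structs, dword-aligned
         *   code, rodata, relocs,
         *   disasm_string, llvm_ir_string     each as (uint32 size, data)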
         */
        unsigned relocs_size = shader->binary.reloc_count *
                               sizeof(shader->binary.relocs[0]);
        unsigned disasm_size = strlen(shader->binary.disasm_string) + 1;
        unsigned llvm_ir_size = shader->binary.llvm_ir_string ?
                                strlen(shader->binary.llvm_ir_string) + 1 : 0;
        unsigned size =
                4 + /* total size */
                4 + /* CRC32 of the data below */
                align(sizeof(shader->config), 4) +
                align(sizeof(shader->info), 4) +
                4 + align(shader->binary.code_size, 4) +
                4 + align(shader->binary.rodata_size, 4) +
                4 + align(relocs_size, 4) +
                4 + align(disasm_size, 4) +
                4 + align(llvm_ir_size, 4);
        void *buffer = CALLOC(1, size);
        uint32_t *ptr = (uint32_t*)buffer;

        if (!buffer)
                return NULL;

        *ptr++ = size;
        ptr++; /* CRC32 is calculated at the end. */

        ptr = write_data(ptr, &shader->config, sizeof(shader->config));
        ptr = write_data(ptr, &shader->info, sizeof(shader->info));
        ptr = write_chunk(ptr, shader->binary.code, shader->binary.code_size);
        ptr = write_chunk(ptr, shader->binary.rodata, shader->binary.rodata_size);
        ptr = write_chunk(ptr, shader->binary.relocs, relocs_size);
        ptr = write_chunk(ptr, shader->binary.disasm_string, disasm_size);
        ptr = write_chunk(ptr, shader->binary.llvm_ir_string, llvm_ir_size);
        assert((char *)ptr - (char *)buffer == size);

        /* Compute CRC32. */
        ptr = (uint32_t*)buffer;
        ptr++;
        *ptr = util_hash_crc32(ptr + 1, size - 8);

        return buffer;
}

static bool si_load_shader_binary(struct si_shader *shader, void *binary)
{
        uint32_t *ptr = (uint32_t*)binary;
        uint32_t size = *ptr++;
        uint32_t crc32 = *ptr++;
        unsigned chunk_size;

        if (util_hash_crc32(ptr, size - 8) != crc32) {
                fprintf(stderr, "radeonsi: binary shader has invalid CRC32\n");
                return false;
        }

        ptr = read_data(ptr, &shader->config, sizeof(shader->config));
        ptr = read_data(ptr, &shader->info, sizeof(shader->info));
        ptr = read_chunk(ptr, (void**)&shader->binary.code,
                         &shader->binary.code_size);
        ptr = read_chunk(ptr, (void**)&shader->binary.rodata,
                         &shader->binary.rodata_size);
        ptr = read_chunk(ptr, (void**)&shader->binary.relocs, &chunk_size);
        shader->binary.reloc_count = chunk_size / sizeof(shader->binary.relocs[0]);
        ptr = read_chunk(ptr, (void**)&shader->binary.disasm_string, &chunk_size);
        ptr = read_chunk(ptr, (void**)&shader->binary.llvm_ir_string, &chunk_size);

        return true;
}

/**
 * Insert a shader into the cache. It's assumed the shader is not in the cache.
 * Use si_shader_cache_load_shader before calling this.
 *
 * Returns false on failure, in which case the tgsi_binary should be freed.
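 *
 * Expected call pattern (mirroring si_init_shader_selector_async below;
 * the caller holds sscreen->shader_cache_mutex):
 *
 *   pipe_mutex_lock(sscreen->shader_cache_mutex);
 *   if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader))
 *           FREE(tgsi_binary);
 *   pipe_mutex_unlock(sscreen->shader_cache_mutex);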
 */
static bool si_shader_cache_insert_shader(struct si_screen *sscreen,
                                          void *tgsi_binary,
                                          struct si_shader *shader)
{
        void *hw_binary;
        struct hash_entry *entry;

        entry = _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
        if (entry)
                return false; /* already added */

        hw_binary = si_get_shader_binary(shader);
        if (!hw_binary)
                return false;

        if (_mesa_hash_table_insert(sscreen->shader_cache, tgsi_binary,
                                    hw_binary) == NULL) {
                FREE(hw_binary);
                return false;
        }

        return true;
}

static bool si_shader_cache_load_shader(struct si_screen *sscreen,
                                        void *tgsi_binary,
                                        struct si_shader *shader)
{
        struct hash_entry *entry =
                _mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
        if (!entry)
                return false;

        if (!si_load_shader_binary(shader, entry->data))
                return false;

        p_atomic_inc(&sscreen->b.num_shader_cache_hits);
        return true;
}

static uint32_t si_shader_cache_key_hash(const void *key)
{
        /* The first dword is the key size. */
        return util_hash_crc32(key, *(uint32_t*)key);
}

static bool si_shader_cache_key_equals(const void *a, const void *b)
{
        uint32_t *keya = (uint32_t*)a;
        uint32_t *keyb = (uint32_t*)b;

        /* The first dword is the key size. */
        if (*keya != *keyb)
                return false;

        return memcmp(keya, keyb, *keya) == 0;
}

static void si_destroy_shader_cache_entry(struct hash_entry *entry)
{
        FREE((void*)entry->key);
        FREE(entry->data);
}

bool si_init_shader_cache(struct si_screen *sscreen)
{
        pipe_mutex_init(sscreen->shader_cache_mutex);
        sscreen->shader_cache =
                _mesa_hash_table_create(NULL,
                                        si_shader_cache_key_hash,
                                        si_shader_cache_key_equals);
        return sscreen->shader_cache != NULL;
}

void si_destroy_shader_cache(struct si_screen *sscreen)
{
        if (sscreen->shader_cache)
                _mesa_hash_table_destroy(sscreen->shader_cache,
                                         si_destroy_shader_cache_entry);
        pipe_mutex_destroy(sscreen->shader_cache_mutex);
}

/* SHADER STATES */

static void si_set_tesseval_regs(struct si_screen *sscreen,
                                 struct si_shader *shader,
                                 struct si_pm4_state *pm4)
{
        struct tgsi_shader_info *info = &shader->selector->info;
        unsigned tes_prim_mode = info->properties[TGSI_PROPERTY_TES_PRIM_MODE];
        unsigned tes_spacing = info->properties[TGSI_PROPERTY_TES_SPACING];
        bool tes_vertex_order_cw = info->properties[TGSI_PROPERTY_TES_VERTEX_ORDER_CW];
        bool tes_point_mode = info->properties[TGSI_PROPERTY_TES_POINT_MODE];
        unsigned type, partitioning, topology, distribution_mode;

        switch (tes_prim_mode) {
        case PIPE_PRIM_LINES:
                type = V_028B6C_TESS_ISOLINE;
                break;
        case PIPE_PRIM_TRIANGLES:
                type = V_028B6C_TESS_TRIANGLE;
                break;
        case PIPE_PRIM_QUADS:
                type = V_028B6C_TESS_QUAD;
                break;
        default:
                assert(0);
                return;
        }

        switch (tes_spacing) {
        case PIPE_TESS_SPACING_FRACTIONAL_ODD:
                partitioning = V_028B6C_PART_FRAC_ODD;
                break;
        case PIPE_TESS_SPACING_FRACTIONAL_EVEN:
                partitioning = V_028B6C_PART_FRAC_EVEN;
                break;
        case PIPE_TESS_SPACING_EQUAL:
                partitioning = V_028B6C_PART_INTEGER;
                break;
        default:
                assert(0);
                return;
        }

        if (tes_point_mode)
                topology = V_028B6C_OUTPUT_POINT;
        else if (tes_prim_mode == PIPE_PRIM_LINES)
                topology = V_028B6C_OUTPUT_LINE;
        else if (tes_vertex_order_cw)
                /* for some reason, this must be the other way around */
                topology = V_028B6C_OUTPUT_TRIANGLE_CCW;
        else
                topology = V_028B6C_OUTPUT_TRIANGLE_CW;

        if (sscreen->has_distributed_tess) {
                if (sscreen->b.family == CHIP_FIJI ||
                    sscreen->b.family >= CHIP_POLARIS10)
                        distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
                else
                        distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
        } else
                distribution_mode = V_028B6C_DISTRIBUTION_MODE_NO_DIST;

        si_pm4_set_reg(pm4, R_028B6C_VGT_TF_PARAM,
                       S_028B6C_TYPE(type) |
                       S_028B6C_PARTITIONING(partitioning) |
                       S_028B6C_TOPOLOGY(topology) |
                       S_028B6C_DISTRIBUTION_MODE(distribution_mode));
}

static struct si_pm4_state *si_get_shader_pm4_state(struct si_shader *shader)
{
        if (shader->pm4)
                si_pm4_clear_state(shader->pm4);
        else
                shader->pm4 = CALLOC_STRUCT(si_pm4_state);

        return shader->pm4;
}

static void si_shader_ls(struct si_shader *shader)
{
        struct si_pm4_state *pm4;
        unsigned vgpr_comp_cnt;
        uint64_t va;

        pm4 = si_get_shader_pm4_state(shader);
        if (!pm4)
                return;

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

        /* We need at least 2 components for LS.
         * VGPR0-3: (VertexID, RelAutoindex, ???, InstanceID). */
        vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : 1;

        si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
        si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS, va >> 40);

        shader->config.rsrc1 = S_00B528_VGPRS((shader->config.num_vgprs - 1) / 4) |
                               S_00B528_SGPRS((shader->config.num_sgprs - 1) / 8) |
                               S_00B528_VGPR_COMP_CNT(vgpr_comp_cnt) |
                               S_00B528_DX10_CLAMP(1) |
                               S_00B528_FLOAT_MODE(shader->config.float_mode);
        shader->config.rsrc2 = S_00B52C_USER_SGPR(SI_LS_NUM_USER_SGPR) |
                               S_00B52C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
}

static void si_shader_hs(struct si_shader *shader)
{
        struct si_pm4_state *pm4;
        uint64_t va;

        pm4 = si_get_shader_pm4_state(shader);
        if (!pm4)
                return;

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

        si_pm4_set_reg(pm4, R_00B420_SPI_SHADER_PGM_LO_HS, va >> 8);
        si_pm4_set_reg(pm4, R_00B424_SPI_SHADER_PGM_HI_HS, va >> 40);
        si_pm4_set_reg(pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
                       S_00B428_VGPRS((shader->config.num_vgprs - 1) / 4) |
                       S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8) |
                       S_00B428_DX10_CLAMP(1) |
                       S_00B428_FLOAT_MODE(shader->config.float_mode));
        si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
                       S_00B42C_USER_SGPR(SI_TCS_NUM_USER_SGPR) |
                       S_00B42C_OC_LDS_EN(1) |
                       S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
}

static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
{
        struct si_pm4_state *pm4;
        unsigned num_user_sgprs;
        unsigned vgpr_comp_cnt;
        uint64_t va;
        unsigned oc_lds_en;

        pm4 = si_get_shader_pm4_state(shader);
        if (!pm4)
                return;

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

        if (shader->selector->type == PIPE_SHADER_VERTEX) {
                vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : 0;
                num_user_sgprs = SI_ES_NUM_USER_SGPR;
        } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
                vgpr_comp_cnt = 3; /* all components are needed for TES */
                num_user_sgprs = SI_TES_NUM_USER_SGPR;
        } else
                unreachable("invalid shader selector type");

        oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;

        si_pm4_set_reg(pm4, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
                       shader->selector->esgs_itemsize / 4);
        si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
        si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES, va >> 40);
        si_pm4_set_reg(pm4, R_00B328_SPI_SHADER_PGM_RSRC1_ES,
                       S_00B328_VGPRS((shader->config.num_vgprs - 1) / 4) |
                       S_00B328_SGPRS((shader->config.num_sgprs - 1) / 8) |
                       S_00B328_VGPR_COMP_CNT(vgpr_comp_cnt) |
                       S_00B328_DX10_CLAMP(1) |
                       S_00B328_FLOAT_MODE(shader->config.float_mode));
        si_pm4_set_reg(pm4, R_00B32C_SPI_SHADER_PGM_RSRC2_ES,
                       S_00B32C_USER_SGPR(num_user_sgprs) |
                       S_00B32C_OC_LDS_EN(oc_lds_en) |
                       S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));

        if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
                si_set_tesseval_regs(sscreen, shader, pm4);
}

/**
 * Calculate the appropriate setting of VGT_GS_MODE when \p shader is a
 * geometry shader.
 */
static uint32_t si_vgt_gs_mode(struct si_shader_selector *sel)
{
        unsigned gs_max_vert_out = sel->gs_max_out_vertices;
        unsigned cut_mode;

        if (gs_max_vert_out <= 128) {
                cut_mode = V_028A40_GS_CUT_128;
        } else if (gs_max_vert_out <= 256) {
                cut_mode = V_028A40_GS_CUT_256;
        } else if (gs_max_vert_out <= 512) {
                cut_mode = V_028A40_GS_CUT_512;
        } else {
                assert(gs_max_vert_out <= 1024);
                cut_mode = V_028A40_GS_CUT_1024;
        }

        return S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
               S_028A40_CUT_MODE(cut_mode) |
               S_028A40_ES_WRITE_OPTIMIZE(1) |
               S_028A40_GS_WRITE_OPTIMIZE(1);
}

static void si_shader_gs(struct si_shader *shader)
{
        struct si_shader_selector *sel = shader->selector;
        const ubyte *num_components = sel->info.num_stream_output_components;
        unsigned gs_num_invocations = sel->gs_num_invocations;
        struct si_pm4_state *pm4;
        uint64_t va;
        unsigned max_stream = sel->max_gs_stream;
        unsigned offset;

        pm4 = si_get_shader_pm4_state(shader);
        if (!pm4)
                return;

        si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(shader->selector));

        offset = num_components[0] * sel->gs_max_out_vertices;
        si_pm4_set_reg(pm4, R_028A60_VGT_GSVS_RING_OFFSET_1, offset);
        if (max_stream >= 1)
                offset += num_components[1] * sel->gs_max_out_vertices;
        si_pm4_set_reg(pm4, R_028A64_VGT_GSVS_RING_OFFSET_2, offset);
        if (max_stream >= 2)
                offset += num_components[2] * sel->gs_max_out_vertices;
        si_pm4_set_reg(pm4, R_028A68_VGT_GSVS_RING_OFFSET_3, offset);
        if (max_stream >= 3)
                offset += num_components[3] * sel->gs_max_out_vertices;
        si_pm4_set_reg(pm4, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);

        /* The GSVS_RING_ITEMSIZE register takes 15 bits */
        assert(offset < (1 << 15));

        si_pm4_set_reg(pm4, R_028B38_VGT_GS_MAX_VERT_OUT, shader->selector->gs_max_out_vertices);

        si_pm4_set_reg(pm4, R_028B5C_VGT_GS_VERT_ITEMSIZE, num_components[0]);
        si_pm4_set_reg(pm4, R_028B60_VGT_GS_VERT_ITEMSIZE_1, (max_stream >= 1) ? num_components[1] : 0);
        si_pm4_set_reg(pm4, R_028B64_VGT_GS_VERT_ITEMSIZE_2, (max_stream >= 2) ? num_components[2] : 0);
        si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, (max_stream >= 3) ? num_components[3] : 0);

        si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT,
                       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
                       S_028B90_ENABLE(gs_num_invocations > 0));

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
        si_pm4_set_reg(pm4, R_00B220_SPI_SHADER_PGM_LO_GS, va >> 8);
        si_pm4_set_reg(pm4, R_00B224_SPI_SHADER_PGM_HI_GS, va >> 40);

        si_pm4_set_reg(pm4, R_00B228_SPI_SHADER_PGM_RSRC1_GS,
                       S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) |
                       S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8) |
                       S_00B228_DX10_CLAMP(1) |
                       S_00B228_FLOAT_MODE(shader->config.float_mode));
        si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
                       S_00B22C_USER_SGPR(SI_GS_NUM_USER_SGPR) |
                       S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
}

/**
 * Compute the state for \p shader, which will run as a vertex shader on the
 * hardware.
 *
 * If \p gs is non-NULL, it points to the geometry shader for which this shader
 * is the copy shader.
 */
static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
                         struct si_shader_selector *gs)
{
        struct si_pm4_state *pm4;
        unsigned num_user_sgprs;
        unsigned nparams, vgpr_comp_cnt;
        uint64_t va;
        unsigned oc_lds_en;
        unsigned window_space =
                shader->selector->info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
        bool enable_prim_id = si_vs_exports_prim_id(shader);

        pm4 = si_get_shader_pm4_state(shader);
        if (!pm4)
                return;

        /* We always write VGT_GS_MODE in the VS state, because every switch
         * between different shader pipelines involving a different GS or no
         * GS at all involves a switch of the VS (different GS use different
         * copy shaders). On the other hand, when the API switches from a GS to
         * no GS and then back to the same GS used originally, the GS state is
         * not sent again.
         */
        if (!gs) {
                si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE,
                               S_028A40_MODE(enable_prim_id ? V_028A40_GS_SCENARIO_A : 0));
                si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, enable_prim_id);
        } else {
                si_pm4_set_reg(pm4, R_028A40_VGT_GS_MODE, si_vgt_gs_mode(gs));
                si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
        }

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);

        if (gs) {
                vgpr_comp_cnt = 0; /* only VertexID is needed for GS-COPY. */
                num_user_sgprs = SI_GSCOPY_NUM_USER_SGPR;
        } else if (shader->selector->type == PIPE_SHADER_VERTEX) {
                vgpr_comp_cnt = shader->info.uses_instanceid ? 3 : (enable_prim_id ? 2 : 0);
                num_user_sgprs = SI_VS_NUM_USER_SGPR;
        } else if (shader->selector->type == PIPE_SHADER_TESS_EVAL) {
                vgpr_comp_cnt = 3; /* all components are needed for TES */
                num_user_sgprs = SI_TES_NUM_USER_SGPR;
        } else
                unreachable("invalid shader selector type");

        /* VS is required to export at least one param. */
        nparams = MAX2(shader->info.nr_param_exports, 1);
        si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
                       S_0286C4_VS_EXPORT_COUNT(nparams - 1));

        si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
                       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
                       S_02870C_POS1_EXPORT_FORMAT(shader->info.nr_pos_exports > 1 ?
                                                   V_02870C_SPI_SHADER_4COMP :
                                                   V_02870C_SPI_SHADER_NONE) |
                       S_02870C_POS2_EXPORT_FORMAT(shader->info.nr_pos_exports > 2 ?
                                                   V_02870C_SPI_SHADER_4COMP :
                                                   V_02870C_SPI_SHADER_NONE) |
                       S_02870C_POS3_EXPORT_FORMAT(shader->info.nr_pos_exports > 3 ?
                                                   V_02870C_SPI_SHADER_4COMP :
                                                   V_02870C_SPI_SHADER_NONE));

        oc_lds_en = shader->selector->type == PIPE_SHADER_TESS_EVAL ? 1 : 0;

        si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
        si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
        si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
                       S_00B128_VGPRS((shader->config.num_vgprs - 1) / 4) |
                       S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8) |
                       S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt) |
                       S_00B128_DX10_CLAMP(1) |
                       S_00B128_FLOAT_MODE(shader->config.float_mode));
        si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
                       S_00B12C_USER_SGPR(num_user_sgprs) |
                       S_00B12C_OC_LDS_EN(oc_lds_en) |
                       S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
                       S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
                       S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
                       S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
                       S_00B12C_SO_EN(!!shader->selector->so.num_outputs) |
                       S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
        if (window_space)
                si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
                               S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1));
        else
                si_pm4_set_reg(pm4, R_028818_PA_CL_VTE_CNTL,
                               S_028818_VTX_W0_FMT(1) |
                               S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
                               S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
                               S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

        if (shader->selector->type == PIPE_SHADER_TESS_EVAL)
                si_set_tesseval_regs(sscreen, shader, pm4);
}

static unsigned si_get_ps_num_interp(struct si_shader *ps)
{
        struct tgsi_shader_info *info = &ps->selector->info;
        unsigned num_colors = !!(info->colors_read & 0x0f) +
                              !!(info->colors_read & 0xf0);
        unsigned num_interp = ps->selector->info.num_inputs +
                              (ps->key.part.ps.prolog.color_two_side ? num_colors : 0);

        assert(num_interp <= 32);
        return MIN2(num_interp, 32);
}

static unsigned si_get_spi_shader_col_format(struct si_shader *shader)
{
        unsigned value = shader->key.part.ps.epilog.spi_shader_col_format;
        unsigned i, num_targets = (util_last_bit(value) + 3) / 4;

        /* If the i-th target format is set, all previous target formats must
         * be non-zero to avoid hangs.
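         *
         * Worked example (assuming the V_028714_* encoding in which
         * SPI_SHADER_ZERO is 0 and SPI_SHADER_32_R is 1): a shader
         * exporting only to MRT1 with FP16_ABGR has value 0x40 and
         * num_targets == 2, so the loop below patches MRT0 from ZERO
         * to SPI_SHADER_32_R, yielding 0x41.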
         */
        for (i = 0; i < num_targets; i++)
                if (!(value & (0xf << (i * 4))))
                        value |= V_028714_SPI_SHADER_32_R << (i * 4);

        return value;
}

static unsigned si_get_cb_shader_mask(unsigned spi_shader_col_format)
{
        unsigned i, cb_shader_mask = 0;

        for (i = 0; i < 8; i++) {
                switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
                case V_028714_SPI_SHADER_ZERO:
                        break;
                case V_028714_SPI_SHADER_32_R:
                        cb_shader_mask |= 0x1 << (i * 4);
                        break;
                case V_028714_SPI_SHADER_32_GR:
                        cb_shader_mask |= 0x3 << (i * 4);
                        break;
                case V_028714_SPI_SHADER_32_AR:
                        cb_shader_mask |= 0x9 << (i * 4);
                        break;
                case V_028714_SPI_SHADER_FP16_ABGR:
                case V_028714_SPI_SHADER_UNORM16_ABGR:
                case V_028714_SPI_SHADER_SNORM16_ABGR:
                case V_028714_SPI_SHADER_UINT16_ABGR:
                case V_028714_SPI_SHADER_SINT16_ABGR:
                case V_028714_SPI_SHADER_32_ABGR:
                        cb_shader_mask |= 0xf << (i * 4);
                        break;
                default:
                        assert(0);
                }
        }
        return cb_shader_mask;
}

static void si_shader_ps(struct si_shader *shader)
{
        struct tgsi_shader_info *info = &shader->selector->info;
        struct si_pm4_state *pm4;
        unsigned spi_ps_in_control, spi_shader_col_format, cb_shader_mask;
        unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
        uint64_t va;
        unsigned input_ena = shader->config.spi_ps_input_ena;

        /* we need to enable at least one of them, otherwise we hang the GPU */
        assert(G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
               G_0286CC_PERSP_CENTER_ENA(input_ena) ||
               G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
               G_0286CC_PERSP_PULL_MODEL_ENA(input_ena) ||
               G_0286CC_LINEAR_SAMPLE_ENA(input_ena) ||
               G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
               G_0286CC_LINEAR_CENTROID_ENA(input_ena) ||
               G_0286CC_LINE_STIPPLE_TEX_ENA(input_ena));
        /* POS_W_FLOAT_ENA requires one of the perspective weights. */
        assert(!G_0286CC_POS_W_FLOAT_ENA(input_ena) ||
               G_0286CC_PERSP_SAMPLE_ENA(input_ena) ||
               G_0286CC_PERSP_CENTER_ENA(input_ena) ||
               G_0286CC_PERSP_CENTROID_ENA(input_ena) ||
               G_0286CC_PERSP_PULL_MODEL_ENA(input_ena));

        /* Validate interpolation optimization flags (read as implications). */
        assert(!shader->key.part.ps.prolog.bc_optimize_for_persp ||
               (G_0286CC_PERSP_CENTER_ENA(input_ena) &&
                G_0286CC_PERSP_CENTROID_ENA(input_ena)));
        assert(!shader->key.part.ps.prolog.bc_optimize_for_linear ||
               (G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
                G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
        assert(!shader->key.part.ps.prolog.force_persp_center_interp ||
               (!G_0286CC_PERSP_SAMPLE_ENA(input_ena) &&
                !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
        assert(!shader->key.part.ps.prolog.force_linear_center_interp ||
               (!G_0286CC_LINEAR_SAMPLE_ENA(input_ena) &&
                !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));
        assert(!shader->key.part.ps.prolog.force_persp_sample_interp ||
               (!G_0286CC_PERSP_CENTER_ENA(input_ena) &&
                !G_0286CC_PERSP_CENTROID_ENA(input_ena)));
        assert(!shader->key.part.ps.prolog.force_linear_sample_interp ||
               (!G_0286CC_LINEAR_CENTER_ENA(input_ena) &&
                !G_0286CC_LINEAR_CENTROID_ENA(input_ena)));

        /* Validate cases when the optimizations are off (read as implications).
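         *
         * E.g. the first assert below reads: if both PERSP_CENTER and
         * PERSP_CENTROID are enabled, then the key builder
         * (si_shader_selector_key below) must have set
         * bc_optimize_for_persp.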
         */
        assert(shader->key.part.ps.prolog.bc_optimize_for_persp ||
               !G_0286CC_PERSP_CENTER_ENA(input_ena) ||
               !G_0286CC_PERSP_CENTROID_ENA(input_ena));
        assert(shader->key.part.ps.prolog.bc_optimize_for_linear ||
               !G_0286CC_LINEAR_CENTER_ENA(input_ena) ||
               !G_0286CC_LINEAR_CENTROID_ENA(input_ena));

        pm4 = si_get_shader_pm4_state(shader);
        if (!pm4)
                return;

        /* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
         * Possible values:
         * 0 -> Position = pixel center
         * 1 -> Position = pixel centroid
         * 2 -> Position = at sample position
         *
         * From GLSL 4.5 specification, section 7.1:
         * "The variable gl_FragCoord is available as an input variable from
         * within fragment shaders and it holds the window relative coordinates
         * (x, y, z, 1/w) values for the fragment. If multi-sampling, this
         * value can be for any location within the pixel, or one of the
         * fragment samples. The use of centroid does not further restrict
         * this value to be inside the current primitive."
         *
         * Meaning that centroid has no effect and we can return anything within
         * the pixel. Thus, return the value at sample position, because that's
         * the most accurate one shaders can get.
         */
        spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);

        if (info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] ==
            TGSI_FS_COORD_PIXEL_CENTER_INTEGER)
                spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);

        spi_shader_col_format = si_get_spi_shader_col_format(shader);
        cb_shader_mask = si_get_cb_shader_mask(spi_shader_col_format);

        /* Ensure that some export memory is always allocated, for two reasons:
         *
         * 1) Correctness: The hardware ignores the EXEC mask if no export
         *    memory is allocated, so KILL and alpha test do not work correctly
         *    without this.
         * 2) Performance: Every shader needs at least a NULL export, even when
         *    it writes no color/depth output. The NULL export instruction
         *    stalls without this setting.
         *
         * Don't add this to CB_SHADER_MASK.
         */
        if (!spi_shader_col_format &&
            !info->writes_z && !info->writes_stencil && !info->writes_samplemask)
                spi_shader_col_format = V_028714_SPI_SHADER_32_R;

        si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, input_ena);
        si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR,
                       shader->config.spi_ps_input_addr);

        /* Set interpolation controls. */
        spi_ps_in_control = S_0286D8_NUM_INTERP(si_get_ps_num_interp(shader));

        /* Set registers. */
        si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
        si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);

        si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT,
                       si_get_spi_shader_z_format(info->writes_z,
                                                  info->writes_stencil,
                                                  info->writes_samplemask));

        si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT, spi_shader_col_format);
        si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, cb_shader_mask);

        va = shader->bo->gpu_address;
        si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
        si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
        si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);

        si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
                       S_00B028_VGPRS((shader->config.num_vgprs - 1) / 4) |
                       S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8) |
                       S_00B028_DX10_CLAMP(1) |
                       S_00B028_FLOAT_MODE(shader->config.float_mode));
        si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
                       S_00B02C_EXTRA_LDS_SIZE(shader->config.lds_size) |
                       S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) |
                       S_00B02C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
}

static void si_shader_init_pm4_state(struct si_screen *sscreen,
                                     struct si_shader *shader)
{
        switch (shader->selector->type) {
        case PIPE_SHADER_VERTEX:
                if (shader->key.as_ls)
                        si_shader_ls(shader);
                else if (shader->key.as_es)
                        si_shader_es(sscreen, shader);
                else
                        si_shader_vs(sscreen, shader, NULL);
                break;
        case PIPE_SHADER_TESS_CTRL:
                si_shader_hs(shader);
                break;
        case PIPE_SHADER_TESS_EVAL:
                if (shader->key.as_es)
                        si_shader_es(sscreen, shader);
                else
                        si_shader_vs(sscreen, shader, NULL);
                break;
        case PIPE_SHADER_GEOMETRY:
                si_shader_gs(shader);
                break;
        case PIPE_SHADER_FRAGMENT:
                si_shader_ps(shader);
                break;
        default:
                assert(0);
        }
}

static unsigned si_get_alpha_test_func(struct si_context *sctx)
{
        /* Alpha-test should be disabled if colorbuffer 0 is integer. */
        if (sctx->queued.named.dsa)
                return sctx->queued.named.dsa->alpha_func;

        return PIPE_FUNC_ALWAYS;
}

static void si_shader_selector_key_hw_vs(struct si_context *sctx,
                                         struct si_shader_selector *vs,
                                         struct si_shader_key *key)
{
        struct si_shader_selector *ps = sctx->ps_shader.cso;

        key->opt.hw_vs.clip_disable =
                sctx->queued.named.rasterizer->clip_plane_enable == 0 &&
                (vs->info.clipdist_writemask ||
                 vs->info.writes_clipvertex) &&
                !vs->info.culldist_writemask;

        /* Find out if PS is disabled. */
        bool ps_disabled = true;
        if (ps) {
                bool ps_modifies_zs = ps->info.uses_kill ||
                                      ps->info.writes_z ||
                                      ps->info.writes_stencil ||
                                      ps->info.writes_samplemask ||
                                      si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS;

                unsigned ps_colormask = sctx->framebuffer.colorbuf_enabled_4bit &
                                        sctx->queued.named.blend->cb_target_mask;
                if (!ps->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
                        ps_colormask &= ps->colors_written_4bit;

                ps_disabled = sctx->queued.named.rasterizer->rasterizer_discard ||
                              (!ps_colormask &&
                               !ps_modifies_zs &&
                               !ps->info.writes_memory);
        }

        /* Find out which VS outputs aren't used by the PS.
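         *
         * Both sides use the unique slot numbering from
         * si_shader_io_get_unique_index(): any bit the VS writes that the
         * PS never reads ends up in key->opt.hw_vs.kill_outputs*, so the
         * optimized variant can drop those exports entirely.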
         */
        uint64_t outputs_written = vs->outputs_written;
        uint32_t outputs_written2 = vs->outputs_written2;
        uint64_t inputs_read = 0;
        uint32_t inputs_read2 = 0;

        outputs_written &= ~0x3; /* ignore POSITION, PSIZE */

        if (!ps_disabled) {
                inputs_read = ps->inputs_read;
                inputs_read2 = ps->inputs_read2;
        }

        uint64_t linked = outputs_written & inputs_read;
        uint32_t linked2 = outputs_written2 & inputs_read2;

        key->opt.hw_vs.kill_outputs = ~linked & outputs_written;
        key->opt.hw_vs.kill_outputs2 = ~linked2 & outputs_written2;
}

/* Compute the key for the hw shader variant */
static inline void si_shader_selector_key(struct pipe_context *ctx,
                                          struct si_shader_selector *sel,
                                          struct si_shader_key *key)
{
        struct si_context *sctx = (struct si_context *)ctx;
        unsigned i;

        memset(key, 0, sizeof(*key));

        switch (sel->type) {
        case PIPE_SHADER_VERTEX:
                if (sctx->vertex_elements) {
                        unsigned count = MIN2(sel->info.num_inputs,
                                              sctx->vertex_elements->count);
                        for (i = 0; i < count; ++i)
                                key->part.vs.prolog.instance_divisors[i] =
                                        sctx->vertex_elements->elements[i].instance_divisor;

                        key->mono.vs.fix_fetch =
                                sctx->vertex_elements->fix_fetch &
                                u_bit_consecutive64(0, 4 * count);
                }
                if (sctx->tes_shader.cso)
                        key->as_ls = 1;
                else if (sctx->gs_shader.cso)
                        key->as_es = 1;
                else {
                        si_shader_selector_key_hw_vs(sctx, sel, key);

                        if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
                                key->part.vs.epilog.export_prim_id = 1;
                }
                break;
        case PIPE_SHADER_TESS_CTRL:
                key->part.tcs.epilog.prim_mode =
                        sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];

                if (sel == sctx->fixed_func_tcs_shader.cso)
                        key->mono.tcs.inputs_to_copy = sctx->vs_shader.cso->outputs_written;
                break;
        case PIPE_SHADER_TESS_EVAL:
                if (sctx->gs_shader.cso)
                        key->as_es = 1;
                else {
                        si_shader_selector_key_hw_vs(sctx, sel, key);

                        if (sctx->ps_shader.cso && sctx->ps_shader.cso->info.uses_primid)
                                key->part.tes.epilog.export_prim_id = 1;
                }
                break;
        case PIPE_SHADER_GEOMETRY:
                key->part.gs.prolog.tri_strip_adj_fix = sctx->gs_tri_strip_adj_fix;
                break;
        case PIPE_SHADER_FRAGMENT: {
                struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
                struct si_state_blend *blend = sctx->queued.named.blend;

                if (sel->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
                    sel->info.colors_written == 0x1)
                        key->part.ps.epilog.last_cbuf = MAX2(sctx->framebuffer.state.nr_cbufs, 1) - 1;

                if (blend) {
                        /* Select the shader color format based on whether
                         * blending or alpha are needed.
                         */
                        key->part.ps.epilog.spi_shader_col_format =
                                (blend->blend_enable_4bit & blend->need_src_alpha_4bit &
                                 sctx->framebuffer.spi_shader_col_format_blend_alpha) |
                                (blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
                                 sctx->framebuffer.spi_shader_col_format_blend) |
                                (~blend->blend_enable_4bit & blend->need_src_alpha_4bit &
                                 sctx->framebuffer.spi_shader_col_format_alpha) |
                                (~blend->blend_enable_4bit & ~blend->need_src_alpha_4bit &
                                 sctx->framebuffer.spi_shader_col_format);

                        /* The output for dual source blending should have
                         * the same format as the first output.
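                         *
                         * E.g. if MRT0's nibble is FP16_ABGR (value 4 in the
                         * V_028714_* encoding assumed here), the line below
                         * turns 0x0004 into 0x0044, duplicating MRT0's
                         * format into MRT1.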
                         */
                        if (blend->dual_src_blend)
                                key->part.ps.epilog.spi_shader_col_format |=
                                        (key->part.ps.epilog.spi_shader_col_format & 0xf) << 4;
                } else
                        key->part.ps.epilog.spi_shader_col_format = sctx->framebuffer.spi_shader_col_format;

                /* If alpha-to-coverage is enabled, we have to export alpha
                 * even if there is no color buffer.
                 */
                if (!(key->part.ps.epilog.spi_shader_col_format & 0xf) &&
                    blend && blend->alpha_to_coverage)
                        key->part.ps.epilog.spi_shader_col_format |= V_028710_SPI_SHADER_32_AR;

                /* On SI and CIK except Hawaii, the CB doesn't clamp outputs
                 * to the range supported by the type if a channel has less
                 * than 16 bits and the export format is 16_ABGR.
                 */
                if (sctx->b.chip_class <= CIK && sctx->b.family != CHIP_HAWAII) {
                        key->part.ps.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
                        key->part.ps.epilog.color_is_int10 = sctx->framebuffer.color_is_int10;
                }

                /* Disable unwritten outputs (if WRITE_ALL_CBUFS isn't enabled). */
                if (!key->part.ps.epilog.last_cbuf) {
                        key->part.ps.epilog.spi_shader_col_format &= sel->colors_written_4bit;
                        key->part.ps.epilog.color_is_int8 &= sel->info.colors_written;
                        key->part.ps.epilog.color_is_int10 &= sel->info.colors_written;
                }

                if (rs) {
                        bool is_poly = (sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES &&
                                        sctx->current_rast_prim <= PIPE_PRIM_POLYGON) ||
                                       sctx->current_rast_prim >= PIPE_PRIM_TRIANGLES_ADJACENCY;
                        bool is_line = !is_poly && sctx->current_rast_prim != PIPE_PRIM_POINTS;

                        key->part.ps.prolog.color_two_side = rs->two_side && sel->info.colors_read;
                        key->part.ps.prolog.flatshade_colors = rs->flatshade && sel->info.colors_read;

                        if (sctx->queued.named.blend) {
                                key->part.ps.epilog.alpha_to_one = sctx->queued.named.blend->alpha_to_one &&
                                                                   rs->multisample_enable;
                        }

                        key->part.ps.prolog.poly_stipple = rs->poly_stipple_enable && is_poly;
                        key->part.ps.epilog.poly_line_smoothing = ((is_poly && rs->poly_smooth) ||
                                                                   (is_line && rs->line_smooth)) &&
                                                                  sctx->framebuffer.nr_samples <= 1;
                        key->part.ps.epilog.clamp_color = rs->clamp_fragment_color;

                        if (rs->force_persample_interp &&
                            rs->multisample_enable &&
                            sctx->framebuffer.nr_samples > 1 &&
                            sctx->ps_iter_samples > 1) {
                                key->part.ps.prolog.force_persp_sample_interp =
                                        sel->info.uses_persp_center ||
                                        sel->info.uses_persp_centroid;

                                key->part.ps.prolog.force_linear_sample_interp =
                                        sel->info.uses_linear_center ||
                                        sel->info.uses_linear_centroid;
                        } else if (rs->multisample_enable &&
                                   sctx->framebuffer.nr_samples > 1) {
                                key->part.ps.prolog.bc_optimize_for_persp =
                                        sel->info.uses_persp_center &&
                                        sel->info.uses_persp_centroid;
                                key->part.ps.prolog.bc_optimize_for_linear =
                                        sel->info.uses_linear_center &&
                                        sel->info.uses_linear_centroid;
                        } else {
                                /* Make sure SPI doesn't compute more than 1 pair
                                 * of (i,j), which is the optimization here.
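                                 *
                                 * The sums below count how many distinct
                                 * barycentric (i,j) pairs the shader uses;
                                 * forcing center interpolation only pays off
                                 * when it collapses two or more pairs into one.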
                                 */
                                key->part.ps.prolog.force_persp_center_interp =
                                        sel->info.uses_persp_center +
                                        sel->info.uses_persp_centroid +
                                        sel->info.uses_persp_sample > 1;

                                key->part.ps.prolog.force_linear_center_interp =
                                        sel->info.uses_linear_center +
                                        sel->info.uses_linear_centroid +
                                        sel->info.uses_linear_sample > 1;
                        }
                }

                key->part.ps.epilog.alpha_func = si_get_alpha_test_func(sctx);
                break;
        }
        default:
                assert(0);
        }
}

static void si_build_shader_variant(void *job, int thread_index)
{
        struct si_shader *shader = (struct si_shader *)job;
        struct si_shader_selector *sel = shader->selector;
        struct si_screen *sscreen = sel->screen;
        LLVMTargetMachineRef tm;
        struct pipe_debug_callback *debug = &shader->compiler_ctx_state.debug;
        int r;

        if (thread_index >= 0) {
                assert(thread_index < ARRAY_SIZE(sscreen->tm));
                tm = sscreen->tm[thread_index];
                if (!debug->async)
                        debug = NULL;
        } else {
                tm = shader->compiler_ctx_state.tm;
        }

        r = si_shader_create(sscreen, tm, shader, debug);
        if (unlikely(r)) {
                R600_ERR("Failed to build shader variant (type=%u) %d\n",
                         sel->type, r);
                shader->compilation_failed = true;
                return;
        }

        if (shader->compiler_ctx_state.is_debug_context) {
                FILE *f = open_memstream(&shader->shader_log,
                                         &shader->shader_log_size);
                if (f) {
                        si_shader_dump(sscreen, shader, NULL, sel->type, f, false);
                        fclose(f);
                }
        }

        si_shader_init_pm4_state(sscreen, shader);
}

/* Select the hw shader variant depending on the current state. */
static int si_shader_select_with_key(struct si_screen *sscreen,
                                     struct si_shader_ctx_state *state,
                                     struct si_compiler_ctx_state *compiler_state,
                                     struct si_shader_key *key,
                                     int thread_index)
{
        static const struct si_shader_key zeroed;
        struct si_shader_selector *sel = state->cso;
        struct si_shader *current = state->current;
        struct si_shader *iter, *shader = NULL;

        if (unlikely(sscreen->b.debug_flags & DBG_NO_OPT_VARIANT)) {
                memset(&key->opt, 0, sizeof(key->opt));
        }

again:
        /* Check if we don't need to change anything.
         * This path is also used for most shaders that don't need multiple
         * variants; it costs just a computation of the key and this test. */
        if (likely(current &&
                   memcmp(&current->key, key, sizeof(*key)) == 0 &&
                   (!current->is_optimized ||
                    util_queue_fence_is_signalled(&current->optimized_ready))))
                return current->compilation_failed ? -1 : 0;

        /* This must be done before the mutex is locked, because async GS
         * compilation calls this function too, and therefore must enter
         * the mutex first.
         *
         * Only wait if we are in a draw call. Don't wait if we are
         * in a compiler thread.
         */
        if (thread_index < 0)
                util_queue_job_wait(&sel->ready);

        pipe_mutex_lock(sel->mutex);

        /* Find the shader variant. */
        for (iter = sel->first_variant; iter; iter = iter->next_variant) {
                /* Don't check the "current" shader. We checked it above. */
                if (current != iter &&
                    memcmp(&iter->key, key, sizeof(*key)) == 0) {
                        /* If it's an optimized shader and its compilation has
                         * been started but isn't done, use the unoptimized
                         * shader so as not to cause a stall due to compilation.
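                         *
                         * Clearing key->opt and jumping back to "again"
                         * redoes the lookup with the unoptimized key; the
                         * optimized variant is picked up on a later draw
                         * once its fence signals.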
                         */
                        if (iter->is_optimized &&
                            !util_queue_fence_is_signalled(&iter->optimized_ready)) {
                                memset(&key->opt, 0, sizeof(key->opt));
                                pipe_mutex_unlock(sel->mutex);
                                goto again;
                        }

                        if (iter->compilation_failed) {
                                pipe_mutex_unlock(sel->mutex);
                                return -1; /* skip the draw call */
                        }

                        state->current = iter;
                        pipe_mutex_unlock(sel->mutex);
                        return 0;
                }
        }

        /* Build a new shader. */
        shader = CALLOC_STRUCT(si_shader);
        if (!shader) {
                pipe_mutex_unlock(sel->mutex);
                return -ENOMEM;
        }
        shader->selector = sel;
        shader->key = *key;
        shader->compiler_ctx_state = *compiler_state;

        /* Monolithic-only shaders don't make a distinction between optimized
         * and unoptimized. */
        shader->is_monolithic =
                !sel->main_shader_part ||
                sel->main_shader_part->key.as_ls != key->as_ls ||
                sel->main_shader_part->key.as_es != key->as_es ||
                memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0 ||
                memcmp(&key->mono, &zeroed.mono, sizeof(key->mono)) != 0;

        shader->is_optimized =
                !sscreen->use_monolithic_shaders &&
                memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
        if (shader->is_optimized)
                util_queue_fence_init(&shader->optimized_ready);

        if (!sel->last_variant) {
                sel->first_variant = shader;
                sel->last_variant = shader;
        } else {
                sel->last_variant->next_variant = shader;
                sel->last_variant = shader;
        }

        /* If it's an optimized shader, compile it asynchronously. */
        if (shader->is_optimized &&
            thread_index < 0) {
                /* Compile it asynchronously. */
                util_queue_add_job(&sscreen->shader_compiler_queue,
                                   shader, &shader->optimized_ready,
                                   si_build_shader_variant, NULL);

                /* Use the default (unoptimized) shader for now. */
                memset(&key->opt, 0, sizeof(key->opt));
                pipe_mutex_unlock(sel->mutex);
                goto again;
        }

        assert(!shader->is_optimized);
        si_build_shader_variant(shader, thread_index);

        if (!shader->compilation_failed)
                state->current = shader;

        pipe_mutex_unlock(sel->mutex);
        return shader->compilation_failed ? -1 : 0;
}

static int si_shader_select(struct pipe_context *ctx,
                            struct si_shader_ctx_state *state,
                            struct si_compiler_ctx_state *compiler_state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_key key;

        si_shader_selector_key(ctx, state->cso, &key);
        return si_shader_select_with_key(sctx->screen, state, compiler_state,
                                         &key, -1);
}

static void si_parse_next_shader_property(const struct tgsi_shader_info *info,
                                          struct si_shader_key *key)
{
        unsigned next_shader = info->properties[TGSI_PROPERTY_NEXT_SHADER];

        switch (info->processor) {
        case PIPE_SHADER_VERTEX:
                switch (next_shader) {
                case PIPE_SHADER_GEOMETRY:
                        key->as_es = 1;
                        break;
                case PIPE_SHADER_TESS_CTRL:
                case PIPE_SHADER_TESS_EVAL:
                        key->as_ls = 1;
                        break;
                default:
                        /* If POSITION isn't written, it can't be a HW VS.
                         * Assume that it's a HW LS. (the next shader is TCS)
                         * This heuristic is needed for separate shader objects.
                         */
                        if (!info->writes_position)
                                key->as_ls = 1;
                }
                break;

        case PIPE_SHADER_TESS_EVAL:
                if (next_shader == PIPE_SHADER_GEOMETRY ||
                    !info->writes_position)
                        key->as_es = 1;
                break;
        }
}

/**
 * Compile the main shader part or the monolithic shader as part of
 * si_shader_selector initialization. Since it can be done asynchronously,
 * there is no way to report compile failures to applications.
 */
void si_init_shader_selector_async(void *job, int thread_index)
{
        struct si_shader_selector *sel = (struct si_shader_selector *)job;
        struct si_screen *sscreen = sel->screen;
        LLVMTargetMachineRef tm;
        struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
        unsigned i;

        if (thread_index >= 0) {
                assert(thread_index < ARRAY_SIZE(sscreen->tm));
                tm = sscreen->tm[thread_index];
                if (!debug->async)
                        debug = NULL;
        } else {
                tm = sel->compiler_ctx_state.tm;
        }

        /* Compile the main shader part for use with a prolog and/or epilog.
         * If this fails, the driver will try to compile a monolithic shader
         * on demand.
         */
        if (!sscreen->use_monolithic_shaders) {
                struct si_shader *shader = CALLOC_STRUCT(si_shader);
                void *tgsi_binary;

                if (!shader) {
                        fprintf(stderr, "radeonsi: can't allocate a main shader part\n");
                        return;
                }

                shader->selector = sel;
                si_parse_next_shader_property(&sel->info, &shader->key);

                tgsi_binary = si_get_tgsi_binary(sel);

                /* Try to load the shader from the shader cache. */
                pipe_mutex_lock(sscreen->shader_cache_mutex);

                if (tgsi_binary &&
                    si_shader_cache_load_shader(sscreen, tgsi_binary, shader)) {
                        FREE(tgsi_binary);
                        pipe_mutex_unlock(sscreen->shader_cache_mutex);
                } else {
                        pipe_mutex_unlock(sscreen->shader_cache_mutex);

                        /* Compile the shader if it hasn't been loaded from the cache. */
                        if (si_compile_tgsi_shader(sscreen, tm, shader, false,
                                                   debug) != 0) {
                                FREE(shader);
                                FREE(tgsi_binary);
                                fprintf(stderr, "radeonsi: can't compile a main shader part\n");
                                return;
                        }

                        if (tgsi_binary) {
                                pipe_mutex_lock(sscreen->shader_cache_mutex);
                                if (!si_shader_cache_insert_shader(sscreen, tgsi_binary, shader))
                                        FREE(tgsi_binary);
                                pipe_mutex_unlock(sscreen->shader_cache_mutex);
                        }
                }

                sel->main_shader_part = shader;

                /* Unset "outputs_written" flags for outputs converted to
                 * DEFAULT_VAL, so that later inter-shader optimizations don't
                 * try to eliminate outputs that don't exist in the final
                 * shader.
                 *
                 * This is only done if non-monolithic shaders are enabled.
                 */
                if ((sel->type == PIPE_SHADER_VERTEX ||
                     sel->type == PIPE_SHADER_TESS_EVAL) &&
                    !shader->key.as_ls &&
                    !shader->key.as_es) {
                        unsigned i;

                        for (i = 0; i < sel->info.num_outputs; i++) {
                                unsigned offset = shader->info.vs_output_param_offset[i];

                                if (offset <= EXP_PARAM_OFFSET_31)
                                        continue;

                                unsigned name = sel->info.output_semantic_name[i];
                                unsigned index = sel->info.output_semantic_index[i];
                                unsigned id;

                                switch (name) {
                                case TGSI_SEMANTIC_GENERIC:
                                        /* don't process indices the function can't handle */
                                        if (index >= 60)
                                                break;
                                        /* fall through */
                                case TGSI_SEMANTIC_CLIPDIST:
                                        id = si_shader_io_get_unique_index(name, index);
                                        sel->outputs_written &= ~(1ull << id);
                                        break;
                                case TGSI_SEMANTIC_POSITION: /* ignore these */
                                case TGSI_SEMANTIC_PSIZE:
                                case TGSI_SEMANTIC_CLIPVERTEX:
                                case TGSI_SEMANTIC_EDGEFLAG:
                                        break;
                                default:
                                        id = si_shader_io_get_unique_index2(name, index);
                                        sel->outputs_written2 &= ~(1u << id);
                                }
                        }
                }
        }

        /* Pre-compilation. */
        if (sscreen->b.debug_flags & DBG_PRECOMPILE) {
                struct si_shader_ctx_state state = {sel};
                struct si_shader_key key;

                memset(&key, 0, sizeof(key));
                si_parse_next_shader_property(&sel->info, &key);

                /* Set reasonable defaults, so that the shader key doesn't
                 * cause any code to be eliminated.
                 */
                switch (sel->type) {
                case PIPE_SHADER_TESS_CTRL:
                        key.part.tcs.epilog.prim_mode = PIPE_PRIM_TRIANGLES;
                        break;
                case PIPE_SHADER_FRAGMENT:
                        key.part.ps.prolog.bc_optimize_for_persp =
                                sel->info.uses_persp_center &&
                                sel->info.uses_persp_centroid;
                        key.part.ps.prolog.bc_optimize_for_linear =
                                sel->info.uses_linear_center &&
                                sel->info.uses_linear_centroid;
                        key.part.ps.epilog.alpha_func = PIPE_FUNC_ALWAYS;
                        for (i = 0; i < 8; i++)
                                if (sel->info.colors_written & (1 << i))
                                        key.part.ps.epilog.spi_shader_col_format |=
                                                V_028710_SPI_SHADER_FP16_ABGR << (i * 4);
                        break;
                }

                if (si_shader_select_with_key(sscreen, &state,
                                              &sel->compiler_ctx_state, &key,
                                              thread_index))
                        fprintf(stderr, "radeonsi: can't create a monolithic shader\n");
        }

        /* The GS copy shader is always pre-compiled.
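         *
         * It runs on the HW VS stage (see the si_shader_vs call with a
         * non-NULL gs argument below) and doesn't depend on the shader
         * key, so a single copy shader per selector is enough.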
         */
        if (sel->type == PIPE_SHADER_GEOMETRY) {
                sel->gs_copy_shader = si_generate_gs_copy_shader(sscreen, tm, sel, debug);
                if (!sel->gs_copy_shader) {
                        fprintf(stderr, "radeonsi: can't create GS copy shader\n");
                        return;
                }

                si_shader_vs(sscreen, sel->gs_copy_shader, sel);
        }
}

static void *si_create_shader_selector(struct pipe_context *ctx,
                                       const struct pipe_shader_state *state)
{
        struct si_screen *sscreen = (struct si_screen *)ctx->screen;
        struct si_context *sctx = (struct si_context*)ctx;
        struct si_shader_selector *sel = CALLOC_STRUCT(si_shader_selector);
        int i;

        if (!sel)
                return NULL;

        sel->screen = sscreen;
        sel->compiler_ctx_state.tm = sctx->tm;
        sel->compiler_ctx_state.debug = sctx->b.debug;
        sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
        sel->tokens = tgsi_dup_tokens(state->tokens);
        if (!sel->tokens) {
                FREE(sel);
                return NULL;
        }

        sel->so = state->stream_output;
        tgsi_scan_shader(state->tokens, &sel->info);
        sel->type = sel->info.processor;
        p_atomic_inc(&sscreen->b.num_shaders_created);

        /* Set which opcode uses which (i,j) pair. */
        if (sel->info.uses_persp_opcode_interp_centroid)
                sel->info.uses_persp_centroid = true;

        if (sel->info.uses_linear_opcode_interp_centroid)
                sel->info.uses_linear_centroid = true;

        if (sel->info.uses_persp_opcode_interp_offset ||
            sel->info.uses_persp_opcode_interp_sample)
                sel->info.uses_persp_center = true;

        if (sel->info.uses_linear_opcode_interp_offset ||
            sel->info.uses_linear_opcode_interp_sample)
                sel->info.uses_linear_center = true;

        switch (sel->type) {
        case PIPE_SHADER_GEOMETRY:
                sel->gs_output_prim =
                        sel->info.properties[TGSI_PROPERTY_GS_OUTPUT_PRIM];
                sel->gs_max_out_vertices =
                        sel->info.properties[TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES];
                sel->gs_num_invocations =
                        sel->info.properties[TGSI_PROPERTY_GS_INVOCATIONS];
                sel->gsvs_vertex_size = sel->info.num_outputs * 16;
                sel->max_gsvs_emit_size = sel->gsvs_vertex_size *
                                          sel->gs_max_out_vertices;

                sel->max_gs_stream = 0;
                for (i = 0; i < sel->so.num_outputs; i++)
                        sel->max_gs_stream = MAX2(sel->max_gs_stream,
                                                  sel->so.output[i].stream);

                sel->gs_input_verts_per_prim =
                        u_vertices_per_prim(sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM]);
                break;

        case PIPE_SHADER_TESS_CTRL:
                /* Always reserve space for these.
                 */
                sel->patch_outputs_written |=
                        (1llu << si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSINNER, 0)) |
                        (1llu << si_shader_io_get_unique_index(TGSI_SEMANTIC_TESSOUTER, 0));
                /* fall through */
        case PIPE_SHADER_VERTEX:
        case PIPE_SHADER_TESS_EVAL:
                for (i = 0; i < sel->info.num_outputs; i++) {
                        unsigned name = sel->info.output_semantic_name[i];
                        unsigned index = sel->info.output_semantic_index[i];

                        switch (name) {
                        case TGSI_SEMANTIC_TESSINNER:
                        case TGSI_SEMANTIC_TESSOUTER:
                        case TGSI_SEMANTIC_PATCH:
                                sel->patch_outputs_written |=
                                        1llu << si_shader_io_get_unique_index(name, index);
                                break;

                        case TGSI_SEMANTIC_GENERIC:
                                /* don't process indices the function can't handle */
                                if (index >= 60)
                                        break;
                                /* fall through */
                        case TGSI_SEMANTIC_POSITION:
                        case TGSI_SEMANTIC_PSIZE:
                        case TGSI_SEMANTIC_CLIPDIST:
                                sel->outputs_written |=
                                        1llu << si_shader_io_get_unique_index(name, index);
                                break;
                        case TGSI_SEMANTIC_CLIPVERTEX: /* ignore these */
                        case TGSI_SEMANTIC_EDGEFLAG:
                                break;
                        default:
                                sel->outputs_written2 |=
                                        1u << si_shader_io_get_unique_index2(name, index);
                        }
                }
                sel->esgs_itemsize = util_last_bit64(sel->outputs_written) * 16;
                break;

        case PIPE_SHADER_FRAGMENT:
                for (i = 0; i < sel->info.num_inputs; i++) {
                        unsigned name = sel->info.input_semantic_name[i];
                        unsigned index = sel->info.input_semantic_index[i];

                        switch (name) {
                        case TGSI_SEMANTIC_CLIPDIST:
                        case TGSI_SEMANTIC_GENERIC:
                                sel->inputs_read |=
                                        1llu << si_shader_io_get_unique_index(name, index);
                                break;
                        case TGSI_SEMANTIC_PCOORD: /* ignore this */
                                break;
                        default:
                                sel->inputs_read2 |=
                                        1u << si_shader_io_get_unique_index2(name, index);
                        }
                }

                for (i = 0; i < 8; i++)
                        if (sel->info.colors_written & (1 << i))
                                sel->colors_written_4bit |= 0xf << (4 * i);

                for (i = 0; i < sel->info.num_inputs; i++) {
                        if (sel->info.input_semantic_name[i] == TGSI_SEMANTIC_COLOR) {
                                int index = sel->info.input_semantic_index[i];
                                sel->color_attr_index[index] = i;
                        }
                }
                break;
        }

        /* DB_SHADER_CONTROL */
        sel->db_shader_control =
                S_02880C_Z_EXPORT_ENABLE(sel->info.writes_z) |
                S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(sel->info.writes_stencil) |
                S_02880C_MASK_EXPORT_ENABLE(sel->info.writes_samplemask) |
                S_02880C_KILL_ENABLE(sel->info.uses_kill);

        switch (sel->info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT]) {
        case TGSI_FS_DEPTH_LAYOUT_GREATER:
                sel->db_shader_control |=
                        S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_GREATER_THAN_Z);
                break;
        case TGSI_FS_DEPTH_LAYOUT_LESS:
                sel->db_shader_control |=
                        S_02880C_CONSERVATIVE_Z_EXPORT(V_02880C_EXPORT_LESS_THAN_Z);
                break;
        }

        /* Z_ORDER, EXEC_ON_HIER_FAIL and EXEC_ON_NOOP should be set as following:
         *
         *   | early Z/S | writes_mem | allow_ReZ? | Z_ORDER           | EXEC_ON_HIER_FAIL | EXEC_ON_NOOP
         * --|-----------|------------|------------|-------------------|-------------------|-------------
         * 1a|   false   |   false    |    true    | EarlyZ_Then_ReZ   |         0         |      0
         * 1b|   false   |   false    |    false   | EarlyZ_Then_LateZ |         0         |      0
         * 2 |   false   |   true     |    n/a     | LateZ             |         1         |      0
         * 3 |   true    |   false    |    n/a     | EarlyZ_Then_LateZ |         0         |      0
         * 4 |   true    |   true     |    n/a     | EarlyZ_Then_LateZ |         0         |      1
         *
         * In cases 3 and 4, HW will force Z_ORDER to EarlyZ regardless of what's set in the register.
         * In case 2, NOOP_CULL is a don't care field. In case 2, 3 and 4, ReZ doesn't make sense.
         *
         * Don't use ReZ without profiling !!!
         *
         * ReZ decreases performance by 15% in DiRT: Showdown on Ultra settings, which has pretty complex
         * shaders.
         */
        if (sel->info.properties[TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL]) {
                /* Cases 3, 4. */
                sel->db_shader_control |= S_02880C_DEPTH_BEFORE_SHADER(1) |
                                          S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
                                          S_02880C_EXEC_ON_NOOP(sel->info.writes_memory);
        } else if (sel->info.writes_memory) {
                /* Case 2. */
                sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z) |
                                          S_02880C_EXEC_ON_HIER_FAIL(1);
        } else {
                /* Case 1. */
                sel->db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z);
        }

        pipe_mutex_init(sel->mutex);
        util_queue_fence_init(&sel->ready);

        if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
            sctx->is_debug ||
            r600_can_dump_shader(&sscreen->b, sel->info.processor) ||
            !util_queue_is_initialized(&sscreen->shader_compiler_queue))
                si_init_shader_selector_async(sel, -1);
        else
                util_queue_add_job(&sscreen->shader_compiler_queue, sel,
                                   &sel->ready, si_init_shader_selector_async,
                                   NULL);

        return sel;
}

static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = state;

        if (sctx->vs_shader.cso == sel)
                return;

        sctx->vs_shader.cso = sel;
        sctx->vs_shader.current = sel ? sel->first_variant : NULL;
        sctx->do_update_shaders = true;
        si_mark_atom_dirty(sctx, &sctx->clip_regs);
        r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}

static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = state;
        bool enable_changed = !!sctx->gs_shader.cso != !!sel;

        if (sctx->gs_shader.cso == sel)
                return;

        sctx->gs_shader.cso = sel;
        sctx->gs_shader.current = sel ? sel->first_variant : NULL;
        sctx->do_update_shaders = true;
        si_mark_atom_dirty(sctx, &sctx->clip_regs);
        sctx->last_rast_prim = -1; /* reset this so that it gets updated */

        if (enable_changed)
                si_shader_change_notify(sctx);
        r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}

static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_shader_selector *sel = state;
        bool enable_changed = !!sctx->tcs_shader.cso != !!sel;

        if (sctx->tcs_shader.cso == sel)
                return;

        sctx->tcs_shader.cso = sel;
static void si_bind_vs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	if (sctx->vs_shader.cso == sel)
		return;

	sctx->vs_shader.cso = sel;
	sctx->vs_shader.current = sel ? sel->first_variant : NULL;
	sctx->do_update_shaders = true;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}

static void si_bind_gs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->gs_shader.cso != !!sel;

	if (sctx->gs_shader.cso == sel)
		return;

	sctx->gs_shader.cso = sel;
	sctx->gs_shader.current = sel ? sel->first_variant : NULL;
	sctx->do_update_shaders = true;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed)
		si_shader_change_notify(sctx);
	r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}

static void si_bind_tcs_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tcs_shader.cso != !!sel;

	if (sctx->tcs_shader.cso == sel)
		return;

	sctx->tcs_shader.cso = sel;
	sctx->tcs_shader.current = sel ? sel->first_variant : NULL;
	sctx->do_update_shaders = true;

	if (enable_changed)
		sctx->last_tcs = NULL; /* invalidate derived tess state */
}

static void si_bind_tes_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;
	bool enable_changed = !!sctx->tes_shader.cso != !!sel;

	if (sctx->tes_shader.cso == sel)
		return;

	sctx->tes_shader.cso = sel;
	sctx->tes_shader.current = sel ? sel->first_variant : NULL;
	sctx->do_update_shaders = true;
	si_mark_atom_dirty(sctx, &sctx->clip_regs);
	sctx->last_rast_prim = -1; /* reset this so that it gets updated */

	if (enable_changed) {
		si_shader_change_notify(sctx);
		sctx->last_tes_sh_base = -1; /* invalidate derived tess state */
	}
	r600_update_vs_writes_viewport_index(&sctx->b, si_get_vs_info(sctx));
}

static void si_bind_ps_shader(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = state;

	/* skip if the supplied shader is the one already in use */
	if (sctx->ps_shader.cso == sel)
		return;

	sctx->ps_shader.cso = sel;
	sctx->ps_shader.current = sel ? sel->first_variant : NULL;
	sctx->do_update_shaders = true;
	si_mark_atom_dirty(sctx, &sctx->cb_render_state);
}

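/* Deleting a shader must also drop every pm4 state object that still
 * references its GPU code.  The switch below mirrors the bind sites: a VS
 * may have been bound as LS, ES or VS, a TES as ES or VS, and a GS copy
 * shader is bound as VS.
 */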
static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
{
	if (shader->is_optimized) {
		util_queue_job_wait(&shader->optimized_ready);
		util_queue_fence_destroy(&shader->optimized_ready);
	}

	if (shader->pm4) {
		switch (shader->selector->type) {
		case PIPE_SHADER_VERTEX:
			if (shader->key.as_ls)
				si_pm4_delete_state(sctx, ls, shader->pm4);
			else if (shader->key.as_es)
				si_pm4_delete_state(sctx, es, shader->pm4);
			else
				si_pm4_delete_state(sctx, vs, shader->pm4);
			break;
		case PIPE_SHADER_TESS_CTRL:
			si_pm4_delete_state(sctx, hs, shader->pm4);
			break;
		case PIPE_SHADER_TESS_EVAL:
			if (shader->key.as_es)
				si_pm4_delete_state(sctx, es, shader->pm4);
			else
				si_pm4_delete_state(sctx, vs, shader->pm4);
			break;
		case PIPE_SHADER_GEOMETRY:
			if (shader->is_gs_copy_shader)
				si_pm4_delete_state(sctx, vs, shader->pm4);
			else
				si_pm4_delete_state(sctx, gs, shader->pm4);
			break;
		case PIPE_SHADER_FRAGMENT:
			si_pm4_delete_state(sctx, ps, shader->pm4);
			break;
		}
	}

	si_shader_destroy(shader);
	free(shader);
}

static void si_delete_shader_selector(struct pipe_context *ctx, void *state)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_shader_selector *sel = (struct si_shader_selector *)state;
	struct si_shader *p = sel->first_variant, *c;
	struct si_shader_ctx_state *current_shader[SI_NUM_SHADERS] = {
		[PIPE_SHADER_VERTEX] = &sctx->vs_shader,
		[PIPE_SHADER_TESS_CTRL] = &sctx->tcs_shader,
		[PIPE_SHADER_TESS_EVAL] = &sctx->tes_shader,
		[PIPE_SHADER_GEOMETRY] = &sctx->gs_shader,
		[PIPE_SHADER_FRAGMENT] = &sctx->ps_shader,
	};

	util_queue_job_wait(&sel->ready);

	if (current_shader[sel->type]->cso == sel) {
		current_shader[sel->type]->cso = NULL;
		current_shader[sel->type]->current = NULL;
	}

	while (p) {
		c = p->next_variant;
		si_delete_shader(sctx, p);
		p = c;
	}

	if (sel->main_shader_part)
		si_delete_shader(sctx, sel->main_shader_part);
	if (sel->gs_copy_shader)
		si_delete_shader(sctx, sel->gs_copy_shader);

	util_queue_fence_destroy(&sel->ready);
	pipe_mutex_destroy(sel->mutex);
	free(sel->tokens);
	free(sel);
}

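/* Example of the mapping computed below (illustrative numbers): if the VS
 * exports a matching GENERIC output at parameter slot 5, the PS input gets
 * SPI_PS_INPUT_CNTL = S_028644_OFFSET(5) (plus flat-shade/sprite bits as
 * applicable) and is loaded from parameter memory; if no VS output matches,
 * the input is replaced by a constant via
 * S_028644_OFFSET(0x20) | S_028644_DEFAULT_VAL(...).
 */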
static unsigned si_get_ps_input_cntl(struct si_context *sctx,
				     struct si_shader *vs, unsigned name,
				     unsigned index, unsigned interpolate)
{
	struct tgsi_shader_info *vsinfo = &vs->selector->info;
	unsigned j, offset, ps_input_cntl = 0;

	if (interpolate == TGSI_INTERPOLATE_CONSTANT ||
	    (interpolate == TGSI_INTERPOLATE_COLOR && sctx->flatshade))
		ps_input_cntl |= S_028644_FLAT_SHADE(1);

	if (name == TGSI_SEMANTIC_PCOORD ||
	    (name == TGSI_SEMANTIC_TEXCOORD &&
	     sctx->sprite_coord_enable & (1 << index))) {
		ps_input_cntl |= S_028644_PT_SPRITE_TEX(1);
	}

	for (j = 0; j < vsinfo->num_outputs; j++) {
		if (name == vsinfo->output_semantic_name[j] &&
		    index == vsinfo->output_semantic_index[j]) {
			offset = vs->info.vs_output_param_offset[j];

			if (offset <= EXP_PARAM_OFFSET_31) {
				/* The input is loaded from parameter memory. */
				ps_input_cntl |= S_028644_OFFSET(offset);
			} else if (!G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
				if (offset == EXP_PARAM_UNDEFINED) {
					/* This can happen with depth-only rendering. */
					offset = 0;
				} else {
					/* The input is a DEFAULT_VAL constant. */
					assert(offset >= EXP_PARAM_DEFAULT_VAL_0000 &&
					       offset <= EXP_PARAM_DEFAULT_VAL_1111);
					offset -= EXP_PARAM_DEFAULT_VAL_0000;
				}

				ps_input_cntl = S_028644_OFFSET(0x20) |
						S_028644_DEFAULT_VAL(offset);
			}
			break;
		}
	}

	if (name == TGSI_SEMANTIC_PRIMID)
		/* PrimID is written after the last output. */
		ps_input_cntl |= S_028644_OFFSET(vs->info.vs_output_param_offset[vsinfo->num_outputs]);
	else if (j == vsinfo->num_outputs && !G_028644_PT_SPRITE_TEX(ps_input_cntl)) {
		/* No corresponding output found, load defaults into input.
		 * Don't set any other bits.
		 * (FLAT_SHADE=1 completely changes behavior) */
		ps_input_cntl = S_028644_OFFSET(0x20);
		/* D3D9 behaviour. GL is undefined. */
		if (name == TGSI_SEMANTIC_COLOR && index == 0)
			ps_input_cntl |= S_028644_DEFAULT_VAL(3);
	}
	return ps_input_cntl;
}

static void si_emit_spi_map(struct si_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader *ps = sctx->ps_shader.current;
	struct si_shader *vs = si_get_vs_state(sctx);
	struct tgsi_shader_info *psinfo = ps ? &ps->selector->info : NULL;
	unsigned i, num_interp, num_written = 0, bcol_interp[2];

	if (!ps || !ps->selector->info.num_inputs)
		return;

	num_interp = si_get_ps_num_interp(ps);
	assert(num_interp > 0);
	radeon_set_context_reg_seq(cs, R_028644_SPI_PS_INPUT_CNTL_0, num_interp);

	for (i = 0; i < psinfo->num_inputs; i++) {
		unsigned name = psinfo->input_semantic_name[i];
		unsigned index = psinfo->input_semantic_index[i];
		unsigned interpolate = psinfo->input_interpolate[i];

		radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, name, index,
						     interpolate));
		num_written++;

		if (name == TGSI_SEMANTIC_COLOR) {
			assert(index < ARRAY_SIZE(bcol_interp));
			bcol_interp[index] = interpolate;
		}
	}

	if (ps->key.part.ps.prolog.color_two_side) {
		unsigned bcol = TGSI_SEMANTIC_BCOLOR;

		for (i = 0; i < 2; i++) {
			if (!(psinfo->colors_read & (0xf << (i * 4))))
				continue;

			radeon_emit(cs, si_get_ps_input_cntl(sctx, vs, bcol,
							     i, bcol_interp[i]));
			num_written++;
		}
	}
	assert(num_interp == num_written);
}

/**
 * Writing CONFIG or UCONFIG VGT registers requires VGT_FLUSH before that.
 */
static void si_init_config_add_vgt_flush(struct si_context *sctx)
{
	if (sctx->init_config_has_vgt_flush)
		return;

	/* Done by Vulkan before VGT_FLUSH. */
	si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
	si_pm4_cmd_add(sctx->init_config,
		       EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	si_pm4_cmd_end(sctx->init_config, false);

	/* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
	si_pm4_cmd_begin(sctx->init_config, PKT3_EVENT_WRITE);
	si_pm4_cmd_add(sctx->init_config, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	si_pm4_cmd_end(sctx->init_config, false);
	sctx->init_config_has_vgt_flush = true;
}

/* Initialize state related to ESGS / GSVS ring buffers */
static bool si_update_gs_ring_buffers(struct si_context *sctx)
{
	struct si_shader_selector *es =
		sctx->tes_shader.cso ? sctx->tes_shader.cso : sctx->vs_shader.cso;
	struct si_shader_selector *gs = sctx->gs_shader.cso;
	struct si_pm4_state *pm4;

	/* Chip constants. */
	unsigned num_se = sctx->screen->b.info.max_se;
	unsigned wave_size = 64;
	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
	unsigned gs_vertex_reuse = 16 * num_se; /* GS_VERTEX_REUSE register (per SE) */
	unsigned alignment = 256 * num_se;
	/* The maximum size is 63.999 MB per SE. */
	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;

	/* Calculate the minimum size. */
	unsigned min_esgs_ring_size = align(es->esgs_itemsize * gs_vertex_reuse *
					    wave_size, alignment);

	/* These are recommended sizes, not minimum sizes. */
	unsigned esgs_ring_size = max_gs_waves * 2 * wave_size *
				  es->esgs_itemsize * gs->gs_input_verts_per_prim;
	unsigned gsvs_ring_size = max_gs_waves * 2 * wave_size *
				  gs->max_gsvs_emit_size;

	min_esgs_ring_size = align(min_esgs_ring_size, alignment);
	esgs_ring_size = align(esgs_ring_size, alignment);
	gsvs_ring_size = align(gsvs_ring_size, alignment);

	esgs_ring_size = CLAMP(esgs_ring_size, min_esgs_ring_size, max_size);
	gsvs_ring_size = MIN2(gsvs_ring_size, max_size);

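	/* Illustrative numbers, assuming a 4-SE chip: max_gs_waves = 128,
	 * wave_size = 64, esgs_itemsize = 16 and 3 input verts/prim give
	 * esgs_ring_size = 128 * 2 * 64 * 16 * 3 = 768 KB before alignment,
	 * which is then aligned to 256 bytes per SE and clamped between the
	 * minimum size and the ~64 MB per SE maximum above.
	 */
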
	/* Some rings don't have to be allocated if shaders don't use them.
	 * (e.g. no varyings between ES and GS or GS and VS)
	 */
	bool update_esgs = esgs_ring_size &&
			   (!sctx->esgs_ring ||
			    sctx->esgs_ring->width0 < esgs_ring_size);
	bool update_gsvs = gsvs_ring_size &&
			   (!sctx->gsvs_ring ||
			    sctx->gsvs_ring->width0 < gsvs_ring_size);

	if (!update_esgs && !update_gsvs)
		return true;

	if (update_esgs) {
		pipe_resource_reference(&sctx->esgs_ring, NULL);
		sctx->esgs_ring = pipe_buffer_create(sctx->b.b.screen, 0,
						     PIPE_USAGE_DEFAULT,
						     esgs_ring_size);
		if (!sctx->esgs_ring)
			return false;
	}

	if (update_gsvs) {
		pipe_resource_reference(&sctx->gsvs_ring, NULL);
		sctx->gsvs_ring = pipe_buffer_create(sctx->b.b.screen, 0,
						     PIPE_USAGE_DEFAULT,
						     gsvs_ring_size);
		if (!sctx->gsvs_ring)
			return false;
	}

	/* Create the "init_config_gs_rings" state. */
	pm4 = CALLOC_STRUCT(si_pm4_state);
	if (!pm4)
		return false;

	if (sctx->b.chip_class >= CIK) {
		if (sctx->esgs_ring)
			si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE,
				       sctx->esgs_ring->width0 / 256);
		if (sctx->gsvs_ring)
			si_pm4_set_reg(pm4, R_030904_VGT_GSVS_RING_SIZE,
				       sctx->gsvs_ring->width0 / 256);
	} else {
		if (sctx->esgs_ring)
			si_pm4_set_reg(pm4, R_0088C8_VGT_ESGS_RING_SIZE,
				       sctx->esgs_ring->width0 / 256);
		if (sctx->gsvs_ring)
			si_pm4_set_reg(pm4, R_0088CC_VGT_GSVS_RING_SIZE,
				       sctx->gsvs_ring->width0 / 256);
	}

	/* Set the state. */
	if (sctx->init_config_gs_rings)
		si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
	sctx->init_config_gs_rings = pm4;

	if (!sctx->init_config_has_vgt_flush) {
		si_init_config_add_vgt_flush(sctx);
		si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	}

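	/* Zeroing initial_gfx_cs_size below keeps the flush from being
	 * skipped as an "empty" IB, so both init_config states are
	 * guaranteed to be re-emitted with the new ring sizes.
	 */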
	/* Flush the context to re-emit both init_config states. */
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

	/* Set ring bindings. */
	if (sctx->esgs_ring) {
		si_set_ring_buffer(&sctx->b.b, SI_ES_RING_ESGS,
				   sctx->esgs_ring, 0, sctx->esgs_ring->width0,
				   true, true, 4, 64, 0);
		si_set_ring_buffer(&sctx->b.b, SI_GS_RING_ESGS,
				   sctx->esgs_ring, 0, sctx->esgs_ring->width0,
				   false, false, 0, 0, 0);
	}
	if (sctx->gsvs_ring) {
		si_set_ring_buffer(&sctx->b.b, SI_RING_GSVS,
				   sctx->gsvs_ring, 0, sctx->gsvs_ring->width0,
				   false, false, 0, 0, 0);
	}

	return true;
}

/**
 * @returns 1 if \p shader has been updated to use a new scratch buffer
 *          0 if not
 *        < 0 if there was a failure
 */
static int si_update_scratch_buffer(struct si_context *sctx,
				    struct si_shader *shader)
{
	uint64_t scratch_va = sctx->scratch_buffer->gpu_address;
	int r;

	if (!shader)
		return 0;

	/* This shader doesn't need a scratch buffer */
	if (shader->config.scratch_bytes_per_wave == 0)
		return 0;

	/* This shader is already configured to use the current
	 * scratch buffer. */
	if (shader->scratch_bo == sctx->scratch_buffer)
		return 0;

	assert(sctx->scratch_buffer);

	si_shader_apply_scratch_relocs(sctx, shader, &shader->config, scratch_va);

	/* Replace the shader bo with a new bo that has the relocs applied. */
	r = si_shader_binary_upload(sctx->screen, shader);
	if (r)
		return r;

	/* Update the shader state to use the new shader bo. */
	si_shader_init_pm4_state(sctx->screen, shader);

	r600_resource_reference(&shader->scratch_bo, sctx->scratch_buffer);

	return 1;
}

static unsigned si_get_current_scratch_buffer_size(struct si_context *sctx)
{
	return sctx->scratch_buffer ? sctx->scratch_buffer->b.b.width0 : 0;
}

static unsigned si_get_scratch_buffer_bytes_per_wave(struct si_shader *shader)
{
	return shader ? shader->config.scratch_bytes_per_wave : 0;
}

static unsigned si_get_max_scratch_bytes_per_wave(struct si_context *sctx)
{
	unsigned bytes = 0;

	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->ps_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->gs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->vs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tcs_shader.current));
	bytes = MAX2(bytes, si_get_scratch_buffer_bytes_per_wave(sctx->tes_shader.current));
	return bytes;
}

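/* Scratch is per-wave spill memory.  SPI_TMPRING_SIZE (computed below and
 * emitted via the scratch reloc state) holds the number of waves that can
 * have scratch and the per-wave size in 1 KB units (hence the >> 10), so
 * the buffer must hold scratch_bytes_per_wave * scratch_waves bytes.
 */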
static bool si_update_spi_tmpring_size(struct si_context *sctx)
{
	unsigned current_scratch_buffer_size =
		si_get_current_scratch_buffer_size(sctx);
	unsigned scratch_bytes_per_wave =
		si_get_max_scratch_bytes_per_wave(sctx);
	unsigned scratch_needed_size = scratch_bytes_per_wave *
		sctx->scratch_waves;
	unsigned spi_tmpring_size;
	int r;

	if (scratch_needed_size > 0) {
		if (scratch_needed_size > current_scratch_buffer_size) {
			/* Create a bigger scratch buffer */
			r600_resource_reference(&sctx->scratch_buffer, NULL);

			sctx->scratch_buffer = (struct r600_resource*)
				pipe_buffer_create(&sctx->screen->b.b, 0,
						   PIPE_USAGE_DEFAULT, scratch_needed_size);
			if (!sctx->scratch_buffer)
				return false;
			sctx->emit_scratch_reloc = true;
		}

		/* Update the shaders, so that they use the latest scratch.
		 * The scratch buffer may have been changed since these shaders
		 * were last used, so we still need to try to update them, even
		 * if they require scratch buffers smaller than the current
		 * size.
		 */
		r = si_update_scratch_buffer(sctx, sctx->ps_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

		r = si_update_scratch_buffer(sctx, sctx->gs_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);

		r = si_update_scratch_buffer(sctx, sctx->tcs_shader.current);
		if (r < 0)
			return false;
		if (r == 1)
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);

		/* VS can be bound as LS, ES, or VS. */
		r = si_update_scratch_buffer(sctx, sctx->vs_shader.current);
		if (r < 0)
			return false;
		if (r == 1) {
			if (sctx->tes_shader.current)
				si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);
			else if (sctx->gs_shader.current)
				si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
			else
				si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
		}

		/* TES can be bound as ES or VS. */
		r = si_update_scratch_buffer(sctx, sctx->tes_shader.current);
		if (r < 0)
			return false;
		if (r == 1) {
			if (sctx->gs_shader.current)
				si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
			else
				si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
		}
	}

	/* The LLVM shader backend should be reporting aligned scratch_sizes. */
	assert((scratch_needed_size & ~0x3FF) == scratch_needed_size &&
	       "scratch size should already be aligned correctly.");

	spi_tmpring_size = S_0286E8_WAVES(sctx->scratch_waves) |
			   S_0286E8_WAVESIZE(scratch_bytes_per_wave >> 10);
	if (spi_tmpring_size != sctx->spi_tmpring_size) {
		sctx->spi_tmpring_size = spi_tmpring_size;
		sctx->emit_scratch_reloc = true;
	}
	return true;
}

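/* Off-chip tessellation numbers, for reference (assuming a 4-SE VI part):
 * double_offchip_buffers gives 128 buffers per SE, so 128 * 4 = 512
 * buffers, which is exactly the VI cap below; each buffer is
 * tess_offchip_block_dw_size dwords, i.e. 32 KB with a block size of
 * 8K dwords.  On VI the OFFCHIP_BUFFERING field is programmed with
 * count - 1, hence the decrement further down.
 */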
static void si_init_tess_factor_ring(struct si_context *sctx)
{
	bool double_offchip_buffers = sctx->b.chip_class >= CIK &&
				      sctx->b.family != CHIP_CARRIZO &&
				      sctx->b.family != CHIP_STONEY;
	unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
	unsigned max_offchip_buffers = max_offchip_buffers_per_se *
				       sctx->screen->b.info.max_se;
	unsigned offchip_granularity;

	switch (sctx->screen->tess_offchip_block_dw_size) {
	default:
		assert(0);
		/* fall through */
	case 8192:
		offchip_granularity = V_03093C_X_8K_DWORDS;
		break;
	case 4096:
		offchip_granularity = V_03093C_X_4K_DWORDS;
		break;
	}

	switch (sctx->b.chip_class) {
	case SI:
		max_offchip_buffers = MIN2(max_offchip_buffers, 126);
		break;
	case CIK:
		max_offchip_buffers = MIN2(max_offchip_buffers, 508);
		break;
	case VI:
	default:
		max_offchip_buffers = MIN2(max_offchip_buffers, 512);
		break;
	}

	assert(!sctx->tf_ring);
	sctx->tf_ring = pipe_buffer_create(sctx->b.b.screen, 0,
					   PIPE_USAGE_DEFAULT,
					   32768 * sctx->screen->b.info.max_se);
	if (!sctx->tf_ring)
		return;

	assert(((sctx->tf_ring->width0 / 4) & C_030938_SIZE) == 0);

	sctx->tess_offchip_ring = pipe_buffer_create(sctx->b.b.screen, 0,
						     PIPE_USAGE_DEFAULT,
						     max_offchip_buffers *
						     sctx->screen->tess_offchip_block_dw_size * 4);
	if (!sctx->tess_offchip_ring)
		return;

	si_init_config_add_vgt_flush(sctx);

	/* Append these registers to the init config state. */
	if (sctx->b.chip_class >= CIK) {
		if (sctx->b.chip_class >= VI)
			--max_offchip_buffers;

		si_pm4_set_reg(sctx->init_config, R_030938_VGT_TF_RING_SIZE,
			       S_030938_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_030940_VGT_TF_MEMORY_BASE,
			       r600_resource(sctx->tf_ring)->gpu_address >> 8);
		si_pm4_set_reg(sctx->init_config, R_03093C_VGT_HS_OFFCHIP_PARAM,
			       S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
			       S_03093C_OFFCHIP_GRANULARITY(offchip_granularity));
	} else {
		assert(offchip_granularity == V_03093C_X_8K_DWORDS);
		si_pm4_set_reg(sctx->init_config, R_008988_VGT_TF_RING_SIZE,
			       S_008988_SIZE(sctx->tf_ring->width0 / 4));
		si_pm4_set_reg(sctx->init_config, R_0089B8_VGT_TF_MEMORY_BASE,
			       r600_resource(sctx->tf_ring)->gpu_address >> 8);
		si_pm4_set_reg(sctx->init_config, R_0089B0_VGT_HS_OFFCHIP_PARAM,
			       S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers));
	}

	/* Flush the context to re-emit the init_config state.
	 * This is done only once in a lifetime of a context.
	 */
	si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
	sctx->b.initial_gfx_cs_size = 0; /* force flush */
	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);

	si_set_ring_buffer(&sctx->b.b, SI_HS_RING_TESS_FACTOR, sctx->tf_ring,
			   0, sctx->tf_ring->width0, false, false, 0, 0, 0);

	si_set_ring_buffer(&sctx->b.b, SI_HS_RING_TESS_OFFCHIP,
			   sctx->tess_offchip_ring, 0,
			   sctx->tess_offchip_ring->width0, false, false, 0, 0, 0);
}

/**
 * This is used when TCS is NULL in the VS->TCS->TES chain. In this case,
 * VS passes its outputs to TES directly, so the fixed-function shader only
 * has to write TESSOUTER and TESSINNER.
 */
static void si_generate_fixed_func_tcs(struct si_context *sctx)
{
	struct ureg_src outer, inner;
	struct ureg_dst tessouter, tessinner;
	struct ureg_program *ureg = ureg_create(PIPE_SHADER_TESS_CTRL);

	if (!ureg)
		return; /* if we get here, we're screwed */

	assert(!sctx->fixed_func_tcs_shader.cso);

	outer = ureg_DECL_system_value(ureg,
				       TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI, 0);
	inner = ureg_DECL_system_value(ureg,
				       TGSI_SEMANTIC_DEFAULT_TESSINNER_SI, 0);

	tessouter = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSOUTER, 0);
	tessinner = ureg_DECL_output(ureg, TGSI_SEMANTIC_TESSINNER, 0);

	ureg_MOV(ureg, tessouter, outer);
	ureg_MOV(ureg, tessinner, inner);
	ureg_END(ureg);

	sctx->fixed_func_tcs_shader.cso =
		ureg_create_shader_and_destroy(ureg, &sctx->b.b);
}

static void si_update_vgt_shader_config(struct si_context *sctx)
{
	/* Calculate the index of the config.
	 * 0 = VS, 1 = VS+GS, 2 = VS+Tess, 3 = VS+Tess+GS */
	unsigned index = 2*!!sctx->tes_shader.cso + !!sctx->gs_shader.cso;
	struct si_pm4_state **pm4 = &sctx->vgt_shader_config[index];

	if (!*pm4) {
		uint32_t stages = 0;

		*pm4 = CALLOC_STRUCT(si_pm4_state);

		if (sctx->tes_shader.cso) {
			stages |= S_028B54_LS_EN(V_028B54_LS_STAGE_ON) |
				  S_028B54_HS_EN(1) | S_028B54_DYNAMIC_HS(1);

			if (sctx->gs_shader.cso)
				stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
					  S_028B54_GS_EN(1) |
					  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
			else
				stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
		} else if (sctx->gs_shader.cso) {
			stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
				  S_028B54_GS_EN(1) |
				  S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
		}

		si_pm4_set_reg(*pm4, R_028B54_VGT_SHADER_STAGES_EN, stages);
	}
	si_pm4_bind_state(sctx, vgt_shader_config, *pm4);
}

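/* Worked example for the mask computed below: a streamout output written
 * to buffer 2 of stream 1 contributes (1 << 2) << (1 * 4) = 0x40, i.e.
 * the mask packs one 4-bit "buffers used" nibble per stream.
 */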
static void si_update_so(struct si_context *sctx, struct si_shader_selector *shader)
{
	struct pipe_stream_output_info *so = &shader->so;
	uint32_t enabled_stream_buffers_mask = 0;
	int i;

	for (i = 0; i < so->num_outputs; i++)
		enabled_stream_buffers_mask |= (1 << so->output[i].output_buffer) <<
					       (so->output[i].stream * 4);
	sctx->b.streamout.enabled_stream_buffers_mask = enabled_stream_buffers_mask;
	sctx->b.streamout.stride_in_dw = shader->so.stride;
}

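/* How the gallium stages map to HW stages in si_update_shaders below
 * ("copy" = the GS copy shader); with neither tess nor GS, VS is just VS:
 *
 *        | tess only | GS only | tess + GS
 * -------|-----------|---------|-----------
 *  VS -> | LS        | ES      | LS
 * TES -> | VS        | -       | ES
 *  GS -> | -         | GS+copy | GS+copy
 */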
bool si_update_shaders(struct si_context *sctx)
{
	struct pipe_context *ctx = (struct pipe_context*)sctx;
	struct si_compiler_ctx_state compiler_state;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	struct si_shader *old_vs = si_get_vs_state(sctx);
	bool old_clip_disable = old_vs ? old_vs->key.opt.hw_vs.clip_disable : false;
	int r;

	compiler_state.tm = sctx->tm;
	compiler_state.debug = sctx->b.debug;
	compiler_state.is_debug_context = sctx->is_debug;

	/* Update stages before GS. */
	if (sctx->tes_shader.cso) {
		if (!sctx->tf_ring) {
			si_init_tess_factor_ring(sctx);
			if (!sctx->tf_ring)
				return false;
		}

		/* VS as LS */
		r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, ls, sctx->vs_shader.current->pm4);

		if (sctx->tcs_shader.cso) {
			r = si_shader_select(ctx, &sctx->tcs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs, sctx->tcs_shader.current->pm4);
		} else {
			if (!sctx->fixed_func_tcs_shader.cso) {
				si_generate_fixed_func_tcs(sctx);
				if (!sctx->fixed_func_tcs_shader.cso)
					return false;
			}

			r = si_shader_select(ctx, &sctx->fixed_func_tcs_shader,
					     &compiler_state);
			if (r)
				return false;
			si_pm4_bind_state(sctx, hs,
					  sctx->fixed_func_tcs_shader.current->pm4);
		}

		r = si_shader_select(ctx, &sctx->tes_shader, &compiler_state);
		if (r)
			return false;

		if (sctx->gs_shader.cso) {
			/* TES as ES */
			si_pm4_bind_state(sctx, es, sctx->tes_shader.current->pm4);
		} else {
			/* TES as VS */
			si_pm4_bind_state(sctx, vs, sctx->tes_shader.current->pm4);
			si_update_so(sctx, sctx->tes_shader.cso);
		}
	} else if (sctx->gs_shader.cso) {
		/* VS as ES */
		r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, es, sctx->vs_shader.current->pm4);
	} else {
		/* VS as VS */
		r = si_shader_select(ctx, &sctx->vs_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, vs, sctx->vs_shader.current->pm4);
		si_update_so(sctx, sctx->vs_shader.cso);
	}

	/* Update GS. */
	if (sctx->gs_shader.cso) {
		r = si_shader_select(ctx, &sctx->gs_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, gs, sctx->gs_shader.current->pm4);
		si_pm4_bind_state(sctx, vs, sctx->gs_shader.cso->gs_copy_shader->pm4);
		si_update_so(sctx, sctx->gs_shader.cso);

		if (!si_update_gs_ring_buffers(sctx))
			return false;
	} else {
		si_pm4_bind_state(sctx, gs, NULL);
		si_pm4_bind_state(sctx, es, NULL);
	}

	si_update_vgt_shader_config(sctx);

	if (old_clip_disable != si_get_vs_state(sctx)->key.opt.hw_vs.clip_disable)
		si_mark_atom_dirty(sctx, &sctx->clip_regs);

	if (sctx->ps_shader.cso) {
		unsigned db_shader_control;

		r = si_shader_select(ctx, &sctx->ps_shader, &compiler_state);
		if (r)
			return false;
		si_pm4_bind_state(sctx, ps, sctx->ps_shader.current->pm4);

		db_shader_control =
			sctx->ps_shader.cso->db_shader_control |
			S_02880C_KILL_ENABLE(si_get_alpha_test_func(sctx) != PIPE_FUNC_ALWAYS);

		if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs) ||
		    sctx->sprite_coord_enable != rs->sprite_coord_enable ||
		    sctx->flatshade != rs->flatshade) {
			sctx->sprite_coord_enable = rs->sprite_coord_enable;
			sctx->flatshade = rs->flatshade;
			si_mark_atom_dirty(sctx, &sctx->spi_map);
		}

		if (sctx->b.family == CHIP_STONEY && si_pm4_state_changed(sctx, ps))
			si_mark_atom_dirty(sctx, &sctx->cb_render_state);

		if (sctx->ps_db_shader_control != db_shader_control) {
			sctx->ps_db_shader_control = db_shader_control;
			si_mark_atom_dirty(sctx, &sctx->db_render_state);
		}

		if (sctx->smoothing_enabled != sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing) {
			sctx->smoothing_enabled = sctx->ps_shader.current->key.part.ps.epilog.poly_line_smoothing;
			si_mark_atom_dirty(sctx, &sctx->msaa_config);

			if (sctx->b.chip_class == SI)
				si_mark_atom_dirty(sctx, &sctx->db_render_state);

			if (sctx->framebuffer.nr_samples <= 1)
				si_mark_atom_dirty(sctx, &sctx->msaa_sample_locs.atom);
		}
	}

	if (si_pm4_state_changed(sctx, ls) ||
	    si_pm4_state_changed(sctx, hs) ||
	    si_pm4_state_changed(sctx, es) ||
	    si_pm4_state_changed(sctx, gs) ||
	    si_pm4_state_changed(sctx, vs) ||
	    si_pm4_state_changed(sctx, ps)) {
		if (!si_update_spi_tmpring_size(sctx))
			return false;
	}

	sctx->do_update_shaders = false;
	return true;
}

void si_init_shader_functions(struct si_context *sctx)
{
	si_init_atom(sctx, &sctx->spi_map, &sctx->atoms.s.spi_map, si_emit_spi_map);

	sctx->b.b.create_vs_state = si_create_shader_selector;
	sctx->b.b.create_tcs_state = si_create_shader_selector;
	sctx->b.b.create_tes_state = si_create_shader_selector;
	sctx->b.b.create_gs_state = si_create_shader_selector;
	sctx->b.b.create_fs_state = si_create_shader_selector;

	sctx->b.b.bind_vs_state = si_bind_vs_shader;
	sctx->b.b.bind_tcs_state = si_bind_tcs_shader;
	sctx->b.b.bind_tes_state = si_bind_tes_shader;
	sctx->b.b.bind_gs_state = si_bind_gs_shader;
	sctx->b.b.bind_fs_state = si_bind_ps_shader;

	sctx->b.b.delete_vs_state = si_delete_shader_selector;
	sctx->b.b.delete_tcs_state = si_delete_shader_selector;
	sctx->b.b.delete_tes_state = si_delete_shader_selector;
	sctx->b.b.delete_gs_state = si_delete_shader_selector;
	sctx->b.b.delete_fs_state = si_delete_shader_selector;
}