radv_cmd_buffer.c revision 94a7434bbc26590943fed5879d49bbc3395da6e5
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_radeon_winsys.h"
#include "radv_cs.h"
#include "sid.h"
#include "vk_format.h"
#include "radv_meta.h"

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 VkImageSubresourceRange range,
					 VkImageAspectFlags pending_clears);

const struct radv_dynamic_state default_dynamic_state = {
	.viewport = {
		.count = 0,
	},
	.scissor = {
		.count = 0,
	},
	.line_width = 1.0f,
	.depth_bias = {
		.bias = 0.0f,
		.clamp = 0.0f,
		.slope = 0.0f,
	},
	.blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
	.depth_bounds = {
		.min = 0.0f,
		.max = 1.0f,
	},
	.stencil_compare_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_write_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_reference = {
		.front = 0u,
		.back = 0u,
	},
};

void
radv_dynamic_state_copy(struct radv_dynamic_state *dest,
			const struct radv_dynamic_state *src,
			uint32_t copy_mask)
{
	if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
		dest->viewport.count = src->viewport.count;
		typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
			     src->viewport.count);
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
		dest->scissor.count = src->scissor.count;
		typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
			     src->scissor.count);
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
		dest->line_width = src->line_width;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
		dest->depth_bias = src->depth_bias;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
		typed_memcpy(dest->blend_constants, src->blend_constants, 4);

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
		dest->depth_bounds = src->depth_bounds;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
		dest->stencil_compare_mask = src->stencil_compare_mask;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
		dest->stencil_write_mask = src->stencil_write_mask;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
		dest->stencil_reference = src->stencil_reference;
}
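
/* On CIK and newer, compute queues are executed by the MEC (MicroEngine
 * Compute) firmware rather than the graphics ME, which affects how some
 * packets have to be emitted. */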
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
	return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
	       cmd_buffer->device->instance->physicalDevice.rad_info.chip_class >= CIK;
}

enum ring_type radv_queue_family_to_ring(int f) {
	switch (f) {
	case RADV_QUEUE_GENERAL:
		return RING_GFX;
	case RADV_QUEUE_COMPUTE:
		return RING_COMPUTE;
	case RADV_QUEUE_TRANSFER:
		return RING_DMA;
	default:
		unreachable("Unknown queue family");
	}
}

static VkResult radv_create_cmd_buffer(
	struct radv_device *                        device,
	struct radv_cmd_pool *                      pool,
	VkCommandBufferLevel                        level,
	VkCommandBuffer*                            pCommandBuffer)
{
	struct radv_cmd_buffer *cmd_buffer;
	VkResult result;
	unsigned ring;
	cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (cmd_buffer == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(cmd_buffer, 0, sizeof(*cmd_buffer));
	cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
	cmd_buffer->device = device;
	cmd_buffer->pool = pool;
	cmd_buffer->level = level;

	if (pool) {
		list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
		cmd_buffer->queue_family_index = pool->queue_family_index;
	} else {
		/* Init the pool_link so we can safely call list_del when we destroy
		 * the command buffer
		 */
		list_inithead(&cmd_buffer->pool_link);
		cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
	}

	ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);

	cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
	if (!cmd_buffer->cs) {
		result = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto fail;
	}

	*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);

	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.size = 0;
	list_inithead(&cmd_buffer->upload.list);

	return VK_SUCCESS;

fail:
	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

	return result;
}

static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
				  uint64_t min_needed)
{
	uint64_t new_size;
	struct radeon_winsys_bo *bo;
	struct radv_cmd_buffer_upload *upload;
	struct radv_device *device = cmd_buffer->device;

	new_size = MAX2(min_needed, 16 * 1024);
	new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

	bo = device->ws->buffer_create(device->ws,
				       new_size, 4096,
				       RADEON_DOMAIN_GTT,
				       RADEON_FLAG_CPU_ACCESS);

	if (!bo) {
		cmd_buffer->record_fail = true;
		return false;
	}

	device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8);
	if (cmd_buffer->upload.upload_bo) {
		upload = malloc(sizeof(*upload));

		if (!upload) {
			cmd_buffer->record_fail = true;
			device->ws->buffer_destroy(bo);
			return false;
		}

		memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
		list_add(&upload->list, &cmd_buffer->upload.list);
	}

	cmd_buffer->upload.upload_bo = bo;
	cmd_buffer->upload.size = new_size;
	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);

	if (!cmd_buffer->upload.map) {
		cmd_buffer->record_fail = true;
		return false;
	}

	return true;
}
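
/*
 * Suballocate `size` bytes, aligned to `alignment`, from the command
 * buffer's upload BO, growing it (see radv_cmd_buffer_resize_upload_buf
 * above) when it runs out of space. Returns the offset within the BO and
 * a CPU pointer to the allocation.
 */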
bool
radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
			     unsigned size,
			     unsigned alignment,
			     unsigned *out_offset,
			     void **ptr)
{
	uint64_t offset = align(cmd_buffer->upload.offset, alignment);
	if (offset + size > cmd_buffer->upload.size) {
		if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
			return false;
		offset = 0;
	}

	*out_offset = offset;
	*ptr = cmd_buffer->upload.map + offset;

	cmd_buffer->upload.offset = offset + size;
	return true;
}

bool
radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
			    unsigned size, unsigned alignment,
			    const void *data, unsigned *out_offset)
{
	uint8_t *ptr;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
					  out_offset, (void **)&ptr))
		return false;

	if (ptr)
		memcpy(ptr, data, size);

	return true;
}

static void
radv_emit_graphics_blend_state(struct radv_cmd_buffer *cmd_buffer,
			       struct radv_pipeline *pipeline)
{
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028780_CB_BLEND0_CONTROL, 8);
	radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.cb_blend_control,
			  8);
	radeon_set_context_reg(cmd_buffer->cs, R_028808_CB_COLOR_CONTROL, pipeline->graphics.blend.cb_color_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028B70_DB_ALPHA_TO_MASK, pipeline->graphics.blend.db_alpha_to_mask);
}

static void
radv_emit_graphics_depth_stencil_state(struct radv_cmd_buffer *cmd_buffer,
				       struct radv_pipeline *pipeline)
{
	struct radv_depth_stencil_state *ds = &pipeline->graphics.ds;
	radeon_set_context_reg(cmd_buffer->cs, R_028800_DB_DEPTH_CONTROL, ds->db_depth_control);
	radeon_set_context_reg(cmd_buffer->cs, R_02842C_DB_STENCIL_CONTROL, ds->db_stencil_control);

	radeon_set_context_reg(cmd_buffer->cs, R_028000_DB_RENDER_CONTROL, ds->db_render_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028010_DB_RENDER_OVERRIDE2, ds->db_render_override2);
}
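
/* 12.4 fixed-point: multiply by 16 and clamp to [0, 0xffff].
 * For example, 1.0f packs to 0x10 and anything >= 4096.0f saturates
 * to 0xffff. */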
static unsigned radv_pack_float_12p4(float x)
{
	return x <= 0    ? 0 :
	       x >= 4096 ? 0xffff : x * 16;
}

static uint32_t
shader_stage_to_user_data_0(gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_FRAGMENT:
		return R_00B030_SPI_SHADER_USER_DATA_PS_0;
	case MESA_SHADER_VERTEX:
		return R_00B130_SPI_SHADER_USER_DATA_VS_0;
	case MESA_SHADER_COMPUTE:
		return R_00B900_COMPUTE_USER_DATA_0;
	default:
		unreachable("unknown shader");
	}
}

static struct ac_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
		      gl_shader_stage stage,
		      int idx)
{
	return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
}

static void
radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_pipeline *pipeline,
			   gl_shader_stage stage,
			   int idx, uint64_t va)
{
	struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
	uint32_t base_reg = shader_stage_to_user_data_0(stage);
	if (loc->sgpr_idx == -1)
		return;
	assert(loc->num_sgprs == 2);
	assert(!loc->indirect);
	radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_pipeline *pipeline)
{
	int num_samples = pipeline->graphics.ms.num_samples;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[0]);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[1]);

	radeon_set_context_reg(cmd_buffer->cs, CM_R_028804_DB_EQAA, ms->db_eqaa);
	radeon_set_context_reg(cmd_buffer->cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);

	if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
		return;

	radeon_set_context_reg_seq(cmd_buffer->cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);

	radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);

	uint32_t samples_offset;
	void *samples_ptr;
	void *src;
	radv_cmd_buffer_upload_alloc(cmd_buffer, num_samples * 4 * 2, 256, &samples_offset,
				     &samples_ptr);
	switch (num_samples) {
	case 1:
		src = cmd_buffer->device->sample_locations_1x;
		break;
	case 2:
		src = cmd_buffer->device->sample_locations_2x;
		break;
	case 4:
		src = cmd_buffer->device->sample_locations_4x;
		break;
	case 8:
		src = cmd_buffer->device->sample_locations_8x;
		break;
	case 16:
		src = cmd_buffer->device->sample_locations_16x;
		break;
	}
	memcpy(samples_ptr, src, num_samples * 4 * 2);

	uint64_t va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
	va += samples_offset;

	radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_FRAGMENT,
				   AC_UD_PS_SAMPLE_POS, va);
}

static void
radv_emit_graphics_raster_state(struct radv_cmd_buffer *cmd_buffer,
				struct radv_pipeline *pipeline)
{
	struct radv_raster_state *raster = &pipeline->graphics.raster;

	radeon_set_context_reg(cmd_buffer->cs, R_028810_PA_CL_CLIP_CNTL,
			       raster->pa_cl_clip_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D4_SPI_INTERP_CONTROL_0,
			       raster->spi_interp_control);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028A00_PA_SU_POINT_SIZE, 2);
	radeon_emit(cmd_buffer->cs, 0);
	radeon_emit(cmd_buffer->cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
		    S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2))); /* R_028A04_PA_SU_POINT_MINMAX */

	radeon_set_context_reg(cmd_buffer->cs, R_028BE4_PA_SU_VTX_CNTL,
			       raster->pa_su_vtx_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL,
			       raster->pa_su_sc_mode_cntl);
}

static void
radv_emit_vertex_shader(struct radv_cmd_buffer *cmd_buffer,
			struct radv_pipeline *pipeline)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *vs;
	uint64_t va;
	unsigned export_count;
	unsigned clip_dist_mask, cull_dist_mask, total_mask;

	assert(pipeline->shaders[MESA_SHADER_VERTEX]);

	vs = pipeline->shaders[MESA_SHADER_VERTEX];
	va = ws->buffer_get_va(vs->bo);
	ws->cs_add_buffer(cmd_buffer->cs, vs->bo, 8);

	clip_dist_mask = vs->info.vs.clip_dist_mask;
	cull_dist_mask = vs->info.vs.cull_dist_mask;
	total_mask = clip_dist_mask | cull_dist_mask;
	radeon_set_context_reg(cmd_buffer->cs, R_028A40_VGT_GS_MODE, 0);
	radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, 0);

	export_count = MAX2(1, vs->info.vs.param_exports);
	radeon_set_context_reg(cmd_buffer->cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(export_count - 1));
	radeon_set_context_reg(cmd_buffer->cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(vs->info.vs.pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(vs->info.vs.pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(vs->info.vs.pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, vs->rsrc1);
	radeon_emit(cmd_buffer->cs, vs->rsrc2);

	radeon_set_context_reg(cmd_buffer->cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	radeon_set_context_reg(cmd_buffer->cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       S_02881C_USE_VTX_POINT_SIZE(vs->info.vs.writes_pointsize) |
			       S_02881C_VS_OUT_MISC_VEC_ENA(vs->info.vs.writes_pointsize) |
			       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
			       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
			       pipeline->graphics.raster.pa_cl_vs_out_cntl |
			       cull_dist_mask << 8 |
			       clip_dist_mask);
}

static void
radv_emit_fragment_shader(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_pipeline *pipeline)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *ps, *vs;
	uint64_t va;
	unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	unsigned ps_offset = 0;
	unsigned z_order;
	assert(pipeline->shaders[MESA_SHADER_FRAGMENT]);

	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	vs = pipeline->shaders[MESA_SHADER_VERTEX];
	va = ws->buffer_get_va(ps->bo);
	ws->cs_add_buffer(cmd_buffer->cs, ps->bo, 8);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, ps->rsrc1);
	radeon_emit(cmd_buffer->cs, ps->rsrc2);

	if (ps->info.fs.early_fragment_test || !ps->info.fs.writes_memory)
		z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
	else
		z_order = V_02880C_LATE_Z;

	radeon_set_context_reg(cmd_buffer->cs, R_02880C_DB_SHADER_CONTROL,
			       S_02880C_Z_EXPORT_ENABLE(ps->info.fs.writes_z) |
			       S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.fs.writes_stencil) |
			       S_02880C_KILL_ENABLE(!!ps->info.fs.can_discard) |
			       S_02880C_Z_ORDER(z_order) |
			       S_02880C_DEPTH_BEFORE_SHADER(ps->info.fs.early_fragment_test) |
			       S_02880C_EXEC_ON_HIER_FAIL(ps->info.fs.writes_memory) |
			       S_02880C_EXEC_ON_NOOP(ps->info.fs.writes_memory));

	radeon_set_context_reg(cmd_buffer->cs, R_0286CC_SPI_PS_INPUT_ENA,
			       ps->config.spi_ps_input_ena);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D0_SPI_PS_INPUT_ADDR,
			       ps->config.spi_ps_input_addr);

	spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(0);
	radeon_set_context_reg(cmd_buffer->cs, R_0286D8_SPI_PS_IN_CONTROL,
			       S_0286D8_NUM_INTERP(ps->info.fs.num_interp));

	radeon_set_context_reg(cmd_buffer->cs, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_028710_SPI_SHADER_Z_FORMAT,
			       ps->info.fs.writes_stencil ? V_028710_SPI_SHADER_32_GR :
			       ps->info.fs.writes_z ? V_028710_SPI_SHADER_32_R :
			       V_028710_SPI_SHADER_ZERO);

	radeon_set_context_reg(cmd_buffer->cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

	radeon_set_context_reg(cmd_buffer->cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
	radeon_set_context_reg(cmd_buffer->cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

	if (ps->info.fs.has_pcoord) {
		unsigned val;
		val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
		radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val);
		ps_offset = 1;
	}
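
	/* Wire each PS input up to the VS export that feeds it; inputs
	 * without a matching VS export fall back to the default
	 * offset 0x20. */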
	for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.fs.input_mask; ++i) {
		unsigned vs_offset, flat_shade;
		unsigned val;

		if (!(ps->info.fs.input_mask & (1u << i)))
			continue;

		if (!(vs->info.vs.export_mask & (1u << i))) {
			radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset,
					       S_028644_OFFSET(0x20));
			++ps_offset;
			continue;
		}

		vs_offset = util_bitcount(vs->info.vs.export_mask & ((1u << i) - 1));
		flat_shade = !!(ps->info.fs.flat_shaded_mask & (1u << ps_offset));

		val = S_028644_OFFSET(vs_offset) | S_028644_FLAT_SHADE(flat_shade);
		radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val);
		++ps_offset;
	}
}

static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer,
			    struct radv_pipeline *pipeline)
{
	if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
		return;

	radv_emit_graphics_depth_stencil_state(cmd_buffer, pipeline);
	radv_emit_graphics_blend_state(cmd_buffer, pipeline);
	radv_emit_graphics_raster_state(cmd_buffer, pipeline);
	radv_update_multisample_state(cmd_buffer, pipeline);
	radv_emit_vertex_shader(cmd_buffer, pipeline);
	radv_emit_fragment_shader(cmd_buffer, pipeline);

	radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
			       pipeline->graphics.prim_restart_enable);

	cmd_buffer->state.emitted_pipeline = pipeline;
}

static void
radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
{
	si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
			  cmd_buffer->state.dynamic.viewport.viewports);
}

static void
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t count = cmd_buffer->state.dynamic.scissor.count;
	si_write_scissors(cmd_buffer->cs, 0, count,
			  cmd_buffer->state.dynamic.scissor.scissors);
	radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0,
			       cmd_buffer->state.pipeline->graphics.ms.pa_sc_mode_cntl_0 |
			       S_028A48_VPORT_SCISSOR_ENABLE(count ? 1 : 0));
}

static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
			 int index,
			 struct radv_color_buffer_info *cb)
{
	bool is_vi = cmd_buffer->device->instance->physicalDevice.rad_info.chip_class >= VI;
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
	radeon_emit(cmd_buffer->cs, cb->cb_color_base);
	radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
	radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
	radeon_emit(cmd_buffer->cs, cb->cb_color_view);
	radeon_emit(cmd_buffer->cs, cb->cb_color_info);
	radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
	radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
	radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
	radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
	radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
	radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);

	if (is_vi) { /* DCC BASE */
		radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
	}
}

static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
		      struct radv_ds_buffer_info *ds,
		      struct radv_image *image,
		      VkImageLayout layout)
{
	uint32_t db_z_info = ds->db_z_info;

	if (!radv_layout_has_htile(image, layout))
		db_z_info &= C_028040_TILE_SURFACE_ENABLE;

	if (!radv_layout_can_expclear(image, layout))
		db_z_info &= C_028040_ALLOW_EXPCLEAR & C_028044_ALLOW_EXPCLEAR;

	radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
	radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
	radeon_emit(cmd_buffer->cs, ds->db_depth_info);		/* R_02803C_DB_DEPTH_INFO */
	radeon_emit(cmd_buffer->cs, db_z_info);			/* R_028040_DB_Z_INFO */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_info);	/* R_028044_DB_STENCIL_INFO */
	radeon_emit(cmd_buffer->cs, ds->db_z_read_base);	/* R_028048_DB_Z_READ_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);	/* R_02804C_DB_STENCIL_READ_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_z_write_base);	/* R_028050_DB_Z_WRITE_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base);	/* R_028054_DB_STENCIL_WRITE_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_depth_size);		/* R_028058_DB_DEPTH_SIZE */
	radeon_emit(cmd_buffer->cs, ds->db_depth_slice);	/* R_02805C_DB_DEPTH_SLICE */

	radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
	radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       ds->pa_su_poly_offset_db_fmt_cntl);
}

/*
 * To hw resolve multisample images, both src and dst need to have the same
 * micro tiling mode. However, we don't always know this in advance when
 * creating the images. This function gets called if we have a resolve
 * attachment: it updates the destination image's micro tiling mode if it
 * differs, and then patches the generated framebuffer state to match.
 */
static void radv_set_optimal_micro_tile_mode(struct radv_device *device,
					     struct radv_attachment_info *att,
					     uint32_t micro_tile_mode)
{
	struct radv_image *image = att->attachment->image;
	uint32_t tile_mode_index;
	if (image->surface.nsamples <= 1)
		return;

	if (image->surface.micro_tile_mode != micro_tile_mode) {
		radv_image_set_optimal_micro_tile_mode(device, image, micro_tile_mode);
	}

	if (att->cb.micro_tile_mode != micro_tile_mode) {
		tile_mode_index = image->surface.tiling_index[0];

		att->cb.cb_color_attrib &= C_028C74_TILE_MODE_INDEX;
		att->cb.cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
		att->cb.micro_tile_mode = micro_tile_mode;
	}
}

void
radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  VkClearDepthStencilValue ds_clear_value,
			  VkImageAspectFlags aspects)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;
	unsigned reg_offset = 0, reg_count = 0;

	if (!image->htile.size || !aspects)
		return;

	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
		++reg_count;
	} else {
		++reg_offset;
		va += 4;
	}
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		++reg_count;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				    S_370_WR_CONFIRM(1) |
				    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */
}

static void
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->htile.size)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
				    COPY_DATA_DST_SEL(COPY_DATA_REG) |
				    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cmd_buffer->cs, 0);
}
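
/*
 * Store new fast-clear color values both in the image's metadata buffer,
 * so they can be reloaded later, and in the live CB_COLOR*_CLEAR_WORD
 * registers.
 */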
void
radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  int idx,
			  uint32_t color_values[2])
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				    S_370_WR_CONFIRM(1) |
				    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);
}

static void
radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image,
			   int idx)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
				    COPY_DATA_DST_SEL(COPY_DATA_REG) |
				    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, reg >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cmd_buffer->cs, 0);
}

void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
	int i;
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;
	int dst_resolve_micro_tile_mode = -1;

	if (subpass->has_resolve) {
		uint32_t a = subpass->resolve_attachments[0].attachment;
		const struct radv_image *image = framebuffer->attachments[a].attachment->image;
		dst_resolve_micro_tile_mode = image->surface.micro_tile_mode;
	}
	for (i = 0; i < subpass->color_count; ++i) {
		int idx = subpass->color_attachments[i].attachment;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];

		if (dst_resolve_micro_tile_mode != -1) {
			radv_set_optimal_micro_tile_mode(cmd_buffer->device,
							 att, dst_resolve_micro_tile_mode);
		}
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);

		assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
		radv_emit_fb_color_state(cmd_buffer, i, &att->cb);

		radv_load_color_clear_regs(cmd_buffer, att->attachment->image, i);
	}

	for (i = subpass->color_count; i < 8; i++)
		radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
				       S_028C70_FORMAT(V_028C70_COLOR_INVALID));

	if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
		int idx = subpass->depth_stencil_attachment.attachment;
		VkImageLayout layout = subpass->depth_stencil_attachment.layout;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];
		struct radv_image *image = att->attachment->image;
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);

		radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);

		if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
			cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
			cmd_buffer->state.offset_scale = att->ds.offset_scale;
		}
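
		/* Reload the image's current depth/stencil fast-clear values
		 * into DB_STENCIL_CLEAR/DB_DEPTH_CLEAR. */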
		radv_load_depth_clear_regs(cmd_buffer, image);
	} else {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
		radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID));	/* R_028040_DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID));	/* R_028044_DB_STENCIL_INFO */
	}
	radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
			       S_028208_BR_X(framebuffer->width) |
			       S_028208_BR_Y(framebuffer->height));
}

void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t db_count_control;

	if (!cmd_buffer->state.active_occlusion_queries) {
		if (cmd_buffer->device->instance->physicalDevice.rad_info.chip_class >= CIK) {
			db_count_control = 0;
		} else {
			db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
		}
	} else {
		if (cmd_buffer->device->instance->physicalDevice.rad_info.chip_class >= CIK) {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(0) | /* TODO: set this to the number of samples of the current framebuffer */
				S_028004_ZPASS_ENABLE(1) |
				S_028004_SLICE_EVEN_ENABLE(1) |
				S_028004_SLICE_ODD_ENABLE(1);
		} else {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(0); /* TODO: set this to the number of samples of the current framebuffer */
		}
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
}

static void
radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
		unsigned width = cmd_buffer->state.dynamic.line_width * 8;
		radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
				       S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
	}

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
		radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
				       RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
				       RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK)) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028430_DB_STENCILREFMASK, 2);
		radeon_emit(cmd_buffer->cs, S_028430_STENCILTESTVAL(d->stencil_reference.front) |
					    S_028430_STENCILMASK(d->stencil_compare_mask.front) |
					    S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
					    S_028430_STENCILOPVAL(1));
		radeon_emit(cmd_buffer->cs, S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
					    S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
					    S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
					    S_028434_STENCILOPVAL_BF(1));
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
				       RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)) {
		radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN, fui(d->depth_bounds.min));
		radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX, fui(d->depth_bounds.max));
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
				       RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
		struct radv_raster_state *raster = &cmd_buffer->state.pipeline->graphics.raster;
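		/* The hw takes the slope in 1/16th subpixel units, hence the
		 * 16.0f factor; the constant bias is pre-scaled by the depth
		 * surface's offset_scale (see radv_emit_framebuffer_state). */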
		unsigned slope = fui(d->depth_bias.slope * 16.0f);
		unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);

		if (G_028814_POLY_OFFSET_FRONT_ENABLE(raster->pa_su_sc_mode_cntl)) {
			radeon_set_context_reg_seq(cmd_buffer->cs, R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
			radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
			radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
			radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
			radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
			radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
		}
	}

	cmd_buffer->state.dirty = 0;
}

static void
emit_stage_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
				   struct radv_pipeline *pipeline,
				   int idx,
				   uint64_t va,
				   gl_shader_stage stage)
{
	struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
	uint32_t base_reg = shader_stage_to_user_data_0(stage);

	if (desc_set_loc->sgpr_idx == -1)
		return;

	assert(!desc_set_loc->indirect);
	assert(desc_set_loc->num_sgprs == 2);
	radeon_set_sh_reg_seq(cmd_buffer->cs,
			      base_reg + desc_set_loc->sgpr_idx * 4, 2);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_pipeline *pipeline,
				  VkShaderStageFlags stages,
				  struct radv_descriptor_set *set,
				  unsigned idx)
{
	if (stages & VK_SHADER_STAGE_FRAGMENT_BIT)
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_FRAGMENT);

	if (stages & VK_SHADER_STAGE_VERTEX_BIT)
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_VERTEX);

	if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
		emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
						   idx, set->va,
						   MESA_SHADER_COMPUTE);
}

static void
radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
		       struct radv_pipeline *pipeline,
		       VkShaderStageFlags stages)
{
	unsigned i;
	if (!cmd_buffer->state.descriptors_dirty)
		return;

	for (i = 0; i < MAX_SETS; i++) {
		if (!(cmd_buffer->state.descriptors_dirty & (1 << i)))
			continue;
		struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i];
		if (!set)
			continue;

		radv_emit_descriptor_set_userdata(cmd_buffer, pipeline, stages, set, i);
	}
	cmd_buffer->state.descriptors_dirty = 0;
}

static void
radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
		     struct radv_pipeline *pipeline,
		     VkShaderStageFlags stages)
{
	struct radv_pipeline_layout *layout = pipeline->layout;
	unsigned offset;
	void *ptr;
	uint64_t va;

	stages &= cmd_buffer->push_constant_stages;
	if (!stages || !layout || (!layout->push_constant_size && !layout->dynamic_offset_count))
		return;

	radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
				     16 * layout->dynamic_offset_count,
				     256, &offset, &ptr);

	memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
	memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
	       16 * layout->dynamic_offset_count);

	va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	if (stages & VK_SHADER_STAGE_VERTEX_BIT)
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_VERTEX,
					   AC_UD_PUSH_CONSTANTS, va);

	if (stages & VK_SHADER_STAGE_FRAGMENT_BIT)
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_FRAGMENT,
					   AC_UD_PUSH_CONSTANTS, va);

	if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_COMPUTE,
					   AC_UD_PUSH_CONSTANTS, va);

	cmd_buffer->push_constant_stages &= ~stages;
}
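
/*
 * Emit everything an upcoming draw depends on: vertex buffer descriptors,
 * the pipeline, framebuffer and dynamic state, descriptor sets and push
 * constants, followed by any pending cache flushes.
 */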
static void
radv_cmd_buffer_flush_state(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
	struct radv_device *device = cmd_buffer->device;
	uint32_t ia_multi_vgt_param;
	uint32_t ls_hs_config = 0;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, 4096);

	if ((cmd_buffer->state.vertex_descriptors_dirty || cmd_buffer->state.vb_dirty) &&
	    cmd_buffer->state.pipeline->num_vertex_attribs) {
		unsigned vb_offset;
		void *vb_ptr;
		uint32_t i = 0;
		uint32_t num_attribs = cmd_buffer->state.pipeline->num_vertex_attribs;
		uint64_t va;

		/* allocate some descriptor state for vertex buffers */
		radv_cmd_buffer_upload_alloc(cmd_buffer, num_attribs * 16, 256,
					     &vb_offset, &vb_ptr);

		for (i = 0; i < num_attribs; i++) {
			uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
			uint32_t offset;
			int vb = cmd_buffer->state.pipeline->va_binding[i];
			struct radv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
			uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];

			device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
			va = device->ws->buffer_get_va(buffer->bo);

			offset = cmd_buffer->state.vertex_bindings[vb].offset + cmd_buffer->state.pipeline->va_offset[i];
			va += offset + buffer->offset;
			desc[0] = va;
			desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
			if (cmd_buffer->device->instance->physicalDevice.rad_info.chip_class <= CIK && stride)
				desc[2] = (buffer->size - offset - cmd_buffer->state.pipeline->va_format_size[i]) / stride + 1;
			else
				desc[2] = buffer->size - offset;
			desc[3] = cmd_buffer->state.pipeline->va_rsrc_word3[i];
		}

		va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
		va += vb_offset;

		radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_VERTEX,
					   AC_UD_VS_VERTEX_BUFFERS, va);
	}

	cmd_buffer->state.vertex_descriptors_dirty = false;
	cmd_buffer->state.vb_dirty = 0;
	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
		radv_emit_graphics_pipeline(cmd_buffer, pipeline);

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_RENDER_TARGETS)
		radv_emit_framebuffer_state(cmd_buffer);

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
		radv_emit_viewport(cmd_buffer);

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR))
		radv_emit_scissor(cmd_buffer);

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) {
		radeon_set_context_reg(cmd_buffer->cs, R_028B54_VGT_SHADER_STAGES_EN, 0);
		ia_multi_vgt_param = si_get_ia_multi_vgt_param(cmd_buffer);

		if (cmd_buffer->device->instance->physicalDevice.rad_info.chip_class >= CIK) {
			radeon_set_context_reg_idx(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
			radeon_set_context_reg_idx(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, 2, ls_hs_config);
			radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_030908_VGT_PRIMITIVE_TYPE,
						   1, cmd_buffer->state.pipeline->graphics.prim);
		} else {
			radeon_set_config_reg(cmd_buffer->cs, R_008958_VGT_PRIMITIVE_TYPE, cmd_buffer->state.pipeline->graphics.prim);
			radeon_set_context_reg(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
			radeon_set_context_reg(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
		}
		radeon_set_context_reg(cmd_buffer->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, cmd_buffer->state.pipeline->graphics.gs_out);
	}

	radv_cmd_buffer_flush_dynamic_state(cmd_buffer);

	radv_flush_descriptors(cmd_buffer, cmd_buffer->state.pipeline,
			       VK_SHADER_STAGE_ALL_GRAPHICS);
	radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline,
			     VK_SHADER_STAGE_ALL_GRAPHICS);

	assert(cmd_buffer->cs->cdw <= cdw_max);

	si_emit_cache_flush(cmd_buffer);
}
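
/*
 * Translate a source pipeline stage mask into the coarse-grained waits
 * the hardware offers: compute, pixel-shader or vertex-shader partial
 * flushes.
 */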
static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
			     VkPipelineStageFlags src_stage_mask)
{
	if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
			      VK_PIPELINE_STAGE_TRANSFER_BIT |
			      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
			      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
	}

	if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
			      VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
			      VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
			      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
			      VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
			      VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
			      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
			      VK_PIPELINE_STAGE_TRANSFER_BIT |
			      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
			      VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
			      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
	} else if (src_stage_mask & (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
				     VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
				     VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
				     VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
	}
}

static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
{
	radv_stage_flush(cmd_buffer, barrier->src_stage_mask);

	/* TODO: actual cache flushes */
}

static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
						 VkAttachmentReference att)
{
	unsigned idx = att.attachment;
	struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
	VkImageSubresourceRange range;
	range.aspectMask = 0;
	range.baseMipLevel = view->base_mip;
	range.levelCount = 1;
	range.baseArrayLayer = view->base_layer;
	range.layerCount = cmd_buffer->state.framebuffer->layers;

	radv_handle_image_transition(cmd_buffer,
				     view->image,
				     cmd_buffer->state.attachments[idx].current_layout,
				     att.layout, range,
				     cmd_buffer->state.attachments[idx].pending_clear_aspects);

	cmd_buffer->state.attachments[idx].current_layout = att.layout;
}

void
radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
			    const struct radv_subpass *subpass, bool transitions)
{
	if (transitions) {
		radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);

		for (unsigned i = 0; i < subpass->color_count; ++i) {
			radv_handle_subpass_image_transition(cmd_buffer,
							     subpass->color_attachments[i]);
		}

		for (unsigned i = 0; i < subpass->input_count; ++i) {
			radv_handle_subpass_image_transition(cmd_buffer,
							     subpass->input_attachments[i]);
		}

		if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
			radv_handle_subpass_image_transition(cmd_buffer,
							     subpass->depth_stencil_attachment);
		}
	}

	cmd_buffer->state.subpass = subpass;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_RENDER_TARGETS;
}
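
/*
 * Record, for every attachment, which aspects still have a pending
 * VK_ATTACHMENT_LOAD_OP_CLEAR and the layout the attachment is
 * currently in.
 */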
static void
radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
				 struct radv_render_pass *pass,
				 const VkRenderPassBeginInfo *info)
{
	struct radv_cmd_state *state = &cmd_buffer->state;

	if (pass->attachment_count == 0) {
		state->attachments = NULL;
		return;
	}

	state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
				      pass->attachment_count *
				      sizeof(state->attachments[0]),
				      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (state->attachments == NULL) {
		/* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
		abort();
	}

	for (uint32_t i = 0; i < pass->attachment_count; ++i) {
		struct radv_render_pass_attachment *att = &pass->attachments[i];
		VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
		VkImageAspectFlags clear_aspects = 0;

		if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
			/* color attachment */
			if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
			}
		} else {
			/* depthstencil attachment */
			if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
			    att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
			}
			if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
			    att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
			}
		}

		state->attachments[i].pending_clear_aspects = clear_aspects;
		if (clear_aspects && info) {
			assert(info->clearValueCount > i);
			state->attachments[i].clear_value = info->pClearValues[i];
		}

		state->attachments[i].current_layout = att->initial_layout;
	}
}

VkResult radv_AllocateCommandBuffers(
	VkDevice                                    _device,
	const VkCommandBufferAllocateInfo*          pAllocateInfo,
	VkCommandBuffer*                            pCommandBuffers)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);

	VkResult result = VK_SUCCESS;
	uint32_t i;

	for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
		result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
						&pCommandBuffers[i]);
		if (result != VK_SUCCESS)
			break;
	}

	if (result != VK_SUCCESS)
		radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
					i, pCommandBuffers);

	return result;
}

static void
radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
{
	list_del(&cmd_buffer->pool_link);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
	cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void radv_FreeCommandBuffers(
	VkDevice                                    device,
	VkCommandPool                               commandPool,
	uint32_t                                    commandBufferCount,
	const VkCommandBuffer*                      pCommandBuffers)
{
	for (uint32_t i = 0; i < commandBufferCount; i++) {
		RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

		if (cmd_buffer)
			radv_cmd_buffer_destroy(cmd_buffer);
	}
}
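
/* Resetting frees all retired upload BOs but keeps the current one,
 * which only needs to be re-added to the freshly reset CS. */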
static void radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
	cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs,
						      cmd_buffer->upload.upload_bo, 8);
	cmd_buffer->upload.offset = 0;

	cmd_buffer->record_fail = false;
}

VkResult radv_ResetCommandBuffer(
	VkCommandBuffer                             commandBuffer,
	VkCommandBufferResetFlags                   flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	radv_reset_cmd_buffer(cmd_buffer);
	return VK_SUCCESS;
}

VkResult radv_BeginCommandBuffer(
	VkCommandBuffer                             commandBuffer,
	const VkCommandBufferBeginInfo*             pBeginInfo)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	radv_reset_cmd_buffer(cmd_buffer);

	memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));

	/* setup initial configuration into command buffer */
	if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
		/* Flush read caches at the beginning of CS not flushed by the kernel. */
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_ICACHE |
			RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
			RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
			RADV_CMD_FLAG_INV_VMEM_L1 |
			RADV_CMD_FLAG_INV_SMEM_L1 |
			RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
			RADV_CMD_FLAG_INV_GLOBAL_L2;
		si_init_config(&cmd_buffer->device->instance->physicalDevice, cmd_buffer);
		radv_set_db_count_control(cmd_buffer);
		si_emit_cache_flush(cmd_buffer);
	}

	if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
		cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
		cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);

		struct radv_subpass *subpass =
			&cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

		radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
		radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
	}

	return VK_SUCCESS;
}

void radv_CmdBindVertexBuffers(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    firstBinding,
	uint32_t                                    bindingCount,
	const VkBuffer*                             pBuffers,
	const VkDeviceSize*                         pOffsets)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

	/* We have to defer setting up vertex buffers since we need the buffer
	 * stride from the pipeline.
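	 * The actual descriptors are not built until draw time, in
	 * radv_cmd_buffer_flush_state.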
	 */

	assert(firstBinding + bindingCount <= MAX_VBS);
	for (uint32_t i = 0; i < bindingCount; i++) {
		vb[firstBinding + i].buffer = radv_buffer_from_handle(pBuffers[i]);
		vb[firstBinding + i].offset = pOffsets[i];
		cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
	}
}

void radv_CmdBindIndexBuffer(
	VkCommandBuffer                             commandBuffer,
	VkBuffer                                    buffer,
	VkDeviceSize                                offset,
	VkIndexType                                 indexType)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	cmd_buffer->state.index_buffer = radv_buffer_from_handle(buffer);
	cmd_buffer->state.index_offset = offset;
	cmd_buffer->state.index_type = indexType; /* vk matches hw */
	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, cmd_buffer->state.index_buffer->bo, 8);
}

void radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_descriptor_set *set,
			      unsigned idx)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;

	cmd_buffer->state.descriptors[idx] = set;
	cmd_buffer->state.descriptors_dirty |= (1 << idx);
	if (!set)
		return;

	for (unsigned j = 0; j < set->layout->buffer_count; ++j)
		if (set->descriptors[j])
			ws->cs_add_buffer(cmd_buffer->cs, set->descriptors[j], 7);

	if (set->bo)
		ws->cs_add_buffer(cmd_buffer->cs, set->bo, 8);
}

void radv_CmdBindDescriptorSets(
	VkCommandBuffer                             commandBuffer,
	VkPipelineBindPoint                         pipelineBindPoint,
	VkPipelineLayout                            _layout,
	uint32_t                                    firstSet,
	uint32_t                                    descriptorSetCount,
	const VkDescriptorSet*                      pDescriptorSets,
	uint32_t                                    dynamicOffsetCount,
	const uint32_t*                             pDynamicOffsets)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
	unsigned dyn_idx = 0;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, MAX_SETS * 4 * 6);

	for (unsigned i = 0; i < descriptorSetCount; ++i) {
		unsigned idx = i + firstSet;
		RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
		radv_bind_descriptor_set(cmd_buffer, set, idx);
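
		/* Dynamic buffer descriptors are not written into the set;
		 * they are stashed in cmd_buffer->dynamic_buffers and
		 * uploaded together with the push constants by
		 * radv_flush_constants. */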
		for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
			unsigned idx = j + layout->set[i].dynamic_offset_start;
			uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
			assert(dyn_idx < dynamicOffsetCount);

			struct radv_descriptor_range *range = set->dynamic_descriptors + j;
			uint64_t va = range->va + pDynamicOffsets[dyn_idx];
			dst[0] = va;
			dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
			dst[2] = range->size;
			dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
				 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
				 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
				 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
				 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
			cmd_buffer->push_constant_stages |=
				set->layout->dynamic_shader_stages;
		}
	}

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
			   VkPipelineLayout layout,
			   VkShaderStageFlags stageFlags,
			   uint32_t offset,
			   uint32_t size,
			   const void* pValues)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	memcpy(cmd_buffer->push_constants + offset, pValues, size);
	cmd_buffer->push_constant_stages |= stageFlags;
}

VkResult radv_EndCommandBuffer(
	VkCommandBuffer                             commandBuffer)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	si_emit_cache_flush(cmd_buffer);
	if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs) ||
	    cmd_buffer->record_fail)
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	return VK_SUCCESS;
}

static void
radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *compute_shader;
	struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
	uint64_t va;

	if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
		return;

	cmd_buffer->state.emitted_compute_pipeline = pipeline;

	compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	va = ws->buffer_get_va(compute_shader->bo);

	ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, 16);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(cmd_buffer->cs, compute_shader->rsrc1);
	radeon_emit(cmd_buffer->cs, compute_shader->rsrc2);

	/* change these once we have scratch support */
	radeon_set_sh_reg(cmd_buffer->cs, R_00B860_COMPUTE_TMPRING_SIZE,
			  S_00B860_WAVES(32) | S_00B860_WAVESIZE(0));

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));

	assert(cmd_buffer->cs->cdw <= cdw_max);
}
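
/* Binding a pipeline re-dirties all bound descriptor sets, since the
 * user SGPR assignments may differ between pipelines. */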
void radv_CmdBindPipeline(
	VkCommandBuffer                             commandBuffer,
	VkPipelineBindPoint                         pipelineBindPoint,
	VkPipeline                                  _pipeline)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	for (unsigned i = 0; i < MAX_SETS; i++) {
		if (cmd_buffer->state.descriptors[i])
			cmd_buffer->state.descriptors_dirty |= (1 << i);
	}

	switch (pipelineBindPoint) {
	case VK_PIPELINE_BIND_POINT_COMPUTE:
		cmd_buffer->state.compute_pipeline = pipeline;
		cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
		break;
	case VK_PIPELINE_BIND_POINT_GRAPHICS:
		cmd_buffer->state.pipeline = pipeline;
		cmd_buffer->state.vertex_descriptors_dirty = true;
		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
		cmd_buffer->push_constant_stages |= pipeline->active_stages;

		/* Apply the dynamic state from the pipeline */
		cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
		radv_dynamic_state_copy(&cmd_buffer->state.dynamic,
					&pipeline->dynamic_state,
					pipeline->dynamic_state_mask);
		break;
	default:
		assert(!"invalid bind point");
		break;
	}
}
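/*
 * The vkCmdSet* entry points that follow never touch the command stream
 * directly: they record the new values into cmd_buffer->state.dynamic and
 * raise the matching RADV_CMD_DIRTY_DYNAMIC_* bit, and the context
 * registers are emitted later from radv_cmd_buffer_flush_state() when a
 * draw is recorded.
 */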
void radv_CmdSetViewport(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    firstViewport,
	uint32_t                                    viewportCount,
	const VkViewport*                           pViewports)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	const uint32_t total_count = firstViewport + viewportCount;
	if (cmd_buffer->state.dynamic.viewport.count < total_count)
		cmd_buffer->state.dynamic.viewport.count = total_count;

	memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
	       pViewports, viewportCount * sizeof(*pViewports));

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void radv_CmdSetScissor(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    firstScissor,
	uint32_t                                    scissorCount,
	const VkRect2D*                             pScissors)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	const uint32_t total_count = firstScissor + scissorCount;
	if (cmd_buffer->state.dynamic.scissor.count < total_count)
		cmd_buffer->state.dynamic.scissor.count = total_count;

	memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
	       pScissors, scissorCount * sizeof(*pScissors));
	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void radv_CmdSetLineWidth(
	VkCommandBuffer                             commandBuffer,
	float                                       lineWidth)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	cmd_buffer->state.dynamic.line_width = lineWidth;
	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void radv_CmdSetDepthBias(
	VkCommandBuffer                             commandBuffer,
	float                                       depthBiasConstantFactor,
	float                                       depthBiasClamp,
	float                                       depthBiasSlopeFactor)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
	cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
	cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void radv_CmdSetBlendConstants(
	VkCommandBuffer                             commandBuffer,
	const float                                 blendConstants[4])
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	memcpy(cmd_buffer->state.dynamic.blend_constants,
	       blendConstants, sizeof(float) * 4);

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void radv_CmdSetDepthBounds(
	VkCommandBuffer                             commandBuffer,
	float                                       minDepthBounds,
	float                                       maxDepthBounds)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
	cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void radv_CmdSetStencilCompareMask(
	VkCommandBuffer                             commandBuffer,
	VkStencilFaceFlags                          faceMask,
	uint32_t                                    compareMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
		cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
	if (faceMask & VK_STENCIL_FACE_BACK_BIT)
		cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void radv_CmdSetStencilWriteMask(
	VkCommandBuffer                             commandBuffer,
	VkStencilFaceFlags                          faceMask,
	uint32_t                                    writeMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
		cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
	if (faceMask & VK_STENCIL_FACE_BACK_BIT)
		cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void radv_CmdSetStencilReference(
	VkCommandBuffer                             commandBuffer,
	VkStencilFaceFlags                          faceMask,
	uint32_t                                    reference)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
		cmd_buffer->state.dynamic.stencil_reference.front = reference;
	if (faceMask & VK_STENCIL_FACE_BACK_BIT)
		cmd_buffer->state.dynamic.stencil_reference.back = reference;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}


void radv_CmdExecuteCommands(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    commandBufferCount,
	const VkCommandBuffer*                      pCmdBuffers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);

	for (uint32_t i = 0; i < commandBufferCount; i++) {
		RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);

		primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
	}

	/* if we execute secondary command buffers we need to re-emit our pipelines */
	if (commandBufferCount) {
		primary->state.emitted_pipeline = NULL;
		primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
		primary->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_ALL;
	}
}

VkResult radv_CreateCommandPool(
	VkDevice                                    _device,
	const VkCommandPoolCreateInfo*              pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkCommandPool*                              pCmdPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_cmd_pool *pool;

	pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pool == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	if (pAllocator)
		pool->alloc = *pAllocator;
	else
		pool->alloc = device->alloc;

	list_inithead(&pool->cmd_buffers);

	pool->queue_family_index = pCreateInfo->queueFamilyIndex;

	*pCmdPool = radv_cmd_pool_to_handle(pool);

	return VK_SUCCESS;
}

void radv_DestroyCommandPool(
	VkDevice                                    _device,
	VkCommandPool                               commandPool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);

	if (!pool)
		return;

	list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
				 &pool->cmd_buffers, pool_link) {
		radv_cmd_buffer_destroy(cmd_buffer);
	}

	vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_ResetCommandPool(
	VkDevice                                    device,
	VkCommandPool                               commandPool,
	VkCommandPoolResetFlags                     flags)
{
	RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);

	list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
			    &pool->cmd_buffers, pool_link) {
		radv_reset_cmd_buffer(cmd_buffer);
	}

	return VK_SUCCESS;
}
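/*
 * For reference, the application-side sequence the pool entry points above
 * serve is the standard Vulkan one (names below are illustrative):
 *
 *	VkCommandPoolCreateInfo info = {
 *		.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
 *		.queueFamilyIndex = family,
 *	};
 *	VkCommandPool pool;
 *	vkCreateCommandPool(device, &info, NULL, &pool);
 *	// ... allocate and record command buffers ...
 *	vkResetCommandPool(device, pool, 0);
 *	vkDestroyCommandPool(device, pool, NULL);
 *
 * vkResetCommandPool maps to a per-buffer radv_reset_cmd_buffer() loop,
 * and vkDestroyCommandPool to radv_cmd_buffer_destroy() on everything
 * still in the pool's list.
 */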
void radv_CmdBeginRenderPass(
	VkCommandBuffer                             commandBuffer,
	const VkRenderPassBeginInfo*                pRenderPassBegin,
	VkSubpassContents                           contents)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
	RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, 2048);

	cmd_buffer->state.framebuffer = framebuffer;
	cmd_buffer->state.pass = pass;
	cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
	radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);

	si_emit_cache_flush(cmd_buffer);

	radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
	assert(cmd_buffer->cs->cdw <= cdw_max);

	radv_cmd_buffer_clear_subpass(cmd_buffer);
}

void radv_CmdNextSubpass(
	VkCommandBuffer                             commandBuffer,
	VkSubpassContents                           contents)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	si_emit_cache_flush(cmd_buffer);
	radv_cmd_buffer_resolve_subpass(cmd_buffer);

	radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
			   2048);

	radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
	radv_cmd_buffer_clear_subpass(cmd_buffer);
}

void radv_CmdDraw(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    vertexCount,
	uint32_t                                    instanceCount,
	uint32_t                                    firstVertex,
	uint32_t                                    firstInstance)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	radv_cmd_buffer_flush_state(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
							     AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	if (loc->sgpr_idx != -1) {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4, 2);
		radeon_emit(cmd_buffer->cs, firstVertex);
		radeon_emit(cmd_buffer->cs, firstInstance);
	}
	radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
	radeon_emit(cmd_buffer->cs, instanceCount);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, 0));
	radeon_emit(cmd_buffer->cs, vertexCount);
	radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				    S_0287F0_USE_OPAQUE(0));

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

static void radv_emit_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t primitive_reset_index = cmd_buffer->state.index_type ? 0xffffffffu : 0xffffu;

	if (cmd_buffer->state.pipeline->graphics.prim_restart_enable &&
	    primitive_reset_index != cmd_buffer->state.last_primitive_reset_index) {
		cmd_buffer->state.last_primitive_reset_index = primitive_reset_index;
		radeon_set_context_reg(cmd_buffer->cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
				       primitive_reset_index);
	}
}
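/*
 * The primitive restart index is derived from the bound index type:
 * 0xffffffff for 32-bit indices, 0xffff for 16-bit ones. It lives in a
 * context register, so it is only re-emitted when the value actually
 * changes and the pipeline has primitive restart enabled.
 */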
void radv_CmdDrawIndexed(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    indexCount,
	uint32_t                                    instanceCount,
	uint32_t                                    firstIndex,
	int32_t                                     vertexOffset,
	uint32_t                                    firstInstance)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	int index_size = cmd_buffer->state.index_type ? 4 : 2;
	uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size;
	uint64_t index_va;

	radv_cmd_buffer_flush_state(cmd_buffer);
	radv_emit_primitive_reset_index(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 14);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
	radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
							     AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	if (loc->sgpr_idx != -1) {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4, 2);
		radeon_emit(cmd_buffer->cs, vertexOffset);
		radeon_emit(cmd_buffer->cs, firstInstance);
	}
	radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
	radeon_emit(cmd_buffer->cs, instanceCount);

	index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo);
	index_va += firstIndex * index_size + cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset;
	radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
	radeon_emit(cmd_buffer->cs, index_max_size);
	radeon_emit(cmd_buffer->cs, index_va);
	radeon_emit(cmd_buffer->cs, (index_va >> 32UL) & 0xFF);
	radeon_emit(cmd_buffer->cs, indexCount);
	radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}
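/*
 * DRAW_INDEX_2 takes the index buffer GPU address plus a MAX_SIZE field:
 * index_max_size is the number of indices that fit between the bound
 * offset and the end of the buffer, which presumably lets the hardware
 * bound its index fetches rather than fault on a draw that would read
 * past the buffer.
 */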
static void
radv_emit_indirect_draw(struct radv_cmd_buffer *cmd_buffer,
			VkBuffer _buffer,
			VkDeviceSize offset,
			VkBuffer _count_buffer,
			VkDeviceSize count_offset,
			uint32_t draw_count,
			uint32_t stride,
			bool indexed)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	RADV_FROM_HANDLE(radv_buffer, count_buffer, _count_buffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
				      : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
	uint64_t indirect_va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
	indirect_va += offset + buffer->offset;
	uint64_t count_va = 0;

	if (count_buffer) {
		count_va = cmd_buffer->device->ws->buffer_get_va(count_buffer->bo);
		count_va += count_offset + count_buffer->offset;
	}

	if (!draw_count)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cs, buffer->bo, 8);
	if (count_buffer) {
		/* the count buffer must be resident as well */
		cmd_buffer->device->ws->cs_add_buffer(cs, count_buffer->bo, 8);
	}

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
							     AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	assert(loc->sgpr_idx != -1);
	radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
	radeon_emit(cs, 1);
	radeon_emit(cs, indirect_va);
	radeon_emit(cs, indirect_va >> 32);

	radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
				       PKT3_DRAW_INDIRECT_MULTI,
			     8, false));
	radeon_emit(cs, 0);
	radeon_emit(cs, ((R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4) - SI_SH_REG_OFFSET) >> 2);
	radeon_emit(cs, ((R_00B130_SPI_SHADER_USER_DATA_VS_0 + (loc->sgpr_idx + 1) * 4) - SI_SH_REG_OFFSET) >> 2);
	radeon_emit(cs, S_2C3_COUNT_INDIRECT_ENABLE(!!count_va)); /* draw_index and count_indirect enable */
	radeon_emit(cs, draw_count); /* count */
	radeon_emit(cs, count_va); /* count_addr */
	radeon_emit(cs, count_va >> 32);
	radeon_emit(cs, stride); /* stride */
	radeon_emit(cs, di_src_sel);
}

static void
radv_cmd_draw_indirect_count(VkCommandBuffer                             commandBuffer,
			     VkBuffer                                    buffer,
			     VkDeviceSize                                offset,
			     VkBuffer                                    countBuffer,
			     VkDeviceSize                                countBufferOffset,
			     uint32_t                                    maxDrawCount,
			     uint32_t                                    stride)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	radv_cmd_buffer_flush_state(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, 14);

	radv_emit_indirect_draw(cmd_buffer, buffer, offset,
				countBuffer, countBufferOffset, maxDrawCount, stride, false);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

static void
radv_cmd_draw_indexed_indirect_count(
	VkCommandBuffer                             commandBuffer,
	VkBuffer                                    buffer,
	VkDeviceSize                                offset,
	VkBuffer                                    countBuffer,
	VkDeviceSize                                countBufferOffset,
	uint32_t                                    maxDrawCount,
	uint32_t                                    stride)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	int index_size = cmd_buffer->state.index_type ? 4 : 2;
	uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size;
	uint64_t index_va;
	radv_cmd_buffer_flush_state(cmd_buffer);
	radv_emit_primitive_reset_index(cmd_buffer);

	index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo);
	index_va += cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 21);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
	radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BASE, 1, 0));
	radeon_emit(cmd_buffer->cs, index_va);
	radeon_emit(cmd_buffer->cs, index_va >> 32);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
	radeon_emit(cmd_buffer->cs, index_max_size);

	radv_emit_indirect_draw(cmd_buffer, buffer, offset,
				countBuffer, countBufferOffset, maxDrawCount, stride, true);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdDrawIndirect(
	VkCommandBuffer                             commandBuffer,
	VkBuffer                                    buffer,
	VkDeviceSize                                offset,
	uint32_t                                    drawCount,
	uint32_t                                    stride)
{
	radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
				     VK_NULL_HANDLE, 0, drawCount, stride);
}

void radv_CmdDrawIndexedIndirect(
	VkCommandBuffer                             commandBuffer,
	VkBuffer                                    buffer,
	VkDeviceSize                                offset,
	uint32_t                                    drawCount,
	uint32_t                                    stride)
{
	radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
					     VK_NULL_HANDLE, 0, drawCount, stride);
}
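/*
 * All indirect paths funnel into radv_emit_indirect_draw(): SET_BASE
 * points the CP at the argument buffer, and a single
 * DRAW_(INDEX_)INDIRECT_MULTI packet then walks up to draw_count records
 * of the given stride, reloading the base-vertex/start-instance user
 * SGPRs per draw. When a count buffer is supplied (the
 * VK_AMD_draw_indirect_count path), COUNT_INDIRECT_ENABLE makes the CP
 * read the real draw count from count_va and treat draw_count only as an
 * upper bound.
 */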
void radv_CmdDrawIndirectCountAMD(
	VkCommandBuffer                             commandBuffer,
	VkBuffer                                    buffer,
	VkDeviceSize                                offset,
	VkBuffer                                    countBuffer,
	VkDeviceSize                                countBufferOffset,
	uint32_t                                    maxDrawCount,
	uint32_t                                    stride)
{
	radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
				     countBuffer, countBufferOffset,
				     maxDrawCount, stride);
}

void radv_CmdDrawIndexedIndirectCountAMD(
	VkCommandBuffer                             commandBuffer,
	VkBuffer                                    buffer,
	VkDeviceSize                                offset,
	VkBuffer                                    countBuffer,
	VkDeviceSize                                countBufferOffset,
	uint32_t                                    maxDrawCount,
	uint32_t                                    stride)
{
	radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
					     countBuffer, countBufferOffset,
					     maxDrawCount, stride);
}

static void
radv_flush_compute_state(struct radv_cmd_buffer *cmd_buffer)
{
	radv_emit_compute_pipeline(cmd_buffer);
	radv_flush_descriptors(cmd_buffer, cmd_buffer->state.compute_pipeline,
			       VK_SHADER_STAGE_COMPUTE_BIT);
	radv_flush_constants(cmd_buffer, cmd_buffer->state.compute_pipeline,
			     VK_SHADER_STAGE_COMPUTE_BIT);
	si_emit_cache_flush(cmd_buffer);
}

void radv_CmdDispatch(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    x,
	uint32_t                                    y,
	uint32_t                                    z)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	radv_flush_compute_state(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
							     MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
	if (loc->sgpr_idx != -1) {
		assert(!loc->indirect);
		assert(loc->num_sgprs == 3);
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, 3);
		radeon_emit(cmd_buffer->cs, x);
		radeon_emit(cmd_buffer->cs, y);
		radeon_emit(cmd_buffer->cs, z);
	}

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
				    PKT3_SHADER_TYPE_S(1));
	radeon_emit(cmd_buffer->cs, x);
	radeon_emit(cmd_buffer->cs, y);
	radeon_emit(cmd_buffer->cs, z);
	radeon_emit(cmd_buffer->cs, 1);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}
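/*
 * The grid size above is passed twice: once through the AC_UD_CS_GRID_SIZE
 * user SGPRs for shaders that read gl_NumWorkGroups, and once in the
 * DISPATCH_DIRECT packet itself; the trailing 1 is the dispatch initiator
 * dword with COMPUTE_SHADER_EN set.
 */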
void radv_CmdDispatchIndirect(
	VkCommandBuffer                             commandBuffer,
	VkBuffer                                    _buffer,
	VkDeviceSize                                offset)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
	va += buffer->offset + offset;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);

	radv_flush_compute_state(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 25);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
							     MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
	if (loc->sgpr_idx != -1) {
		for (unsigned i = 0; i < 3; ++i) {
			radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
						    COPY_DATA_DST_SEL(COPY_DATA_REG));
			radeon_emit(cmd_buffer->cs, (va + 4 * i));
			radeon_emit(cmd_buffer->cs, (va + 4 * i) >> 32);
			radeon_emit(cmd_buffer->cs, ((R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4) >> 2) + i);
			radeon_emit(cmd_buffer->cs, 0);
		}
	}

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_BASE, 2, 0) |
				    PKT3_SHADER_TYPE_S(1));
	radeon_emit(cmd_buffer->cs, 1);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
				    PKT3_SHADER_TYPE_S(1));
	radeon_emit(cmd_buffer->cs, 0);
	radeon_emit(cmd_buffer->cs, 1);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_unaligned_dispatch(
	struct radv_cmd_buffer                      *cmd_buffer,
	uint32_t                                    x,
	uint32_t                                    y,
	uint32_t                                    z)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
	struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	uint32_t blocks[3], remainder[3];

	blocks[0] = round_up_u32(x, compute_shader->info.cs.block_size[0]);
	blocks[1] = round_up_u32(y, compute_shader->info.cs.block_size[1]);
	blocks[2] = round_up_u32(z, compute_shader->info.cs.block_size[2]);

	/* If aligned, these should be an entire block size, not 0 */
	remainder[0] = x + compute_shader->info.cs.block_size[0] - align_u32_npot(x, compute_shader->info.cs.block_size[0]);
	remainder[1] = y + compute_shader->info.cs.block_size[1] - align_u32_npot(y, compute_shader->info.cs.block_size[1]);
	remainder[2] = z + compute_shader->info.cs.block_size[2] - align_u32_npot(z, compute_shader->info.cs.block_size[2]);

	radv_flush_compute_state(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]) |
		    S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]) |
		    S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]) |
		    S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
							     MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
	if (loc->sgpr_idx != -1) {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, 3);
		radeon_emit(cmd_buffer->cs, blocks[0]);
		radeon_emit(cmd_buffer->cs, blocks[1]);
		radeon_emit(cmd_buffer->cs, blocks[2]);
	}
	radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
				    PKT3_SHADER_TYPE_S(1));
	radeon_emit(cmd_buffer->cs, blocks[0]);
	radeon_emit(cmd_buffer->cs, blocks[1]);
	radeon_emit(cmd_buffer->cs, blocks[2]);
	radeon_emit(cmd_buffer->cs, S_00B800_COMPUTE_SHADER_EN(1) |
				    S_00B800_PARTIAL_TG_EN(1));

	assert(cmd_buffer->cs->cdw <= cdw_max);
}
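/*
 * radv_unaligned_dispatch() handles grids that are not a multiple of the
 * workgroup size: NUM_THREAD_PARTIAL programs the width of the last,
 * partial group in each dimension and PARTIAL_TG_EN tells the dispatcher
 * to use it. For example, x = 100 with a block size of 64 launches two
 * groups, the second one only 36 threads wide.
 */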
	cmd_buffer->state.attachments = NULL;
	cmd_buffer->state.framebuffer = NULL;
}


static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image)
{
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
					RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

	radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->htile.offset,
			 image->htile.size, 0xffffffff);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
					RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_INV_GLOBAL_L2;
}

static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
					       struct radv_image *image,
					       VkImageLayout src_layout,
					       VkImageLayout dst_layout,
					       VkImageSubresourceRange range,
					       VkImageAspectFlags pending_clears)
{
	if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
	    (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
	    cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
	    cmd_buffer->state.render_area.extent.width == image->extent.width &&
	    cmd_buffer->state.render_area.extent.height == image->extent.height) {
		/* The clear will initialize htile. */
		return;
	} else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
		   radv_layout_has_htile(image, dst_layout)) {
		/* TODO: merge with the clear if applicable */
		radv_initialize_htile(cmd_buffer, image);
	} else if (!radv_layout_has_htile(image, src_layout) &&
		   radv_layout_has_htile(image, dst_layout)) {
		radv_initialize_htile(cmd_buffer, image);
	} else if ((radv_layout_has_htile(image, src_layout) &&
		    !radv_layout_has_htile(image, dst_layout)) ||
		   (radv_layout_is_htile_compressed(image, src_layout) &&
		    !radv_layout_is_htile_compressed(image, dst_layout))) {

		range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
		range.baseMipLevel = 0;
		range.levelCount = 1;

		radv_decompress_depth_image_inplace(cmd_buffer, image, &range);
	}
}

void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image, uint32_t value)
{
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->cmask.offset,
			 image->cmask.size, value);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
					RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_INV_GLOBAL_L2;
}

static void radv_handle_cmask_image_transition(struct radv_cmd_buffer *cmd_buffer,
					       struct radv_image *image,
					       VkImageLayout src_layout,
					       VkImageLayout dst_layout,
					       VkImageSubresourceRange range,
					       VkImageAspectFlags pending_clears)
{
	if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
		if (image->fmask.size)
			radv_initialise_cmask(cmd_buffer, image, 0xccccccccu);
		else
			radv_initialise_cmask(cmd_buffer, image, 0xffffffffu);
	} else if (radv_layout_has_cmask(image, src_layout) &&
		   !radv_layout_has_cmask(image, dst_layout)) {
		radv_fast_clear_flush_image_inplace(cmd_buffer, image);
	}
}
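/*
 * The fill values above are the "nothing compressed" encodings: HTILE is
 * initialized to 0xffffffff (fully expanded depth), and CMASK to
 * 0xcccccccc when the image also has an FMASK surface or 0xffffffff when
 * it does not, so a subsequent read sees no stale fast-clear or
 * compression state.
 */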
void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
			 struct radv_image *image, uint32_t value)
{
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->dcc_offset,
			 image->surface.dcc_size, value);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
					RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_INV_GLOBAL_L2;
}

static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer,
					     struct radv_image *image,
					     VkImageLayout src_layout,
					     VkImageLayout dst_layout,
					     VkImageSubresourceRange range,
					     VkImageAspectFlags pending_clears)
{
	if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
		radv_initialize_dcc(cmd_buffer, image, 0x20202020u);
	} else if (src_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL &&
		   dst_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
		radv_fast_clear_flush_image_inplace(cmd_buffer, image);
	}
}

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 VkImageSubresourceRange range,
					 VkImageAspectFlags pending_clears)
{
	if (image->htile.size)
		radv_handle_depth_image_transition(cmd_buffer, image, src_layout,
						   dst_layout, range, pending_clears);

	if (image->cmask.size)
		radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
						   dst_layout, range, pending_clears);

	if (image->surface.dcc_size)
		radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
						 dst_layout, range, pending_clears);
}
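/*
 * radv_handle_image_transition() is the single entry point for layout
 * changes coming from barriers, events and render-pass boundaries: it
 * forwards to the HTILE, CMASK and DCC handlers above for whichever
 * metadata surfaces the image actually has, and each handler decides
 * between initialization (from UNDEFINED) and an in-place
 * decompress/flush.
 */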
void radv_CmdPipelineBarrier(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlags                        srcStageMask,
	VkPipelineStageFlags                        destStageMask,
	VkBool32                                    byRegion,
	uint32_t                                    memoryBarrierCount,
	const VkMemoryBarrier*                      pMemoryBarriers,
	uint32_t                                    bufferMemoryBarrierCount,
	const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
	uint32_t                                    imageMemoryBarrierCount,
	const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	VkAccessFlags src_flags = 0;
	VkAccessFlags dst_flags = 0;
	uint32_t b;

	for (uint32_t i = 0; i < memoryBarrierCount; i++) {
		src_flags |= pMemoryBarriers[i].srcAccessMask;
		dst_flags |= pMemoryBarriers[i].dstAccessMask;
	}

	for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
		src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
		dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
	}

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		src_flags |= pImageMemoryBarriers[i].srcAccessMask;
		dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
	}

	enum radv_cmd_flush_bits flush_bits = 0;
	for_each_bit(b, src_flags) {
		switch ((VkAccessFlagBits)(1 << b)) {
		case VK_ACCESS_SHADER_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
			break;
		case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
			break;
		case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
			break;
		case VK_ACCESS_TRANSFER_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
			break;
		default:
			break;
		}
	}
	cmd_buffer->state.flush_bits |= flush_bits;

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
		radv_handle_image_transition(cmd_buffer, image,
					     pImageMemoryBarriers[i].oldLayout,
					     pImageMemoryBarriers[i].newLayout,
					     pImageMemoryBarriers[i].subresourceRange,
					     0);
	}

	flush_bits = 0;

	for_each_bit(b, dst_flags) {
		switch ((VkAccessFlagBits)(1 << b)) {
		case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
		case VK_ACCESS_INDEX_READ_BIT:
		case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
		case VK_ACCESS_UNIFORM_READ_BIT:
			flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1;
			break;
		case VK_ACCESS_SHADER_READ_BIT:
			flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
			break;
		case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
		case VK_ACCESS_TRANSFER_READ_BIT:
		case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
			flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER | RADV_CMD_FLAG_INV_GLOBAL_L2;
			break;
		default:
			break;
		}
	}

	flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
		      RADV_CMD_FLAG_PS_PARTIAL_FLUSH;

	cmd_buffer->state.flush_bits |= flush_bits;
}


static void write_event(struct radv_cmd_buffer *cmd_buffer,
			struct radv_event *event,
			VkPipelineStageFlags stageMask,
			unsigned value)
{
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);

	cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);

	/* TODO: this is overkill. Probably should figure something out from
	 * the stage mask. */

	if (cmd_buffer->device->instance->physicalDevice.rad_info.chip_class == CIK) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
				EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
		radeon_emit(cs, 2);
		radeon_emit(cs, 0);
	}

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
			EVENT_INDEX(5));
	radeon_emit(cs, va);
	radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
	radeon_emit(cs, value);
	radeon_emit(cs, 0);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
		      VkEvent _event,
		      VkPipelineStageFlags stageMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_event, event, _event);

	write_event(cmd_buffer, event, stageMask, 1);
}

void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
			VkEvent _event,
			VkPipelineStageFlags stageMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_event, event, _event);

	write_event(cmd_buffer, event, stageMask, 0);
}
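/*
 * write_event() uses a bottom-of-pipe EVENT_WRITE_EOP timestamp packet to
 * store the 32-bit payload (1 for set, 0 for reset) to the event BO once
 * prior work has drained; the stage mask is currently ignored, per the
 * TODO above. On CIK the packet is emitted twice, with a dummy payload of
 * 2 first, which looks like a workaround for unreliable EOP writes on
 * that generation; only the second write carries the real value.
 */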
void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
			uint32_t eventCount,
			const VkEvent* pEvents,
			VkPipelineStageFlags srcStageMask,
			VkPipelineStageFlags dstStageMask,
			uint32_t memoryBarrierCount,
			const VkMemoryBarrier* pMemoryBarriers,
			uint32_t bufferMemoryBarrierCount,
			const VkBufferMemoryBarrier* pBufferMemoryBarriers,
			uint32_t imageMemoryBarrierCount,
			const VkImageMemoryBarrier* pImageMemoryBarriers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;

	for (unsigned i = 0; i < eventCount; ++i) {
		RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
		uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);

		cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);

		MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);

		radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
		radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		radeon_emit(cs, 1); /* reference value */
		radeon_emit(cs, 0xffffffff); /* mask */
		radeon_emit(cs, 4); /* poll interval */

		assert(cmd_buffer->cs->cdw <= cdw_max);
	}


	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);

		radv_handle_image_transition(cmd_buffer, image,
					     pImageMemoryBarriers[i].oldLayout,
					     pImageMemoryBarriers[i].newLayout,
					     pImageMemoryBarriers[i].subresourceRange,
					     0);
	}

	/* TODO: figure out how to do memory barriers without waiting */
	cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
					RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_INV_SMEM_L1;
}
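/*
 * The wait itself is a CP WAIT_REG_MEM per event: poll the dword at va
 * until (value & 0xffffffff) == 1, i.e. until the matching
 * vkCmdSetEvent() write from write_event() lands (the poll interval of 4
 * is in hardware-defined units). Until a finer-grained scheme exists, the
 * trailing flush_bits turn the accompanying barrier into a full
 * flush/invalidate, as the TODO notes.
 */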