radv_cmd_buffer.c revision 8406f79d6a3511dc6bf4d9c2ab3a80828c363d3a
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_radeon_winsys.h"
#include "radv_cs.h"
#include "sid.h"
#include "vk_format.h"
#include "radv_meta.h"

#include "ac_debug.h"

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                         struct radv_image *image,
                                         VkImageLayout src_layout,
                                         VkImageLayout dst_layout,
                                         int src_family,
                                         int dst_family,
                                         VkImageSubresourceRange range,
                                         VkImageAspectFlags pending_clears);

const struct radv_dynamic_state default_dynamic_state = {
    .viewport = {
        .count = 0,
    },
    .scissor = {
        .count = 0,
    },
    .line_width = 1.0f,
    .depth_bias = {
        .bias = 0.0f,
        .clamp = 0.0f,
        .slope = 0.0f,
    },
    .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
    .depth_bounds = {
        .min = 0.0f,
        .max = 1.0f,
    },
    .stencil_compare_mask = {
        .front = ~0u,
        .back = ~0u,
    },
    .stencil_write_mask = {
        .front = ~0u,
        .back = ~0u,
    },
    .stencil_reference = {
        .front = 0u,
        .back = 0u,
    },
};

void
radv_dynamic_state_copy(struct radv_dynamic_state *dest,
                        const struct radv_dynamic_state *src,
                        uint32_t copy_mask)
{
    if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
        dest->viewport.count = src->viewport.count;
        typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                     src->viewport.count);
    }

    if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
        dest->scissor.count = src->scissor.count;
        typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                     src->scissor.count);
    }

    if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
        dest->line_width = src->line_width;

    if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
        dest->depth_bias = src->depth_bias;

    if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
        typed_memcpy(dest->blend_constants, src->blend_constants, 4);

    if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
        dest->depth_bounds = src->depth_bounds;

    if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
        dest->stencil_compare_mask = src->stencil_compare_mask;

    if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
        dest->stencil_write_mask = src->stencil_write_mask;

    if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
        dest->stencil_reference = src->stencil_reference;
}

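/* Editor's note: compute queues on CIK and newer are executed by the MEC
 * (micro engine compute) rather than the GFX microengine, which changes
 * which packet encodings may be used.
 */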
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
    return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
           cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
}

enum ring_type radv_queue_family_to_ring(int f) {
    switch (f) {
    case RADV_QUEUE_GENERAL:
        return RING_GFX;
    case RADV_QUEUE_COMPUTE:
        return RING_COMPUTE;
    case RADV_QUEUE_TRANSFER:
        return RING_DMA;
    default:
        unreachable("Unknown queue family");
    }
}

static VkResult radv_create_cmd_buffer(
    struct radv_device *device,
    struct radv_cmd_pool *pool,
    VkCommandBufferLevel level,
    VkCommandBuffer *pCommandBuffer)
{
    struct radv_cmd_buffer *cmd_buffer;
    VkResult result;
    unsigned ring;
    cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (cmd_buffer == NULL)
        return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

    memset(cmd_buffer, 0, sizeof(*cmd_buffer));
    cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
    cmd_buffer->device = device;
    cmd_buffer->pool = pool;
    cmd_buffer->level = level;

    if (pool) {
        list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
        cmd_buffer->queue_family_index = pool->queue_family_index;
    } else {
        /* Init the pool_link so we can safely call list_del when we destroy
         * the command buffer
         */
        list_inithead(&cmd_buffer->pool_link);
        cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
    }

    ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);

    cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
    if (!cmd_buffer->cs) {
        result = VK_ERROR_OUT_OF_HOST_MEMORY;
        goto fail;
    }

    *pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);

    cmd_buffer->upload.offset = 0;
    cmd_buffer->upload.size = 0;
    list_inithead(&cmd_buffer->upload.list);

    return VK_SUCCESS;

fail:
    vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

    return result;
}

static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
                                  uint64_t min_needed)
{
    uint64_t new_size;
    struct radeon_winsys_bo *bo;
    struct radv_cmd_buffer_upload *upload;
    struct radv_device *device = cmd_buffer->device;

    new_size = MAX2(min_needed, 16 * 1024);
    new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

    bo = device->ws->buffer_create(device->ws,
                                   new_size, 4096,
                                   RADEON_DOMAIN_GTT,
                                   RADEON_FLAG_CPU_ACCESS);

    if (!bo) {
        cmd_buffer->record_fail = true;
        return false;
    }

    device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8);
    if (cmd_buffer->upload.upload_bo) {
        upload = malloc(sizeof(*upload));

        if (!upload) {
            cmd_buffer->record_fail = true;
            device->ws->buffer_destroy(bo);
            return false;
        }

        memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
        list_add(&upload->list, &cmd_buffer->upload.list);
    }

    cmd_buffer->upload.upload_bo = bo;
    cmd_buffer->upload.size = new_size;
    cmd_buffer->upload.offset = 0;
    cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);

    if (!cmd_buffer->upload.map) {
        cmd_buffer->record_fail = true;
        return false;
    }

    return true;
}

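/* Editor's note: the per-command-buffer upload area is a bump allocator.
 * Allocations are handed out at increasing, aligned offsets within
 * upload_bo; when it fills up, a new BO of at least twice the size
 * replaces it, and the old one is parked on upload.list until the command
 * buffer is reset or destroyed.
 */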
bool
radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
                             unsigned size,
                             unsigned alignment,
                             unsigned *out_offset,
                             void **ptr)
{
    uint64_t offset = align(cmd_buffer->upload.offset, alignment);
    if (offset + size > cmd_buffer->upload.size) {
        if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
            return false;
        offset = 0;
    }

    *out_offset = offset;
    *ptr = cmd_buffer->upload.map + offset;

    cmd_buffer->upload.offset = offset + size;
    return true;
}

bool
radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
                            unsigned size, unsigned alignment,
                            const void *data, unsigned *out_offset)
{
    uint8_t *ptr;

    if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
                                      out_offset, (void **)&ptr))
        return false;

    if (ptr)
        memcpy(ptr, data, size);

    return true;
}

void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
    struct radv_device *device = cmd_buffer->device;
    struct radeon_winsys_cs *cs = cmd_buffer->cs;
    uint64_t va;

    if (!device->trace_bo)
        return;

    va = device->ws->buffer_get_va(device->trace_bo);

    MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);

    ++cmd_buffer->state.trace_id;
    device->ws->cs_add_buffer(cs, device->trace_bo, 8);
    radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
    radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
                    S_370_WR_CONFIRM(1) |
                    S_370_ENGINE_SEL(V_370_ME));
    radeon_emit(cs, va);
    radeon_emit(cs, va >> 32);
    radeon_emit(cs, cmd_buffer->state.trace_id);
    radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
    radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
}

static void
radv_emit_graphics_blend_state(struct radv_cmd_buffer *cmd_buffer,
                               struct radv_pipeline *pipeline)
{
    radeon_set_context_reg_seq(cmd_buffer->cs, R_028780_CB_BLEND0_CONTROL, 8);
    radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.cb_blend_control,
                      8);
    radeon_set_context_reg(cmd_buffer->cs, R_028808_CB_COLOR_CONTROL, pipeline->graphics.blend.cb_color_control);
    radeon_set_context_reg(cmd_buffer->cs, R_028B70_DB_ALPHA_TO_MASK, pipeline->graphics.blend.db_alpha_to_mask);
}

static void
radv_emit_graphics_depth_stencil_state(struct radv_cmd_buffer *cmd_buffer,
                                       struct radv_pipeline *pipeline)
{
    struct radv_depth_stencil_state *ds = &pipeline->graphics.ds;
    radeon_set_context_reg(cmd_buffer->cs, R_028800_DB_DEPTH_CONTROL, ds->db_depth_control);
    radeon_set_context_reg(cmd_buffer->cs, R_02842C_DB_STENCIL_CONTROL, ds->db_stencil_control);

    radeon_set_context_reg(cmd_buffer->cs, R_028000_DB_RENDER_CONTROL, ds->db_render_control);
    radeon_set_context_reg(cmd_buffer->cs, R_028010_DB_RENDER_OVERRIDE2, ds->db_render_override2);
}

/* 12.4 fixed-point: e.g. 1.0f packs to 16 (0x010) and anything >= 4096
 * clamps to the maximum encodable value 0xffff. */
static unsigned radv_pack_float_12p4(float x)
{
    return x <= 0    ? 0 :
           x >= 4096 ? 0xffff : x * 16;
}

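/* Editor's note: this returns the base of a stage's USER_DATA register
 * range; the compiler-assigned user SGPR indices are dword offsets from
 * this base (see radv_emit_userdata_address below).
 */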
static uint32_t
shader_stage_to_user_data_0(gl_shader_stage stage)
{
    switch (stage) {
    case MESA_SHADER_FRAGMENT:
        return R_00B030_SPI_SHADER_USER_DATA_PS_0;
    case MESA_SHADER_VERTEX:
        return R_00B130_SPI_SHADER_USER_DATA_VS_0;
    case MESA_SHADER_COMPUTE:
        return R_00B900_COMPUTE_USER_DATA_0;
    default:
        unreachable("unknown shader");
    }
}

static struct ac_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
                      gl_shader_stage stage,
                      int idx)
{
    return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
}

static void
radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
                           struct radv_pipeline *pipeline,
                           gl_shader_stage stage,
                           int idx, uint64_t va)
{
    struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
    uint32_t base_reg = shader_stage_to_user_data_0(stage);
    if (loc->sgpr_idx == -1)
        return;
    assert(loc->num_sgprs == 2);
    assert(!loc->indirect);
    radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2);
    radeon_emit(cmd_buffer->cs, va);
    radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
                              struct radv_pipeline *pipeline)
{
    int num_samples = pipeline->graphics.ms.num_samples;
    struct radv_multisample_state *ms = &pipeline->graphics.ms;
    struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;

    radeon_set_context_reg_seq(cmd_buffer->cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
    radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[0]);
    radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[1]);

    radeon_set_context_reg(cmd_buffer->cs, CM_R_028804_DB_EQAA, ms->db_eqaa);
    radeon_set_context_reg(cmd_buffer->cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);

    if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
        return;

    radeon_set_context_reg_seq(cmd_buffer->cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2);
    radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
    radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);

    radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);

    uint32_t samples_offset;
    void *samples_ptr;
    void *src;
    radv_cmd_buffer_upload_alloc(cmd_buffer, num_samples * 4 * 2, 256, &samples_offset,
                                 &samples_ptr);
    switch (num_samples) {
    case 1:
        src = cmd_buffer->device->sample_locations_1x;
        break;
    case 2:
        src = cmd_buffer->device->sample_locations_2x;
        break;
    case 4:
        src = cmd_buffer->device->sample_locations_4x;
        break;
    case 8:
        src = cmd_buffer->device->sample_locations_8x;
        break;
    case 16:
        src = cmd_buffer->device->sample_locations_16x;
        break;
    }
    memcpy(samples_ptr, src, num_samples * 4 * 2);

    uint64_t va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
    va += samples_offset;

    radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_FRAGMENT,
                               AC_UD_PS_SAMPLE_POS, va);
}

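/* Editor's note: emits clip/interp control plus a fixed point-size range
 * of [0, 4096], encoded in 12.4 fixed point via radv_pack_float_12p4().
 */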
static void
radv_emit_graphics_raster_state(struct radv_cmd_buffer *cmd_buffer,
                                struct radv_pipeline *pipeline)
{
    struct radv_raster_state *raster = &pipeline->graphics.raster;

    radeon_set_context_reg(cmd_buffer->cs, R_028810_PA_CL_CLIP_CNTL,
                           raster->pa_cl_clip_cntl);

    radeon_set_context_reg(cmd_buffer->cs, R_0286D4_SPI_INTERP_CONTROL_0,
                           raster->spi_interp_control);

    radeon_set_context_reg_seq(cmd_buffer->cs, R_028A00_PA_SU_POINT_SIZE, 2);
    radeon_emit(cmd_buffer->cs, 0);
    radeon_emit(cmd_buffer->cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
                                S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2))); /* R_028A04_PA_SU_POINT_MINMAX */

    radeon_set_context_reg(cmd_buffer->cs, R_028BE4_PA_SU_VTX_CNTL,
                           raster->pa_su_vtx_cntl);

    radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL,
                           raster->pa_su_sc_mode_cntl);
}

static void
radv_emit_vertex_shader(struct radv_cmd_buffer *cmd_buffer,
                        struct radv_pipeline *pipeline)
{
    struct radeon_winsys *ws = cmd_buffer->device->ws;
    struct radv_shader_variant *vs;
    uint64_t va;
    unsigned export_count;
    unsigned clip_dist_mask, cull_dist_mask, total_mask;

    assert(pipeline->shaders[MESA_SHADER_VERTEX]);

    vs = pipeline->shaders[MESA_SHADER_VERTEX];
    va = ws->buffer_get_va(vs->bo);
    ws->cs_add_buffer(cmd_buffer->cs, vs->bo, 8);

    clip_dist_mask = vs->info.vs.clip_dist_mask;
    cull_dist_mask = vs->info.vs.cull_dist_mask;
    total_mask = clip_dist_mask | cull_dist_mask;
    radeon_set_context_reg(cmd_buffer->cs, R_028A40_VGT_GS_MODE, 0);
    radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, 0);

    export_count = MAX2(1, vs->info.vs.param_exports);
    radeon_set_context_reg(cmd_buffer->cs, R_0286C4_SPI_VS_OUT_CONFIG,
                           S_0286C4_VS_EXPORT_COUNT(export_count - 1));
    radeon_set_context_reg(cmd_buffer->cs, R_02870C_SPI_SHADER_POS_FORMAT,
                           S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
                           S_02870C_POS1_EXPORT_FORMAT(vs->info.vs.pos_exports > 1 ?
                                                       V_02870C_SPI_SHADER_4COMP :
                                                       V_02870C_SPI_SHADER_NONE) |
                           S_02870C_POS2_EXPORT_FORMAT(vs->info.vs.pos_exports > 2 ?
                                                       V_02870C_SPI_SHADER_4COMP :
                                                       V_02870C_SPI_SHADER_NONE) |
                           S_02870C_POS3_EXPORT_FORMAT(vs->info.vs.pos_exports > 3 ?
                                                       V_02870C_SPI_SHADER_4COMP :
                                                       V_02870C_SPI_SHADER_NONE));

    radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
    radeon_emit(cmd_buffer->cs, va >> 8);
    radeon_emit(cmd_buffer->cs, va >> 40);
    radeon_emit(cmd_buffer->cs, vs->rsrc1);
    radeon_emit(cmd_buffer->cs, vs->rsrc2);

    radeon_set_context_reg(cmd_buffer->cs, R_028818_PA_CL_VTE_CNTL,
                           S_028818_VTX_W0_FMT(1) |
                           S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
                           S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
                           S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

    radeon_set_context_reg(cmd_buffer->cs, R_02881C_PA_CL_VS_OUT_CNTL,
                           S_02881C_USE_VTX_POINT_SIZE(vs->info.vs.writes_pointsize) |
                           S_02881C_VS_OUT_MISC_VEC_ENA(vs->info.vs.writes_pointsize) |
                           S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
                           S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
                           pipeline->graphics.raster.pa_cl_vs_out_cntl |
                           cull_dist_mask << 8 |
                           clip_dist_mask);
}

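/* Editor's note: besides the PS program registers this derives the PS
 * input mapping (SPI_PS_INPUT_CNTL_*): each PS input points at the
 * matching VS export slot, while inputs the VS does not export are given
 * offset 0x20, which appears to select the hardware default value.
 */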
static void
radv_emit_fragment_shader(struct radv_cmd_buffer *cmd_buffer,
                          struct radv_pipeline *pipeline)
{
    struct radeon_winsys *ws = cmd_buffer->device->ws;
    struct radv_shader_variant *ps, *vs;
    uint64_t va;
    unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
    struct radv_blend_state *blend = &pipeline->graphics.blend;
    unsigned ps_offset = 0;
    unsigned z_order;
    assert(pipeline->shaders[MESA_SHADER_FRAGMENT]);

    ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
    vs = pipeline->shaders[MESA_SHADER_VERTEX];
    va = ws->buffer_get_va(ps->bo);
    ws->cs_add_buffer(cmd_buffer->cs, ps->bo, 8);

    radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
    radeon_emit(cmd_buffer->cs, va >> 8);
    radeon_emit(cmd_buffer->cs, va >> 40);
    radeon_emit(cmd_buffer->cs, ps->rsrc1);
    radeon_emit(cmd_buffer->cs, ps->rsrc2);

    if (ps->info.fs.early_fragment_test || !ps->info.fs.writes_memory)
        z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
    else
        z_order = V_02880C_LATE_Z;

    radeon_set_context_reg(cmd_buffer->cs, R_02880C_DB_SHADER_CONTROL,
                           S_02880C_Z_EXPORT_ENABLE(ps->info.fs.writes_z) |
                           S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.fs.writes_stencil) |
                           S_02880C_KILL_ENABLE(!!ps->info.fs.can_discard) |
                           S_02880C_Z_ORDER(z_order) |
                           S_02880C_DEPTH_BEFORE_SHADER(ps->info.fs.early_fragment_test) |
                           S_02880C_EXEC_ON_HIER_FAIL(ps->info.fs.writes_memory) |
                           S_02880C_EXEC_ON_NOOP(ps->info.fs.writes_memory));

    radeon_set_context_reg(cmd_buffer->cs, R_0286CC_SPI_PS_INPUT_ENA,
                           ps->config.spi_ps_input_ena);

    radeon_set_context_reg(cmd_buffer->cs, R_0286D0_SPI_PS_INPUT_ADDR,
                           ps->config.spi_ps_input_addr);

    spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(0);
    radeon_set_context_reg(cmd_buffer->cs, R_0286D8_SPI_PS_IN_CONTROL,
                           S_0286D8_NUM_INTERP(ps->info.fs.num_interp));

    radeon_set_context_reg(cmd_buffer->cs, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);

    radeon_set_context_reg(cmd_buffer->cs, R_028710_SPI_SHADER_Z_FORMAT,
                           ps->info.fs.writes_stencil ? V_028710_SPI_SHADER_32_GR :
                           ps->info.fs.writes_z ? V_028710_SPI_SHADER_32_R :
                           V_028710_SPI_SHADER_ZERO);

    radeon_set_context_reg(cmd_buffer->cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

    radeon_set_context_reg(cmd_buffer->cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
    radeon_set_context_reg(cmd_buffer->cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

    if (ps->info.fs.has_pcoord) {
        unsigned val;
        val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
        radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val);
        ps_offset = 1;
    }

    for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.fs.input_mask; ++i) {
        unsigned vs_offset, flat_shade;
        unsigned val;

        if (!(ps->info.fs.input_mask & (1u << i)))
            continue;

        if (!(vs->info.vs.export_mask & (1u << i))) {
            radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset,
                                   S_028644_OFFSET(0x20));
            ++ps_offset;
            continue;
        }

        vs_offset = util_bitcount(vs->info.vs.export_mask & ((1u << i) - 1));
        flat_shade = !!(ps->info.fs.flat_shaded_mask & (1u << ps_offset));

        val = S_028644_OFFSET(vs_offset) | S_028644_FLAT_SHADE(flat_shade);
        radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val);
        ++ps_offset;
    }
}

static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer,
                            struct radv_pipeline *pipeline)
{
    if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
        return;

    radv_emit_graphics_depth_stencil_state(cmd_buffer, pipeline);
    radv_emit_graphics_blend_state(cmd_buffer, pipeline);
    radv_emit_graphics_raster_state(cmd_buffer, pipeline);
    radv_update_multisample_state(cmd_buffer, pipeline);
    radv_emit_vertex_shader(cmd_buffer, pipeline);
    radv_emit_fragment_shader(cmd_buffer, pipeline);

    radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
                           pipeline->graphics.prim_restart_enable);

    cmd_buffer->state.emitted_pipeline = pipeline;
}

static void
radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
{
    si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
                      cmd_buffer->state.dynamic.viewport.viewports);
}

static void
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
    uint32_t count = cmd_buffer->state.dynamic.scissor.count;
    si_write_scissors(cmd_buffer->cs, 0, count,
                      cmd_buffer->state.dynamic.scissor.scissors);
    radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0,
                           cmd_buffer->state.pipeline->graphics.ms.pa_sc_mode_cntl_0 |
                           S_028A48_VPORT_SCISSOR_ENABLE(count ? 1 : 0));
}

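/* Editor's note: the CB_COLOR* registers are laid out as one contiguous
 * block per color attachment, 0x3c bytes apart; the DCC base register
 * only exists on VI and newer.
 */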
static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
                         int index,
                         struct radv_color_buffer_info *cb)
{
    bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
    radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
    radeon_emit(cmd_buffer->cs, cb->cb_color_base);
    radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
    radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
    radeon_emit(cmd_buffer->cs, cb->cb_color_view);
    radeon_emit(cmd_buffer->cs, cb->cb_color_info);
    radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
    radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
    radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
    radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
    radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
    radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);

    if (is_vi) { /* DCC BASE */
        radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
    }
}

static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
                      struct radv_ds_buffer_info *ds,
                      struct radv_image *image,
                      VkImageLayout layout)
{
    uint32_t db_z_info = ds->db_z_info;

    if (!radv_layout_has_htile(image, layout))
        db_z_info &= C_028040_TILE_SURFACE_ENABLE;

    if (!radv_layout_can_expclear(image, layout))
        db_z_info &= C_028040_ALLOW_EXPCLEAR & C_028044_ALLOW_EXPCLEAR;

    radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
    radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);

    radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
    radeon_emit(cmd_buffer->cs, ds->db_depth_info);         /* R_02803C_DB_DEPTH_INFO */
    radeon_emit(cmd_buffer->cs, db_z_info);                 /* R_028040_DB_Z_INFO */
    radeon_emit(cmd_buffer->cs, ds->db_stencil_info);       /* R_028044_DB_STENCIL_INFO */
    radeon_emit(cmd_buffer->cs, ds->db_z_read_base);        /* R_028048_DB_Z_READ_BASE */
    radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);  /* R_02804C_DB_STENCIL_READ_BASE */
    radeon_emit(cmd_buffer->cs, ds->db_z_write_base);       /* R_028050_DB_Z_WRITE_BASE */
    radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* R_028054_DB_STENCIL_WRITE_BASE */
    radeon_emit(cmd_buffer->cs, ds->db_depth_size);         /* R_028058_DB_DEPTH_SIZE */
    radeon_emit(cmd_buffer->cs, ds->db_depth_slice);        /* R_02805C_DB_DEPTH_SLICE */

    radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
    radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
                           ds->pa_su_poly_offset_db_fmt_cntl);
}

/*
 * To hw resolve multisample images both src and dst need to have the same
 * micro tiling mode. However we don't always know in advance when creating
 * the images. This function gets called if we have a resolve attachment,
 * and tests if the attachment image has the same tiling mode, then it
 * checks if the generated framebuffer data has the same tiling mode, and
 * updates it if not.
 */
static void radv_set_optimal_micro_tile_mode(struct radv_device *device,
                                             struct radv_attachment_info *att,
                                             uint32_t micro_tile_mode)
{
    struct radv_image *image = att->attachment->image;
    uint32_t tile_mode_index;
    if (image->surface.nsamples <= 1)
        return;

    if (image->surface.micro_tile_mode != micro_tile_mode) {
        radv_image_set_optimal_micro_tile_mode(device, image, micro_tile_mode);
    }

    if (att->cb.micro_tile_mode != micro_tile_mode) {
        tile_mode_index = image->surface.tiling_index[0];

        att->cb.cb_color_attrib &= C_028C74_TILE_MODE_INDEX;
        att->cb.cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
        att->cb.micro_tile_mode = micro_tile_mode;
    }
}

void
radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
                          struct radv_image *image,
                          VkClearDepthStencilValue ds_clear_value,
                          VkImageAspectFlags aspects)
{
    uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
    va += image->offset + image->clear_value_offset;
    unsigned reg_offset = 0, reg_count = 0;

    if (!image->htile.size || !aspects)
        return;

    if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
        ++reg_count;
    } else {
        ++reg_offset;
        va += 4;
    }
    if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
        ++reg_count;

    cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

    radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
    radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
                                S_370_WR_CONFIRM(1) |
                                S_370_ENGINE_SEL(V_370_PFP));
    radeon_emit(cmd_buffer->cs, va);
    radeon_emit(cmd_buffer->cs, va >> 32);
    if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
        radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
    if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
        radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));

    radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count);
    if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
        radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */
    if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
        radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */
}

static void
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
                           struct radv_image *image)
{
    uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
    va += image->offset + image->clear_value_offset;

    if (!image->htile.size)
        return;

    cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

    radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
    radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
                                COPY_DATA_DST_SEL(COPY_DATA_REG) |
                                COPY_DATA_COUNT_SEL);
    radeon_emit(cmd_buffer->cs, va);
    radeon_emit(cmd_buffer->cs, va >> 32);
    radeon_emit(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR >> 2);
    radeon_emit(cmd_buffer->cs, 0);

    radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
    radeon_emit(cmd_buffer->cs, 0);
}

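/* Editor's note: as with depth above, the fast-clear color lives in two
 * places: next to the image in memory (at clear_value_offset, written
 * with WRITE_DATA) and in the CB_COLOR*_CLEAR_WORD registers; the load
 * path copies the memory value back into the registers with COPY_DATA.
 */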
void
radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
                          struct radv_image *image,
                          int idx,
                          uint32_t color_values[2])
{
    uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
    va += image->offset + image->clear_value_offset;

    if (!image->cmask.size && !image->surface.dcc_size)
        return;

    cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

    radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
    radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
                                S_370_WR_CONFIRM(1) |
                                S_370_ENGINE_SEL(V_370_PFP));
    radeon_emit(cmd_buffer->cs, va);
    radeon_emit(cmd_buffer->cs, va >> 32);
    radeon_emit(cmd_buffer->cs, color_values[0]);
    radeon_emit(cmd_buffer->cs, color_values[1]);

    radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2);
    radeon_emit(cmd_buffer->cs, color_values[0]);
    radeon_emit(cmd_buffer->cs, color_values[1]);
}

static void
radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
                           struct radv_image *image,
                           int idx)
{
    uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
    va += image->offset + image->clear_value_offset;

    if (!image->cmask.size && !image->surface.dcc_size)
        return;

    uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;
    cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

    radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
    radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
                                COPY_DATA_DST_SEL(COPY_DATA_REG) |
                                COPY_DATA_COUNT_SEL);
    radeon_emit(cmd_buffer->cs, va);
    radeon_emit(cmd_buffer->cs, va >> 32);
    radeon_emit(cmd_buffer->cs, reg >> 2);
    radeon_emit(cmd_buffer->cs, 0);

    radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
    radeon_emit(cmd_buffer->cs, 0);
}

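/* Editor's note: emits all CB/DB state for the current subpass; color
 * slots beyond color_count are marked COLOR_INVALID, and a missing
 * depth/stencil attachment is programmed as Z_INVALID/STENCIL_INVALID.
 */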
void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
    int i;
    struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
    const struct radv_subpass *subpass = cmd_buffer->state.subpass;
    int dst_resolve_micro_tile_mode = -1;

    if (subpass->has_resolve) {
        uint32_t a = subpass->resolve_attachments[0].attachment;
        const struct radv_image *image = framebuffer->attachments[a].attachment->image;
        dst_resolve_micro_tile_mode = image->surface.micro_tile_mode;
    }
    for (i = 0; i < subpass->color_count; ++i) {
        int idx = subpass->color_attachments[i].attachment;
        struct radv_attachment_info *att = &framebuffer->attachments[idx];

        if (dst_resolve_micro_tile_mode != -1) {
            radv_set_optimal_micro_tile_mode(cmd_buffer->device,
                                             att, dst_resolve_micro_tile_mode);
        }
        cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);

        assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
        radv_emit_fb_color_state(cmd_buffer, i, &att->cb);

        radv_load_color_clear_regs(cmd_buffer, att->attachment->image, i);
    }

    for (i = subpass->color_count; i < 8; i++)
        radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
                               S_028C70_FORMAT(V_028C70_COLOR_INVALID));

    if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
        int idx = subpass->depth_stencil_attachment.attachment;
        VkImageLayout layout = subpass->depth_stencil_attachment.layout;
        struct radv_attachment_info *att = &framebuffer->attachments[idx];
        struct radv_image *image = att->attachment->image;
        cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);

        radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);

        if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
            cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
            cmd_buffer->state.offset_scale = att->ds.offset_scale;
        }

        radv_load_depth_clear_regs(cmd_buffer, image);
    } else {
        radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
        radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID));       /* R_028040_DB_Z_INFO */
        radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */
    }
    radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
                           S_028208_BR_X(framebuffer->width) |
                           S_028208_BR_Y(framebuffer->height));
}

void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
    uint32_t db_count_control;

    if (!cmd_buffer->state.active_occlusion_queries) {
        if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
            db_count_control = 0;
        } else {
            db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
        }
    } else {
        if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
            db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
                               S_028004_SAMPLE_RATE(0) | /* TODO: set this to the number of samples of the current framebuffer */
                               S_028004_ZPASS_ENABLE(1) |
                               S_028004_SLICE_EVEN_ENABLE(1) |
                               S_028004_SLICE_ODD_ENABLE(1);
        } else {
            db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
                               S_028004_SAMPLE_RATE(0); /* TODO: set this to the number of samples of the current framebuffer */
        }
    }

    radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
}

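/* Editor's note on units in the dynamic-state flush below: the line width
 * register takes 1/8th-pixel units (hence the *8), the polygon offset
 * slope factor is scaled by 16 for PA_SU_POLY_OFFSET_*_SCALE, and the
 * constant bias is pre-scaled by the depth surface's offset_scale.
 */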
static void
radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
{
    struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

    if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
        unsigned width = cmd_buffer->state.dynamic.line_width * 8;
        radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
                               S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
    }

    if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
        radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
        radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
    }

    if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
                                   RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
                                   RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK)) {
        radeon_set_context_reg_seq(cmd_buffer->cs, R_028430_DB_STENCILREFMASK, 2);
        radeon_emit(cmd_buffer->cs, S_028430_STENCILTESTVAL(d->stencil_reference.front) |
                                    S_028430_STENCILMASK(d->stencil_compare_mask.front) |
                                    S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
                                    S_028430_STENCILOPVAL(1));
        radeon_emit(cmd_buffer->cs, S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
                                    S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
                                    S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
                                    S_028434_STENCILOPVAL_BF(1));
    }

    if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
                                   RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)) {
        radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN, fui(d->depth_bounds.min));
        radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX, fui(d->depth_bounds.max));
    }

    if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
                                   RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
        struct radv_raster_state *raster = &cmd_buffer->state.pipeline->graphics.raster;
        unsigned slope = fui(d->depth_bias.slope * 16.0f);
        unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);

        if (G_028814_POLY_OFFSET_FRONT_ENABLE(raster->pa_su_sc_mode_cntl)) {
            radeon_set_context_reg_seq(cmd_buffer->cs, R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
            radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
            radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
            radeon_emit(cmd_buffer->cs, bias);  /* FRONT OFFSET */
            radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
            radeon_emit(cmd_buffer->cs, bias);  /* BACK OFFSET */
        }
    }

    cmd_buffer->state.dirty = 0;
}

static void
emit_stage_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
                                   struct radv_pipeline *pipeline,
                                   int idx,
                                   uint64_t va,
                                   gl_shader_stage stage)
{
    struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
    uint32_t base_reg = shader_stage_to_user_data_0(stage);

    if (desc_set_loc->sgpr_idx == -1)
        return;

    assert(!desc_set_loc->indirect);
    assert(desc_set_loc->num_sgprs == 2);
    radeon_set_sh_reg_seq(cmd_buffer->cs,
                          base_reg + desc_set_loc->sgpr_idx * 4, 2);
    radeon_emit(cmd_buffer->cs, va);
    radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
                                  struct radv_pipeline *pipeline,
                                  VkShaderStageFlags stages,
                                  struct radv_descriptor_set *set,
                                  unsigned idx)
{
    if (stages & VK_SHADER_STAGE_FRAGMENT_BIT)
        emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
                                           idx, set->va,
                                           MESA_SHADER_FRAGMENT);

    if (stages & VK_SHADER_STAGE_VERTEX_BIT)
        emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
                                           idx, set->va,
                                           MESA_SHADER_VERTEX);

    if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
        emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
                                           idx, set->va,
                                           MESA_SHADER_COMPUTE);
}

static void
radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
                       struct radv_pipeline *pipeline,
                       VkShaderStageFlags stages)
{
    unsigned i;
    if (!cmd_buffer->state.descriptors_dirty)
        return;

    for (i = 0; i < MAX_SETS; i++) {
        if (!(cmd_buffer->state.descriptors_dirty & (1 << i)))
            continue;
        struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i];
        if (!set)
            continue;

        radv_emit_descriptor_set_userdata(cmd_buffer, pipeline, stages, set, i);
    }
    cmd_buffer->state.descriptors_dirty = 0;
}

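/* Editor's note: push constants and dynamic buffer descriptors share one
 * upload: the raw push-constant bytes are followed by a 16-byte buffer
 * descriptor per dynamic offset, and each active stage receives the
 * area's address in its AC_UD_PUSH_CONSTANTS user SGPR.
 */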
static void
radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
                     struct radv_pipeline *pipeline,
                     VkShaderStageFlags stages)
{
    struct radv_pipeline_layout *layout = pipeline->layout;
    unsigned offset;
    void *ptr;
    uint64_t va;

    stages &= cmd_buffer->push_constant_stages;
    if (!stages || !layout || (!layout->push_constant_size && !layout->dynamic_offset_count))
        return;

    radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
                                 16 * layout->dynamic_offset_count,
                                 256, &offset, &ptr);

    memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
    memcpy((char *)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
           16 * layout->dynamic_offset_count);

    va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
    va += offset;

    if (stages & VK_SHADER_STAGE_VERTEX_BIT)
        radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_VERTEX,
                                   AC_UD_PUSH_CONSTANTS, va);

    if (stages & VK_SHADER_STAGE_FRAGMENT_BIT)
        radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_FRAGMENT,
                                   AC_UD_PUSH_CONSTANTS, va);

    if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
        radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_COMPUTE,
                                   AC_UD_PUSH_CONSTANTS, va);

    cmd_buffer->push_constant_stages &= ~stages;
}

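/* Editor's note: called before each draw. Uploads one 16-byte vertex
 * buffer descriptor per attribute when the bindings or pipeline changed,
 * re-emits whatever state the dirty bits name, then flushes caches.
 */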
static void
radv_cmd_buffer_flush_state(struct radv_cmd_buffer *cmd_buffer)
{
    struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
    struct radv_device *device = cmd_buffer->device;
    uint32_t ia_multi_vgt_param;
    uint32_t ls_hs_config = 0;

    MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
                                                       cmd_buffer->cs, 4096);

    if ((cmd_buffer->state.vertex_descriptors_dirty || cmd_buffer->state.vb_dirty) &&
        cmd_buffer->state.pipeline->num_vertex_attribs) {
        unsigned vb_offset;
        void *vb_ptr;
        uint32_t i = 0;
        uint32_t num_attribs = cmd_buffer->state.pipeline->num_vertex_attribs;
        uint64_t va;

        /* allocate some descriptor state for vertex buffers */
        radv_cmd_buffer_upload_alloc(cmd_buffer, num_attribs * 16, 256,
                                     &vb_offset, &vb_ptr);

        for (i = 0; i < num_attribs; i++) {
            uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
            uint32_t offset;
            int vb = cmd_buffer->state.pipeline->va_binding[i];
            struct radv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
            uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];

            device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
            va = device->ws->buffer_get_va(buffer->bo);

            offset = cmd_buffer->state.vertex_bindings[vb].offset + cmd_buffer->state.pipeline->va_offset[i];
            va += offset + buffer->offset;
            desc[0] = va;
            desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
            if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
                desc[2] = (buffer->size - offset - cmd_buffer->state.pipeline->va_format_size[i]) / stride + 1;
            else
                desc[2] = buffer->size - offset;
            desc[3] = cmd_buffer->state.pipeline->va_rsrc_word3[i];
        }

        va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
        va += vb_offset;

        radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_VERTEX,
                                   AC_UD_VS_VERTEX_BUFFERS, va);
    }

    cmd_buffer->state.vertex_descriptors_dirty = false;
    cmd_buffer->state.vb_dirty = 0;
    if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
        radv_emit_graphics_pipeline(cmd_buffer, pipeline);

    if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_RENDER_TARGETS)
        radv_emit_framebuffer_state(cmd_buffer);

    if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
        radv_emit_viewport(cmd_buffer);

    if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR))
        radv_emit_scissor(cmd_buffer);

    if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) {
        radeon_set_context_reg(cmd_buffer->cs, R_028B54_VGT_SHADER_STAGES_EN, 0);
        ia_multi_vgt_param = si_get_ia_multi_vgt_param(cmd_buffer);

        if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
            radeon_set_context_reg_idx(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
            radeon_set_context_reg_idx(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, 2, ls_hs_config);
            radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_030908_VGT_PRIMITIVE_TYPE, 1,
                                       cmd_buffer->state.pipeline->graphics.prim);
        } else {
            radeon_set_config_reg(cmd_buffer->cs, R_008958_VGT_PRIMITIVE_TYPE, cmd_buffer->state.pipeline->graphics.prim);
            radeon_set_context_reg(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
            radeon_set_context_reg(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
        }
        radeon_set_context_reg(cmd_buffer->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, cmd_buffer->state.pipeline->graphics.gs_out);
    }

    radv_cmd_buffer_flush_dynamic_state(cmd_buffer);

    radv_flush_descriptors(cmd_buffer, cmd_buffer->state.pipeline,
                           VK_SHADER_STAGE_ALL_GRAPHICS);
    radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline,
                         VK_SHADER_STAGE_ALL_GRAPHICS);

    assert(cmd_buffer->cs->cdw <= cdw_max);

    si_emit_cache_flush(cmd_buffer);
}

static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
                             VkPipelineStageFlags src_stage_mask)
{
    if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
                          VK_PIPELINE_STAGE_TRANSFER_BIT |
                          VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
                          VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
        cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
    }

    if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
                          VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
                          VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                          VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                          VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
                          VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
                          VK_PIPELINE_STAGE_TRANSFER_BIT |
                          VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
                          VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
                          VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
        cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
    } else if (src_stage_mask & (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
                                 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
                                 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
                                 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
        cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
    }
}

static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
{
    radv_stage_flush(cmd_buffer, barrier->src_stage_mask);

    /* TODO: actual cache flushes */
}

static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                                 VkAttachmentReference att)
{
    unsigned idx = att.attachment;
    struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
    VkImageSubresourceRange range;
    range.aspectMask = 0;
    range.baseMipLevel = view->base_mip;
    range.levelCount = 1;
    range.baseArrayLayer = view->base_layer;
    range.layerCount = cmd_buffer->state.framebuffer->layers;

    radv_handle_image_transition(cmd_buffer,
                                 view->image,
                                 cmd_buffer->state.attachments[idx].current_layout,
                                 att.layout, 0, 0, range,
                                 cmd_buffer->state.attachments[idx].pending_clear_aspects);

    cmd_buffer->state.attachments[idx].current_layout = att.layout;
}

void
radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
                            const struct radv_subpass *subpass, bool transitions)
{
    if (transitions) {
        radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);

        for (unsigned i = 0; i < subpass->color_count; ++i) {
            radv_handle_subpass_image_transition(cmd_buffer,
                                                 subpass->color_attachments[i]);
        }

        for (unsigned i = 0; i < subpass->input_count; ++i) {
            radv_handle_subpass_image_transition(cmd_buffer,
                                                 subpass->input_attachments[i]);
        }

        if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
            radv_handle_subpass_image_transition(cmd_buffer,
                                                 subpass->depth_stencil_attachment);
        }
    }

    cmd_buffer->state.subpass = subpass;

    cmd_buffer->state.dirty |= RADV_CMD_DIRTY_RENDER_TARGETS;
}

static void
radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
                                 struct radv_render_pass *pass,
                                 const VkRenderPassBeginInfo *info)
{
    struct radv_cmd_state *state = &cmd_buffer->state;

    if (pass->attachment_count == 0) {
        state->attachments = NULL;
        return;
    }

    state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
                                  pass->attachment_count *
                                  sizeof(state->attachments[0]),
                                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (state->attachments == NULL) {
        /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
        abort();
    }

    for (uint32_t i = 0; i < pass->attachment_count; ++i) {
        struct radv_render_pass_attachment *att = &pass->attachments[i];
        VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
        VkImageAspectFlags clear_aspects = 0;

        if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
            /* color attachment */
            if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
                clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
            }
        } else {
            /* depth/stencil attachment */
            if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
                att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
                clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
            }
            if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
                att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
                clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
            }
        }

        state->attachments[i].pending_clear_aspects = clear_aspects;
        if (clear_aspects && info) {
            assert(info->clearValueCount > i);
            state->attachments[i].clear_value = info->pClearValues[i];
        }

        state->attachments[i].current_layout = att->initial_layout;
    }
}

VkResult radv_AllocateCommandBuffers(
    VkDevice _device,
    const VkCommandBufferAllocateInfo *pAllocateInfo,
    VkCommandBuffer *pCommandBuffers)
{
    RADV_FROM_HANDLE(radv_device, device, _device);
    RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);

    VkResult result = VK_SUCCESS;
    uint32_t i;

    for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
        result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                        &pCommandBuffers[i]);
        if (result != VK_SUCCESS)
            break;
    }

    if (result != VK_SUCCESS)
        radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                                i, pCommandBuffers);

    return result;
}

static void
radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
{
    list_del(&cmd_buffer->pool_link);

    list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
                             &cmd_buffer->upload.list, list) {
        cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
        list_del(&up->list);
        free(up);
    }

    if (cmd_buffer->upload.upload_bo)
        cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
    cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
    vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void radv_FreeCommandBuffers(
    VkDevice device,
    VkCommandPool commandPool,
    uint32_t commandBufferCount,
    const VkCommandBuffer *pCommandBuffers)
{
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

        if (cmd_buffer)
            radv_cmd_buffer_destroy(cmd_buffer);
    }
}

static void radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
    cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);

    list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
                             &cmd_buffer->upload.list, list) {
        cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
        list_del(&up->list);
        free(up);
    }

    if (cmd_buffer->upload.upload_bo)
        cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs,
                                              cmd_buffer->upload.upload_bo, 8);
    cmd_buffer->upload.offset = 0;

    cmd_buffer->record_fail = false;
}

VkResult radv_ResetCommandBuffer(
    VkCommandBuffer commandBuffer,
    VkCommandBufferResetFlags flags)
{
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    radv_reset_cmd_buffer(cmd_buffer);
    return VK_SUCCESS;
}

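/* Editor's note: primary command buffers start with per-queue init: the
 * GFX queue gets the full init config plus a conservative cache flush,
 * compute queues a smaller flush set, and transfer (SDMA) queues nothing.
 */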
VkResult radv_BeginCommandBuffer(
    VkCommandBuffer commandBuffer,
    const VkCommandBufferBeginInfo *pBeginInfo)
{
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    radv_reset_cmd_buffer(cmd_buffer);

    memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));

    /* setup initial configuration into command buffer */
    if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        switch (cmd_buffer->queue_family_index) {
        case RADV_QUEUE_GENERAL:
            /* Flush read caches at the beginning of CS not flushed by the kernel. */
            cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_ICACHE |
                                            RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                            RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
                                            RADV_CMD_FLAG_INV_VMEM_L1 |
                                            RADV_CMD_FLAG_INV_SMEM_L1 |
                                            RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
                                            RADV_CMD_FLAG_INV_GLOBAL_L2;
            si_init_config(cmd_buffer->device->physical_device, cmd_buffer);
            radv_set_db_count_control(cmd_buffer);
            si_emit_cache_flush(cmd_buffer);
            break;
        case RADV_QUEUE_COMPUTE:
            cmd_buffer->state.flush_bits = RADV_CMD_FLAG_INV_ICACHE |
                                           RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
                                           RADV_CMD_FLAG_INV_VMEM_L1 |
                                           RADV_CMD_FLAG_INV_SMEM_L1 |
                                           RADV_CMD_FLAG_INV_GLOBAL_L2;
            si_init_compute(cmd_buffer->device->physical_device, cmd_buffer);
            si_emit_cache_flush(cmd_buffer);
            break;
        case RADV_QUEUE_TRANSFER:
        default:
            break;
        }
    }

    if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
        cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
        cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);

        struct radv_subpass *subpass =
            &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

        radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
        radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
    }

    return VK_SUCCESS;
}

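/* Editor's note: vertex buffer bindings are only recorded below; the
 * actual 16-byte descriptors are built at draw time in
 * radv_cmd_buffer_flush_state, once the per-binding stride is known from
 * the bound pipeline.
 */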
void radv_CmdBindVertexBuffers(
    VkCommandBuffer commandBuffer,
    uint32_t firstBinding,
    uint32_t bindingCount,
    const VkBuffer *pBuffers,
    const VkDeviceSize *pOffsets)
{
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    struct radv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

    /* We have to defer setting up vertex buffer since we need the buffer
     * stride from the pipeline. */

    assert(firstBinding + bindingCount < MAX_VBS);
    for (uint32_t i = 0; i < bindingCount; i++) {
        vb[firstBinding + i].buffer = radv_buffer_from_handle(pBuffers[i]);
        vb[firstBinding + i].offset = pOffsets[i];
        cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
    }
}

void radv_CmdBindIndexBuffer(
    VkCommandBuffer commandBuffer,
    VkBuffer buffer,
    VkDeviceSize offset,
    VkIndexType indexType)
{
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

    cmd_buffer->state.index_buffer = radv_buffer_from_handle(buffer);
    cmd_buffer->state.index_offset = offset;
    cmd_buffer->state.index_type = indexType; /* vk matches hw */
    cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
    cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, cmd_buffer->state.index_buffer->bo, 8);
}

void radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
                              struct radv_descriptor_set *set,
                              unsigned idx)
{
    struct radeon_winsys *ws = cmd_buffer->device->ws;

    cmd_buffer->state.descriptors[idx] = set;
    cmd_buffer->state.descriptors_dirty |= (1 << idx);
    if (!set)
        return;

    for (unsigned j = 0; j < set->layout->buffer_count; ++j)
        if (set->descriptors[j])
            ws->cs_add_buffer(cmd_buffer->cs, set->descriptors[j], 7);

    if (set->bo)
        ws->cs_add_buffer(cmd_buffer->cs, set->bo, 8);
}

void radv_CmdBindDescriptorSets(
    VkCommandBuffer commandBuffer,
    VkPipelineBindPoint pipelineBindPoint,
    VkPipelineLayout _layout,
    uint32_t firstSet,
    uint32_t descriptorSetCount,
    const VkDescriptorSet *pDescriptorSets,
    uint32_t dynamicOffsetCount,
    const uint32_t *pDynamicOffsets)
{
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
    unsigned dyn_idx = 0;

    MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
                                                       cmd_buffer->cs, MAX_SETS * 4 * 6);

    for (unsigned i = 0; i < descriptorSetCount; ++i) {
        unsigned idx = i + firstSet;
        RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
        radv_bind_descriptor_set(cmd_buffer, set, idx);

        for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
            unsigned idx = j + layout->set[i].dynamic_offset_start;
            uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
            assert(dyn_idx < dynamicOffsetCount);

            struct radv_descriptor_range *range = set->dynamic_descriptors + j;
            uint64_t va = range->va + pDynamicOffsets[dyn_idx];
            dst[0] = va;
            dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
            dst[2] = range->size;
            dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                     S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                     S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
                     S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
                     S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
            cmd_buffer->push_constant_stages |=
                set->layout->dynamic_shader_stages;
        }
    }

    assert(cmd_buffer->cs->cdw <= cdw_max);
}

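/* Editor's note: push constants are only staged into CPU memory here;
 * radv_flush_constants uploads them lazily when a stage flagged in
 * push_constant_stages is about to draw or dispatch.
 */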
VkResult radv_EndCommandBuffer(
	VkCommandBuffer commandBuffer)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER)
		si_emit_cache_flush(cmd_buffer);

	if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs) ||
	    cmd_buffer->record_fail)
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;

	return VK_SUCCESS;
}

static void
radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *compute_shader;
	struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
	uint64_t va;

	if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
		return;

	cmd_buffer->state.emitted_compute_pipeline = pipeline;

	compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	va = ws->buffer_get_va(compute_shader->bo);

	ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, 16);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(cmd_buffer->cs, compute_shader->rsrc1);
	radeon_emit(cmd_buffer->cs, compute_shader->rsrc2);

	/* change these once we have scratch support */
	radeon_set_sh_reg(cmd_buffer->cs, R_00B860_COMPUTE_TMPRING_SIZE,
			  S_00B860_WAVES(32) | S_00B860_WAVESIZE(0));

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));

	assert(cmd_buffer->cs->cdw <= cdw_max);
}
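/* Shader binaries are at least 256-byte aligned, so the 40-bit virtual
 * address of the compute shader fits the PGM_LO/PGM_HI pair emitted above:
 *
 *	pgm_lo = va >> 8;	// VA bits [39:8]
 *	pgm_hi = va >> 40;	// remaining high bits (zero in a 40-bit VA space)
 */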
void radv_CmdBindPipeline(
	VkCommandBuffer commandBuffer,
	VkPipelineBindPoint pipelineBindPoint,
	VkPipeline _pipeline)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	for (unsigned i = 0; i < MAX_SETS; i++) {
		if (cmd_buffer->state.descriptors[i])
			cmd_buffer->state.descriptors_dirty |= (1 << i);
	}

	switch (pipelineBindPoint) {
	case VK_PIPELINE_BIND_POINT_COMPUTE:
		cmd_buffer->state.compute_pipeline = pipeline;
		cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
		break;
	case VK_PIPELINE_BIND_POINT_GRAPHICS:
		cmd_buffer->state.pipeline = pipeline;
		cmd_buffer->state.vertex_descriptors_dirty = true;
		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
		cmd_buffer->push_constant_stages |= pipeline->active_stages;

		/* Apply the dynamic state from the pipeline */
		cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
		radv_dynamic_state_copy(&cmd_buffer->state.dynamic,
					&pipeline->dynamic_state,
					pipeline->dynamic_state_mask);
		break;
	default:
		assert(!"invalid bind point");
		break;
	}
}
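/* The vkCmdSet* entry points below follow a common pattern: they only
 * update the shadow copy in cmd_buffer->state.dynamic and set the matching
 * RADV_CMD_DIRTY_DYNAMIC_* bit; the actual registers are emitted later,
 * when the next draw flushes state.
 */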
void radv_CmdSetViewport(
	VkCommandBuffer commandBuffer,
	uint32_t firstViewport,
	uint32_t viewportCount,
	const VkViewport* pViewports)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	const uint32_t total_count = firstViewport + viewportCount;
	if (cmd_buffer->state.dynamic.viewport.count < total_count)
		cmd_buffer->state.dynamic.viewport.count = total_count;

	memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
	       pViewports, viewportCount * sizeof(*pViewports));

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void radv_CmdSetScissor(
	VkCommandBuffer commandBuffer,
	uint32_t firstScissor,
	uint32_t scissorCount,
	const VkRect2D* pScissors)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	const uint32_t total_count = firstScissor + scissorCount;
	if (cmd_buffer->state.dynamic.scissor.count < total_count)
		cmd_buffer->state.dynamic.scissor.count = total_count;

	memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
	       pScissors, scissorCount * sizeof(*pScissors));
	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void radv_CmdSetLineWidth(
	VkCommandBuffer commandBuffer,
	float lineWidth)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	cmd_buffer->state.dynamic.line_width = lineWidth;
	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void radv_CmdSetDepthBias(
	VkCommandBuffer commandBuffer,
	float depthBiasConstantFactor,
	float depthBiasClamp,
	float depthBiasSlopeFactor)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
	cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
	cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void radv_CmdSetBlendConstants(
	VkCommandBuffer commandBuffer,
	const float blendConstants[4])
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	memcpy(cmd_buffer->state.dynamic.blend_constants,
	       blendConstants, sizeof(float) * 4);

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void radv_CmdSetDepthBounds(
	VkCommandBuffer commandBuffer,
	float minDepthBounds,
	float maxDepthBounds)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
	cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void radv_CmdSetStencilCompareMask(
	VkCommandBuffer commandBuffer,
	VkStencilFaceFlags faceMask,
	uint32_t compareMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
		cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
	if (faceMask & VK_STENCIL_FACE_BACK_BIT)
		cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void radv_CmdSetStencilWriteMask(
	VkCommandBuffer commandBuffer,
	VkStencilFaceFlags faceMask,
	uint32_t writeMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
		cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
	if (faceMask & VK_STENCIL_FACE_BACK_BIT)
		cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void radv_CmdSetStencilReference(
	VkCommandBuffer commandBuffer,
	VkStencilFaceFlags faceMask,
	uint32_t reference)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
		cmd_buffer->state.dynamic.stencil_reference.front = reference;
	if (faceMask & VK_STENCIL_FACE_BACK_BIT)
		cmd_buffer->state.dynamic.stencil_reference.back = reference;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
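/* Executing secondary command buffers (below) replays their CS contents,
 * so any register state the primary believes it has already emitted may
 * have been overwritten behind its back; the conservative fix is to mark
 * the pipeline and all dynamic state dirty again afterwards.
 */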
void radv_CmdExecuteCommands(
	VkCommandBuffer commandBuffer,
	uint32_t commandBufferCount,
	const VkCommandBuffer* pCmdBuffers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);

	for (uint32_t i = 0; i < commandBufferCount; i++) {
		RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);

		primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
	}

	/* If we executed any secondary command buffers, we need to re-emit
	 * our pipelines. */
	if (commandBufferCount) {
		primary->state.emitted_pipeline = NULL;
		primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
		primary->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_ALL;
	}
}

VkResult radv_CreateCommandPool(
	VkDevice _device,
	const VkCommandPoolCreateInfo* pCreateInfo,
	const VkAllocationCallbacks* pAllocator,
	VkCommandPool* pCmdPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_cmd_pool *pool;

	pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pool == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	if (pAllocator)
		pool->alloc = *pAllocator;
	else
		pool->alloc = device->alloc;

	list_inithead(&pool->cmd_buffers);

	pool->queue_family_index = pCreateInfo->queueFamilyIndex;

	*pCmdPool = radv_cmd_pool_to_handle(pool);

	return VK_SUCCESS;
}

void radv_DestroyCommandPool(
	VkDevice _device,
	VkCommandPool commandPool,
	const VkAllocationCallbacks* pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);

	if (!pool)
		return;

	list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
				 &pool->cmd_buffers, pool_link) {
		radv_cmd_buffer_destroy(cmd_buffer);
	}

	vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_ResetCommandPool(
	VkDevice device,
	VkCommandPool commandPool,
	VkCommandPoolResetFlags flags)
{
	RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);

	list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
			    &pool->cmd_buffers, pool_link) {
		radv_reset_cmd_buffer(cmd_buffer);
	}

	return VK_SUCCESS;
}
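/* For reference, a render pass instance is typically begun by the
 * application roughly as follows (illustrative core-Vulkan usage, not
 * driver code; all variable names are placeholders):
 *
 *	VkRenderPassBeginInfo info = {
 *		.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
 *		.renderPass = render_pass,
 *		.framebuffer = framebuffer,
 *		.renderArea = { { 0, 0 }, { width, height } },
 *		.clearValueCount = 1,
 *		.pClearValues = &clear_value,
 *	};
 *	vkCmdBeginRenderPass(cmd, &info, VK_SUBPASS_CONTENTS_INLINE);
 */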
void radv_CmdBeginRenderPass(
	VkCommandBuffer commandBuffer,
	const VkRenderPassBeginInfo* pRenderPassBegin,
	VkSubpassContents contents)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
	RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, 2048);

	cmd_buffer->state.framebuffer = framebuffer;
	cmd_buffer->state.pass = pass;
	cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
	radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);

	si_emit_cache_flush(cmd_buffer);

	radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
	assert(cmd_buffer->cs->cdw <= cdw_max);

	radv_cmd_buffer_clear_subpass(cmd_buffer);
}

void radv_CmdNextSubpass(
	VkCommandBuffer commandBuffer,
	VkSubpassContents contents)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	si_emit_cache_flush(cmd_buffer);
	radv_cmd_buffer_resolve_subpass(cmd_buffer);

	radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
			   2048);

	radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
	radv_cmd_buffer_clear_subpass(cmd_buffer);
}

void radv_CmdDraw(
	VkCommandBuffer commandBuffer,
	uint32_t vertexCount,
	uint32_t instanceCount,
	uint32_t firstVertex,
	uint32_t firstInstance)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	radv_cmd_buffer_flush_state(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
							     AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	if (loc->sgpr_idx != -1) {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4, 2);
		radeon_emit(cmd_buffer->cs, firstVertex);
		radeon_emit(cmd_buffer->cs, firstInstance);
	}
	radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
	radeon_emit(cmd_buffer->cs, instanceCount);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, 0));
	radeon_emit(cmd_buffer->cs, vertexCount);
	radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				    S_0287F0_USE_OPAQUE(0));

	assert(cmd_buffer->cs->cdw <= cdw_max);

	radv_cmd_buffer_trace_emit(cmd_buffer);
}

static void radv_emit_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer)
{
	/* The restart index depends on the bound index type, not on the
	 * previously emitted value. */
	uint32_t primitive_reset_index = cmd_buffer->state.index_type ? 0xffffffffu : 0xffffu;

	if (cmd_buffer->state.pipeline->graphics.prim_restart_enable &&
	    primitive_reset_index != cmd_buffer->state.last_primitive_reset_index) {
		cmd_buffer->state.last_primitive_reset_index = primitive_reset_index;
		radeon_set_context_reg(cmd_buffer->cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
				       primitive_reset_index);
	}
}
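/* With primitive restart enabled, the restart index must match the bound
 * index type: 0xffff for VK_INDEX_TYPE_UINT16 and 0xffffffff for
 * VK_INDEX_TYPE_UINT32 (index_type is non-zero for the 32-bit case, which
 * is what the ternary above keys on).
 */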
void radv_CmdDrawIndexed(
	VkCommandBuffer commandBuffer,
	uint32_t indexCount,
	uint32_t instanceCount,
	uint32_t firstIndex,
	int32_t vertexOffset,
	uint32_t firstInstance)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	int index_size = cmd_buffer->state.index_type ? 4 : 2;
	uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size;
	uint64_t index_va;

	radv_cmd_buffer_flush_state(cmd_buffer);
	radv_emit_primitive_reset_index(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 14);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
	radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
							     AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	if (loc->sgpr_idx != -1) {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4, 2);
		radeon_emit(cmd_buffer->cs, vertexOffset);
		radeon_emit(cmd_buffer->cs, firstInstance);
	}
	radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
	radeon_emit(cmd_buffer->cs, instanceCount);

	index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo);
	index_va += firstIndex * index_size + cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset;
	radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
	radeon_emit(cmd_buffer->cs, index_max_size);
	radeon_emit(cmd_buffer->cs, index_va);
	radeon_emit(cmd_buffer->cs, (index_va >> 32UL) & 0xFF);
	radeon_emit(cmd_buffer->cs, indexCount);
	radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);

	assert(cmd_buffer->cs->cdw <= cdw_max);
	radv_cmd_buffer_trace_emit(cmd_buffer);
}

static void
radv_emit_indirect_draw(struct radv_cmd_buffer *cmd_buffer,
			VkBuffer _buffer,
			VkDeviceSize offset,
			VkBuffer _count_buffer,
			VkDeviceSize count_offset,
			uint32_t draw_count,
			uint32_t stride,
			bool indexed)
{
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	RADV_FROM_HANDLE(radv_buffer, count_buffer, _count_buffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
				      : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
	uint64_t indirect_va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
	indirect_va += offset + buffer->offset;
	uint64_t count_va = 0;

	if (count_buffer) {
		count_va = cmd_buffer->device->ws->buffer_get_va(count_buffer->bo);
		count_va += count_offset + count_buffer->offset;
	}

	if (!draw_count)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cs, buffer->bo, 8);
	/* The CP reads the draw count from memory, so that BO must be
	 * resident as well. */
	if (count_buffer)
		cmd_buffer->device->ws->cs_add_buffer(cs, count_buffer->bo, 8);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
							     AC_UD_VS_BASE_VERTEX_START_INSTANCE);
	assert(loc->sgpr_idx != -1);
	radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
	radeon_emit(cs, 1);
	radeon_emit(cs, indirect_va);
	radeon_emit(cs, indirect_va >> 32);

	radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
				       PKT3_DRAW_INDIRECT_MULTI,
			     8, false));
	radeon_emit(cs, 0);
	radeon_emit(cs, ((R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4) - SI_SH_REG_OFFSET) >> 2);
	radeon_emit(cs, ((R_00B130_SPI_SHADER_USER_DATA_VS_0 + (loc->sgpr_idx + 1) * 4) - SI_SH_REG_OFFSET) >> 2);
	radeon_emit(cs, S_2C3_COUNT_INDIRECT_ENABLE(!!count_va)); /* count_indirect enable */
	radeon_emit(cs, draw_count); /* count */
	radeon_emit(cs, count_va); /* count_addr */
	radeon_emit(cs, count_va >> 32);
	radeon_emit(cs, stride); /* stride */
	radeon_emit(cs, di_src_sel);
	radv_cmd_buffer_trace_emit(cmd_buffer);
}
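/* The indirect draw packets consume records with the same layout as the
 * Vulkan indirect command structs, e.g. for non-indexed draws:
 *
 *	typedef struct VkDrawIndirectCommand {
 *		uint32_t vertexCount;
 *		uint32_t instanceCount;
 *		uint32_t firstVertex;
 *		uint32_t firstInstance;
 *	} VkDrawIndirectCommand;
 *
 * SET_BASE points the CP at the record array, and the *_MULTI packet walks
 * draw_count records of the given stride, loading firstVertex and
 * firstInstance into the user SGPRs selected above.
 */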
static void
radv_cmd_draw_indirect_count(VkCommandBuffer commandBuffer,
			     VkBuffer buffer,
			     VkDeviceSize offset,
			     VkBuffer countBuffer,
			     VkDeviceSize countBufferOffset,
			     uint32_t maxDrawCount,
			     uint32_t stride)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	radv_cmd_buffer_flush_state(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, 14);

	radv_emit_indirect_draw(cmd_buffer, buffer, offset,
				countBuffer, countBufferOffset, maxDrawCount, stride, false);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

static void
radv_cmd_draw_indexed_indirect_count(
	VkCommandBuffer commandBuffer,
	VkBuffer buffer,
	VkDeviceSize offset,
	VkBuffer countBuffer,
	VkDeviceSize countBufferOffset,
	uint32_t maxDrawCount,
	uint32_t stride)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	int index_size = cmd_buffer->state.index_type ? 4 : 2;
	uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size;
	uint64_t index_va;
	radv_cmd_buffer_flush_state(cmd_buffer);
	radv_emit_primitive_reset_index(cmd_buffer);

	index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo);
	index_va += cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 21);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
	radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BASE, 1, 0));
	radeon_emit(cmd_buffer->cs, index_va);
	radeon_emit(cmd_buffer->cs, index_va >> 32);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
	radeon_emit(cmd_buffer->cs, index_max_size);

	radv_emit_indirect_draw(cmd_buffer, buffer, offset,
				countBuffer, countBufferOffset, maxDrawCount, stride, true);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdDrawIndirect(
	VkCommandBuffer commandBuffer,
	VkBuffer buffer,
	VkDeviceSize offset,
	uint32_t drawCount,
	uint32_t stride)
{
	radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
				     VK_NULL_HANDLE, 0, drawCount, stride);
}

void radv_CmdDrawIndexedIndirect(
	VkCommandBuffer commandBuffer,
	VkBuffer buffer,
	VkDeviceSize offset,
	uint32_t drawCount,
	uint32_t stride)
{
	radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
					     VK_NULL_HANDLE, 0, drawCount, stride);
}

void radv_CmdDrawIndirectCountAMD(
	VkCommandBuffer commandBuffer,
	VkBuffer buffer,
	VkDeviceSize offset,
	VkBuffer countBuffer,
	VkDeviceSize countBufferOffset,
	uint32_t maxDrawCount,
	uint32_t stride)
{
	radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
				     countBuffer, countBufferOffset,
				     maxDrawCount, stride);
}

void radv_CmdDrawIndexedIndirectCountAMD(
	VkCommandBuffer commandBuffer,
	VkBuffer buffer,
	VkDeviceSize offset,
	VkBuffer countBuffer,
	VkDeviceSize countBufferOffset,
	uint32_t maxDrawCount,
	uint32_t stride)
{
	radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
					     countBuffer, countBufferOffset,
					     maxDrawCount, stride);
}

static void
radv_flush_compute_state(struct radv_cmd_buffer *cmd_buffer)
{
	radv_emit_compute_pipeline(cmd_buffer);
	radv_flush_descriptors(cmd_buffer, cmd_buffer->state.compute_pipeline,
			       VK_SHADER_STAGE_COMPUTE_BIT);
	radv_flush_constants(cmd_buffer, cmd_buffer->state.compute_pipeline,
			     VK_SHADER_STAGE_COMPUTE_BIT);
	si_emit_cache_flush(cmd_buffer);
}

void radv_CmdDispatch(
	VkCommandBuffer commandBuffer,
	uint32_t x,
	uint32_t y,
	uint32_t z)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	radv_flush_compute_state(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
							     MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
	if (loc->sgpr_idx != -1) {
		assert(!loc->indirect);
		assert(loc->num_sgprs == 3);
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, 3);
		radeon_emit(cmd_buffer->cs, x);
		radeon_emit(cmd_buffer->cs, y);
		radeon_emit(cmd_buffer->cs, z);
	}

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
				    PKT3_SHADER_TYPE_S(1));
	radeon_emit(cmd_buffer->cs, x);
	radeon_emit(cmd_buffer->cs, y);
	radeon_emit(cmd_buffer->cs, z);
	radeon_emit(cmd_buffer->cs, 1);

	assert(cmd_buffer->cs->cdw <= cdw_max);
	radv_cmd_buffer_trace_emit(cmd_buffer);
}
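/* Indirect dispatch takes two forms below: on the compute rings (MEC
 * firmware) DISPATCH_INDIRECT carries the 64-bit VA of the argument buffer
 * directly, while on the graphics ring (ME) the VA is first programmed via
 * SET_BASE and the packet then carries only a relative offset.
 */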
void radv_CmdDispatchIndirect(
	VkCommandBuffer commandBuffer,
	VkBuffer _buffer,
	VkDeviceSize offset)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
	va += buffer->offset + offset;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);

	radv_flush_compute_state(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 25);

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
							     MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
	if (loc->sgpr_idx != -1) {
		for (unsigned i = 0; i < 3; ++i) {
			radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
						    COPY_DATA_DST_SEL(COPY_DATA_REG));
			radeon_emit(cmd_buffer->cs, (va + 4 * i));
			radeon_emit(cmd_buffer->cs, (va + 4 * i) >> 32);
			radeon_emit(cmd_buffer->cs, ((R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4) >> 2) + i);
			radeon_emit(cmd_buffer->cs, 0);
		}
	}

	if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) |
					    PKT3_SHADER_TYPE_S(1));
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);
		radeon_emit(cmd_buffer->cs, 1);
	} else {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_BASE, 2, 0) |
					    PKT3_SHADER_TYPE_S(1));
		radeon_emit(cmd_buffer->cs, 1);
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);

		radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
					    PKT3_SHADER_TYPE_S(1));
		radeon_emit(cmd_buffer->cs, 0);
		radeon_emit(cmd_buffer->cs, 1);
	}

	assert(cmd_buffer->cs->cdw <= cdw_max);
	radv_cmd_buffer_trace_emit(cmd_buffer);
}

void radv_unaligned_dispatch(
	struct radv_cmd_buffer *cmd_buffer,
	uint32_t x,
	uint32_t y,
	uint32_t z)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
	struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	uint32_t blocks[3], remainder[3];

	blocks[0] = round_up_u32(x, compute_shader->info.cs.block_size[0]);
	blocks[1] = round_up_u32(y, compute_shader->info.cs.block_size[1]);
	blocks[2] = round_up_u32(z, compute_shader->info.cs.block_size[2]);

	/* If aligned, these should be an entire block size, not 0 */
	remainder[0] = x + compute_shader->info.cs.block_size[0] - align_u32_npot(x, compute_shader->info.cs.block_size[0]);
	remainder[1] = y + compute_shader->info.cs.block_size[1] - align_u32_npot(y, compute_shader->info.cs.block_size[1]);
	remainder[2] = z + compute_shader->info.cs.block_size[2] - align_u32_npot(z, compute_shader->info.cs.block_size[2]);

	radv_flush_compute_state(cmd_buffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]) |
		    S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]) |
		    S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]) |
		    S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));

	struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline,
							     MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE);
	if (loc->sgpr_idx != -1) {
		radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, 3);
		radeon_emit(cmd_buffer->cs, blocks[0]);
		radeon_emit(cmd_buffer->cs, blocks[1]);
		radeon_emit(cmd_buffer->cs, blocks[2]);
	}
	radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
				    PKT3_SHADER_TYPE_S(1));
	radeon_emit(cmd_buffer->cs, blocks[0]);
	radeon_emit(cmd_buffer->cs, blocks[1]);
	radeon_emit(cmd_buffer->cs, blocks[2]);
	radeon_emit(cmd_buffer->cs, S_00B800_COMPUTE_SHADER_EN(1) |
				    S_00B800_PARTIAL_TG_EN(1));

	assert(cmd_buffer->cs->cdw <= cdw_max);
	radv_cmd_buffer_trace_emit(cmd_buffer);
}
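/* Worked example for the partial-workgroup math above, assuming a 64x1x1
 * workgroup and an unaligned width x = 100:
 *
 *	blocks[0]    = round_up_u32(100, 64)            = 2
 *	remainder[0] = 100 + 64 - align_u32_npot(100, 64)
 *	             = 100 + 64 - 128                   = 36
 *
 * so the GPU launches two groups in X and PARTIAL_TG_EN trims the last one
 * to 36 threads; for aligned sizes the remainder equals the full block
 * size, which leaves the last group untrimmed.
 */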
void radv_CmdEndRenderPass(
	VkCommandBuffer commandBuffer)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);

	si_emit_cache_flush(cmd_buffer);
	radv_cmd_buffer_resolve_subpass(cmd_buffer);

	for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
		VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
		radv_handle_subpass_image_transition(cmd_buffer,
						     (VkAttachmentReference){i, layout});
	}

	vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);

	cmd_buffer->state.pass = NULL;
	cmd_buffer->state.subpass = NULL;
	cmd_buffer->state.attachments = NULL;
	cmd_buffer->state.framebuffer = NULL;
}


static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image)
{
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
					RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

	radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->htile.offset,
			 image->htile.size, 0xffffffff);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
					RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_INV_GLOBAL_L2;
}

static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
					       struct radv_image *image,
					       VkImageLayout src_layout,
					       VkImageLayout dst_layout,
					       VkImageSubresourceRange range,
					       VkImageAspectFlags pending_clears)
{
	if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
	    (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
	    cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
	    cmd_buffer->state.render_area.extent.width == image->extent.width &&
	    cmd_buffer->state.render_area.extent.height == image->extent.height) {
		/* The clear will initialize htile. */
		return;
	} else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
		   radv_layout_has_htile(image, dst_layout)) {
		/* TODO: merge with the clear if applicable */
		radv_initialize_htile(cmd_buffer, image);
	} else if (!radv_layout_has_htile(image, src_layout) &&
		   radv_layout_has_htile(image, dst_layout)) {
		radv_initialize_htile(cmd_buffer, image);
	} else if ((radv_layout_has_htile(image, src_layout) &&
		    !radv_layout_has_htile(image, dst_layout)) ||
		   (radv_layout_is_htile_compressed(image, src_layout) &&
		    !radv_layout_is_htile_compressed(image, dst_layout))) {

		range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
		range.baseMipLevel = 0;
		range.levelCount = 1;

		radv_decompress_depth_image_inplace(cmd_buffer, image, &range);
	}
}
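/* Color metadata follows the same pattern as HTILE above: the first
 * transition out of VK_IMAGE_LAYOUT_UNDEFINED initializes the metadata
 * buffer with a fill, and transitions out of fast-clearable layouts force
 * an in-place flush/decompress so other consumers see plain data.
 */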
void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image, uint32_t value)
{
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->cmask.offset,
			 image->cmask.size, value);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
					RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_INV_GLOBAL_L2;
}

static void radv_handle_cmask_image_transition(struct radv_cmd_buffer *cmd_buffer,
					       struct radv_image *image,
					       VkImageLayout src_layout,
					       VkImageLayout dst_layout,
					       unsigned src_queue_mask,
					       unsigned dst_queue_mask,
					       VkImageSubresourceRange range,
					       VkImageAspectFlags pending_clears)
{
	if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
		if (image->fmask.size)
			radv_initialise_cmask(cmd_buffer, image, 0xccccccccu);
		else
			radv_initialise_cmask(cmd_buffer, image, 0xffffffffu);
	} else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
		   !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
		radv_fast_clear_flush_image_inplace(cmd_buffer, image);
	}
}

void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
			 struct radv_image *image, uint32_t value)
{
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->dcc_offset,
			 image->surface.dcc_size, value);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
					RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_INV_GLOBAL_L2;
}

static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer,
					     struct radv_image *image,
					     VkImageLayout src_layout,
					     VkImageLayout dst_layout,
					     unsigned src_queue_mask,
					     unsigned dst_queue_mask,
					     VkImageSubresourceRange range,
					     VkImageAspectFlags pending_clears)
{
	if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
		radv_initialize_dcc(cmd_buffer, image, 0x20202020u);
	} else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
		   !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
		radv_fast_clear_flush_image_inplace(cmd_buffer, image);
	}
}
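/* radv_handle_image_transition below is the single entry point used by
 * pipeline barriers, events and render pass edges: it filters out
 * queue-family acquire/release halves that this queue cannot (or need not)
 * handle, then dispatches to the HTILE, CMASK and DCC handlers based on
 * which metadata the image actually has.
 */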
static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 int src_family,
					 int dst_family,
					 VkImageSubresourceRange range,
					 VkImageAspectFlags pending_clears)
{
	if (image->exclusive && src_family != dst_family) {
		/* This is an acquire or a release operation and there will be
		 * a corresponding release/acquire. Do the transition in the
		 * most flexible queue. */

		assert(src_family == cmd_buffer->queue_family_index ||
		       dst_family == cmd_buffer->queue_family_index);

		if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
			return;

		if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
		    (src_family == RADV_QUEUE_GENERAL ||
		     dst_family == RADV_QUEUE_GENERAL))
			return;
	}

	unsigned src_queue_mask = radv_image_queue_family_mask(image, src_family);
	unsigned dst_queue_mask = radv_image_queue_family_mask(image, dst_family);

	if (image->htile.size)
		radv_handle_depth_image_transition(cmd_buffer, image, src_layout,
						   dst_layout, range, pending_clears);

	if (image->cmask.size)
		radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
						   dst_layout, src_queue_mask,
						   dst_queue_mask, range,
						   pending_clears);

	if (image->surface.dcc_size)
		radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
						 dst_layout, src_queue_mask,
						 dst_queue_mask, range,
						 pending_clears);
}

void radv_CmdPipelineBarrier(
	VkCommandBuffer commandBuffer,
	VkPipelineStageFlags srcStageMask,
	VkPipelineStageFlags destStageMask,
	VkBool32 byRegion,
	uint32_t memoryBarrierCount,
	const VkMemoryBarrier* pMemoryBarriers,
	uint32_t bufferMemoryBarrierCount,
	const VkBufferMemoryBarrier* pBufferMemoryBarriers,
	uint32_t imageMemoryBarrierCount,
	const VkImageMemoryBarrier* pImageMemoryBarriers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	VkAccessFlags src_flags = 0;
	VkAccessFlags dst_flags = 0;
	uint32_t b;

	for (uint32_t i = 0; i < memoryBarrierCount; i++) {
		src_flags |= pMemoryBarriers[i].srcAccessMask;
		dst_flags |= pMemoryBarriers[i].dstAccessMask;
	}

	for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
		src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
		dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
	}

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		src_flags |= pImageMemoryBarriers[i].srcAccessMask;
		dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
	}

	enum radv_cmd_flush_bits flush_bits = 0;
	for_each_bit(b, src_flags) {
		switch ((VkAccessFlagBits)(1 << b)) {
		case VK_ACCESS_SHADER_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
			break;
		case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
			break;
		case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
			break;
		case VK_ACCESS_TRANSFER_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
			break;
		default:
			break;
		}
	}
	cmd_buffer->state.flush_bits |= flush_bits;

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
		radv_handle_image_transition(cmd_buffer, image,
					     pImageMemoryBarriers[i].oldLayout,
					     pImageMemoryBarriers[i].newLayout,
					     pImageMemoryBarriers[i].srcQueueFamilyIndex,
					     pImageMemoryBarriers[i].dstQueueFamilyIndex,
					     pImageMemoryBarriers[i].subresourceRange,
					     0);
	}

	flush_bits = 0;

	for_each_bit(b, dst_flags) {
		switch ((VkAccessFlagBits)(1 << b)) {
		case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
		case VK_ACCESS_INDEX_READ_BIT:
		case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
			flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1;
			break;
		case VK_ACCESS_UNIFORM_READ_BIT:
			flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
			break;
		case VK_ACCESS_SHADER_READ_BIT:
			flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
			break;
		case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
		case VK_ACCESS_TRANSFER_READ_BIT:
		case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
			flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER | RADV_CMD_FLAG_INV_GLOBAL_L2;
			break;
		default:
			break;
		}
	}

	flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
		      RADV_CMD_FLAG_PS_PARTIAL_FLUSH;

	cmd_buffer->state.flush_bits |= flush_bits;
}
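/* Events are backed by a dword in GPU memory: setting or resetting one is
 * a bottom-of-pipe EVENT_WRITE_EOP that stores 1 or 0 to the event BO once
 * all prior work has retired, and waiting is a CP-side poll on that dword.
 */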
static void write_event(struct radv_cmd_buffer *cmd_buffer,
			struct radv_event *event,
			VkPipelineStageFlags stageMask,
			unsigned value)
{
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);

	cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);

	/* TODO: this is overkill. Probably should figure something out from
	 * the stage mask. */

	if (cmd_buffer->device->physical_device->rad_info.chip_class == CIK) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
				EVENT_INDEX(5));
		radeon_emit(cs, va);
		radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
		radeon_emit(cs, 2);
		radeon_emit(cs, 0);
	}

	radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
	radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
			EVENT_INDEX(5));
	radeon_emit(cs, va);
	radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
	radeon_emit(cs, value);
	radeon_emit(cs, 0);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
		      VkEvent _event,
		      VkPipelineStageFlags stageMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_event, event, _event);

	write_event(cmd_buffer, event, stageMask, 1);
}

void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
			VkEvent _event,
			VkPipelineStageFlags stageMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_event, event, _event);

	write_event(cmd_buffer, event, stageMask, 0);
}
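/* WAIT_REG_MEM stalls the CP until the memory dword at va equals the
 * reference value under the given mask, polling at the specified interval;
 * this is what makes vkCmdWaitEvents below a GPU-side wait rather than a
 * CPU round trip.
 */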
void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
			uint32_t eventCount,
			const VkEvent* pEvents,
			VkPipelineStageFlags srcStageMask,
			VkPipelineStageFlags dstStageMask,
			uint32_t memoryBarrierCount,
			const VkMemoryBarrier* pMemoryBarriers,
			uint32_t bufferMemoryBarrierCount,
			const VkBufferMemoryBarrier* pBufferMemoryBarriers,
			uint32_t imageMemoryBarrierCount,
			const VkImageMemoryBarrier* pImageMemoryBarriers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;

	for (unsigned i = 0; i < eventCount; ++i) {
		RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
		uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);

		cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);

		MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);

		radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
		radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		radeon_emit(cs, 1); /* reference value */
		radeon_emit(cs, 0xffffffff); /* mask */
		radeon_emit(cs, 4); /* poll interval */

		assert(cmd_buffer->cs->cdw <= cdw_max);
	}


	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);

		radv_handle_image_transition(cmd_buffer, image,
					     pImageMemoryBarriers[i].oldLayout,
					     pImageMemoryBarriers[i].newLayout,
					     pImageMemoryBarriers[i].srcQueueFamilyIndex,
					     pImageMemoryBarriers[i].dstQueueFamilyIndex,
					     pImageMemoryBarriers[i].subresourceRange,
					     0);
	}

	/* TODO: figure out how to do memory barriers without waiting */
	cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
					RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_INV_SMEM_L1;
}