radv_cmd_buffer.c revision a74a4edc90a6a6fddfcf5e5d72c301bcf13ad125
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_radeon_winsys.h"
#include "radv_cs.h"
#include "sid.h"
#include "vk_format.h"
#include "radv_meta.h"

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 VkImageSubresourceRange range,
					 VkImageAspectFlags pending_clears);

const struct radv_dynamic_state default_dynamic_state = {
	.viewport = {
		.count = 0,
	},
	.scissor = {
		.count = 0,
	},
	.line_width = 1.0f,
	.depth_bias = {
		.bias = 0.0f,
		.clamp = 0.0f,
		.slope = 0.0f,
	},
	.blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
	.depth_bounds = {
		.min = 0.0f,
		.max = 1.0f,
	},
	.stencil_compare_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_write_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_reference = {
		.front = 0u,
		.back = 0u,
	},
};

void
radv_dynamic_state_copy(struct radv_dynamic_state *dest,
			const struct radv_dynamic_state *src,
			uint32_t copy_mask)
{
	if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
		dest->viewport.count = src->viewport.count;
		typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
			     src->viewport.count);
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
		dest->scissor.count = src->scissor.count;
		typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
			     src->scissor.count);
	}

	if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
		dest->line_width = src->line_width;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
		dest->depth_bias = src->depth_bias;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
		typed_memcpy(dest->blend_constants, src->blend_constants, 4);

	if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
		dest->depth_bounds = src->depth_bounds;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
		dest->stencil_compare_mask = src->stencil_compare_mask;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
		dest->stencil_write_mask = src->stencil_write_mask;

	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
		dest->stencil_reference = src->stencil_reference;
}

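/*
 * Create a command buffer in the given pool: the buffer is zero-initialized,
 * linked into the pool's list (or into itself when there is no pool, so the
 * list_del in radv_cmd_buffer_destroy stays safe), and given a fresh GFX
 * command stream. The upload BO is created lazily on first use.
 */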
static VkResult radv_create_cmd_buffer(
	struct radv_device *         device,
	struct radv_cmd_pool *       pool,
	VkCommandBufferLevel         level,
	VkCommandBuffer*             pCommandBuffer)
{
	struct radv_cmd_buffer *cmd_buffer;
	VkResult result;

	cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (cmd_buffer == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(cmd_buffer, 0, sizeof(*cmd_buffer));
	cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
	cmd_buffer->device = device;
	cmd_buffer->pool = pool;
	cmd_buffer->level = level;

	if (pool) {
		list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
	} else {
		/* Init the pool_link so we can safely call list_del when we destroy
		 * the command buffer
		 */
		list_inithead(&cmd_buffer->pool_link);
	}

	cmd_buffer->cs = device->ws->cs_create(device->ws, RING_GFX);
	if (!cmd_buffer->cs) {
		result = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto fail;
	}

	*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);

	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.size = 0;
	list_inithead(&cmd_buffer->upload.list);

	return VK_SUCCESS;

fail:
	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

	return result;
}

static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
				  uint64_t min_needed)
{
	uint64_t new_size;
	struct radeon_winsys_bo *bo;
	struct radv_cmd_buffer_upload *upload;
	struct radv_device *device = cmd_buffer->device;

	new_size = MAX2(min_needed, 16 * 1024);
	new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

	bo = device->ws->buffer_create(device->ws,
				       new_size, 4096,
				       RADEON_DOMAIN_GTT,
				       RADEON_FLAG_CPU_ACCESS);

	if (!bo) {
		cmd_buffer->record_fail = true;
		return false;
	}

	device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8);
	if (cmd_buffer->upload.upload_bo) {
		upload = malloc(sizeof(*upload));

		if (!upload) {
			cmd_buffer->record_fail = true;
			device->ws->buffer_destroy(bo);
			return false;
		}

		memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
		list_add(&upload->list, &cmd_buffer->upload.list);
	}

	cmd_buffer->upload.upload_bo = bo;
	cmd_buffer->upload.size = new_size;
	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);

	if (!cmd_buffer->upload.map) {
		cmd_buffer->record_fail = true;
		return false;
	}

	return true;
}

bool
radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
			     unsigned size,
			     unsigned alignment,
			     unsigned *out_offset,
			     void **ptr)
{
	uint64_t offset = align(cmd_buffer->upload.offset, alignment);
	if (offset + size > cmd_buffer->upload.size) {
		if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
			return false;
		offset = 0;
	}

	*out_offset = offset;
	*ptr = cmd_buffer->upload.map + offset;

	cmd_buffer->upload.offset = offset + size;
	return true;
}

bool
radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
			    unsigned size, unsigned alignment,
			    const void *data, unsigned *out_offset)
{
	uint8_t *ptr;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
					  out_offset, (void **)&ptr))
		return false;

	if (ptr)
		memcpy(ptr, data, size);

	return true;
}

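/*
 * Illustrative sketch (not a call site in this file) of how the upload
 * helpers above are meant to be used: suballocate CPU-visible space from
 * the per-command-buffer upload BO, then point the GPU at it. 'consts' is
 * a hypothetical name; 'ws' stands for cmd_buffer->device->ws.
 *
 *	unsigned offset;
 *	if (radv_cmd_buffer_upload_data(cmd_buffer, sizeof(consts), 256,
 *					consts, &offset)) {
 *		uint64_t va = ws->buffer_get_va(cmd_buffer->upload.upload_bo) + offset;
 *		... emit va into a user-data SGPR pair ...
 *	}
 *
 * The backing BO grows geometrically (at least 16 KiB and at least double
 * the previous size); full BOs are kept on upload.list until the command
 * buffer is reset or destroyed, since earlier parts of the CS may still
 * reference them.
 */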
static void
radv_emit_graphics_blend_state(struct radv_cmd_buffer *cmd_buffer,
			       struct radv_pipeline *pipeline)
{
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028780_CB_BLEND0_CONTROL, 8);
	radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.cb_blend_control,
			  8);
	radeon_set_context_reg(cmd_buffer->cs, R_028808_CB_COLOR_CONTROL, pipeline->graphics.blend.cb_color_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028B70_DB_ALPHA_TO_MASK, pipeline->graphics.blend.db_alpha_to_mask);
}

static void
radv_emit_graphics_depth_stencil_state(struct radv_cmd_buffer *cmd_buffer,
				       struct radv_pipeline *pipeline)
{
	struct radv_depth_stencil_state *ds = &pipeline->graphics.ds;
	radeon_set_context_reg(cmd_buffer->cs, R_028800_DB_DEPTH_CONTROL, ds->db_depth_control);
	radeon_set_context_reg(cmd_buffer->cs, R_02842C_DB_STENCIL_CONTROL, ds->db_stencil_control);

	radeon_set_context_reg(cmd_buffer->cs, R_028000_DB_RENDER_CONTROL, ds->db_render_control);
	radeon_set_context_reg(cmd_buffer->cs, R_028010_DB_RENDER_OVERRIDE2, ds->db_render_override2);
}

/* 12.4 fixed-point */
static unsigned radv_pack_float_12p4(float x)
{
	return x <= 0    ? 0 :
	       x >= 4096 ? 0xffff : x * 16;
}

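/*
 * Worked examples for the 12.4 packing above: 1.0f -> 0x10 (16),
 * 0.5f -> 0x8, 4095.9375f -> 0xffff. Inputs are clamped to [0, 4096),
 * i.e. 12 integer bits and 4 fractional bits.
 */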
static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_pipeline *pipeline)
{
	int num_samples = pipeline->graphics.ms.num_samples;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[0]);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[1]);

	radeon_set_context_reg(cmd_buffer->cs, CM_R_028804_DB_EQAA, ms->db_eqaa);
	radeon_set_context_reg(cmd_buffer->cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);

	if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
		return;

	radeon_set_context_reg_seq(cmd_buffer->cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);

	radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);

	uint32_t samples_offset;
	void *samples_ptr;
	void *src;
	radv_cmd_buffer_upload_alloc(cmd_buffer, num_samples * 4 * 2, 256, &samples_offset,
				     &samples_ptr);
	switch (num_samples) {
	case 1:
		src = cmd_buffer->device->sample_locations_1x;
		break;
	case 2:
		src = cmd_buffer->device->sample_locations_2x;
		break;
	case 4:
		src = cmd_buffer->device->sample_locations_4x;
		break;
	case 8:
		src = cmd_buffer->device->sample_locations_8x;
		break;
	case 16:
		src = cmd_buffer->device->sample_locations_16x;
		break;
	default:
		/* Vulkan only allows the sample counts above; avoid using
		 * src uninitialized if that ever changes. */
		unreachable("invalid number of samples");
	}
	memcpy(samples_ptr, src, num_samples * 4 * 2);

	uint64_t va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
	va += samples_offset;

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B030_SPI_SHADER_USER_DATA_PS_0 + AC_USERDATA_PS_SAMPLE_POS * 4, 2);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_emit_graphics_raster_state(struct radv_cmd_buffer *cmd_buffer,
				struct radv_pipeline *pipeline)
{
	struct radv_raster_state *raster = &pipeline->graphics.raster;

	radeon_set_context_reg(cmd_buffer->cs, R_028810_PA_CL_CLIP_CNTL,
			       raster->pa_cl_clip_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D4_SPI_INTERP_CONTROL_0,
			       raster->spi_interp_control);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028A00_PA_SU_POINT_SIZE, 2);
	radeon_emit(cmd_buffer->cs, 0);
	radeon_emit(cmd_buffer->cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) |
		    S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2))); /* R_028A04_PA_SU_POINT_MINMAX */

	radeon_set_context_reg(cmd_buffer->cs, R_028BE4_PA_SU_VTX_CNTL,
			       raster->pa_su_vtx_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL,
			       raster->pa_su_sc_mode_cntl);
}

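/*
 * Emit the hardware VS stage for the pipeline's vertex shader: program
 * address (PGM_LO/HI), resource words, position/parameter export formats,
 * and clip/cull distance enables derived from the shader's masks.
 */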
static void
radv_emit_vertex_shader(struct radv_cmd_buffer *cmd_buffer,
			struct radv_pipeline *pipeline)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *vs;
	uint64_t va;
	unsigned export_count;
	unsigned clip_dist_mask, cull_dist_mask, total_mask;

	assert(pipeline->shaders[MESA_SHADER_VERTEX]);

	vs = pipeline->shaders[MESA_SHADER_VERTEX];
	va = ws->buffer_get_va(vs->bo);
	ws->cs_add_buffer(cmd_buffer->cs, vs->bo, 8);

	clip_dist_mask = vs->info.vs.clip_dist_mask;
	cull_dist_mask = vs->info.vs.cull_dist_mask;
	total_mask = clip_dist_mask | cull_dist_mask;
	radeon_set_context_reg(cmd_buffer->cs, R_028A40_VGT_GS_MODE, 0);
	radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, 0);

	export_count = MAX2(1, vs->info.vs.param_exports);
	radeon_set_context_reg(cmd_buffer->cs, R_0286C4_SPI_VS_OUT_CONFIG,
			       S_0286C4_VS_EXPORT_COUNT(export_count - 1));
	radeon_set_context_reg(cmd_buffer->cs, R_02870C_SPI_SHADER_POS_FORMAT,
			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
			       S_02870C_POS1_EXPORT_FORMAT(vs->info.vs.pos_exports > 1 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS2_EXPORT_FORMAT(vs->info.vs.pos_exports > 2 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE) |
			       S_02870C_POS3_EXPORT_FORMAT(vs->info.vs.pos_exports > 3 ?
							   V_02870C_SPI_SHADER_4COMP :
							   V_02870C_SPI_SHADER_NONE));

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, vs->rsrc1);
	radeon_emit(cmd_buffer->cs, vs->rsrc2);

	radeon_set_context_reg(cmd_buffer->cs, R_028818_PA_CL_VTE_CNTL,
			       S_028818_VTX_W0_FMT(1) |
			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));

	radeon_set_context_reg(cmd_buffer->cs, R_02881C_PA_CL_VS_OUT_CNTL,
			       S_02881C_USE_VTX_POINT_SIZE(vs->info.vs.writes_pointsize) |
			       S_02881C_VS_OUT_MISC_VEC_ENA(vs->info.vs.writes_pointsize) |
			       S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
			       S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
			       pipeline->graphics.raster.pa_cl_vs_out_cntl |
			       cull_dist_mask << 8 |
			       clip_dist_mask);
}

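/*
 * Emit the hardware PS stage. Each PS input in ps->info.fs.input_mask gets
 * an SPI_PS_INPUT_CNTL_n slot: if the VS exports the matching parameter,
 * OFFSET selects its position in the VS export order (counted with
 * util_bitcount below); otherwise OFFSET(0x20) selects the default value.
 * Point coordinates, when used, claim slot 0 as a point-sprite input.
 */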
static void
radv_emit_fragment_shader(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_pipeline *pipeline)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *ps, *vs;
	uint64_t va;
	unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
	struct radv_blend_state *blend = &pipeline->graphics.blend;
	unsigned ps_offset = 0;
	unsigned z_order;
	assert(pipeline->shaders[MESA_SHADER_FRAGMENT]);

	ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
	vs = pipeline->shaders[MESA_SHADER_VERTEX];
	va = ws->buffer_get_va(ps->bo);
	ws->cs_add_buffer(cmd_buffer->cs, ps->bo, 8);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);
	radeon_emit(cmd_buffer->cs, ps->rsrc1);
	radeon_emit(cmd_buffer->cs, ps->rsrc2);

	if (ps->info.fs.early_fragment_test || !ps->info.fs.writes_memory)
		z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
	else
		z_order = V_02880C_LATE_Z;

	radeon_set_context_reg(cmd_buffer->cs, R_02880C_DB_SHADER_CONTROL,
			       S_02880C_Z_EXPORT_ENABLE(ps->info.fs.writes_z) |
			       S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.fs.writes_stencil) |
			       S_02880C_KILL_ENABLE(!!ps->info.fs.can_discard) |
			       S_02880C_Z_ORDER(z_order) |
			       S_02880C_DEPTH_BEFORE_SHADER(ps->info.fs.early_fragment_test) |
			       S_02880C_EXEC_ON_HIER_FAIL(ps->info.fs.writes_memory) |
			       S_02880C_EXEC_ON_NOOP(ps->info.fs.writes_memory));

	radeon_set_context_reg(cmd_buffer->cs, R_0286CC_SPI_PS_INPUT_ENA,
			       ps->config.spi_ps_input_ena);

	radeon_set_context_reg(cmd_buffer->cs, R_0286D0_SPI_PS_INPUT_ADDR,
			       ps->config.spi_ps_input_addr);

	spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(0);
	radeon_set_context_reg(cmd_buffer->cs, R_0286D8_SPI_PS_IN_CONTROL,
			       S_0286D8_NUM_INTERP(ps->info.fs.num_interp));

	radeon_set_context_reg(cmd_buffer->cs, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);

	radeon_set_context_reg(cmd_buffer->cs, R_028710_SPI_SHADER_Z_FORMAT,
			       ps->info.fs.writes_stencil ? V_028710_SPI_SHADER_32_GR :
			       ps->info.fs.writes_z ? V_028710_SPI_SHADER_32_R :
			       V_028710_SPI_SHADER_ZERO);

	radeon_set_context_reg(cmd_buffer->cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);

	radeon_set_context_reg(cmd_buffer->cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
	radeon_set_context_reg(cmd_buffer->cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);

	if (ps->info.fs.has_pcoord) {
		unsigned val;
		val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
		radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val);
		ps_offset = 1;
	}

	for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.fs.input_mask; ++i) {
		unsigned vs_offset, flat_shade;
		unsigned val;

		if (!(ps->info.fs.input_mask & (1u << i)))
			continue;

		if (!(vs->info.vs.export_mask & (1u << i))) {
			radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset,
					       S_028644_OFFSET(0x20));
			++ps_offset;
			continue;
		}

		vs_offset = util_bitcount(vs->info.vs.export_mask & ((1u << i) - 1));
		flat_shade = !!(ps->info.fs.flat_shaded_mask & (1u << ps_offset));

		val = S_028644_OFFSET(vs_offset) | S_028644_FLAT_SHADE(flat_shade);
		radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val);
		++ps_offset;
	}
}

static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer,
			    struct radv_pipeline *pipeline)
{
	if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
		return;

	radv_emit_graphics_depth_stencil_state(cmd_buffer, pipeline);
	radv_emit_graphics_blend_state(cmd_buffer, pipeline);
	radv_emit_graphics_raster_state(cmd_buffer, pipeline);
	radv_update_multisample_state(cmd_buffer, pipeline);
	radv_emit_vertex_shader(cmd_buffer, pipeline);
	radv_emit_fragment_shader(cmd_buffer, pipeline);

	radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
			       pipeline->graphics.prim_restart_enable);

	cmd_buffer->state.emitted_pipeline = pipeline;
}

static void
radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
{
	si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
			  cmd_buffer->state.dynamic.viewport.viewports);
}

static void
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t count = cmd_buffer->state.dynamic.scissor.count;
	si_write_scissors(cmd_buffer->cs, 0, count,
			  cmd_buffer->state.dynamic.scissor.scissors);
	radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0,
			       cmd_buffer->state.pipeline->graphics.ms.pa_sc_mode_cntl_0 |
			       S_028A48_VPORT_SCISSOR_ENABLE(count ? 1 : 0));
}

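/*
 * Emit one color attachment's CB state: the eleven consecutive registers
 * starting at CB_COLOR0_BASE (stride 0x3c per MRT), plus the DCC base
 * register on VI and newer chips.
 */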
static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
			 int index,
			 struct radv_color_buffer_info *cb)
{
	bool is_vi = cmd_buffer->device->instance->physicalDevice.rad_info.chip_class >= VI;
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
	radeon_emit(cmd_buffer->cs, cb->cb_color_base);
	radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
	radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
	radeon_emit(cmd_buffer->cs, cb->cb_color_view);
	radeon_emit(cmd_buffer->cs, cb->cb_color_info);
	radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
	radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
	radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
	radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
	radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
	radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);

	if (is_vi) { /* DCC BASE */
		radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
	}
}

static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
		      struct radv_ds_buffer_info *ds,
		      struct radv_image *image,
		      VkImageLayout layout)
{
	uint32_t db_z_info = ds->db_z_info;

	if (!radv_layout_has_htile(image, layout))
		db_z_info &= C_028040_TILE_SURFACE_ENABLE;

	if (!radv_layout_can_expclear(image, layout))
		db_z_info &= C_028040_ALLOW_EXPCLEAR & C_028044_ALLOW_EXPCLEAR;

	radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
	radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
	radeon_emit(cmd_buffer->cs, ds->db_depth_info);         /* R_02803C_DB_DEPTH_INFO */
	radeon_emit(cmd_buffer->cs, db_z_info);                 /* R_028040_DB_Z_INFO */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_info);       /* R_028044_DB_STENCIL_INFO */
	radeon_emit(cmd_buffer->cs, ds->db_z_read_base);        /* R_028048_DB_Z_READ_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);  /* R_02804C_DB_STENCIL_READ_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_z_write_base);       /* R_028050_DB_Z_WRITE_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* R_028054_DB_STENCIL_WRITE_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_depth_size);         /* R_028058_DB_DEPTH_SIZE */
	radeon_emit(cmd_buffer->cs, ds->db_depth_slice);        /* R_02805C_DB_DEPTH_SLICE */

	radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
	radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       ds->pa_su_poly_offset_db_fmt_cntl);
}

/*
 * To hw resolve multisample images both src and dst need to have the same
 * micro tiling mode. However we don't always know in advance when creating
 * the images. This function gets called if we have a resolve attachment,
 * and tests if the attachment image has the same tiling mode, then it
 * checks if the generated framebuffer data has the same tiling mode, and
 * updates it if not.
 */
static void radv_set_optimal_micro_tile_mode(struct radv_device *device,
					     struct radv_attachment_info *att,
					     uint32_t micro_tile_mode)
{
	struct radv_image *image = att->attachment->image;
	uint32_t tile_mode_index;
	if (image->surface.nsamples <= 1)
		return;

	if (image->surface.micro_tile_mode != micro_tile_mode) {
		radv_image_set_optimal_micro_tile_mode(device, image, micro_tile_mode);
	}

	if (att->cb.micro_tile_mode != micro_tile_mode) {
		tile_mode_index = image->surface.tiling_index[0];

		att->cb.cb_color_attrib &= C_028C74_TILE_MODE_INDEX;
		att->cb.cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
		att->cb.micro_tile_mode = micro_tile_mode;
	}
}

void
radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  VkClearDepthStencilValue ds_clear_value,
			  VkImageAspectFlags aspects)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;
	unsigned reg_offset = 0, reg_count = 0;

	if (!image->htile.size || !aspects)
		return;

	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
		++reg_count;
	} else {
		++reg_offset;
		va += 4;
	}
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		++reg_count;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */
}

static void
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->htile.size)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
		    COPY_DATA_DST_SEL(COPY_DATA_REG) |
		    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cmd_buffer->cs, 0);
}

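/*
 * Layout note for the clear-value metadata used above and below: each image
 * stores its current clear values at image->clear_value_offset as one 32-bit
 * stencil word followed by one 32-bit depth word (hence the reg_offset /
 * va += 4 adjustment when stencil is not written). The same values are
 * mirrored into DB_STENCIL_CLEAR / DB_DEPTH_CLEAR so the DB registers and
 * the in-memory metadata always agree.
 */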
void
radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  int idx,
			  uint32_t color_values[2])
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2);
	radeon_emit(cmd_buffer->cs, color_values[0]);
	radeon_emit(cmd_buffer->cs, color_values[1]);
}

static void
radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image,
			   int idx)
{
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!image->cmask.size && !image->surface.dcc_size)
		return;

	uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
		    COPY_DATA_DST_SEL(COPY_DATA_REG) |
		    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, reg >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cmd_buffer->cs, 0);
}

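/*
 * Emit the full framebuffer state for the current subpass: per-attachment
 * CB registers (unused MRTs are marked COLOR_INVALID), depth/stencil state
 * or an explicit Z_INVALID/STENCIL_INVALID pair when there is no DS
 * attachment, the window scissor, and loads of any fast-clear values the
 * attachments carry.
 */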
void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
	int i;
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;
	int dst_resolve_micro_tile_mode = -1;

	if (subpass->has_resolve) {
		uint32_t a = subpass->resolve_attachments[0].attachment;
		const struct radv_image *image = framebuffer->attachments[a].attachment->image;
		dst_resolve_micro_tile_mode = image->surface.micro_tile_mode;
	}
	for (i = 0; i < subpass->color_count; ++i) {
		int idx = subpass->color_attachments[i].attachment;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];

		if (dst_resolve_micro_tile_mode != -1) {
			radv_set_optimal_micro_tile_mode(cmd_buffer->device,
							 att, dst_resolve_micro_tile_mode);
		}
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);

		assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
		radv_emit_fb_color_state(cmd_buffer, i, &att->cb);

		radv_load_color_clear_regs(cmd_buffer, att->attachment->image, i);
	}

	for (i = subpass->color_count; i < 8; i++)
		radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
				       S_028C70_FORMAT(V_028C70_COLOR_INVALID));

	if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
		int idx = subpass->depth_stencil_attachment.attachment;
		VkImageLayout layout = subpass->depth_stencil_attachment.layout;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];
		struct radv_image *image = att->attachment->image;
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8);

		radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);

		if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
			cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
			cmd_buffer->state.offset_scale = att->ds.offset_scale;
		}

		radv_load_depth_clear_regs(cmd_buffer, image);
	} else {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);
		radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID));       /* R_028040_DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */
	}
	radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
			       S_028208_BR_X(framebuffer->width) |
			       S_028208_BR_Y(framebuffer->height));
}

void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t db_count_control;

	if (!cmd_buffer->state.active_occlusion_queries) {
		if (cmd_buffer->device->instance->physicalDevice.rad_info.chip_class >= CIK) {
			db_count_control = 0;
		} else {
			db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
		}
	} else {
		if (cmd_buffer->device->instance->physicalDevice.rad_info.chip_class >= CIK) {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(0) | /* TODO: set this to the number of samples of the current framebuffer */
				S_028004_ZPASS_ENABLE(1) |
				S_028004_SLICE_EVEN_ENABLE(1) |
				S_028004_SLICE_ODD_ENABLE(1);
		} else {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(0); /* TODO: set this to the number of samples of the current framebuffer */
		}
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
}

static void
radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
		unsigned width = cmd_buffer->state.dynamic.line_width * 8;
		radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
				       S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
	}

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
		radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
				       RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
				       RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK)) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028430_DB_STENCILREFMASK, 2);
		radeon_emit(cmd_buffer->cs, S_028430_STENCILTESTVAL(d->stencil_reference.front) |
			    S_028430_STENCILMASK(d->stencil_compare_mask.front) |
			    S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
			    S_028430_STENCILOPVAL(1));
		radeon_emit(cmd_buffer->cs, S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
			    S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
			    S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
			    S_028434_STENCILOPVAL_BF(1));
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
				       RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)) {
		radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN, fui(d->depth_bounds.min));
		radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX, fui(d->depth_bounds.max));
	}

	if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE |
				       RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
		struct radv_raster_state *raster = &cmd_buffer->state.pipeline->graphics.raster;
		unsigned slope = fui(d->depth_bias.slope * 16.0f);
		unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);

		if (G_028814_POLY_OFFSET_FRONT_ENABLE(raster->pa_su_sc_mode_cntl)) {
			radeon_set_context_reg_seq(cmd_buffer->cs, R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
			radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
			radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
			radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
			radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
			radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
		}
	}

	cmd_buffer->state.dirty = 0;
}

static void
radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
				  VkShaderStageFlags stages,
				  struct radv_descriptor_set *set,
				  unsigned idx)
{
	if (stages & VK_SHADER_STAGE_FRAGMENT_BIT) {
		radeon_set_sh_reg_seq(cmd_buffer->cs,
				      R_00B030_SPI_SHADER_USER_DATA_PS_0 + 8 * idx, 2);
		radeon_emit(cmd_buffer->cs, set->va);
		radeon_emit(cmd_buffer->cs, set->va >> 32);
	}

	if (stages & VK_SHADER_STAGE_VERTEX_BIT) {
		radeon_set_sh_reg_seq(cmd_buffer->cs,
				      R_00B130_SPI_SHADER_USER_DATA_VS_0 + 8 * idx, 2);
		radeon_emit(cmd_buffer->cs, set->va);
		radeon_emit(cmd_buffer->cs, set->va >> 32);
	}

	if (stages & VK_SHADER_STAGE_COMPUTE_BIT) {
		radeon_set_sh_reg_seq(cmd_buffer->cs,
				      R_00B900_COMPUTE_USER_DATA_0 + 8 * idx, 2);
		radeon_emit(cmd_buffer->cs, set->va);
		radeon_emit(cmd_buffer->cs, set->va >> 32);
	}
}

static void
radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
		       VkShaderStageFlags stages)
{
	unsigned i;
	if (!cmd_buffer->state.descriptors_dirty)
		return;

	for (i = 0; i < MAX_SETS; i++) {
		if (!(cmd_buffer->state.descriptors_dirty & (1 << i)))
			continue;
		struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i];
		if (!set)
			continue;

		radv_emit_descriptor_set_userdata(cmd_buffer, stages, set, i);
	}
	cmd_buffer->state.descriptors_dirty = 0;
}

static void
radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
		     struct radv_pipeline *pipeline,
		     VkShaderStageFlags stages)
{
	struct radv_pipeline_layout *layout = pipeline->layout;
	unsigned offset;
	void *ptr;
	uint64_t va;

	stages &= cmd_buffer->push_constant_stages;
	if (!stages || !layout || (!layout->push_constant_size && !layout->dynamic_offset_count))
		return;

	radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
				     16 * layout->dynamic_offset_count,
				     256, &offset, &ptr);

	memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
	memcpy((char *)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
	       16 * layout->dynamic_offset_count);

	va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	if (stages & VK_SHADER_STAGE_VERTEX_BIT) {
		radeon_set_sh_reg_seq(cmd_buffer->cs,
				      R_00B130_SPI_SHADER_USER_DATA_VS_0 + AC_USERDATA_PUSH_CONST_DYN * 4, 2);
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);
	}

	if (stages & VK_SHADER_STAGE_FRAGMENT_BIT) {
		radeon_set_sh_reg_seq(cmd_buffer->cs,
				      R_00B030_SPI_SHADER_USER_DATA_PS_0 + AC_USERDATA_PUSH_CONST_DYN * 4, 2);
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);
	}

	if (stages & VK_SHADER_STAGE_COMPUTE_BIT) {
		radeon_set_sh_reg_seq(cmd_buffer->cs,
				      R_00B900_COMPUTE_USER_DATA_0 + AC_USERDATA_PUSH_CONST_DYN * 4, 2);
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);
	}

	cmd_buffer->push_constant_stages &= ~stages;
}

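/*
 * The push-constant upload above packs everything into one suballocation:
 *
 *	[0 .. push_constant_size)        raw vkCmdPushConstants data
 *	[push_constant_size .. +16*N)    N dynamic buffer descriptors,
 *	                                 4 dwords each (built in
 *	                                 radv_CmdBindDescriptorSets)
 *
 * and then points AC_USERDATA_PUSH_CONST_DYN at it for each active stage.
 */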
static void
radv_cmd_buffer_flush_state(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
	struct radv_device *device = cmd_buffer->device;
	uint32_t ia_multi_vgt_param;
	uint32_t ls_hs_config = 0;

	unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
					      4096);

	if ((cmd_buffer->state.vertex_descriptors_dirty || cmd_buffer->state.vb_dirty) &&
	    cmd_buffer->state.pipeline->num_vertex_attribs) {
		unsigned vb_offset;
		void *vb_ptr;
		uint32_t i = 0;
		uint32_t num_attribs = cmd_buffer->state.pipeline->num_vertex_attribs;
		uint64_t va;

		/* allocate some descriptor state for vertex buffers */
		radv_cmd_buffer_upload_alloc(cmd_buffer, num_attribs * 16, 256,
					     &vb_offset, &vb_ptr);

		for (i = 0; i < num_attribs; i++) {
			uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
			uint32_t offset;
			int vb = cmd_buffer->state.pipeline->va_binding[i];
			struct radv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
			uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];

			device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
			va = device->ws->buffer_get_va(buffer->bo);

			offset = cmd_buffer->state.vertex_bindings[vb].offset + cmd_buffer->state.pipeline->va_offset[i];
			va += offset + buffer->offset;
			desc[0] = va;
			desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
			if (cmd_buffer->device->instance->physicalDevice.rad_info.chip_class <= CIK && stride)
				desc[2] = (buffer->size - offset - cmd_buffer->state.pipeline->va_format_size[i]) / stride + 1;
			else
				desc[2] = buffer->size - offset;
			desc[3] = cmd_buffer->state.pipeline->va_rsrc_word3[i];
		}

		va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
		va += vb_offset;
		radeon_set_sh_reg_seq(cmd_buffer->cs,
				      R_00B130_SPI_SHADER_USER_DATA_VS_0 + AC_USERDATA_VS_VERTEX_BUFFERS * 4, 2);
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);
	}

	cmd_buffer->state.vertex_descriptors_dirty = false;
	cmd_buffer->state.vb_dirty = 0;
	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
		radv_emit_graphics_pipeline(cmd_buffer, pipeline);

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_RENDER_TARGETS)
		radv_emit_framebuffer_state(cmd_buffer);

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_VIEWPORT)
		radv_emit_viewport(cmd_buffer);

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_SCISSOR)
		radv_emit_scissor(cmd_buffer);

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) {
		radeon_set_context_reg(cmd_buffer->cs, R_028B54_VGT_SHADER_STAGES_EN, 0);
		ia_multi_vgt_param = si_get_ia_multi_vgt_param(cmd_buffer);

		if (cmd_buffer->device->instance->physicalDevice.rad_info.chip_class >= CIK) {
			radeon_set_context_reg_idx(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
			radeon_set_context_reg_idx(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, 2, ls_hs_config);
			radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_030908_VGT_PRIMITIVE_TYPE, 1, cmd_buffer->state.pipeline->graphics.prim);
		} else {
			radeon_set_config_reg(cmd_buffer->cs, R_008958_VGT_PRIMITIVE_TYPE, cmd_buffer->state.pipeline->graphics.prim);
			radeon_set_context_reg(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
			radeon_set_context_reg(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
		}
		radeon_set_context_reg(cmd_buffer->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, cmd_buffer->state.pipeline->graphics.gs_out);
	}

	radv_cmd_buffer_flush_dynamic_state(cmd_buffer);

	radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
	radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline,
			     VK_SHADER_STAGE_ALL_GRAPHICS);

	assert(cmd_buffer->cs->cdw <= cdw_max);

	si_emit_cache_flush(cmd_buffer);
}

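/*
 * Translate a Vulkan source-stage mask into the coarse flushes the hardware
 * offers: compute-ish stages need a CS partial flush, anything that can
 * touch the back of the graphics pipe needs a PS partial flush, and masks
 * confined to the front of the pipe get away with a VS partial flush.
 */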
static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
			     VkPipelineStageFlags src_stage_mask)
{
	if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
			      VK_PIPELINE_STAGE_TRANSFER_BIT |
			      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
			      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
	}

	if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
			      VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
			      VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
			      VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
			      VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
			      VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
			      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
			      VK_PIPELINE_STAGE_TRANSFER_BIT |
			      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
			      VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
			      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
	} else if (src_stage_mask & (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
				     VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
				     VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
				     VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
	}
}

static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
{
	radv_stage_flush(cmd_buffer, barrier->src_stage_mask);

	/* TODO: actual cache flushes */
}

static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
						 VkAttachmentReference att)
{
	unsigned idx = att.attachment;
	struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
	VkImageSubresourceRange range;
	range.aspectMask = 0;
	range.baseMipLevel = view->base_mip;
	range.levelCount = 1;
	range.baseArrayLayer = view->base_layer;
	range.layerCount = cmd_buffer->state.framebuffer->layers;

	radv_handle_image_transition(cmd_buffer,
				     view->image,
				     cmd_buffer->state.attachments[idx].current_layout,
				     att.layout, range,
				     cmd_buffer->state.attachments[idx].pending_clear_aspects);

	cmd_buffer->state.attachments[idx].current_layout = att.layout;
}

void
radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
			    const struct radv_subpass *subpass, bool transitions)
{
	if (transitions) {
		radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);

		for (unsigned i = 0; i < subpass->color_count; ++i) {
			radv_handle_subpass_image_transition(cmd_buffer,
							     subpass->color_attachments[i]);
		}

		for (unsigned i = 0; i < subpass->input_count; ++i) {
			radv_handle_subpass_image_transition(cmd_buffer,
							     subpass->input_attachments[i]);
		}

		if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
			radv_handle_subpass_image_transition(cmd_buffer,
							     subpass->depth_stencil_attachment);
		}
	}

	cmd_buffer->state.subpass = subpass;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_RENDER_TARGETS;
}

static void
radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
				 struct radv_render_pass *pass,
				 const VkRenderPassBeginInfo *info)
{
	struct radv_cmd_state *state = &cmd_buffer->state;

	if (pass->attachment_count == 0) {
		state->attachments = NULL;
		return;
	}

	state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
				      pass->attachment_count *
				      sizeof(state->attachments[0]),
				      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (state->attachments == NULL) {
		/* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
		abort();
	}

	for (uint32_t i = 0; i < pass->attachment_count; ++i) {
		struct radv_render_pass_attachment *att = &pass->attachments[i];
		VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
		VkImageAspectFlags clear_aspects = 0;

		if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
			/* color attachment */
			if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
			}
		} else {
			/* depth/stencil attachment */
			if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
			    att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
			}
			if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
			    att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
			}
		}

		state->attachments[i].pending_clear_aspects = clear_aspects;
		if (clear_aspects && info) {
			assert(info->clearValueCount > i);
			state->attachments[i].clear_value = info->pClearValues[i];
		}

		state->attachments[i].current_layout = att->initial_layout;
	}
}

VkResult radv_AllocateCommandBuffers(
	VkDevice                                    _device,
	const VkCommandBufferAllocateInfo*          pAllocateInfo,
	VkCommandBuffer*                            pCommandBuffers)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);

	VkResult result = VK_SUCCESS;
	uint32_t i;

	for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
		result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
						&pCommandBuffers[i]);
		if (result != VK_SUCCESS)
			break;
	}

	if (result != VK_SUCCESS)
		radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
					i, pCommandBuffers);

	return result;
}

static void
radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
{
	list_del(&cmd_buffer->pool_link);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
	cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void radv_FreeCommandBuffers(
	VkDevice                                    device,
	VkCommandPool                               commandPool,
	uint32_t                                    commandBufferCount,
	const VkCommandBuffer*                      pCommandBuffers)
{
	for (uint32_t i = 0; i < commandBufferCount; i++) {
		RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

		if (cmd_buffer)
			radv_cmd_buffer_destroy(cmd_buffer);
	}
}

static void radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
	cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs,
						      cmd_buffer->upload.upload_bo, 8);
	cmd_buffer->upload.offset = 0;

	cmd_buffer->record_fail = false;
}

VkResult radv_ResetCommandBuffer(
	VkCommandBuffer                             commandBuffer,
	VkCommandBufferResetFlags                   flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	radv_reset_cmd_buffer(cmd_buffer);
	return VK_SUCCESS;
}

VkResult radv_BeginCommandBuffer(
	VkCommandBuffer                             commandBuffer,
	const VkCommandBufferBeginInfo*             pBeginInfo)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	radv_reset_cmd_buffer(cmd_buffer);

	memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));

	/* Set up the initial command buffer configuration. */
	if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
		/* Flush read caches at the beginning of CS not flushed by the kernel. */
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_ICACHE |
			RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
			RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
			RADV_CMD_FLAG_INV_VMEM_L1 |
			RADV_CMD_FLAG_INV_SMEM_L1 |
			RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
			RADV_CMD_FLAG_INV_GLOBAL_L2;
		si_init_config(&cmd_buffer->device->instance->physicalDevice, cmd_buffer);
		radv_set_db_count_control(cmd_buffer);
		si_emit_cache_flush(cmd_buffer);
	}

	if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
		cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
		cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);

		struct radv_subpass *subpass =
			&cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

		radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
		radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
	}

	return VK_SUCCESS;
}

void radv_CmdBindVertexBuffers(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    firstBinding,
	uint32_t                                    bindingCount,
	const VkBuffer*                             pBuffers,
	const VkDeviceSize*                         pOffsets)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

	/* We have to defer setting up vertex buffers since we need the buffer
	 * stride from the pipeline.
	 */

	assert(firstBinding + bindingCount <= MAX_VBS);
	for (uint32_t i = 0; i < bindingCount; i++) {
		vb[firstBinding + i].buffer = radv_buffer_from_handle(pBuffers[i]);
		vb[firstBinding + i].offset = pOffsets[i];
		cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
	}
}

void radv_CmdBindIndexBuffer(
	VkCommandBuffer                             commandBuffer,
	VkBuffer                                    buffer,
	VkDeviceSize                                offset,
	VkIndexType                                 indexType)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	cmd_buffer->state.index_buffer = radv_buffer_from_handle(buffer);
	cmd_buffer->state.index_offset = offset;
	cmd_buffer->state.index_type = indexType; /* vk matches hw */
	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, cmd_buffer->state.index_buffer->bo, 8);
}

void radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_descriptor_set *set,
			      unsigned idx)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;

	cmd_buffer->state.descriptors[idx] = set;
	cmd_buffer->state.descriptors_dirty |= (1 << idx);
	if (!set)
		return;

	for (unsigned j = 0; j < set->layout->buffer_count; ++j)
		if (set->descriptors[j])
			ws->cs_add_buffer(cmd_buffer->cs, set->descriptors[j], 7);

	if (set->bo)
		ws->cs_add_buffer(cmd_buffer->cs, set->bo, 8);
}

void radv_CmdBindDescriptorSets(
	VkCommandBuffer                             commandBuffer,
	VkPipelineBindPoint                         pipelineBindPoint,
	VkPipelineLayout                            _layout,
	uint32_t                                    firstSet,
	uint32_t                                    descriptorSetCount,
	const VkDescriptorSet*                      pDescriptorSets,
	uint32_t                                    dynamicOffsetCount,
	const uint32_t*                             pDynamicOffsets)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
	unsigned dyn_idx = 0;

	unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
					      MAX_SETS * 4 * 6);

	for (unsigned i = 0; i < descriptorSetCount; ++i) {
		unsigned idx = i + firstSet;
		RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
		radv_bind_descriptor_set(cmd_buffer, set, idx);

		for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
			unsigned idx = j + layout->set[i].dynamic_offset_start;
			uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
			assert(dyn_idx < dynamicOffsetCount);

			struct radv_descriptor_range *range = set->dynamic_descriptors + j;
			uint64_t va = range->va + pDynamicOffsets[dyn_idx];
			dst[0] = va;
			dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
			dst[2] = range->size;
			dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
				 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
				 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
				 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
				 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
			cmd_buffer->push_constant_stages |=
				set->layout->dynamic_shader_stages;
		}
	}

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

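/*
 * Note on dynamic buffers: rather than patching descriptor sets, the loop
 * above materializes each dynamic buffer binding as a 4-dword buffer
 * descriptor (base+offset, size, 32-bit float data format) in
 * cmd_buffer->dynamic_buffers; radv_flush_constants then uploads those next
 * to the push constants, so applying a new dynamic offset is just another
 * push-constant flush.
 */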
void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
			   VkPipelineLayout layout,
			   VkShaderStageFlags stageFlags,
			   uint32_t offset,
			   uint32_t size,
			   const void* pValues)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	memcpy(cmd_buffer->push_constants + offset, pValues, size);
	cmd_buffer->push_constant_stages |= stageFlags;
}

VkResult radv_EndCommandBuffer(
	VkCommandBuffer                             commandBuffer)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	si_emit_cache_flush(cmd_buffer);
	if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs) ||
	    cmd_buffer->record_fail)
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	return VK_SUCCESS;
}

static void
radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radv_shader_variant *compute_shader;
	struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
	uint64_t va;

	if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
		return;

	cmd_buffer->state.emitted_compute_pipeline = pipeline;

	compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	va = ws->buffer_get_va(compute_shader->bo);

	ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8);

	unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 16);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2);
	radeon_emit(cmd_buffer->cs, va >> 8);
	radeon_emit(cmd_buffer->cs, va >> 40);

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
	radeon_emit(cmd_buffer->cs, compute_shader->rsrc1);
	radeon_emit(cmd_buffer->cs, compute_shader->rsrc2);

	/* change these once we have scratch support */
	radeon_set_sh_reg(cmd_buffer->cs, R_00B860_COMPUTE_TMPRING_SIZE,
			  S_00B860_WAVES(32) | S_00B860_WAVESIZE(0));

	radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
	radeon_emit(cmd_buffer->cs,
		    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdBindPipeline(
	VkCommandBuffer                             commandBuffer,
	VkPipelineBindPoint                         pipelineBindPoint,
	VkPipeline                                  _pipeline)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	for (unsigned i = 0; i < MAX_SETS; i++) {
		if (cmd_buffer->state.descriptors[i])
			cmd_buffer->state.descriptors_dirty |= (1 << i);
	}

	switch (pipelineBindPoint) {
	case VK_PIPELINE_BIND_POINT_COMPUTE:
		cmd_buffer->state.compute_pipeline = pipeline;
		cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
		break;
	case VK_PIPELINE_BIND_POINT_GRAPHICS:
		cmd_buffer->state.pipeline = pipeline;
		cmd_buffer->state.vertex_descriptors_dirty = true;
		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
		cmd_buffer->push_constant_stages |= pipeline->active_stages;

		/* Apply the dynamic state from the pipeline */
		cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
		radv_dynamic_state_copy(&cmd_buffer->state.dynamic,
					&pipeline->dynamic_state,
					pipeline->dynamic_state_mask);
		break;
	default:
		assert(!"invalid bind point");
		break;
	}
}

        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

        const uint32_t total_count = firstViewport + viewportCount;
        if (cmd_buffer->state.dynamic.viewport.count < total_count)
                cmd_buffer->state.dynamic.viewport.count = total_count;

        memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
               pViewports, viewportCount * sizeof(*pViewports));

        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void radv_CmdSetScissor(
        VkCommandBuffer                             commandBuffer,
        uint32_t                                    firstScissor,
        uint32_t                                    scissorCount,
        const VkRect2D*                             pScissors)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

        const uint32_t total_count = firstScissor + scissorCount;
        if (cmd_buffer->state.dynamic.scissor.count < total_count)
                cmd_buffer->state.dynamic.scissor.count = total_count;

        memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
               pScissors, scissorCount * sizeof(*pScissors));
        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void radv_CmdSetLineWidth(
        VkCommandBuffer                             commandBuffer,
        float                                       lineWidth)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        cmd_buffer->state.dynamic.line_width = lineWidth;
        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void radv_CmdSetDepthBias(
        VkCommandBuffer                             commandBuffer,
        float                                       depthBiasConstantFactor,
        float                                       depthBiasClamp,
        float                                       depthBiasSlopeFactor)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

        cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
        cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
        cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void radv_CmdSetBlendConstants(
        VkCommandBuffer                             commandBuffer,
        const float                                 blendConstants[4])
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

        memcpy(cmd_buffer->state.dynamic.blend_constants,
               blendConstants, sizeof(float) * 4);

        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void radv_CmdSetDepthBounds(
        VkCommandBuffer                             commandBuffer,
        float                                       minDepthBounds,
        float                                       maxDepthBounds)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

        cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
        cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void radv_CmdSetStencilCompareMask(
        VkCommandBuffer                             commandBuffer,
        VkStencilFaceFlags                          faceMask,
        uint32_t                                    compareMask)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

        if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
                cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
        if (faceMask & VK_STENCIL_FACE_BACK_BIT)
                cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void radv_CmdSetStencilWriteMask(
        VkCommandBuffer                             commandBuffer,
        VkStencilFaceFlags                          faceMask,
        uint32_t                                    writeMask)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

        if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
                cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
        if (faceMask & VK_STENCIL_FACE_BACK_BIT)
                cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void radv_CmdSetStencilReference(
        VkCommandBuffer                             commandBuffer,
        VkStencilFaceFlags                          faceMask,
        uint32_t                                    reference)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

        if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
                cmd_buffer->state.dynamic.stencil_reference.front = reference;
        if (faceMask & VK_STENCIL_FACE_BACK_BIT)
                cmd_buffer->state.dynamic.stencil_reference.back = reference;

        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}


void radv_CmdExecuteCommands(
        VkCommandBuffer                             commandBuffer,
        uint32_t                                    commandBufferCount,
        const VkCommandBuffer*                      pCmdBuffers)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);

        for (uint32_t i = 0; i < commandBufferCount; i++) {
                RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);

                primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
        }

        /* if we executed secondaries we need to re-emit our pipelines */
        if (commandBufferCount) {
                primary->state.emitted_pipeline = NULL;
                primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
                primary->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_ALL;
        }
}

VkResult radv_CreateCommandPool(
        VkDevice                                    _device,
        const VkCommandPoolCreateInfo*              pCreateInfo,
        const VkAllocationCallbacks*                pAllocator,
        VkCommandPool*                              pCmdPool)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        struct radv_cmd_pool *pool;

        pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (pool == NULL)
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

        if (pAllocator)
                pool->alloc = *pAllocator;
        else
                pool->alloc = device->alloc;

        list_inithead(&pool->cmd_buffers);

        *pCmdPool = radv_cmd_pool_to_handle(pool);

        return VK_SUCCESS;
}

void radv_DestroyCommandPool(
        VkDevice                                    _device,
        VkCommandPool                               commandPool,
        const VkAllocationCallbacks*                pAllocator)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);

        if (!pool)
                return;

        list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
                                 &pool->cmd_buffers, pool_link) {
                radv_cmd_buffer_destroy(cmd_buffer);
        }

        vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_ResetCommandPool(
        VkDevice                                    device,
        VkCommandPool                               commandPool,
        VkCommandPoolResetFlags                     flags)
{
        RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);

        list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
                radv_reset_cmd_buffer(cmd_buffer);
        }

        return VK_SUCCESS;
}

void radv_CmdBeginRenderPass(
        VkCommandBuffer                             commandBuffer,
        const VkRenderPassBeginInfo*                pRenderPassBegin,
        VkSubpassContents                           contents)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
        RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);

        unsigned cdw_max =
                radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 2048);

        cmd_buffer->state.framebuffer = framebuffer;
        cmd_buffer->state.pass = pass;
        cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
        radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);

        si_emit_cache_flush(cmd_buffer);

        radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
        assert(cmd_buffer->cs->cdw <= cdw_max);

        radv_cmd_buffer_clear_subpass(cmd_buffer);
}

void radv_CmdNextSubpass(
        VkCommandBuffer                             commandBuffer,
        VkSubpassContents                           contents)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

        si_emit_cache_flush(cmd_buffer);
        radv_cmd_buffer_resolve_subpass(cmd_buffer);

        radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 2048);

        radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
        radv_cmd_buffer_clear_subpass(cmd_buffer);
}

void radv_CmdDraw(
        VkCommandBuffer                             commandBuffer,
        uint32_t                                    vertexCount,
        uint32_t                                    instanceCount,
        uint32_t                                    firstVertex,
        uint32_t                                    firstInstance)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        radv_cmd_buffer_flush_state(cmd_buffer);

        unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9);

        radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + AC_USERDATA_VS_BASE_VERTEX * 4, 2);
        radeon_emit(cmd_buffer->cs, firstVertex);
        radeon_emit(cmd_buffer->cs, firstInstance);
        radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
        radeon_emit(cmd_buffer->cs, instanceCount);

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, 0));
        radeon_emit(cmd_buffer->cs, vertexCount);
        radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
                    S_0287F0_USE_OPAQUE(0));

        assert(cmd_buffer->cs->cdw <= cdw_max);
}

static void radv_emit_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer)
{
        /* The restart index has to match the bound index size: 0xffffffff
         * for 32-bit indices, 0xffff for 16-bit ones. */
        uint32_t primitive_reset_index = cmd_buffer->state.index_type ? 0xffffffffu : 0xffffu;

        if (cmd_buffer->state.pipeline->graphics.prim_restart_enable &&
            primitive_reset_index != cmd_buffer->state.last_primitive_reset_index) {
                cmd_buffer->state.last_primitive_reset_index = primitive_reset_index;
                radeon_set_context_reg(cmd_buffer->cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
                                       primitive_reset_index);
        }
}

void radv_CmdDrawIndexed(
        VkCommandBuffer                             commandBuffer,
        uint32_t                                    indexCount,
        uint32_t                                    instanceCount,
        uint32_t                                    firstIndex,
        int32_t                                     vertexOffset,
        uint32_t                                    firstInstance)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        int index_size = cmd_buffer->state.index_type ?
                4 : 2;
        uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size;
        uint64_t index_va;

        radv_cmd_buffer_flush_state(cmd_buffer);
        radv_emit_primitive_reset_index(cmd_buffer);

        unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 14);

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
        radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);

        radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + AC_USERDATA_VS_BASE_VERTEX * 4, 2);
        radeon_emit(cmd_buffer->cs, vertexOffset);
        radeon_emit(cmd_buffer->cs, firstInstance);
        radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
        radeon_emit(cmd_buffer->cs, instanceCount);

        index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo);
        index_va += firstIndex * index_size + cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset;
        radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
        radeon_emit(cmd_buffer->cs, index_max_size);
        radeon_emit(cmd_buffer->cs, index_va);
        radeon_emit(cmd_buffer->cs, (index_va >> 32UL) & 0xFF);
        radeon_emit(cmd_buffer->cs, indexCount);
        radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);

        assert(cmd_buffer->cs->cdw <= cdw_max);
}

static void
radv_emit_indirect_draw(struct radv_cmd_buffer *cmd_buffer,
                        VkBuffer _buffer,
                        VkDeviceSize offset,
                        VkBuffer _count_buffer,
                        VkDeviceSize count_offset,
                        uint32_t draw_count,
                        uint32_t stride,
                        bool indexed)
{
        RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
        RADV_FROM_HANDLE(radv_buffer, count_buffer, _count_buffer);
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
        unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
                                      : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
        uint64_t indirect_va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
        indirect_va += offset + buffer->offset;
        uint64_t count_va = 0;

        if (count_buffer) {
                /* The CP reads the draw count from memory, so the count
                 * buffer needs to be in the buffer list as well. */
                cmd_buffer->device->ws->cs_add_buffer(cs, count_buffer->bo, 8);
                count_va = cmd_buffer->device->ws->buffer_get_va(count_buffer->bo);
                count_va += count_offset + count_buffer->offset;
        }

        if (!draw_count)
                return;

        cmd_buffer->device->ws->cs_add_buffer(cs, buffer->bo, 8);

        radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
        radeon_emit(cs, 1);
        radeon_emit(cs, indirect_va);
        radeon_emit(cs, indirect_va >> 32);

        radeon_emit(cs, PKT3(indexed ?
                             PKT3_DRAW_INDEX_INDIRECT_MULTI :
                             PKT3_DRAW_INDIRECT_MULTI,
                             8, false));
        radeon_emit(cs, 0);
        radeon_emit(cs, ((R_00B130_SPI_SHADER_USER_DATA_VS_0 + AC_USERDATA_VS_BASE_VERTEX * 4) - SI_SH_REG_OFFSET) >> 2);
        radeon_emit(cs, ((R_00B130_SPI_SHADER_USER_DATA_VS_0 + AC_USERDATA_VS_START_INSTANCE * 4) - SI_SH_REG_OFFSET) >> 2);
        radeon_emit(cs, S_2C3_COUNT_INDIRECT_ENABLE(!!count_va)); /* draw_index and count_indirect enable */
        radeon_emit(cs, draw_count); /* count */
        radeon_emit(cs, count_va); /* count_addr */
        radeon_emit(cs, count_va >> 32);
        radeon_emit(cs, stride); /* stride */
        radeon_emit(cs, di_src_sel);
}

static void
radv_cmd_draw_indirect_count(VkCommandBuffer commandBuffer,
                             VkBuffer buffer,
                             VkDeviceSize offset,
                             VkBuffer countBuffer,
                             VkDeviceSize countBufferOffset,
                             uint32_t maxDrawCount,
                             uint32_t stride)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        radv_cmd_buffer_flush_state(cmd_buffer);

        unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 14);

        radv_emit_indirect_draw(cmd_buffer, buffer, offset,
                                countBuffer, countBufferOffset, maxDrawCount, stride, false);

        assert(cmd_buffer->cs->cdw <= cdw_max);
}

static void
radv_cmd_draw_indexed_indirect_count(
        VkCommandBuffer                             commandBuffer,
        VkBuffer                                    buffer,
        VkDeviceSize                                offset,
        VkBuffer                                    countBuffer,
        VkDeviceSize                                countBufferOffset,
        uint32_t                                    maxDrawCount,
        uint32_t                                    stride)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        int index_size = cmd_buffer->state.index_type ? 4 : 2;
        uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size;
        uint64_t index_va;
        radv_cmd_buffer_flush_state(cmd_buffer);
        radv_emit_primitive_reset_index(cmd_buffer);

        index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo);
        index_va += cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset;

        unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 21);

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
        radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type);

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BASE, 1, 0));
        radeon_emit(cmd_buffer->cs, index_va);
        radeon_emit(cmd_buffer->cs, index_va >> 32);

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
        radeon_emit(cmd_buffer->cs, index_max_size);

        radv_emit_indirect_draw(cmd_buffer, buffer, offset,
                                countBuffer, countBufferOffset, maxDrawCount, stride, true);

        assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdDrawIndirect(
        VkCommandBuffer                             commandBuffer,
        VkBuffer                                    buffer,
        VkDeviceSize                                offset,
        uint32_t                                    drawCount,
        uint32_t                                    stride)
{
        radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
                                     VK_NULL_HANDLE, 0, drawCount, stride);
}

void radv_CmdDrawIndexedIndirect(
        VkCommandBuffer                             commandBuffer,
        VkBuffer                                    buffer,
        VkDeviceSize                                offset,
        uint32_t                                    drawCount,
        uint32_t                                    stride)
{
        radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
                                             VK_NULL_HANDLE, 0, drawCount, stride);
}

void radv_CmdDrawIndirectCountAMD(
        VkCommandBuffer                             commandBuffer,
        VkBuffer                                    buffer,
        VkDeviceSize                                offset,
        VkBuffer                                    countBuffer,
        VkDeviceSize                                countBufferOffset,
        uint32_t                                    maxDrawCount,
        uint32_t                                    stride)
{
        radv_cmd_draw_indirect_count(commandBuffer, buffer, offset,
                                     countBuffer, countBufferOffset,
                                     maxDrawCount, stride);
}

void radv_CmdDrawIndexedIndirectCountAMD(
        VkCommandBuffer                             commandBuffer,
        VkBuffer                                    buffer,
        VkDeviceSize                                offset,
        VkBuffer                                    countBuffer,
        VkDeviceSize                                countBufferOffset,
        uint32_t                                    maxDrawCount,
        uint32_t                                    stride)
{
        radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset,
                                             countBuffer, countBufferOffset,
                                             maxDrawCount, stride);
}

static void
radv_flush_compute_state(struct radv_cmd_buffer *cmd_buffer)
{
        radv_emit_compute_pipeline(cmd_buffer);
        radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
        radv_flush_constants(cmd_buffer, cmd_buffer->state.compute_pipeline,
                             VK_SHADER_STAGE_COMPUTE_BIT);
        si_emit_cache_flush(cmd_buffer);
}

void radv_CmdDispatch(
        VkCommandBuffer                             commandBuffer,
        uint32_t                                    x,
        uint32_t                                    y,
        uint32_t                                    z)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

        radv_flush_compute_state(cmd_buffer);

        unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10);

        radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + AC_USERDATA_CS_GRID_SIZE * 4, 3);
        radeon_emit(cmd_buffer->cs, x);
        radeon_emit(cmd_buffer->cs, y);
        radeon_emit(cmd_buffer->cs, z);

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
                    PKT3_SHADER_TYPE_S(1));
        radeon_emit(cmd_buffer->cs, x);
        radeon_emit(cmd_buffer->cs, y);
        radeon_emit(cmd_buffer->cs, z);
        radeon_emit(cmd_buffer->cs, 1);

        assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdDispatchIndirect(
        VkCommandBuffer                             commandBuffer,
        VkBuffer                                    _buffer,
        VkDeviceSize                                offset)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
        uint64_t va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
        va += buffer->offset + offset;

        cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);

        radv_flush_compute_state(cmd_buffer);

        unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 25);

        for (unsigned i = 0; i < 3; ++i) {
                radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
                radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
                            COPY_DATA_DST_SEL(COPY_DATA_REG));
                radeon_emit(cmd_buffer->cs, (va + 4 * i));
                radeon_emit(cmd_buffer->cs, (va + 4 * i) >> 32);
                radeon_emit(cmd_buffer->cs, ((R_00B900_COMPUTE_USER_DATA_0 + AC_USERDATA_CS_GRID_SIZE * 4) >> 2) + i);
                radeon_emit(cmd_buffer->cs, 0);
        }

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_BASE, 2, 0) |
                    PKT3_SHADER_TYPE_S(1));
        radeon_emit(cmd_buffer->cs, 1);
        radeon_emit(cmd_buffer->cs, va);
        radeon_emit(cmd_buffer->cs, va >> 32);

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
                    PKT3_SHADER_TYPE_S(1));
        radeon_emit(cmd_buffer->cs, 0);
        radeon_emit(cmd_buffer->cs, 1);

        assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_unaligned_dispatch(
        struct radv_cmd_buffer                      *cmd_buffer,
        uint32_t                                    x,
        uint32_t                                    y,
        uint32_t                                    z)
{
        struct radv_pipeline *pipeline =
                cmd_buffer->state.compute_pipeline;
        struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
        uint32_t blocks[3], remainder[3];

        blocks[0] = round_up_u32(x, compute_shader->info.cs.block_size[0]);
        blocks[1] = round_up_u32(y, compute_shader->info.cs.block_size[1]);
        blocks[2] = round_up_u32(z, compute_shader->info.cs.block_size[2]);

        /* If aligned, these should be an entire block size, not 0 */
        remainder[0] = x + compute_shader->info.cs.block_size[0] - align_u32_npot(x, compute_shader->info.cs.block_size[0]);
        remainder[1] = y + compute_shader->info.cs.block_size[1] - align_u32_npot(y, compute_shader->info.cs.block_size[1]);
        remainder[2] = z + compute_shader->info.cs.block_size[2] - align_u32_npot(z, compute_shader->info.cs.block_size[2]);

        radv_flush_compute_state(cmd_buffer);

        unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15);

        radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
        radeon_emit(cmd_buffer->cs,
                    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]) |
                    S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
        radeon_emit(cmd_buffer->cs,
                    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]) |
                    S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
        radeon_emit(cmd_buffer->cs,
                    S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]) |
                    S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));

        radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + AC_USERDATA_CS_GRID_SIZE * 4, 3);
        radeon_emit(cmd_buffer->cs, blocks[0]);
        radeon_emit(cmd_buffer->cs, blocks[1]);
        radeon_emit(cmd_buffer->cs, blocks[2]);

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
                    PKT3_SHADER_TYPE_S(1));
        radeon_emit(cmd_buffer->cs, blocks[0]);
        radeon_emit(cmd_buffer->cs, blocks[1]);
        radeon_emit(cmd_buffer->cs, blocks[2]);
        radeon_emit(cmd_buffer->cs, S_00B800_COMPUTE_SHADER_EN(1) |
                    S_00B800_PARTIAL_TG_EN(1));

        assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdEndRenderPass(
        VkCommandBuffer                             commandBuffer)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

        radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);

        si_emit_cache_flush(cmd_buffer);
        radv_cmd_buffer_resolve_subpass(cmd_buffer);

        for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
                VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
                radv_handle_subpass_image_transition(cmd_buffer,
                                                     (VkAttachmentReference){i, layout});
        }

        vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);

        cmd_buffer->state.pass = NULL;
        cmd_buffer->state.subpass = NULL;
        cmd_buffer->state.attachments = NULL;
        cmd_buffer->state.framebuffer = NULL;
}


static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
                                  struct radv_image *image)
{
        cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
                                        RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

        radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->htile.offset,
                         image->htile.size, 0xffffffff);

        cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
                                        RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
                                        RADV_CMD_FLAG_INV_VMEM_L1 |
                                        RADV_CMD_FLAG_INV_GLOBAL_L2;
}

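/* Decide what HTILE work a depth/stencil layout transition needs: nothing
 * when a pending full-surface clear will initialize HTILE anyway,
 * (re)initialization when entering an HTILE-enabled layout, and an in-place
 * decompress when leaving one (or leaving a compressed layout). */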
static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                               struct radv_image *image,
                                               VkImageLayout src_layout,
                                               VkImageLayout dst_layout,
                                               VkImageSubresourceRange range,
                                               VkImageAspectFlags pending_clears)
{
        if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
            (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
            cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
            cmd_buffer->state.render_area.extent.width == image->extent.width &&
            cmd_buffer->state.render_area.extent.height == image->extent.height) {
                /* The clear will initialize htile. */
                return;
        } else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
                   radv_layout_has_htile(image, dst_layout)) {
                /* TODO: merge with the clear if applicable */
                radv_initialize_htile(cmd_buffer, image);
        } else if (!radv_layout_has_htile(image, src_layout) &&
                   radv_layout_has_htile(image, dst_layout)) {
                radv_initialize_htile(cmd_buffer, image);
        } else if ((radv_layout_has_htile(image, src_layout) &&
                    !radv_layout_has_htile(image, dst_layout)) ||
                   (radv_layout_is_htile_compressed(image, src_layout) &&
                    !radv_layout_is_htile_compressed(image, dst_layout))) {

                range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
                range.baseMipLevel = 0;
                range.levelCount = 1;

                radv_decompress_depth_image_inplace(cmd_buffer, image, &range);
        }
}

void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
                           struct radv_image *image, uint32_t value)
{
        cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                                        RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

        radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->cmask.offset,
                         image->cmask.size, value);

        cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
                                        RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
                                        RADV_CMD_FLAG_INV_VMEM_L1 |
                                        RADV_CMD_FLAG_INV_GLOBAL_L2;
}

static void radv_handle_cmask_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                               struct radv_image *image,
                                               VkImageLayout src_layout,
                                               VkImageLayout dst_layout,
                                               VkImageSubresourceRange range,
                                               VkImageAspectFlags pending_clears)
{
        if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
                if (image->fmask.size)
                        radv_initialise_cmask(cmd_buffer, image, 0xccccccccu);
                else
                        radv_initialise_cmask(cmd_buffer, image, 0xffffffffu);
        } else if (radv_layout_has_cmask(image, src_layout) &&
                   !radv_layout_has_cmask(image, dst_layout)) {
                radv_fast_clear_flush_image_inplace(cmd_buffer, image);
        }
}

void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
                         struct radv_image *image, uint32_t value)
{
        cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                                        RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

        radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->dcc_offset,
                         image->surface.dcc_size, value);

        cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                                        RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
                                        RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
                                        RADV_CMD_FLAG_INV_VMEM_L1 |
                                        RADV_CMD_FLAG_INV_GLOBAL_L2;
}

static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                             struct radv_image *image,
                                             VkImageLayout src_layout,
                                             VkImageLayout dst_layout,
                                             VkImageSubresourceRange
                                             range,
                                             VkImageAspectFlags pending_clears)
{
        if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
                radv_initialize_dcc(cmd_buffer, image, 0x20202020u);
        } else if (src_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL &&
                   dst_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
                radv_fast_clear_flush_image_inplace(cmd_buffer, image);
        }
}

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                         struct radv_image *image,
                                         VkImageLayout src_layout,
                                         VkImageLayout dst_layout,
                                         VkImageSubresourceRange range,
                                         VkImageAspectFlags pending_clears)
{
        if (image->htile.size)
                radv_handle_depth_image_transition(cmd_buffer, image, src_layout,
                                                   dst_layout, range, pending_clears);

        if (image->cmask.size)
                radv_handle_cmask_image_transition(cmd_buffer, image, src_layout,
                                                   dst_layout, range, pending_clears);

        if (image->surface.dcc_size)
                radv_handle_dcc_image_transition(cmd_buffer, image, src_layout,
                                                 dst_layout, range, pending_clears);
}

void radv_CmdPipelineBarrier(
        VkCommandBuffer                             commandBuffer,
        VkPipelineStageFlags                        srcStageMask,
        VkPipelineStageFlags                        destStageMask,
        VkBool32                                    byRegion,
        uint32_t                                    memoryBarrierCount,
        const VkMemoryBarrier*                      pMemoryBarriers,
        uint32_t                                    bufferMemoryBarrierCount,
        const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
        uint32_t                                    imageMemoryBarrierCount,
        const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        VkAccessFlags src_flags = 0;
        VkAccessFlags dst_flags = 0;
        uint32_t b;

        for (uint32_t i = 0; i < memoryBarrierCount; i++) {
                src_flags |= pMemoryBarriers[i].srcAccessMask;
                dst_flags |= pMemoryBarriers[i].dstAccessMask;
        }

        for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
                src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
                dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
        }

        for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
                src_flags |= pImageMemoryBarriers[i].srcAccessMask;
                dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
        }

        enum radv_cmd_flush_bits flush_bits = 0;
        for_each_bit(b, src_flags) {
                switch ((VkAccessFlagBits)(1 << b)) {
                case VK_ACCESS_SHADER_WRITE_BIT:
                        flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
                        break;
                case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
                        flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
                        break;
                case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
                        flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
                        break;
                case VK_ACCESS_TRANSFER_WRITE_BIT:
                        flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
                        break;
                default:
                        break;
                }
        }
        cmd_buffer->state.flush_bits |= flush_bits;

        for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
                RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
                radv_handle_image_transition(cmd_buffer, image,
                                             pImageMemoryBarriers[i].oldLayout,
                                             pImageMemoryBarriers[i].newLayout,
                                             pImageMemoryBarriers[i].subresourceRange,
                                             0);
        }

        flush_bits = 0;

        for_each_bit(b, dst_flags) {
                switch ((VkAccessFlagBits)(1 << b)) {
                case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
                case VK_ACCESS_INDEX_READ_BIT:
                case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
                case VK_ACCESS_UNIFORM_READ_BIT:
                        flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1;
                        break;
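                /* Shader reads are not guaranteed to be served by the VMEM
                 * L1 path alone, so invalidating the global L2 here is the
                 * conservative choice. */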
                case VK_ACCESS_SHADER_READ_BIT:
                        flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2;
                        break;
                case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
                case VK_ACCESS_TRANSFER_READ_BIT:
                case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
                        flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER | RADV_CMD_FLAG_INV_GLOBAL_L2;
                        break;
                default:
                        break;
                }
        }

        flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
                      RADV_CMD_FLAG_PS_PARTIAL_FLUSH;

        cmd_buffer->state.flush_bits |= flush_bits;
}


static void write_event(struct radv_cmd_buffer *cmd_buffer,
                        struct radv_event *event,
                        VkPipelineStageFlags stageMask,
                        unsigned value)
{
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
        uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);

        cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);

        unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);

        /* TODO: this is overkill. Probably should figure something out from
         * the stage mask. */

        if (cmd_buffer->device->instance->physicalDevice.rad_info.chip_class == CIK) {
                /* On CIK the EOP event is written twice, first with a dummy
                 * value; this looks like the usual double-EOP workaround for
                 * that generation. */
                radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
                radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
                            EVENT_INDEX(5));
                radeon_emit(cs, va);
                radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
                radeon_emit(cs, 2);
                radeon_emit(cs, 0);
        }

        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
        radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) |
                    EVENT_INDEX(5));
        radeon_emit(cs, va);
        radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1));
        radeon_emit(cs, value);
        radeon_emit(cs, 0);

        assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
                      VkEvent _event,
                      VkPipelineStageFlags stageMask)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        RADV_FROM_HANDLE(radv_event, event, _event);

        write_event(cmd_buffer, event, stageMask, 1);
}

void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
                        VkEvent _event,
                        VkPipelineStageFlags stageMask)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        RADV_FROM_HANDLE(radv_event, event, _event);

        write_event(cmd_buffer, event, stageMask, 0);
}

void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
                        uint32_t eventCount,
                        const VkEvent* pEvents,
                        VkPipelineStageFlags srcStageMask,
                        VkPipelineStageFlags dstStageMask,
                        uint32_t memoryBarrierCount,
                        const VkMemoryBarrier* pMemoryBarriers,
                        uint32_t bufferMemoryBarrierCount,
                        const VkBufferMemoryBarrier* pBufferMemoryBarriers,
                        uint32_t imageMemoryBarrierCount,
                        const VkImageMemoryBarrier* pImageMemoryBarriers)
{
        RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
        struct radeon_winsys_cs *cs = cmd_buffer->cs;

        for (unsigned i = 0; i < eventCount; ++i) {
                RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
                uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);

                cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);

                unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);

                radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
                radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
                radeon_emit(cs, va);
                radeon_emit(cs, va >> 32);
                radeon_emit(cs, 1); /* reference value */
                radeon_emit(cs, 0xffffffff); /* mask */
                radeon_emit(cs, 4); /* poll interval */

                assert(cmd_buffer->cs->cdw <= cdw_max);
        }

        for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
                RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);

                radv_handle_image_transition(cmd_buffer, image,
                                             pImageMemoryBarriers[i].oldLayout,
                                             pImageMemoryBarriers[i].newLayout,
                                             pImageMemoryBarriers[i].subresourceRange,
                                             0);
        }

        /* TODO: figure out how to do memory barriers without waiting */
        cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
                                        RADV_CMD_FLAG_INV_GLOBAL_L2 |
                                        RADV_CMD_FLAG_INV_VMEM_L1 |
                                        RADV_CMD_FLAG_INV_SMEM_L1;
}