/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "si_public.h"
#include "si_shader_internal.h"
#include "sid.h"

#include "radeon/radeon_uvd.h"
#include "util/u_memory.h"
#include "util/u_suballoc.h"
#include "vl/vl_decoder.h"
#include "../ddebug/dd_util.h"

/* Baseline LLVM target feature string passed to every target machine
 * created by this screen (see si_create_llvm_target_machine). */
#define SI_LLVM_DEFAULT_FEATURES \
	"+DumpCode,+vgpr-spilling,-fp32-denormals,+fp64-denormals,-xnack"

/*
 * pipe_context
 */

/* Tear down a context created by si_create_context: drop all ring/buffer
 * references and CSO state, destroy helper objects, then free the context.
 * Mirrors the allocations done in si_create_context, so the two must be
 * kept in sync. Also used on the si_create_context failure path, hence the
 * NULL checks on members that may not have been created yet. */
static void si_destroy_context(struct pipe_context *context)
{
	struct si_context *sctx = (struct si_context *)context;
	int i;

	/* Unreference the framebuffer normally to disable related logic
	 * properly.
	 */
	struct pipe_framebuffer_state fb = {};
	context->set_framebuffer_state(context, &fb);

	si_release_all_descriptors(sctx);

	if (sctx->ce_suballocator)
		u_suballocator_destroy(sctx->ce_suballocator);

	pipe_resource_reference(&sctx->esgs_ring, NULL);
	pipe_resource_reference(&sctx->gsvs_ring, NULL);
	pipe_resource_reference(&sctx->tf_ring, NULL);
	pipe_resource_reference(&sctx->tess_offchip_ring, NULL);
	pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
	r600_resource_reference(&sctx->border_color_buffer, NULL);
	free(sctx->border_color_table);
	r600_resource_reference(&sctx->scratch_buffer, NULL);
	r600_resource_reference(&sctx->compute_scratch_buffer, NULL);

	si_pm4_free_state(sctx, sctx->init_config, ~0);
	if (sctx->init_config_gs_rings)
		si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
	for (i = 0; i < ARRAY_SIZE(sctx->vgt_shader_config); i++)
		si_pm4_delete_state(sctx, vgt_shader_config, sctx->vgt_shader_config[i]);

	/* Internal CSOs must go through the regular delete callbacks so the
	 * driver-side bookkeeping is updated consistently. */
	if (sctx->fixed_func_tcs_shader.cso)
		sctx->b.b.delete_tcs_state(&sctx->b.b, sctx->fixed_func_tcs_shader.cso);
	if (sctx->custom_dsa_flush)
		sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush);
	if (sctx->custom_blend_resolve)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_resolve);
	if (sctx->custom_blend_decompress)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_decompress);
	if (sctx->custom_blend_fastclear)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_fastclear);
	if (sctx->custom_blend_dcc_decompress)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_dcc_decompress);

	if (sctx->blitter)
		util_blitter_destroy(sctx->blitter);

	r600_common_context_cleanup(&sctx->b);

	LLVMDisposeTargetMachine(sctx->tm);

	r600_resource_reference(&sctx->trace_buf, NULL);
	r600_resource_reference(&sctx->last_trace_buf, NULL);
	radeon_clear_saved_cs(&sctx->last_gfx);

	FREE(sctx);
}

/* Query GPU reset status through the winsys context; only installed as the
 * get_device_reset_status callback on amdgpu (DRM major == 3). */
static enum pipe_reset_status
si_amdgpu_get_reset_status(struct pipe_context *ctx)
{
	struct si_context *sctx = (struct si_context *)ctx;

	return sctx->b.ws->ctx_query_reset_status(sctx->b.ctx);
}

/* Apitrace profiling:
 * 1) qapitrace : Tools -> Profile: Measure CPU & GPU times
 * 2) In the middle panel, zoom in (mouse wheel) on some bad draw call
 *    and remember its number.
 * 3) In Mesa, enable queries and performance counters around that draw
 *    call and print the results.
 * 4) glretrace --benchmark --markers ..
 */
static void si_emit_string_marker(struct pipe_context *ctx,
				  const char *string, int len)
{
	struct si_context *sctx = (struct si_context *)ctx;

	/* Only parses apitrace call markers to track the current call number;
	 * the marker string is not forwarded to the GPU command stream here. */
	dd_parse_apitrace_marker(string, len, &sctx->apitrace_call_number);
}

/* Create an LLVM target machine for the screen's GPU family. The
 * +si-scheduler feature is appended only when DBG_SI_SCHED is set and the
 * LLVM version supports it (>= 3.8). */
static LLVMTargetMachineRef
si_create_llvm_target_machine(struct si_screen *sscreen)
{
	const char *triple = "amdgcn--";

	return LLVMCreateTargetMachine(si_llvm_get_amdgpu_target(triple), triple,
				       r600_get_llvm_processor_name(sscreen->b.family),
#if HAVE_LLVM >= 0x0308
				       sscreen->b.debug_flags & DBG_SI_SCHED ?
					       SI_LLVM_DEFAULT_FEATURES ",+si-scheduler" :
#endif
				       SI_LLVM_DEFAULT_FEATURES,
				       LLVMCodeGenLevelDefault,
				       LLVMRelocDefault,
				       LLVMCodeModelDefault);
}

/* Create a radeonsi pipe_context. Initialization order matters: the screen
 * pointer must be set first, CS/state setup happens before si_begin_new_cs,
 * and the steps marked "must be last" emit commands into the new CS.
 * On any failure the partially-built context is torn down via
 * si_destroy_context and NULL is returned. */
static struct pipe_context *si_create_context(struct pipe_screen *screen,
                                              void *priv, unsigned flags)
{
	struct si_context *sctx = CALLOC_STRUCT(si_context);
	struct si_screen* sscreen = (struct si_screen *)screen;
	struct radeon_winsys *ws = sscreen->b.ws;
	int shader, i;

	if (!sctx)
		return NULL;

	if (sscreen->b.debug_flags & DBG_CHECK_VM)
		flags |= PIPE_CONTEXT_DEBUG;

	if (flags & PIPE_CONTEXT_DEBUG)
		sscreen->record_llvm_ir = true; /* racy but not critical */

	sctx->b.b.screen = screen; /* this must be set first */
	sctx->b.b.priv = priv;
	sctx->b.b.destroy = si_destroy_context;
	sctx->b.b.emit_string_marker = si_emit_string_marker;
	sctx->b.set_atom_dirty = (void *)si_set_atom_dirty;
	sctx->screen = sscreen; /* Easy accessing of screen/winsys. */
	sctx->is_debug = (flags & PIPE_CONTEXT_DEBUG) != 0;

	if (!r600_common_context_init(&sctx->b, &sscreen->b, flags))
		goto fail;

	if (sscreen->b.info.drm_major == 3)
		sctx->b.b.get_device_reset_status = si_amdgpu_get_reset_status;

	si_init_blit_functions(sctx);
	si_init_compute_functions(sctx);
	si_init_cp_dma_functions(sctx);
	si_init_debug_functions(sctx);

	if (sscreen->b.info.has_uvd) {
		sctx->b.b.create_video_codec = si_uvd_create_decoder;
		sctx->b.b.create_video_buffer = si_video_buffer_create;
	} else {
		/* No UVD hardware: fall back to the software video decoder. */
		sctx->b.b.create_video_codec = vl_create_decoder;
		sctx->b.b.create_video_buffer = vl_video_buffer_create;
	}

	sctx->b.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX,
				       si_context_gfx_flush, sctx);

	/* SI + AMDGPU + CE = GPU hang */
	if (!(sscreen->b.debug_flags & DBG_NO_CE) && ws->cs_add_const_ib &&
	    sscreen->b.chip_class != SI &&
	    /* These can't use CE due to a power gating bug in the kernel. */
	    sscreen->b.family != CHIP_CARRIZO &&
	    sscreen->b.family != CHIP_STONEY) {
		sctx->ce_ib = ws->cs_add_const_ib(sctx->b.gfx.cs);
		if (!sctx->ce_ib)
			goto fail;

		if (ws->cs_add_const_preamble_ib) {
			sctx->ce_preamble_ib =
				ws->cs_add_const_preamble_ib(sctx->b.gfx.cs);

			if (!sctx->ce_preamble_ib)
				goto fail;
		}

		sctx->ce_suballocator =
			u_suballocator_create(&sctx->b.b, 1024 * 1024,
					      0, PIPE_USAGE_DEFAULT, false);
		if (!sctx->ce_suballocator)
			goto fail;
	}

	sctx->b.gfx.flush = si_context_gfx_flush;

	/* Border colors: a CPU-side table plus a persistently-mapped GPU
	 * buffer holding SI_MAX_BORDER_COLORS entries. */
	sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
					  sizeof(*sctx->border_color_table));
	if (!sctx->border_color_table)
		goto fail;

	sctx->border_color_buffer = (struct r600_resource*)
		pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
				   SI_MAX_BORDER_COLORS *
				   sizeof(*sctx->border_color_table));
	if (!sctx->border_color_buffer)
		goto fail;

	sctx->border_color_map =
		ws->buffer_map(sctx->border_color_buffer->buf,
			       NULL, PIPE_TRANSFER_WRITE);
	if (!sctx->border_color_map)
		goto fail;

	si_init_all_descriptors(sctx);
	si_init_state_functions(sctx);
	si_init_shader_functions(sctx);

	if (sctx->b.chip_class >= CIK)
		cik_init_sdma_functions(sctx);
	else
		si_init_dma_functions(sctx);

	if (sscreen->b.debug_flags & DBG_FORCE_DMA)
		sctx->b.b.resource_copy_region = sctx->b.dma_copy;

	sctx->blitter = util_blitter_create(&sctx->b.b);
	if (sctx->blitter == NULL)
		goto fail;
	sctx->blitter->draw_rectangle = r600_draw_rectangle;

	sctx->sample_mask.sample_mask = 0xffff;

	/* these must be last */
	si_begin_new_cs(sctx);
	r600_query_init_backend_mask(&sctx->b); /* this emits commands and must be last */

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
	 * if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
	if (sctx->b.chip_class == CIK) {
		sctx->null_const_buf.buffer = pipe_buffer_create(screen, PIPE_BIND_CONSTANT_BUFFER,
								 PIPE_USAGE_DEFAULT, 16);
		if (!sctx->null_const_buf.buffer)
			goto fail;
		sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

		/* Bind the dummy buffer to every user constant buffer slot of
		 * every shader stage, and to the internal RW slots below. */
		for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
			for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
				sctx->b.b.set_constant_buffer(&sctx->b.b, shader, i,
							      &sctx->null_const_buf);
			}
		}

		si_set_rw_buffer(sctx, SI_HS_CONST_DEFAULT_TESS_LEVELS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_SAMPLE_POSITIONS,
				 &sctx->null_const_buf);

		/* Clear the NULL constant buffer, because loads should return zeros. */
		sctx->b.clear_buffer(&sctx->b.b, sctx->null_const_buf.buffer, 0,
				     sctx->null_const_buf.buffer->width0, 0,
				     R600_COHERENCY_SHADER);
	}

	uint64_t max_threads_per_block;
	screen->get_compute_param(screen, PIPE_SHADER_IR_TGSI,
				  PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
				  &max_threads_per_block);

	/* The maximum number of scratch waves. Scratch space isn't divided
	 * evenly between CUs. The number is only a function of the number of CUs.
	 * We can decrease the constant to decrease the scratch buffer size.
	 *
	 * sctx->scratch_waves must be >= the maximum possible size of
	 * 1 threadgroup, so that the hw doesn't hang from being unable
	 * to start any.
	 *
	 * The recommended value is 4 per CU at most. Higher numbers don't
	 * bring much benefit, but they still occupy chip resources (think
	 * async compute). I've seen ~2% performance difference between 4 and 32.
	 */
	sctx->scratch_waves = MAX2(32 * sscreen->b.info.num_good_compute_units,
				   max_threads_per_block / 64);

	sctx->tm = si_create_llvm_target_machine(sscreen);

	return &sctx->b.b;
fail:
	fprintf(stderr, "radeonsi: Failed to create a context.\n");
	si_destroy_context(&sctx->b.b);
	return NULL;
}

/*
 * pipe_screen
 */

/* Whether TGSI compute shaders are usable: requires LLVM >= 3.9 and a
 * kernel new enough to allow the register writes used for indirect
 * dispatches on SI. */
static bool si_have_tgsi_compute(struct si_screen *sscreen)
{
	/* Old kernels disallowed some register writes for SI
	 * that are used for indirect dispatches. */
	return HAVE_LLVM >= 0x309 &&
	       (sscreen->b.chip_class >= CIK ||
		sscreen->b.info.drm_major == 3 ||
		(sscreen->b.info.drm_major == 2 &&
		 sscreen->b.info.drm_minor >= 45));
}

/* Gallium get_param callback: report screen-level capabilities (PIPE_CAP_*).
 * Grouped into supported booleans, conditional features, limits, and
 * unsupported features; unknown caps fall through to 0. */
static int si_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
{
	struct si_screen *sscreen = (struct si_screen *)pscreen;

	switch (param) {
	/* Supported features (boolean caps). */
	case PIPE_CAP_TWO_SIDED_STENCIL:
	case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
	case PIPE_CAP_ANISOTROPIC_FILTER:
	case PIPE_CAP_POINT_SPRITE:
	case PIPE_CAP_OCCLUSION_QUERY:
	case PIPE_CAP_TEXTURE_SHADOW_MAP:
	case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
	case PIPE_CAP_BLEND_EQUATION_SEPARATE:
	case PIPE_CAP_TEXTURE_SWIZZLE:
	case PIPE_CAP_DEPTH_CLIP_DISABLE:
	case PIPE_CAP_SHADER_STENCIL_EXPORT:
	case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
	case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
	case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
	case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
	case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
	case PIPE_CAP_SM3:
	case PIPE_CAP_SEAMLESS_CUBE_MAP:
	case PIPE_CAP_PRIMITIVE_RESTART:
	case PIPE_CAP_CONDITIONAL_RENDER:
	case PIPE_CAP_TEXTURE_BARRIER:
	case PIPE_CAP_INDEP_BLEND_ENABLE:
	case PIPE_CAP_INDEP_BLEND_FUNC:
	case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
	case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
	case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_USER_INDEX_BUFFERS:
	case PIPE_CAP_USER_CONSTANT_BUFFERS:
	case PIPE_CAP_START_INSTANCE:
	case PIPE_CAP_NPOT_TEXTURES:
	case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
	case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
	case PIPE_CAP_VERTEX_COLOR_CLAMPED:
	case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
	case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
	case PIPE_CAP_TGSI_INSTANCEID:
	case PIPE_CAP_COMPUTE:
	case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
	case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT:
	case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
	case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
	case PIPE_CAP_CUBE_MAP_ARRAY:
	case PIPE_CAP_SAMPLE_SHADING:
	case PIPE_CAP_DRAW_INDIRECT:
	case PIPE_CAP_CLIP_HALFZ:
	case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
	case PIPE_CAP_POLYGON_OFFSET_CLAMP:
	case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
	case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
	case PIPE_CAP_TGSI_TEXCOORD:
	case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE:
	case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
	case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
	case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
	case PIPE_CAP_SHAREABLE_SHADERS:
	case PIPE_CAP_DEPTH_BOUNDS_TEST:
	case PIPE_CAP_SAMPLER_VIEW_TARGET:
	case PIPE_CAP_TEXTURE_QUERY_LOD:
	case PIPE_CAP_TEXTURE_GATHER_SM5:
	case PIPE_CAP_TGSI_TXQS:
	case PIPE_CAP_FORCE_PERSAMPLE_INTERP:
	case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
	case PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL:
	case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL:
	case PIPE_CAP_INVALIDATE_BUFFER:
	case PIPE_CAP_SURFACE_REINTERPRET_BLOCKS:
	case PIPE_CAP_QUERY_MEMORY_INFO:
	case PIPE_CAP_TGSI_PACK_HALF_FLOAT:
	case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
	case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
	case PIPE_CAP_GENERATE_MIPMAP:
	case PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED:
	case PIPE_CAP_STRING_MARKER:
	case PIPE_CAP_CLEAR_TEXTURE:
	case PIPE_CAP_CULL_DISTANCE:
	case PIPE_CAP_TGSI_ARRAY_COMPONENTS:
	case PIPE_CAP_TGSI_CAN_READ_OUTPUTS:
	case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
		return 1;

	case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
		return !SI_BIG_ENDIAN && sscreen->b.info.has_userptr;

	case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
		return (sscreen->b.info.drm_major == 2 &&
			sscreen->b.info.drm_minor >= 43) ||
		       sscreen->b.info.drm_major == 3;

	case PIPE_CAP_TEXTURE_MULTISAMPLE:
		/* 2D tiling on CIK is supported since DRM 2.35.0 */
		return sscreen->b.chip_class < CIK ||
		       (sscreen->b.info.drm_major == 2 &&
			sscreen->b.info.drm_minor >= 35) ||
		       sscreen->b.info.drm_major == 3;

	case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
		return R600_MAP_BUFFER_ALIGNMENT;

	case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
	case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
	case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
		return 4;
	case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
		return HAVE_LLVM >= 0x0309 ? 4 : 0;

	case PIPE_CAP_GLSL_FEATURE_LEVEL:
		if (si_have_tgsi_compute(sscreen))
			return 450;
		return HAVE_LLVM >= 0x0309 ? 420 :
		       HAVE_LLVM >= 0x0307 ? 410 : 330;

	case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
		return MIN2(sscreen->b.info.max_alloc_size, INT_MAX);

	case PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY:
		return 0;

	/* Unsupported features. */
	case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
	case PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS:
	case PIPE_CAP_USER_VERTEX_BUFFERS:
	case PIPE_CAP_FAKE_SW_MSAA:
	case PIPE_CAP_TEXTURE_GATHER_OFFSETS:
	case PIPE_CAP_VERTEXID_NOBASE:
	case PIPE_CAP_PRIMITIVE_RESTART_FOR_PATCHES:
	case PIPE_CAP_TGSI_VOTE:
	case PIPE_CAP_MAX_WINDOW_RECTANGLES:
	case PIPE_CAP_NATIVE_FENCE_FD:
	case PIPE_CAP_TGSI_FS_FBFETCH:
		return 0;

	case PIPE_CAP_QUERY_BUFFER_OBJECT:
		return si_have_tgsi_compute(sscreen);

	case PIPE_CAP_DRAW_PARAMETERS:
	case PIPE_CAP_MULTI_DRAW_INDIRECT:
	case PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS:
		return sscreen->has_draw_indirect_multi;

	case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
		return 30;

	case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
		return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600;

	/* Stream output. */
	case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
		return sscreen->b.has_streamout ? 4 : 0;
	case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
	case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
		return sscreen->b.has_streamout ? 1 : 0;
	case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
	case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
		return sscreen->b.has_streamout ? 32*4 : 0;

	/* Geometry shader output. */
	case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
		return 1024;
	case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
		return 4095;
	case PIPE_CAP_MAX_VERTEX_STREAMS:
		return 4;

	case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
		return 2048;

	/* Texturing. */
	case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
	case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
		return 15; /* 16384 */
	case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
		/* textures support 8192, but layered rendering supports 2048 */
		return 12;
	case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
		/* textures support 8192, but layered rendering supports 2048 */
		return 2048;

	/* Render targets. */
	case PIPE_CAP_MAX_RENDER_TARGETS:
		return 8;

	case PIPE_CAP_MAX_VIEWPORTS:
		return R600_MAX_VIEWPORTS;
	case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
		return 8;

	/* Timer queries, present when the clock frequency is non zero. */
	case PIPE_CAP_QUERY_TIMESTAMP:
	case PIPE_CAP_QUERY_TIME_ELAPSED:
		return sscreen->b.info.clock_crystal_freq != 0;

	case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
	case PIPE_CAP_MIN_TEXEL_OFFSET:
		return -32;

	case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
	case PIPE_CAP_MAX_TEXEL_OFFSET:
		return 31;

	case PIPE_CAP_ENDIANNESS:
		return PIPE_ENDIAN_LITTLE;

	case PIPE_CAP_VENDOR_ID:
		return ATI_VENDOR_ID;
	case PIPE_CAP_DEVICE_ID:
		return sscreen->b.info.pci_id;
	case PIPE_CAP_ACCELERATED:
		return 1;
	case PIPE_CAP_VIDEO_MEMORY:
		return sscreen->b.info.vram_size >> 20;
	case PIPE_CAP_UMA:
		return 0;
	case PIPE_CAP_PCI_GROUP:
		return sscreen->b.info.pci_domain;
	case PIPE_CAP_PCI_BUS:
		return sscreen->b.info.pci_bus;
	case PIPE_CAP_PCI_DEVICE:
		return sscreen->b.info.pci_dev;
	case PIPE_CAP_PCI_FUNCTION:
		return sscreen->b.info.pci_func;
	}
	return 0;
}

/* Gallium get_shader_param callback: report per-shader-stage capabilities
 * (PIPE_SHADER_CAP_*). Compute shaders override a few caps first; all other
 * stages share the common switch below. */
static int si_get_shader_param(struct pipe_screen* pscreen, unsigned shader, enum pipe_shader_cap param)
{
	struct si_screen *sscreen = (struct si_screen *)pscreen;

	switch(shader)
	{
	case PIPE_SHADER_FRAGMENT:
	case PIPE_SHADER_VERTEX:
	case PIPE_SHADER_GEOMETRY:
		break;
	case PIPE_SHADER_TESS_CTRL:
	case PIPE_SHADER_TESS_EVAL:
		/* LLVM 3.6.2 is required for tessellation because of bug fixes there */
		if (HAVE_LLVM == 0x0306 && MESA_LLVM_VERSION_PATCH < 2)
			return 0;
		break;
	case PIPE_SHADER_COMPUTE:
		switch (param) {
		case PIPE_SHADER_CAP_PREFERRED_IR:
			return PIPE_SHADER_IR_NATIVE;

		case PIPE_SHADER_CAP_SUPPORTED_IRS: {
			int ir = 1 << PIPE_SHADER_IR_NATIVE;

			if (si_have_tgsi_compute(sscreen))
				ir |= 1 << PIPE_SHADER_IR_TGSI;

			return ir;
		}
		case PIPE_SHADER_CAP_DOUBLES:
			return HAVE_LLVM >= 0x0307;

		case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE: {
			uint64_t max_const_buffer_size;
			pscreen->get_compute_param(pscreen, PIPE_SHADER_IR_TGSI,
				PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE,
				&max_const_buffer_size);
			return MIN2(max_const_buffer_size, INT_MAX);
		}
		default:
			/* If compute shaders don't require a special value
			 * for this cap, we can return the same value we
			 * do for other shader types. */
			break;
		}
		break;
	default:
		return 0;
	}

	switch (param) {
	/* Shader limits. */
	case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
	case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
		return 16384;
	case PIPE_SHADER_CAP_MAX_INPUTS:
		return shader == PIPE_SHADER_VERTEX ? SI_NUM_VERTEX_BUFFERS : 32;
	case PIPE_SHADER_CAP_MAX_OUTPUTS:
		return shader == PIPE_SHADER_FRAGMENT ? 8 : 32;
	case PIPE_SHADER_CAP_MAX_TEMPS:
		return 256; /* Max native temporaries. */
	case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
		return 4096 * sizeof(float[4]); /* actually only memory limits this */
	case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
		return SI_NUM_CONST_BUFFERS;
	case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
	case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
		return SI_NUM_SAMPLERS;
	case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
		return HAVE_LLVM >= 0x0309 ? SI_NUM_SHADER_BUFFERS : 0;
	case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
		return HAVE_LLVM >= 0x0309 ? SI_NUM_IMAGES : 0;
	case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
		return 32;
	case PIPE_SHADER_CAP_PREFERRED_IR:
		return PIPE_SHADER_IR_TGSI;
	case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
		return 3;

	/* Supported boolean features. */
	case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
	case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
	case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
	case PIPE_SHADER_CAP_INTEGERS:
	case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED:
	case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
		return 1;

	case PIPE_SHADER_CAP_DOUBLES:
		return HAVE_LLVM >= 0x0307;

	case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
		/* TODO: Indirection of geometry shader input dimension is not
		 * handled yet
		 */
		return shader != PIPE_SHADER_GEOMETRY;

	/* Unsupported boolean features. */
	case PIPE_SHADER_CAP_MAX_PREDS:
	case PIPE_SHADER_CAP_SUBROUTINES:
	case PIPE_SHADER_CAP_SUPPORTED_IRS:
	case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED:
	case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED:
		return 0;
	}
	return 0;
}

/* Destroy the screen: drain the shader compiler queue, dispose the LLVM
 * target machines, free all cached shader parts and the shader cache, then
 * release the common screen. Bails out early if the winsys is still
 * referenced by another screen user. */
static void si_destroy_screen(struct pipe_screen* pscreen)
{
	struct si_screen *sscreen = (struct si_screen *)pscreen;
	struct si_shader_part *parts[] = {
		sscreen->vs_prologs,
		sscreen->vs_epilogs,
		sscreen->tcs_epilogs,
		sscreen->gs_prologs,
		sscreen->ps_prologs,
		sscreen->ps_epilogs
	};
	unsigned i;

	if (!sscreen)
		return;

	if (!sscreen->b.ws->unref(sscreen->b.ws))
		return;

	if (util_queue_is_initialized(&sscreen->shader_compiler_queue))
		util_queue_destroy(&sscreen->shader_compiler_queue);

	for (i = 0; i < ARRAY_SIZE(sscreen->tm); i++)
		if (sscreen->tm[i])
			LLVMDisposeTargetMachine(sscreen->tm[i]);

	/* Free shader parts. Each linked list is walked to its end. */
	for (i = 0; i < ARRAY_SIZE(parts); i++) {
		while (parts[i]) {
			struct si_shader_part *part = parts[i];

			parts[i] = part->next;
			radeon_shader_binary_clean(&part->binary);
			FREE(part);
		}
	}
	pipe_mutex_destroy(sscreen->shader_parts_mutex);
	si_destroy_shader_cache(sscreen);
	r600_destroy_common_screen(&sscreen->b);
}

/* Set the per-family GS ring table depth. Returns false for unknown
 * families, which aborts screen creation. */
static bool si_init_gs_info(struct si_screen *sscreen)
{
	switch (sscreen->b.family) {
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_ICELAND:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		sscreen->gs_table_depth = 16;
		return true;
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		sscreen->gs_table_depth = 32;
		return true;
	default:
		return false;
	}
}
/* Debug aid: the SI_FORCE_FAMILY environment variable overrides the
 * reported GPU family (by LLVM processor name) so shader compilation for
 * other chips can be tested. IB submission is disabled via RADEON_NOOP
 * because the real hardware doesn't match the forced family. Exits the
 * process on an unknown family name. */
static void si_handle_env_var_force_family(struct si_screen *sscreen)
{
	const char *family = debug_get_option("SI_FORCE_FAMILY", NULL);
	unsigned i;

	if (!family)
		return;

	for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
		if (!strcmp(family, r600_get_llvm_processor_name(i))) {
			/* Override family and chip_class. */
			sscreen->b.family = sscreen->b.info.family = i;

			if (i >= CHIP_TONGA)
				sscreen->b.chip_class = sscreen->b.info.chip_class = VI;
			else if (i >= CHIP_BONAIRE)
				sscreen->b.chip_class = sscreen->b.info.chip_class = CIK;
			else
				sscreen->b.chip_class = sscreen->b.info.chip_class = SI;

			/* Don't submit any IBs. */
			setenv("RADEON_NOOP", "1", 1);
			return;
		}
	}

	fprintf(stderr, "radeonsi: Unknown family: %s\n", family);
	exit(1);
}

/* Create the radeonsi pipe_screen for a winsys. Sets the screen callbacks
 * first, initializes the common screen / GS info / shader cache (freeing
 * the screen and returning NULL on failure), derives per-chip feature
 * flags, spins up the threaded shader compiler, and finally creates the
 * auxiliary context (which must be done last). */
struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
{
	struct si_screen *sscreen = CALLOC_STRUCT(si_screen);
	unsigned num_cpus, num_compiler_threads, i;

	if (!sscreen) {
		return NULL;
	}

	/* Set functions first. */
	sscreen->b.b.context_create = si_create_context;
	sscreen->b.b.destroy = si_destroy_screen;
	sscreen->b.b.get_param = si_get_param;
	sscreen->b.b.get_shader_param = si_get_shader_param;
	sscreen->b.b.resource_create = r600_resource_create_common;

	si_init_screen_state_functions(sscreen);

	if (!r600_common_screen_init(&sscreen->b, ws) ||
	    !si_init_gs_info(sscreen) ||
	    !si_init_shader_cache(sscreen)) {
		FREE(sscreen);
		return NULL;
	}

	si_handle_env_var_force_family(sscreen);

	if (!debug_get_bool_option("RADEON_DISABLE_PERFCOUNTERS", false))
		si_init_perfcounters(sscreen);

	/* Hawaii has a bug with offchip buffers > 256 that can be worked
	 * around by setting 4K granularity.
	 */
	sscreen->tess_offchip_block_dw_size =
		sscreen->b.family == CHIP_HAWAII ? 4096 : 8192;

	sscreen->has_distributed_tess =
		sscreen->b.chip_class >= VI &&
		sscreen->b.info.max_se >= 2;

	/* Multi draw indirect needs new enough PFP/ME firmware on pre-Polaris
	 * chips; the minimum versions differ per chip class. */
	sscreen->has_draw_indirect_multi =
		(sscreen->b.family >= CHIP_POLARIS10) ||
		(sscreen->b.chip_class == VI &&
		 sscreen->b.info.pfp_fw_version >= 121 &&
		 sscreen->b.info.me_fw_version >= 87) ||
		(sscreen->b.chip_class == CIK &&
		 sscreen->b.info.pfp_fw_version >= 211 &&
		 sscreen->b.info.me_fw_version >= 173) ||
		(sscreen->b.chip_class == SI &&
		 sscreen->b.info.pfp_fw_version >= 121 &&
		 sscreen->b.info.me_fw_version >= 87);

	sscreen->has_ds_bpermute = HAVE_LLVM >= 0x0309 &&
				   sscreen->b.chip_class >= VI;

	sscreen->b.has_cp_dma = true;
	sscreen->b.has_streamout = true;
	pipe_mutex_init(sscreen->shader_parts_mutex);
	sscreen->use_monolithic_shaders =
		HAVE_LLVM < 0x0308 ||
		(sscreen->b.debug_flags & DBG_MONOLITHIC_SHADERS) != 0;

	sscreen->b.barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SMEM_L1 |
					    SI_CONTEXT_INV_VMEM_L1 |
					    SI_CONTEXT_INV_GLOBAL_L2;
	sscreen->b.barrier_flags.compute_to_L2 = SI_CONTEXT_CS_PARTIAL_FLUSH;

	if (debug_get_bool_option("RADEON_DUMP_SHADERS", false))
		sscreen->b.debug_flags |= DBG_FS | DBG_VS | DBG_GS | DBG_PS | DBG_CS;

	/* Only enable as many threads as we have target machines and CPUs. */
	num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	num_compiler_threads = MIN2(num_cpus, ARRAY_SIZE(sscreen->tm));

	for (i = 0; i < num_compiler_threads; i++)
		sscreen->tm[i] = si_create_llvm_target_machine(sscreen);

	util_queue_init(&sscreen->shader_compiler_queue, "si_shader",
			32, num_compiler_threads);

	/* Create the auxiliary context. This must be done last. */
	sscreen->b.aux_context = sscreen->b.b.context_create(&sscreen->b.b, NULL, 0);

	if (sscreen->b.debug_flags & DBG_TEST_DMA)
		r600_test_dma(&sscreen->b);

	return &sscreen->b.b;
}