radeon_drm_bo.c revision a87730ff3f83253465fbe9a1e9e9b1ea92cb79b9
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "os/os_thread.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <errno.h>

#define RADEON_BO_FLAGS_MACRO_TILE        1
#define RADEON_BO_FLAGS_MICRO_TILE        2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20

extern const struct pb_vtbl radeon_bo_vtbl;


static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer handles and its mutex. */
    struct util_hash_table *bo_handles;
    pipe_mutex bo_handles_mutex;
};

static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}

static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}

void radeon_bo_unref(struct radeon_bo *bo)
{
    struct drm_gem_close args = {};

    if (!p_atomic_dec_zero(&bo->ref_count))
        return;

    if (bo->name) {
        pipe_mutex_lock(bo->mgr->bo_handles_mutex);
        util_hash_table_remove(bo->mgr->bo_handles,
                               (void*)(uintptr_t)bo->name);
        pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
    }

    if (bo->ptr)
        munmap(bo->ptr, bo->size);

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
    FREE(bo);
}

static void radeon_bo_wait(struct r300_winsys_bo *_buf)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    struct drm_radeon_gem_wait_idle args = {};

    args.handle = bo->handle;
    while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                               &args, sizeof(args)) == -EBUSY);
}

static boolean radeon_bo_is_busy(struct r300_winsys_bo *_buf)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    struct drm_radeon_gem_busy args = {};

    args.handle = bo->handle;
    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                               &args, sizeof(args)) != 0;
}

static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    radeon_bo_unref(bo);
}

static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
{
    unsigned res = 0;

    if (usage & PIPE_TRANSFER_DONTBLOCK)
        res |= PB_USAGE_DONTBLOCK;

    if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
        res |= PB_USAGE_UNSYNCHRONIZED;

    return res;
}

static void *radeon_bo_map_internal(struct pb_buffer *_buf,
                                    unsigned flags, void *flush_ctx)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_cs *cs = flush_ctx;
    struct drm_radeon_gem_mmap args = {};
    /* prevents a call to radeon_bo_wait if (usage & DONTBLOCK) and
     * radeon_is_busy returns FALSE. */
    boolean may_be_busy = TRUE;

    if (flags & PB_USAGE_DONTBLOCK) {
        if (radeon_bo_is_referenced_by_cs(cs, bo)) {
            return NULL;
        }

        if (radeon_bo_is_busy((struct r300_winsys_bo*)bo)) {
            return NULL;
        }

        may_be_busy = FALSE;
    }

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (may_be_busy && !(flags & PB_USAGE_UNSYNCHRONIZED)) {
        if (radeon_bo_is_referenced_by_cs(cs, bo)) {
            cs->flush_cs(cs->flush_data);
        }

        radeon_bo_wait((struct r300_winsys_bo*)bo);
    }

    /* Map buffer if it's not already mapped. */
    /* XXX We may get a race in bo->ptr. */
    if (!bo->ptr) {
        void *ptr;

        args.handle = bo->handle;
        args.offset = 0;
        args.size = (uint64_t)bo->size;
        if (drmCommandWriteRead(bo->rws->fd,
                                DRM_RADEON_GEM_MMAP,
                                &args,
                                sizeof(args))) {
            fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                    bo, bo->handle);
            return NULL;
        }
        ptr = mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                   bo->rws->fd, args.addr_ptr);
        if (ptr == MAP_FAILED) {
            fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
            return NULL;
        }
        bo->ptr = ptr;
    }

    return bo->ptr;
}

static void radeon_bo_unmap_internal(struct pb_buffer *_buf)
{
    /* NOP */
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    radeon_bo_map_internal,
    radeon_bo_unmap_internal,
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};

static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args = {};

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain =
        (desc->usage & RADEON_PB_USAGE_DOMAIN_GTT ?
         RADEON_GEM_DOMAIN_GTT : 0) |
        (desc->usage & RADEON_PB_USAGE_DOMAIN_VRAM ?
         RADEON_GEM_DOMAIN_VRAM : 0);

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "Failed to allocate :\n");
        fprintf(stderr, "   size      : %d bytes\n", size);
        fprintf(stderr, "   alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "   domains   : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.base.reference, 1);
    bo->base.base.alignment = desc->alignment;
    bo->base.base.usage = desc->usage;
    bo->base.base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->size = size;

    radeon_bo_ref(bo);
    return &bo->base;
}

static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return FALSE;
    }

    if (radeon_bo_is_busy((struct r300_winsys_bo*)bo)) {
        return FALSE;
    }

    return TRUE;
}

static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_handles);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    FREE(mgr);
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    return &mgr->base;
}

static void *radeon_bo_map(struct r300_winsys_bo *buf,
                           struct r300_winsys_cs *cs,
                           enum pipe_transfer_usage usage)
{
    struct pb_buffer *_buf = pb_buffer(buf);

    return pb_map(_buf, get_pb_usage_from_transfer_flags(usage), cs);
}

static void radeon_bo_get_tiling(struct r300_winsys_bo *_buf,
                                 enum r300_buffer_tiling *microtiled,
                                 enum r300_buffer_tiling *macrotiled)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    struct drm_radeon_gem_set_tiling args = {};

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = R300_BUFFER_LINEAR;
    *macrotiled = R300_BUFFER_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = R300_BUFFER_TILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = R300_BUFFER_TILED;
}

static void radeon_bo_set_tiling(struct r300_winsys_bo *_buf,
                                 enum r300_buffer_tiling microtiled,
                                 enum r300_buffer_tiling macrotiled,
                                 uint32_t pitch)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    struct drm_radeon_gem_set_tiling args = {};

    if (microtiled == R300_BUFFER_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == R300_BUFFER_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == R300_BUFFER_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

static struct r300_winsys_cs_handle *radeon_drm_get_cs_handle(
        struct r300_winsys_bo *_buf)
{
    /* return radeon_bo. */
    return (struct r300_winsys_cs_handle*)
            get_radeon_bo(pb_buffer(_buf));
}

static unsigned get_pb_usage_from_create_flags(unsigned bind, unsigned usage,
                                               enum r300_buffer_domain domain)
{
    unsigned res = 0;

    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
        res |= RADEON_PB_USAGE_CACHE;

    if (domain & R300_DOMAIN_GTT)
        res |= RADEON_PB_USAGE_DOMAIN_GTT;

    if (domain & R300_DOMAIN_VRAM)
        res |= RADEON_PB_USAGE_DOMAIN_VRAM;

    return res;
}

static struct r300_winsys_bo *
radeon_winsys_bo_create(struct r300_winsys_screen *rws,
                        unsigned size,
                        unsigned alignment,
                        unsigned bind,
                        unsigned usage,
                        enum r300_buffer_domain domain)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct pb_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.alignment = alignment;
    desc.usage = get_pb_usage_from_create_flags(bind, usage, domain);

    /* Assign a buffer manager. */
    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc);
    if (!buffer)
        return NULL;

    return (struct r300_winsys_bo*)buffer;
}

static struct r300_winsys_bo *radeon_winsys_bo_from_handle(struct r300_winsys_screen *rws,
                                                           struct winsys_handle *whandle,
                                                           unsigned *stride,
                                                           unsigned *size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->size = open_arg.size;
    bo->name = whandle->handle;
    radeon_bo_ref(bo);

    /* Initialize it. */
    pipe_reference_init(&bo->base.base.reference, 1);
    bo->base.base.alignment = 0;
    bo->base.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.base.size = bo->size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;
    if (size)
        *size = bo->base.base.size;

    return (struct r300_winsys_bo*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}

static boolean radeon_winsys_bo_get_handle(struct r300_winsys_bo *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink = {};
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(buffer));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flinked) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flinked = TRUE;
            bo->flink = flink.name;
        }
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    }

    whandle->stride = stride;
    return TRUE;
}

void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = pb_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
}