radeon_drm_bo.c revision 89ee0d527c689b70a08c1eb396486d47da7f120d
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "os/os_thread.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <errno.h>

#define RADEON_BO_FLAGS_MACRO_TILE        1
#define RADEON_BO_FLAGS_MICRO_TILE        2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20

extern const struct pb_vtbl radeon_bo_vtbl;

static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer handles and its mutex. */
    struct util_hash_table *bo_handles;
    pipe_mutex bo_handles_mutex;
};

static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}

static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}

void radeon_bo_unref(struct radeon_bo *bo)
{
    struct drm_gem_close args = {};

    if (!p_atomic_dec_zero(&bo->ref_count))
        return;

    if (bo->name) {
        pipe_mutex_lock(bo->mgr->bo_handles_mutex);
        util_hash_table_remove(bo->mgr->bo_handles,
                               (void*)(uintptr_t)bo->name);
        pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
    }

    if (bo->ptr)
        munmap(bo->ptr, bo->size);

    /* Close object. */
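    /* GEM_CLOSE drops this fd's handle; the kernel frees the object once
     * no other handles or in-flight GPU references to it remain. */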
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
    FREE(bo);
}

static void radeon_bo_wait(struct r300_winsys_bo *_buf)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    struct drm_radeon_gem_wait_idle args = {};

    args.handle = bo->handle;
    while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                               &args, sizeof(args)) == -EBUSY);
}

static boolean radeon_bo_is_busy(struct r300_winsys_bo *_buf)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    struct drm_radeon_gem_busy args = {};

    args.handle = bo->handle;
    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                               &args, sizeof(args)) != 0;
}

static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    radeon_bo_unref(bo);
}

static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
{
    unsigned res = 0;

    if (usage & PIPE_TRANSFER_DONTBLOCK)
        res |= PB_USAGE_DONTBLOCK;

    if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
        res |= PB_USAGE_UNSYNCHRONIZED;

    return res;
}

static void *radeon_bo_map_internal(struct pb_buffer *_buf,
                                    unsigned flags, void *flush_ctx)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_cs *cs = flush_ctx;
    struct drm_radeon_gem_mmap args = {};
    /* Prevents a call to radeon_bo_wait if (usage & DONTBLOCK) and
     * radeon_bo_is_busy returns FALSE. */
    boolean may_be_busy = TRUE;

    if (flags & PB_USAGE_DONTBLOCK) {
        if (radeon_bo_is_referenced_by_cs(cs, bo)) {
            cs->flush_cs(cs->flush_data);
            return NULL;
        }

        if (radeon_bo_is_busy((struct r300_winsys_bo*)bo)) {
            return NULL;
        }

        may_be_busy = FALSE;
    }

    /* If it's not an unsynchronized bo_map, flush the CS if needed and then wait. */
    if (may_be_busy && !(flags & PB_USAGE_UNSYNCHRONIZED)) {
        if (radeon_bo_is_referenced_by_cs(cs, bo)) {
            cs->flush_cs(cs->flush_data);
        }

        radeon_bo_wait((struct r300_winsys_bo*)bo);
    }

    /* Map the buffer if it's not already mapped. */
    /* XXX We may get a race in bo->ptr. */
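    /* (Two threads could both see bo->ptr == NULL here and mmap the object
     * twice; the overwritten mapping would never be munmapped. A per-bo
     * mutex around this block would close that race.) */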
    if (!bo->ptr) {
        void *ptr;

        args.handle = bo->handle;
        args.offset = 0;
        args.size = (uint64_t)bo->size;
        if (drmCommandWriteRead(bo->rws->fd,
                                DRM_RADEON_GEM_MMAP,
                                &args,
                                sizeof(args))) {
            fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                    bo, bo->handle);
            return NULL;
        }
        ptr = mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                   bo->rws->fd, args.addr_ptr);
        if (ptr == MAP_FAILED) {
            fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
            return NULL;
        }
        bo->ptr = ptr;
    }

    return bo->ptr;
}

static void radeon_bo_unmap_internal(struct pb_buffer *_buf)
{
    /* NOP */
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    radeon_bo_map_internal,
    radeon_bo_unmap_internal,
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};

static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args = {};

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain =
        (desc->usage & RADEON_PB_USAGE_DOMAIN_GTT ?
         RADEON_GEM_DOMAIN_GTT : 0) |
        (desc->usage & RADEON_PB_USAGE_DOMAIN_VRAM ?
         RADEON_GEM_DOMAIN_VRAM : 0);

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %d bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        /* Don't leak the GEM handle if the CPU allocation fails. */
        struct drm_gem_close close_args = {};
        close_args.handle = args.handle;
        drmIoctl(rws->fd, DRM_IOCTL_GEM_CLOSE, &close_args);
        return NULL;
    }

    pipe_reference_init(&bo->base.base.reference, 1);
    bo->base.base.alignment = desc->alignment;
    bo->base.base.usage = desc->usage;
    bo->base.base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->size = size;

    radeon_bo_ref(bo);
    return &bo->base;
}

static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

/* This is for the cache bufmgr. */
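/* pb_cache calls this through pb_manager::is_buffer_busy to decide whether
 * a cached buffer can be reused right away; buffers still referenced by an
 * unflushed CS or still busy on the GPU must be reported as busy. */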
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct r300_winsys_bo*)bo)) {
        return TRUE;
    }

    return FALSE;
}

static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_handles);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    FREE(mgr);
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    return &mgr->base;
}

static void *radeon_bo_map(struct r300_winsys_bo *buf,
                           struct r300_winsys_cs *cs,
                           enum pipe_transfer_usage usage)
{
    struct pb_buffer *_buf = pb_buffer(buf);

    return pb_map(_buf, get_pb_usage_from_transfer_flags(usage), cs);
}

static void radeon_bo_get_tiling(struct r300_winsys_bo *_buf,
                                 enum r300_buffer_tiling *microtiled,
                                 enum r300_buffer_tiling *macrotiled)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    /* GET_TILING takes the same argument layout as SET_TILING. */
    struct drm_radeon_gem_set_tiling args = {};

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = R300_BUFFER_LINEAR;
    *macrotiled = R300_BUFFER_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = R300_BUFFER_TILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = R300_BUFFER_TILED;
}

static void radeon_bo_set_tiling(struct r300_winsys_bo *_buf,
                                 enum r300_buffer_tiling microtiled,
                                 enum r300_buffer_tiling macrotiled,
                                 uint32_t pitch)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    struct drm_radeon_gem_set_tiling args = {};

    if (microtiled == R300_BUFFER_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == R300_BUFFER_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == R300_BUFFER_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

static struct r300_winsys_cs_handle *radeon_drm_get_cs_handle(
        struct r300_winsys_bo *_buf)
{
    /* return radeon_bo. */
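    /* get_radeon_bo also resolves buffers wrapped by another pb_manager
     * (e.g. the cache manager) down to the underlying kernel BO. */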
    return (struct r300_winsys_cs_handle*)
            get_radeon_bo(pb_buffer(_buf));
}

static unsigned get_pb_usage_from_create_flags(unsigned bind, unsigned usage,
                                               enum r300_buffer_domain domain)
{
    unsigned res = 0;

    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
        res |= RADEON_PB_USAGE_CACHE;

    if (domain & R300_DOMAIN_GTT)
        res |= RADEON_PB_USAGE_DOMAIN_GTT;

    if (domain & R300_DOMAIN_VRAM)
        res |= RADEON_PB_USAGE_DOMAIN_VRAM;

    return res;
}

static struct r300_winsys_bo *
radeon_winsys_bo_create(struct r300_winsys_screen *rws,
                        unsigned size,
                        unsigned alignment,
                        unsigned bind,
                        unsigned usage,
                        enum r300_buffer_domain domain)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct pb_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.alignment = alignment;
    desc.usage = get_pb_usage_from_create_flags(bind, usage, domain);

    /* Assign a buffer manager. */
    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc);
    if (!buffer)
        return NULL;

    return (struct r300_winsys_bo*)buffer;
}

static struct r300_winsys_bo *radeon_winsys_bo_from_handle(struct r300_winsys_screen *rws,
                                                           struct winsys_handle *whandle,
                                                           unsigned *stride,
                                                           unsigned *size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->size = open_arg.size;
    bo->name = whandle->handle;
    radeon_bo_ref(bo);

    /* Initialize it. */
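    /* GEM_OPEN only reports a size, so the alignment of an imported BO is
     * unknown and left at 0. */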
    pipe_reference_init(&bo->base.base.reference, 1);
    bo->base.base.alignment = 0;
    bo->base.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.base.size = bo->size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;
    if (size)
        *size = bo->base.base.size;

    return (struct r300_winsys_bo*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}

static boolean radeon_winsys_bo_get_handle(struct r300_winsys_bo *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink = {};
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(buffer));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flinked) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flinked = TRUE;
            bo->flink = flink.name;
        }
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    }

    whandle->stride = stride;
    return TRUE;
}

void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = pb_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
}