radeon_drm_bo.c revision 3da5196263fb2ae60483044cbd34c94270e2accd
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>

#define RADEON_BO_FLAGS_MACRO_TILE        1
#define RADEON_BO_FLAGS_MICRO_TILE        2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20

#ifndef DRM_RADEON_GEM_WAIT
#define DRM_RADEON_GEM_WAIT 0x2b

#define RADEON_GEM_NO_WAIT      0x1
#define RADEON_GEM_USAGE_READ   0x2
#define RADEON_GEM_USAGE_WRITE  0x4

struct drm_radeon_gem_wait {
    uint32_t    handle;
    uint32_t    flags;  /* one of RADEON_GEM_* */
};

#endif


extern const struct pb_vtbl radeon_bo_vtbl;


static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer handles and its mutex. */
    struct util_hash_table *bo_handles;
    pipe_mutex bo_handles_mutex;
};
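/* Note: bo_handles maps a GEM flink name to its radeon_bo, so that importing
 * the same name twice through radeon_winsys_bo_from_handle yields the same
 * BO; see the deadlock explanation in that function near the end of this
 * file. */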
static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}

static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}

static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) == -EBUSY);
    } else*/ {
        struct drm_radeon_gem_wait_idle args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                                   &args, sizeof(args)) == -EBUSY);
    }
}

static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
                                 enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    if (p_atomic_read(&bo->num_active_ioctls)) {
        return TRUE;
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage | RADEON_GEM_NO_WAIT;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) != 0;
    } else*/ {
        struct drm_radeon_gem_busy args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                                   &args, sizeof(args)) != 0;
    }
}

static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    if (bo->name) {
        pipe_mutex_lock(bo->mgr->bo_handles_mutex);
        util_hash_table_remove(bo->mgr->bo_handles,
                               (void*)(uintptr_t)bo->name);
        pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
    }

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
    pipe_mutex_destroy(bo->map_mutex);
    FREE(bo);
}

static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
{
    unsigned res = 0;

    if (usage & PIPE_TRANSFER_WRITE)
        res |= PB_USAGE_CPU_WRITE;

    if (usage & PIPE_TRANSFER_DONTBLOCK)
        res |= PB_USAGE_DONTBLOCK;

    if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
        res |= PB_USAGE_UNSYNCHRONIZED;

    return res;
}
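/* Synchronization overview for the mapping logic below:
 * - PB_USAGE_UNSYNCHRONIZED: map right away; the caller guarantees safety.
 * - PB_USAGE_DONTBLOCK: never wait; flush the CS asynchronously and return
 *   NULL if the buffer is referenced by the current CS or busy on the GPU.
 * - otherwise: flush the CS if it references the buffer, then block in
 *   radeon_bo_wait until the GPU is done with it.
 * Read-only mappings only have to wait for GPU *writes*, because concurrent
 * reads by the CPU and the GPU cannot conflict. */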
static void *radeon_bo_map_internal(struct pb_buffer *_buf,
                                    unsigned flags, void *flush_ctx)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_cs *cs = flush_ctx;
    struct drm_radeon_gem_mmap args;
    void *ptr;

    memset(&args, 0, sizeof(args));

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(flags & PB_USAGE_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (flags & PB_USAGE_DONTBLOCK) {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                radeon_bo_wait((struct pb_buffer*)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                } else {
                    /* Try to avoid busy-waiting in radeon_bo_wait. */
                    if (p_atomic_read(&bo->num_active_ioctls))
                        radeon_drm_cs_sync_flush(cs);
                }

                radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
            }
        }
    }

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}
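/* Unmapping is intentionally a no-op: the CPU mapping is cached in bo->ptr
 * (populated under map_mutex above) and stays valid until radeon_bo_destroy
 * calls os_munmap, which makes repeated map/unmap cycles cheap. */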
static void radeon_bo_unmap_internal(struct pb_buffer *_buf)
{
    /* NOP */
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    radeon_bo_map_internal,
    radeon_bo_unmap_internal,
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};

static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;

    memset(&args, 0, sizeof(args));

    assert(rdesc->initial_domains && rdesc->reloc_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);
    assert((rdesc->reloc_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %d bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->reloc_domains = rdesc->reloc_domains;
    pipe_mutex_init(bo->map_mutex);

    return &bo->base;
}

static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}
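/* radeon_bomgr_flush above has nothing to do: every ioctl issued by this
 * manager is synchronous, so there is no batched work to flush at the
 * pb_manager level. */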
/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}

static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_handles);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    FREE(mgr);
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    return &mgr->base;
}

static void *radeon_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *cs,
                           enum pipe_transfer_usage usage)
{
    return pb_map(buf, get_pb_usage_from_transfer_flags(usage), cs);
}

static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = RADEON_LAYOUT_TILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = RADEON_LAYOUT_TILED;
}
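/* Note that the GET_TILING ioctl above reuses struct
 * drm_radeon_gem_set_tiling: the kernel fills in tiling_flags (and pitch)
 * for the given handle, and the flags are decoded with the same
 * RADEON_BO_FLAGS_* bits that set_tiling below encodes. */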
static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 uint32_t pitch)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(
        struct pb_buffer *_buf)
{
    /* return radeon_bo. */
    return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}

static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        unsigned bind, unsigned usage)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Determine the memory domains. */
    switch (usage) {
    case PIPE_USAGE_STAGING:
    case PIPE_USAGE_STREAM:
    case PIPE_USAGE_DYNAMIC:
        desc.initial_domains = RADEON_GEM_DOMAIN_GTT;
        desc.reloc_domains = RADEON_GEM_DOMAIN_GTT;
        break;
    case PIPE_USAGE_IMMUTABLE:
    case PIPE_USAGE_STATIC:
        desc.initial_domains = RADEON_GEM_DOMAIN_VRAM;
        desc.reloc_domains = RADEON_GEM_DOMAIN_VRAM;
        break;
    default:
        if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
                    PIPE_BIND_CONSTANT_BUFFER)) {
            desc.initial_domains = RADEON_GEM_DOMAIN_GTT;
        } else {
            desc.initial_domains = RADEON_GEM_DOMAIN_VRAM;
        }
        desc.reloc_domains = RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM;
    }

    /* Additional criteria for the cache manager. */
    desc.base.usage = desc.initial_domains;

    /* Assign a buffer manager. */
    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
                PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_CUSTOM))
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    return (struct pb_buffer*)buffer;
}
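/* Buffer sharing: an exported or imported buffer is identified either by its
 * GEM flink name (DRM_API_HANDLE_TYPE_SHARED) or by the raw GEM handle for
 * KMS (DRM_API_HANDLE_TYPE_KMS); see radeon_winsys_bo_get_handle at the end
 * of this file for the export side. */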
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};

    memset(&open_arg, 0, sizeof(open_arg));

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->name = whandle->handle;
    bo->reloc_domains = RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = open_arg.size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}

static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = get_radeon_bo(buffer);

    memset(&flink, 0, sizeof(flink));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flinked) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flinked = TRUE;
            bo->flink = flink.name;
        }
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    }

    whandle->stride = stride;
    return TRUE;
}

void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = pb_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
}
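/* A rough sketch of how this file is wired up during winsys creation. The
 * actual code lives in the winsys setup (radeon_drm_winsys.c), not here;
 * the pb_cache_manager_create() call and its timeout argument are an
 * assumption based on the generic pipebuffer API, not taken from this file:
 *
 *    ws->kman = radeon_bomgr_create(ws);
 *    ws->cman = pb_cache_manager_create(ws->kman, 1000000);
 *    radeon_bomgr_init_functions(ws);
 *
 * radeon_winsys_bo_create above then routes frequently recycled buffer types
 * (vertex/index/constant buffers, PIPE_BIND_CUSTOM) through the caching
 * manager and everything else straight to the kernel manager. */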