radeon_drm_bo.c revision 2717b8f034db16cf551e167aa5ce3a9be3bf730b
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>

#define RADEON_BO_FLAGS_MACRO_TILE        1
#define RADEON_BO_FLAGS_MICRO_TILE        2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20

/* Fallback definitions for libdrm headers that predate the GEM_WAIT ioctl. */
#ifndef DRM_RADEON_GEM_WAIT
#define DRM_RADEON_GEM_WAIT     0x2b

#define RADEON_GEM_NO_WAIT      0x1
#define RADEON_GEM_USAGE_READ   0x2
#define RADEON_GEM_USAGE_WRITE  0x4

struct drm_radeon_gem_wait {
    uint32_t handle;
    uint32_t flags;     /* one of RADEON_GEM_* */
};

#endif


extern const struct pb_vtbl radeon_bo_vtbl;


static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer handles and its mutex. */
    struct util_hash_table *bo_handles;
    pipe_mutex bo_handles_mutex;
};
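/* Not part of the original file: a sketch of struct radeon_bo as implied by
 * its uses below. The authoritative definition lives in the winsys headers;
 * field order and exact types here are assumptions.
 *
 *    struct radeon_bo {
 *        struct pb_buffer base;       // must come first: radeon_bo() casts
 *        struct radeon_bomgr *mgr;
 *        struct radeon_drm_winsys *rws;
 *        void *ptr;                   // lazy CPU mapping, NULL until mapped
 *        pipe_mutex map_mutex;        // guards creation of that mapping
 *        uint32_t handle;             // kernel GEM handle
 *        uint32_t name;               // flink name, if imported by name
 *        uint32_t flink;              // flink name, if exported
 *        boolean flinked;
 *        int num_active_ioctls;       // read/written with p_atomic_*
 *        unsigned reloc_domains;      // domains used for CS relocations
 *    };
 */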
static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}

static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}

static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) == -EBUSY);
    } else {
        struct drm_radeon_gem_wait_idle args = {};
        args.handle = bo->handle;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                                   &args, sizeof(args)) == -EBUSY);
    }
}

static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
                                 enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    if (p_atomic_read(&bo->num_active_ioctls)) {
        return TRUE;
    }

    if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage | RADEON_GEM_NO_WAIT;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) != 0;
    } else {
        struct drm_radeon_gem_busy args = {};
        args.handle = bo->handle;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                                   &args, sizeof(args)) != 0;
    }
}

static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_gem_close args = {};

    if (bo->name) {
        pipe_mutex_lock(bo->mgr->bo_handles_mutex);
        util_hash_table_remove(bo->mgr->bo_handles,
                               (void*)(uintptr_t)bo->name);
        pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
    }

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
    pipe_mutex_destroy(bo->map_mutex);
    FREE(bo);
}

static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
{
    unsigned res = 0;

    if (usage & PIPE_TRANSFER_WRITE)
        res |= PB_USAGE_CPU_WRITE;

    if (usage & PIPE_TRANSFER_DONTBLOCK)
        res |= PB_USAGE_DONTBLOCK;

    if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
        res |= PB_USAGE_UNSYNCHRONIZED;

    return res;
}
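/* Not part of the original file, for illustration: the translation above is
 * a straight bit-for-bit mapping, e.g.
 *
 *    get_pb_usage_from_transfer_flags(PIPE_TRANSFER_WRITE |
 *                                     PIPE_TRANSFER_DONTBLOCK)
 *        == (PB_USAGE_CPU_WRITE | PB_USAGE_DONTBLOCK)
 *
 * Note that PIPE_TRANSFER_READ has no pipebuffer counterpart here; mapping
 * for read is simply the absence of PB_USAGE_CPU_WRITE, which is how
 * radeon_bo_map_internal() below distinguishes the two cases.
 */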
static void *radeon_bo_map_internal(struct pb_buffer *_buf,
                                    unsigned flags, void *flush_ctx)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_cs *cs = flush_ctx;
    struct drm_radeon_gem_mmap args = {};
    void *ptr;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(flags & PB_USAGE_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (flags & PB_USAGE_DONTBLOCK) {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                radeon_bo_wait((struct pb_buffer*)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                } else {
                    /* Try to avoid busy-waiting in radeon_bo_wait. */
                    if (p_atomic_read(&bo->num_active_ioctls))
                        radeon_drm_cs_sync_flush(cs);
                }

                radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
            }
        }
    }
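    /* Not part of the original file, for illustration: the four
     * synchronization cases above, in table form.
     *
     *    DONTBLOCK  CPU_WRITE  action
     *    ---------  ---------  ---------------------------------------------
     *    yes        no         async-flush + fail (NULL) if the GPU writes it
     *    yes        yes        async-flush + fail (NULL) if busy at all
     *    no         no         flush if the CS writes the BO, wait for writes
     *    no         yes        flush if the CS references the BO, wait idle
     */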
    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}

static void radeon_bo_unmap_internal(struct pb_buffer *_buf)
{
    /* NOP */
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    radeon_bo_map_internal,
    radeon_bo_unmap_internal,
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};

static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args = {};
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;

    assert(rdesc->initial_domains && rdesc->reloc_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);
    assert((rdesc->reloc_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %u bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %u bytes\n", desc->alignment);
        fprintf(stderr, "radeon:    domains   : %u\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->reloc_domains = rdesc->reloc_domains;
    pipe_mutex_init(bo->map_mutex);

    return &bo->base;
}

static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}
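/* Not part of the original file, for illustration: how this manager is
 * stacked in practice. radeon_winsys_bo_create() below picks between two
 * providers created at winsys init:
 *
 *    ws->cman  - a caching manager layered on top of kman; it calls
 *                is_buffer_busy (below) to decide whether a freed buffer
 *                can be reused immediately without stalling
 *    ws->kman  - this radeon_bomgr, which allocates real GEM objects via
 *                DRM_RADEON_GEM_CREATE (radeon_bomgr_create_bo above)
 */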
/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}

static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_handles);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    FREE(mgr);
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    return &mgr->base;
}

static void *radeon_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *cs,
                           enum pipe_transfer_usage usage)
{
    return pb_map(buf, get_pb_usage_from_transfer_flags(usage), cs);
}

static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args = {};

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = RADEON_LAYOUT_TILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = RADEON_LAYOUT_TILED;
}

static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 uint32_t pitch)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args = {};

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}
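/* Not part of the original file, for illustration: switching a surface to a
 * macrotiled layout through the winsys vtbl set up in
 * radeon_bomgr_init_functions() below (`buf`, `cs` and `pitch` are
 * hypothetical):
 *
 *    ws->base.buffer_set_tiling(buf, cs,
 *                               RADEON_LAYOUT_LINEAR,   // microtiling
 *                               RADEON_LAYOUT_TILED,    // macrotiling
 *                               pitch);
 */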
static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(
        struct pb_buffer *_buf)
{
    /* return radeon_bo. */
    return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}

static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        unsigned bind, unsigned usage)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Determine the memory domains. */
    switch (usage) {
    case PIPE_USAGE_STAGING:
    case PIPE_USAGE_STREAM:
    case PIPE_USAGE_DYNAMIC:
        desc.initial_domains = RADEON_GEM_DOMAIN_GTT;
        desc.reloc_domains = RADEON_GEM_DOMAIN_GTT;
        break;
    case PIPE_USAGE_IMMUTABLE:
    case PIPE_USAGE_STATIC:
        desc.initial_domains = RADEON_GEM_DOMAIN_VRAM;
        desc.reloc_domains = RADEON_GEM_DOMAIN_VRAM;
        break;
    default:
        if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
                    PIPE_BIND_CONSTANT_BUFFER)) {
            desc.initial_domains = RADEON_GEM_DOMAIN_GTT;
        } else {
            desc.initial_domains = RADEON_GEM_DOMAIN_VRAM;
        }
        desc.reloc_domains = RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM;
    }

    /* Additional criteria for the cache manager. */
    desc.base.usage = desc.initial_domains;

    /* Assign a buffer manager. */
    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
                PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_CUSTOM))
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    return (struct pb_buffer*)buffer;
}
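/* Not part of the original file, for illustration: per the switch above, a
 * frequently-rewritten buffer lands in GTT and goes through the caching
 * manager, e.g. a hypothetical dynamic vertex buffer:
 *
 *    struct pb_buffer *vb =
 *        ws->base.buffer_create(&ws->base, 64 * 1024, 4096,
 *                               PIPE_BIND_VERTEX_BUFFER,
 *                               PIPE_USAGE_DYNAMIC);
 *    // initial_domains = reloc_domains = RADEON_GEM_DOMAIN_GTT,
 *    // provider = ws->cman
 */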
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->name = whandle->handle;
    bo->reloc_domains = RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = open_arg.size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}

static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink = {};
    struct radeon_bo *bo = get_radeon_bo(buffer);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flinked) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flinked = TRUE;
            bo->flink = flink.name;
        }
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    }

    whandle->stride = stride;
    return TRUE;
}

void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = pb_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
}
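/* Not part of the original file, for illustration: the sharing round trip
 * implemented by the two functions above. Process A flinks a BO to a global
 * GEM name; process B opens that name. The bo_handles hash table guarantees
 * that repeated imports of the same name in one process return the same
 * radeon_bo (`ws`, `buf` and `stride` are hypothetical):
 *
 *    // Process A: export
 *    struct winsys_handle wh;
 *    memset(&wh, 0, sizeof(wh));
 *    wh.type = DRM_API_HANDLE_TYPE_SHARED;
 *    ws->base.buffer_get_handle(buf, stride, &wh);  // wh.handle = flink name
 *
 *    // Process B: import
 *    struct pb_buffer *shared =
 *        ws->base.buffer_from_handle(&ws->base, &wh, &stride);
 */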