radeon_drm_bo.c revision 1f455ef5bc3c9711d9452dcc09fd849656ad8b33
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "util/u_double_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>

/*
 * These definitions are copied from radeon_drm.h. Once an updated libdrm
 * is released, we should bump the configure.ac requirement for it and
 * remove the following fallbacks.
 */
#define RADEON_BO_FLAGS_MACRO_TILE        1
#define RADEON_BO_FLAGS_MICRO_TILE        2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20

#ifndef DRM_RADEON_GEM_WAIT
#define DRM_RADEON_GEM_WAIT     0x2b

#define RADEON_GEM_NO_WAIT      0x1
#define RADEON_GEM_USAGE_READ   0x2
#define RADEON_GEM_USAGE_WRITE  0x4

struct drm_radeon_gem_wait {
    uint32_t    handle;
    uint32_t    flags;  /* one of RADEON_GEM_* */
};

#endif

#ifndef RADEON_VA_MAP

#define RADEON_VA_MAP               1
#define RADEON_VA_UNMAP             2

#define RADEON_VA_RESULT_OK         0
#define RADEON_VA_RESULT_ERROR      1
#define RADEON_VA_RESULT_VA_EXIST   2

#define RADEON_VM_PAGE_VALID        (1 << 0)
#define RADEON_VM_PAGE_READABLE     (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE    (1 << 2)
#define RADEON_VM_PAGE_SYSTEM       (1 << 3)
#define RADEON_VM_PAGE_SNOOPED      (1 << 4)

struct drm_radeon_gem_va {
    uint32_t    handle;
    uint32_t    operation;
    uint32_t    vm_id;
    uint32_t    flags;
    uint64_t    offset;
};

#define DRM_RADEON_GEM_VA   0x2b
#endif


extern const struct pb_vtbl radeon_bo_vtbl;


static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}

struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer handles and its mutex. */
    struct util_hash_table *bo_handles;
    pipe_mutex bo_handles_mutex;
    pipe_mutex bo_va_mutex;

    /* Is virtual address space supported? */
    bool va;
    uint64_t va_offset;
    struct list_head va_holes;
};

static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}
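/* A note on the virtual-address bookkeeping above: va_offset is the top of
 * the range handed out so far, and va_holes records freed ranges below it,
 * kept sorted from highest to lowest offset. radeon_bomgr_find_va() below
 * first tries to reuse a hole (first fit, honoring alignment) and only
 * bumps va_offset when no hole is large enough. */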
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}

static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) == -EBUSY);
    } else*/ {
        struct drm_radeon_gem_wait_idle args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                                   &args, sizeof(args)) == -EBUSY);
    }
}

static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
                                 enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    if (p_atomic_read(&bo->num_active_ioctls)) {
        return TRUE;
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage | RADEON_GEM_NO_WAIT;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) != 0;
    } else*/ {
        struct drm_radeon_gem_busy args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                                   &args, sizeof(args)) != 0;
    }
}

static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    pipe_mutex_lock(mgr->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
        offset = hole->offset;
        waste = 0;
        if (alignment) {
            waste = offset % alignment;
            waste = waste ? alignment - waste : 0;
        }
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
    }

    offset = mgr->va_offset;
    waste = 0;
    if (alignment) {
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
    }
    offset += waste;
    mgr->va_offset += size + waste;
    pipe_mutex_unlock(mgr->bo_va_mutex);
    return offset;
}
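/* The alignment handling above in a nutshell: for a hole starting at, say,
 * offset 0x11000 with a requested alignment of 0x10000, the allocation must
 * begin at 0x20000, so waste = 0x10000 - (0x11000 % 0x10000) = 0xF000 bytes
 * at the front of the hole are split off into a new, smaller hole (the
 * numbers here are illustrative only). */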
static void radeon_bomgr_force_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    pipe_mutex_lock(mgr->bo_va_mutex);
    if (va >= mgr->va_offset) {
        if (va > mgr->va_offset) {
            struct radeon_bo_va_hole *hole;
            hole = CALLOC_STRUCT(radeon_bo_va_hole);
            if (hole) {
                hole->size = va - mgr->va_offset;
                hole->offset = mgr->va_offset;
                list_add(&hole->list, &mgr->va_holes);
            }
        }
        mgr->va_offset = va + size;
    } else {
        struct radeon_bo_va_hole *hole, *n;
        uint64_t hole_end, va_end;

        /* Prune/free all holes that fall into the range. */
        LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
            hole_end = hole->offset + hole->size;
            va_end = va + size;
            if (hole->offset >= va_end || hole_end <= va)
                continue;
            if (hole->offset >= va && hole_end <= va_end) {
                list_del(&hole->list);
                FREE(hole);
                continue;
            }
            if (hole->offset >= va)
                hole->offset = va_end;
            else
                hole_end = va;
            hole->size = hole_end - hole->offset;
        }
    }
    pipe_mutex_unlock(mgr->bo_va_mutex);
}

static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole;

    pipe_mutex_lock(mgr->bo_va_mutex);
    if ((va + size) == mgr->va_offset) {
        mgr->va_offset = va;
        /* Delete the uppermost hole if it reaches the new top. */
        if (!LIST_IS_EMPTY(&mgr->va_holes)) {
            hole = container_of(mgr->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                mgr->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&mgr->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &mgr->va_holes) {
            /* Grow the upper hole if it's adjacent. */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge the lower hole if it's adjacent too. */
                if (next != hole && &next->list != &mgr->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow the lower hole if it's adjacent. */
        if (next != hole && &next->list != &mgr->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME: on allocation failure we just lose virtual address space;
         * maybe print a warning. */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    pipe_mutex_unlock(mgr->bo_va_mutex);
}
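/* radeon_bomgr_free_va() keeps the hole list coalesced: a range freed at
 * the very top of the address space simply lowers va_offset (possibly
 * absorbing the uppermost hole); anywhere else, it is merged into an
 * adjacent upper and/or lower hole when possible, and only becomes a new
 * hole as a last resort. */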
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_bomgr *mgr = bo->mgr;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    if (bo->name) {
        pipe_mutex_lock(bo->mgr->bo_handles_mutex);
        util_hash_table_remove(bo->mgr->bo_handles,
                               (void*)(uintptr_t)bo->name);
        pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
    }

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    if (mgr->va) {
        radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
    }

    pipe_mutex_destroy(bo->map_mutex);
    FREE(bo);
}

static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;

    /* If it's not an unsynchronized bo_map, flush the CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                radeon_bo_wait((struct pb_buffer*)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                } else {
                    /* Try to avoid busy-waiting in radeon_bo_wait. */
                    if (p_atomic_read(&bo->num_active_ioctls))
                        radeon_drm_cs_sync_flush(cs);
                }

                radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
            }
        }
    }

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}
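/* The CPU mapping established above is cached in bo->ptr for the lifetime
 * of the BO: radeon_bo_unmap() below is deliberately a no-op, and the
 * mapping is only torn down by os_munmap() in radeon_bo_destroy(). */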
static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
    /* NOP */
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    NULL, /* never called */
    NULL, /* never called */
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};

static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
    int r;

    memset(&args, 0, sizeof(args));

    assert(rdesc->initial_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon: size : %d bytes\n", size);
        fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va_size = align(size, 4096);
        bo->va = radeon_bomgr_find_va(mgr, bo->va_size, desc->alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
            fprintf(stderr, "radeon: size : %d bytes\n", size);
            fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
            fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
            bo->va = va.offset;
            radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
        }
    }

    return &bo->base;
}
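/* A note on the RADEON_VA_RESULT_VA_EXIST path above: if the kernel already
 * has a virtual address mapped for this handle, it returns that address in
 * va.offset; the range allocated speculatively with radeon_bomgr_find_va()
 * is then given back to the hole list and the kernel's range is reserved
 * instead via radeon_bomgr_force_va(). */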
static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}

static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_handles);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    pipe_mutex_destroy(mgr->bo_va_mutex);
    FREE(mgr);
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    pipe_mutex_init(mgr->bo_va_mutex);

    mgr->va = rws->info.r600_virtual_address;
    mgr->va_offset = rws->info.r600_va_start;
    list_inithead(&mgr->va_holes);

    return &mgr->base;
}

static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:  tile_split = 64;   break;
    case 1:  tile_split = 128;  break;
    case 2:  tile_split = 256;  break;
    case 3:  tile_split = 512;  break;
    default:
    case 4:  tile_split = 1024; break;
    case 5:  tile_split = 2048; break;
    case 6:  tile_split = 4096; break;
    }
    return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:   return 0;
    case 128:  return 1;
    case 256:  return 2;
    case 512:  return 3;
    default:
    case 1024: return 4;
    case 2048: return 5;
    case 4096: return 6;
    }
}
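/* eg_tile_split() and eg_tile_split_rev() are inverses of each other: they
 * translate between the tile-split field stored in the tiling flags (0..6)
 * and the tile split in bytes (64..4096), with out-of-range values mapped
 * to 1024 bytes by the default cases. */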
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = RADEON_LAYOUT_TILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = RADEON_LAYOUT_TILED;
    if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
        *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
        *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
        *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
        *stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
        *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
        *tile_split = eg_tile_split(*tile_split);
    }
}

static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 unsigned bankw, unsigned bankh,
                                 unsigned tile_split,
                                 unsigned stencil_tile_split,
                                 unsigned mtilea,
                                 uint32_t pitch)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
        RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
        RADEON_TILING_EG_BANKH_SHIFT;
    if (tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (stencil_tile_split &
                          RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
        RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
    args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
        RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(
        struct pb_buffer *_buf)
{
    /* return radeon_bo. */
    return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}
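/* Note that this code passes the same drm_radeon_gem_set_tiling argument
 * struct to both the GET_TILING and SET_TILING ioctls above; for GET_TILING
 * the kernel fills in tiling_flags and pitch for the given handle. */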
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        unsigned bind,
                        enum radeon_bo_domain domain)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Additional criteria for the cache manager. */
    desc.base.usage = domain;
    desc.initial_domains = domain;

    /* Assign a buffer manager. */
    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
                PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_CUSTOM))
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    return (struct pb_buffer*)buffer;
}
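/* The cache manager (cman) is chosen above for buffer types that are
 * created and destroyed frequently (vertex, index, constant and custom
 * buffers), so freed buffers can be reused without a round trip to the
 * kernel; everything else goes straight to the kernel manager (kman). */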
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};
    int r;

    memset(&open_arg, 0, sizeof(open_arg));

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->name = whandle->handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = open_arg.size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va_size = ((bo->base.size + 4095) & ~4095);
        bo->va = radeon_bomgr_find_va(mgr, bo->va_size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
            bo->va = va.offset;
            radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
        }
    }

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}

static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = get_radeon_bo(buffer);

    memset(&flink, 0, sizeof(flink));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flinked) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flinked = TRUE;
            bo->flink = flink.name;
        }
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    }

    whandle->stride = stride;
    return TRUE;
}

static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
    return ((struct radeon_bo*)buf)->va;
}

void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
}
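/* Example usage of the winsys entry points wired up above (an illustrative
 * sketch only: error handling is omitted, and the `cs` and `data` variables
 * are assumed to exist in the caller):
 *
 *    struct radeon_winsys *ws = ...;
 *    struct pb_buffer *buf =
 *        ws->buffer_create(ws, 4096, 4096, PIPE_BIND_CONSTANT_BUFFER,
 *                          RADEON_DOMAIN_GTT);
 *    struct radeon_winsys_cs_handle *handle = ws->buffer_get_cs_handle(buf);
 *    void *ptr = ws->buffer_map(handle, cs, PIPE_TRANSFER_WRITE);
 *    memcpy(ptr, data, 4096);
 *    ws->buffer_unmap(handle);
 */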