Lines Matching defs:bo

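All matches appear to come from Mesa's radeon DRM winsys buffer code, the layer that implements pb_buffer on top of libdrm's GEM ioctls; the leading number on each match is its line in that source file.
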
97 static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
99 assert(bo->vtbl == &radeon_bo_vtbl);
100 return (struct radeon_bo *)bo;
134 struct radeon_bo *bo = NULL;
137 bo = radeon_bo(_buf);
144 bo = radeon_bo(base_buf);
147 return bo;
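
Lines 97-100 and 134-147 are the type plumbing: a generic pb_buffer may only be cast to a radeon_bo after its vtbl pointer proves it really is one, and get_radeon_bo additionally unwraps a buffer that a cache manager may have wrapped around the real BO. A minimal sketch of the same checked-downcast pattern, with hypothetical names (base_buf, my_bo, my_bo_vtbl) standing in for the pb_buffer types:

    #include <assert.h>

    struct vtbl { void (*destroy)(void *buf); };

    struct base_buf { const struct vtbl *vtbl; };

    struct my_bo {
        struct base_buf base;   /* first member, so the cast below is exact */
        int handle;
    };

    static const struct vtbl my_bo_vtbl;

    /* Downcast only after the vtbl proves the dynamic type. */
    static struct my_bo *to_my_bo(struct base_buf *buf)
    {
        assert(buf->vtbl == &my_bo_vtbl);
        return (struct my_bo *)buf;
    }

The assert is the only thing separating this from an unchecked C downcast, which is why the wrappers are used everywhere instead of raw casts.
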
152 struct radeon_bo *bo = get_radeon_bo(_buf);
154 while (p_atomic_read(&bo->num_active_ioctls)) {
159 /*if (bo->rws->info.drm_minor >= 12) {
161 args.handle = bo->handle;
163 while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
168 args.handle = bo->handle;
169 while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
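
Lines 152-169 are the blocking wait. The code first spins until the BO's count of in-flight ioctls drops to zero, then asks the kernel to wait; the commented-out branch at 159 would use the newer DRM_RADEON_GEM_WAIT on kernels with drm_minor >= 12, and the fallback retries DRM_RADEON_GEM_WAIT_IDLE for as long as the kernel answers -EBUSY. A sketch of that fallback against the radeon UAPI (the wrapper name wait_idle is hypothetical):

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <radeon_drm.h>

    /* Block until the GPU has finished with the buffer
     * (the pre-GEM_WAIT path used on older kernels). */
    static void wait_idle(int fd, uint32_t handle)
    {
        struct drm_radeon_gem_wait_idle args;
        memset(&args, 0, sizeof(args));
        args.handle = handle;
        while (drmCommandWriteRead(fd, DRM_RADEON_GEM_WAIT_IDLE,
                                   &args, sizeof(args)) == -EBUSY);
    }
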
177 struct radeon_bo *bo = get_radeon_bo(_buf);
179 if (p_atomic_read(&bo->num_active_ioctls)) {
184 /*if (bo->rws->info.drm_minor >= 12) {
186 args.handle = bo->handle;
188 return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
193 args.handle = bo->handle;
194 return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
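
Lines 177-194 are the non-blocking counterpart: a BO with ioctls still in flight is reported busy immediately, otherwise DRM_RADEON_GEM_BUSY asks the kernel. The same referenced-by-CS and busy tests apparently reappear at lines 622-628, where a buffer is checked before reuse. A sketch of the kernel query (wrapper name hypothetical):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <radeon_drm.h>

    /* Non-blocking query: the ioctl fails with -EBUSY while the GPU
     * still owns the buffer, so any nonzero return is treated as busy. */
    static int is_busy(int fd, uint32_t handle)
    {
        struct drm_radeon_gem_busy args;
        memset(&args, 0, sizeof(args));
        args.handle = handle;
        return drmCommandWriteRead(fd, DRM_RADEON_GEM_BUSY,
                                   &args, sizeof(args)) != 0;
    }
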
366 struct radeon_bo *bo = radeon_bo(_buf);
367 struct radeon_bomgr *mgr = bo->mgr;
372 if (bo->name) {
373 pipe_mutex_lock(bo->mgr->bo_handles_mutex);
374 util_hash_table_remove(bo->mgr->bo_handles,
375 (void*)(uintptr_t)bo->name);
376 pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
379 if (bo->ptr)
380 os_munmap(bo->ptr, bo->base.size);
383 args.handle = bo->handle;
384 drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
387 radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
390 pipe_mutex_destroy(bo->map_mutex);
391 FREE(bo);
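
Lines 366-391 show the teardown order in the destroy path: drop the flink-name entry from the manager's hash table so the BO can no longer be looked up (373-376), munmap any CPU mapping (379-380), close the GEM handle (383-384), return the virtual-address range to the allocator (387), and only then destroy the mutex and free the struct. The handle close is a generic GEM ioctl rather than a radeon-specific one; a sketch assuming only libdrm:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>   /* drmIoctl; pulls in the drm_gem_close UAPI */

    static void gem_close(int fd, uint32_t handle)
    {
        struct drm_gem_close args;
        memset(&args, 0, sizeof(args));
        args.handle = handle;
        drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
    }
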
398 struct radeon_bo *bo = (struct radeon_bo*)buf;
415 if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
420 if (radeon_bo_is_busy((struct pb_buffer*)bo,
425 if (radeon_bo_is_referenced_by_cs(cs, bo)) {
430 if (radeon_bo_is_busy((struct pb_buffer*)bo,
444 if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
447 radeon_bo_wait((struct pb_buffer*)bo,
451 if (radeon_bo_is_referenced_by_cs(cs, bo)) {
455 if (p_atomic_read(&bo->num_active_ioctls))
459 radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
465 if (bo->ptr)
466 return bo->ptr;
469 pipe_mutex_lock(bo->map_mutex);
471 if (bo->ptr) {
472 pipe_mutex_unlock(bo->map_mutex);
473 return bo->ptr;
475 args.handle = bo->handle;
477 args.size = (uint64_t)bo->base.size;
478 if (drmCommandWriteRead(bo->rws->fd,
482 pipe_mutex_unlock(bo->map_mutex);
484 bo, bo->handle);
489 bo->rws->fd, args.addr_ptr);
491 pipe_mutex_unlock(bo->map_mutex);
495 bo->ptr = ptr;
496 pipe_mutex_unlock(bo->map_mutex);
498 return bo->ptr;
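
Lines 398-498 are the map path, the densest group here. Lines 415-459 decide what must happen before mapping: depending on whether the caller wants to write and whether the current CS references the BO, the CS is flushed and/or radeon_bo_wait is called. The mapping itself (465-498) is classic double-checked locking: bo->ptr is tested once without the lock and again under map_mutex, so concurrent mappers race at most once for the DRM_RADEON_GEM_MMAP ioctl and the mmap. A condensed sketch with pthreads standing in for pipe_mutex and plain mmap for os_mmap (struct and wrapper names hypothetical):

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <xf86drm.h>
    #include <radeon_drm.h>

    struct mapped_bo {
        int fd;                      /* DRM device fd */
        uint32_t handle;             /* GEM handle */
        uint64_t size;
        void *ptr;                   /* CPU mapping, created lazily */
        pthread_mutex_t map_mutex;
    };

    static void *bo_map(struct mapped_bo *bo)
    {
        struct drm_radeon_gem_mmap args;
        void *ptr;

        if (bo->ptr)                 /* fast path, no lock */
            return bo->ptr;

        pthread_mutex_lock(&bo->map_mutex);
        if (bo->ptr) {               /* lost the race: reuse the winner's map */
            pthread_mutex_unlock(&bo->map_mutex);
            return bo->ptr;
        }

        /* The ioctl maps nothing itself; it only hands back a fake
         * offset (addr_ptr) for the subsequent mmap on the DRM fd. */
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        args.size = bo->size;
        if (drmCommandWriteRead(bo->fd, DRM_RADEON_GEM_MMAP,
                                &args, sizeof(args))) {
            pthread_mutex_unlock(&bo->map_mutex);
            return NULL;
        }

        ptr = mmap(NULL, args.size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, bo->fd, args.addr_ptr);
        if (ptr == MAP_FAILED) {
            pthread_mutex_unlock(&bo->map_mutex);
            return NULL;
        }

        bo->ptr = ptr;
        pthread_mutex_unlock(&bo->map_mutex);
        return bo->ptr;
    }

(os_mmap in the real code exists mainly to get a 64-bit file offset on 32-bit hosts, which the plain mmap call here glosses over.)
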
542 struct radeon_bo *bo;
566 bo = CALLOC_STRUCT(radeon_bo);
567 if (!bo)
570 pipe_reference_init(&bo->base.reference, 1);
571 bo->base.alignment = desc->alignment;
572 bo->base.usage = desc->usage;
573 bo->base.size = size;
574 bo->base.vtbl = &radeon_bo_vtbl;
575 bo->mgr = mgr;
576 bo->rws = mgr->rws;
577 bo->handle = args.handle;
578 bo->va = 0;
579 pipe_mutex_init(bo->map_mutex);
584 bo->va_size = align(size, 4096);
585 bo->va = radeon_bomgr_find_va(mgr, bo->va_size, desc->alignment);
587 va.handle = bo->handle;
593 va.offset = bo->va;
600 radeon_bo_destroy(&bo->base);
604 radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
605 bo->va = va.offset;
606 radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
610 return &bo->base;
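
Lines 542-610 are buffer creation. Once the GEM object exists, every base field is initialized (570-579), and on virtual-memory kernels a GPU address is assigned: the VA size is the buffer size rounded up to the 4 KiB page (584), a free range comes from the manager's allocator (585), and a DRM_RADEON_GEM_VA ioctl maps it. Lines 600-606 handle the kernel reporting that the BO already has an address: the tentatively reserved range is returned to the allocator, and the kernel's offset is adopted and force-marked as used instead. The allocation preceding all of this is DRM_RADEON_GEM_CREATE; a sketch from the radeon UAPI (struct fields per radeon_drm.h, wrapper name hypothetical):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <radeon_drm.h>

    /* Allocate a GEM buffer; initial_domain is RADEON_GEM_DOMAIN_VRAM,
     * _GTT or _CPU. Returns the new handle, or 0 on failure. */
    static uint32_t gem_create(int fd, uint64_t size, uint64_t alignment,
                               uint32_t initial_domain)
    {
        struct drm_radeon_gem_create args;
        memset(&args, 0, sizeof(args));
        args.size = size;
        args.alignment = alignment;
        args.initial_domain = initial_domain;
        if (drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
                                &args, sizeof(args)))
            return 0;
        return args.handle;
    }
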
622 struct radeon_bo *bo = radeon_bo(_buf);
624 if (radeon_bo_is_referenced_by_any_cs(bo)) {
628 if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
718 struct radeon_bo *bo = get_radeon_bo(_buf);
723 args.handle = bo->handle;
725 drmCommandWriteRead(bo->rws->fd,
757 struct radeon_bo *bo = get_radeon_bo(_buf);
765 if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
769 while (p_atomic_read(&bo->num_active_ioctls)) {
796 args.handle = bo->handle;
799 drmCommandWriteRead(bo->rws->fd,
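
Lines 718-725 and 757-799 read and write tiling state through DRM_RADEON_GEM_GET_TILING and DRM_RADEON_GEM_SET_TILING. The setter is the careful one: it flushes the CS if the BO is referenced there (765) and spins until in-flight ioctls drain (769), since the tiling layout must not change while the GPU may still be addressing the old one. A sketch of the set side (struct fields as I recall them from radeon_drm.h, wrapper name hypothetical):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <radeon_drm.h>

    /* Change the micro/macro tiling flags and pitch of a buffer. */
    static int set_tiling(int fd, uint32_t handle,
                          uint32_t tiling_flags, uint32_t pitch)
    {
        struct drm_radeon_gem_set_tiling args;
        memset(&args, 0, sizeof(args));
        args.handle = handle;
        args.tiling_flags = tiling_flags;
        args.pitch = pitch;
        return drmCommandWriteRead(fd, DRM_RADEON_GEM_SET_TILING,
                                   &args, sizeof(args));
    }
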
850 struct radeon_bo *bo;
857 /* We must maintain a list of pairs <handle, bo>, so that we always return
865 /* First check if there already is an existing bo for the handle. */
866 bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
867 if (bo) {
870 pb_reference(&b, &bo->base);
875 bo = CALLOC_STRUCT(radeon_bo);
876 if (!bo) {
883 FREE(bo);
886 bo->handle = open_arg.handle;
887 bo->name = whandle->handle;
890 pipe_reference_init(&bo->base.reference, 1);
891 bo->base.alignment = 0;
892 bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
893 bo->base.size = open_arg.size;
894 bo->base.vtbl = &radeon_bo_vtbl;
895 bo->mgr = mgr;
896 bo->rws = mgr->rws;
897 bo->va = 0;
898 pipe_mutex_init(bo->map_mutex);
900 util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);
908 if (mgr->va && !bo->va) {
911 bo->va_size = ((bo->base.size + 4095) & ~4095);
912 bo->va = radeon_bomgr_find_va(mgr, bo->va_size, 1 << 20);
914 va.handle = bo->handle;
917 va.offset = bo->va;
921 va.offset = bo->va;
925 radeon_bo_destroy(&bo->base);
929 radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
930 bo->va = va.offset;
931 radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
935 return (struct pb_buffer*)bo;
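
Lines 850-935 import a buffer shared by another process, and the truncated comment at 857 states the invariant: the manager keeps a <handle, bo> table so that importing the same name twice yields the same radeon_bo, not two GEM handles aliasing one kernel object. The flow is: look the name up in bo_handles and just take a reference on a hit (866-870); otherwise open the name, build a fresh radeon_bo around the resulting handle and size (875-898), insert it into the table (900), and on VM kernels set up a VA range exactly as in the creation path, including the VA-already-exists fallback (908-931). The open itself is the generic GEM ioctl; a sketch assuming only libdrm (wrapper name hypothetical):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>   /* drmIoctl; pulls in the drm_gem_open UAPI */

    /* Turn a global flink name into a process-local GEM handle. */
    static int gem_open(int fd, uint32_t name,
                        uint32_t *handle, uint64_t *size)
    {
        struct drm_gem_open args;
        memset(&args, 0, sizeof(args));
        args.name = name;
        if (drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &args))
            return -1;
        *handle = args.handle;
        *size = args.size;
        return 0;
    }
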
947 struct radeon_bo *bo = get_radeon_bo(buffer);
952 if (!bo->flinked) {
953 flink.handle = bo->handle;
955 if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
959 bo->flinked = TRUE;
960 bo->flink = flink.name;
962 whandle->handle = bo->flink;
964 whandle->handle = bo->handle;
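
Lines 947-964 export a handle. A shared (flink) name is created at most once and cached in bo->flinked / bo->flink (952-960), and it is that global name which other processes feed to GEM_OPEN above; for the KMS case the process-local GEM handle is handed out directly (964). A sketch of the flink side (wrapper name hypothetical):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>   /* drmIoctl; pulls in the drm_gem_flink UAPI */

    /* Publish a GEM buffer under a global flink name. */
    static int gem_flink(int fd, uint32_t handle, uint32_t *name)
    {
        struct drm_gem_flink args;
        memset(&args, 0, sizeof(args));
        args.handle = handle;
        if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &args))
            return -1;
        *name = args.name;
        return 0;
    }

The listing's own code at 955 calls raw ioctl(); drmIoctl here is the libdrm wrapper that restarts the call on EINTR.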