Searched defs:bo (Results 1 - 25 of 38) sorted by relevance

/drivers/gpu/drm/radeon/
radeon_object.h
55 int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait);
57 static inline void radeon_bo_unreserve(struct radeon_bo *bo) argument
59 ttm_bo_unreserve(&bo->tbo);
63 * radeon_bo_gpu_offset - return GPU offset of bo
64 * @bo: radeon object for which we query the offset
71 static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo) argument
73 return bo->tbo.offset;
76 static inline unsigned long radeon_bo_size(struct radeon_bo *bo) argument
78 return bo->tbo.num_pages << PAGE_SHIFT;
81 static inline bool radeon_bo_is_reserved(struct radeon_bo *bo) argument
86 radeon_bo_ngpu_pages(struct radeon_bo *bo) argument
91 radeon_bo_gpu_page_alignment(struct radeon_bo *bo) argument
105 radeon_bo_mmap_offset(struct radeon_bo *bo) argument
[all...]
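
Usage sketch (not part of the indexed tree): the inline helpers above combine in the usual reserve/query/unreserve bracket. Only the signatures visible in this result group are assumed.

static u64 example_bo_offset(struct radeon_bo *bo)
{
        u64 offset = 0;

        /* the GPU offset is only meaningful while the bo is reserved */
        if (radeon_bo_reserve(bo, false) == 0) {
                offset = radeon_bo_gpu_offset(bo);
                radeon_bo_unreserve(bo);
        }
        return offset;
}
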
radeon_semaphore.c
36 struct radeon_semaphore_bo *bo; local
43 bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
44 if (bo == NULL) {
47 INIT_LIST_HEAD(&bo->free);
48 INIT_LIST_HEAD(&bo->list);
49 bo->nused = 0;
51 r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
53 dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
54 kfree(bo);
58 gpu_addr += bo
75 radeon_semaphore_del_bo_locked(struct radeon_device *rdev, struct radeon_semaphore_bo *bo) argument
86 struct radeon_semaphore_bo *bo, *n; local
106 struct radeon_semaphore_bo *bo; local
166 struct radeon_semaphore_bo *bo, *n; local
[all...]
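
The snippet above reads as a standard allocate-then-back-out pattern; a reconstruction under that assumption (field names and calls taken from the lines shown, the hypothetical function name and exact error paths are not quoted from the source):

static struct radeon_semaphore_bo *
example_semaphore_bo_alloc(struct radeon_device *rdev)
{
        struct radeon_semaphore_bo *bo;
        int r;

        bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
        if (bo == NULL)
                return NULL;
        INIT_LIST_HEAD(&bo->free);
        INIT_LIST_HEAD(&bo->list);
        bo->nused = 0;

        r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
        if (r) {
                dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
                kfree(bo);
                return NULL;
        }
        return bo;
}
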
radeon_cs.c
79 p->relocs[i].lobj.bo = p->relocs[i].robj;
82 p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
370 struct radeon_bo *bo; local
374 bo = lobj->bo;
375 r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
434 * offset inside the pool bo
radeon_gart.c
296 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
339 if (rdev->vm_manager.sa_manager.bo == NULL)
349 if (rdev->vm_manager.sa_manager.bo == NULL) {
437 return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
438 &rdev->ib_pool.sa_manager.bo->tbo.mem);
444 struct radeon_bo *bo,
450 uint64_t size = radeon_bo_size(bo), last_offset = 0;
458 bo_va->bo = bo;
492 /* bo ca
442 radeon_vm_bo_add(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo, uint64_t offset, uint32_t flags) argument
542 radeon_vm_bo_update_pte(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo, struct ttm_mem_reg *mem) argument
590 radeon_vm_bo_rmv(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo) argument
612 radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo *bo) argument
[all...]
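
From the signatures collected at the end of this group, the per-VM flow appears to be: add the bo to the VM, then update page-table entries against the bo's current TTM memory. A sketch on that assumption (int return types assumed; not the driver's exact code):

static int example_vm_map(struct radeon_device *rdev, struct radeon_vm *vm,
                          struct radeon_bo *bo, uint64_t offset,
                          uint32_t flags)
{
        int r;

        r = radeon_vm_bo_add(rdev, vm, bo, offset, flags);
        if (r)
                return r;
        /* bind the bo's current backing store into the page tables */
        return radeon_vm_bo_update_pte(rdev, vm, bo, &bo->tbo.mem);
}
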
radeon_object.c
42 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
49 void radeon_bo_clear_va(struct radeon_bo *bo) argument
53 list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
65 struct radeon_bo *bo; local
67 bo = container_of(tbo, struct radeon_bo, tbo);
68 mutex_lock(&bo->rdev->gem.mutex);
69 list_del_init(&bo->list);
70 mutex_unlock(&bo->rdev->gem.mutex);
71 radeon_bo_clear_surface_reg(bo);
72 radeon_bo_clear_va(bo);
77 radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo) argument
109 struct radeon_bo *bo; local
179 radeon_bo_kmap(struct radeon_bo *bo, void **ptr) argument
202 radeon_bo_kunmap(struct radeon_bo *bo) argument
211 radeon_bo_unref(struct radeon_bo **bo) argument
227 radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) argument
255 radeon_bo_unpin(struct radeon_bo *bo) argument
287 struct radeon_bo *bo, *n; local
338 struct radeon_bo *bo; local
369 radeon_bo_fbdev_mmap(struct radeon_bo *bo, struct vm_area_struct *vma) argument
375 radeon_bo_get_surface_reg(struct radeon_bo *bo) argument
430 radeon_bo_clear_surface_reg(struct radeon_bo *bo) argument
445 radeon_bo_set_tiling_flags(struct radeon_bo *bo, uint32_t tiling_flags, uint32_t pitch) argument
459 radeon_bo_get_tiling_flags(struct radeon_bo *bo, uint32_t *tiling_flags, uint32_t *pitch) argument
470 radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, bool force_drop) argument
498 radeon_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) argument
509 radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) argument
540 radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) argument
568 radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) argument
[all...]
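
A CPU-access sketch combining the definitions above (reserve, pin, kmap); the ordering and the unwind on failure are assumptions, not the driver's exact code:

static int example_bo_cpu_map(struct radeon_bo *bo, u32 domain,
                              u64 *gpu_addr, void **cpu_ptr)
{
        int r;

        r = radeon_bo_reserve(bo, false);
        if (r)
                return r;
        r = radeon_bo_pin(bo, domain, gpu_addr);
        if (r == 0) {
                r = radeon_bo_kmap(bo, cpu_ptr);
                if (r)
                        radeon_bo_unpin(bo);    /* unwind the pin */
        }
        radeon_bo_unreserve(bo);
        return r;
}
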
radeon_pm.c
156 struct radeon_bo *bo, *n; local
161 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
162 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
163 ttm_bo_unmap_virtual(&bo->tbo);
/drivers/gpu/drm/nouveau/
nv50_display.h
41 struct nouveau_bo *bo; member in struct:nv50_display_crtc::__anon593
nouveau_dma.c
40 const int ib_size = pushbuf->bo.mem.size / 2;
42 chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
47 chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
49 chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
168 nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, argument
176 vma = nouveau_bo_vma_find(bo, chan->vm);
nouveau_display.c
379 ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
383 ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
390 ttm_bo_unreserve(&new_bo->bo);
402 ttm_bo_unreserve(&new_bo->bo);
405 ttm_bo_unreserve(&old_bo->bo);
428 ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
485 new_bo->bo.offset };
488 chan = nouveau_fence_channel(new_bo->bo.sync_obj);
567 struct nouveau_bo *bo; local
574 ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
599 struct nouveau_bo *bo = gem->driver_private; local
[all...]
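
The page-flip path reserves both buffers before syncing fences. A reduced sketch of just the reservation step (new-then-old ordering taken from the lines above; assumes distinct buffers, everything else omitted):

static int example_flip_reserve(struct nouveau_bo *new_bo,
                                struct nouveau_bo *old_bo)
{
        int ret;

        ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
        if (ret)
                return ret;
        ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
        if (ret)
                ttm_bo_unreserve(&new_bo->bo);  /* unwind on failure */
        return ret;
}
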
nouveau_gem.c
45 struct ttm_buffer_object *bo = &nvbo->bo; local
56 ttm_bo_unref(&bo);
73 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
95 ttm_bo_unreserve(&nvbo->bo);
110 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
121 ttm_bo_unreserve(&nvbo->bo);
156 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
162 nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
175 if (nvbo->bo
236 struct ttm_buffer_object *bo = &nvbo->bo; local
543 nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev, struct drm_nouveau_gem_pushbuf *req, struct drm_nouveau_gem_pushbuf_bo *bo) argument
632 struct drm_nouveau_gem_pushbuf_bo *bo; local
[all...]
nouveau_bo.c
43 nouveau_bo_del_ttm(struct ttm_buffer_object *bo) argument
45 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
47 struct nouveau_bo *nvbo = nouveau_bo(bo);
50 DRM_ERROR("bo %p still attached to GEM object\n", bo);
60 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
107 nvbo->bo.bdev = &dev_priv->ttm.bdev;
116 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
122 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
151 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo
195 struct ttm_buffer_object *bo = &nvbo->bo; local
238 struct ttm_buffer_object *bo = &nvbo->bo; local
445 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) argument
487 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) argument
526 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) argument
616 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, struct nouveau_channel *chan, struct ttm_mem_reg *mem) argument
625 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) argument
691 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
743 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
777 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
810 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) argument
835 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, struct nouveau_tile_reg **new_tile) argument
857 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, struct nouveau_tile_reg *new_tile, struct nouveau_tile_reg **old_tile) argument
869 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
925 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) argument
1017 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) argument
[all...]
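
The three move_m2mf variants above share one (chan, bo, old_mem, new_mem) shape, one per hardware generation. A hypothetical dispatch sketch (the variants are static to nouveau_bo.c, and the card_type constants are assumed from this era of the driver):

static int example_move_dispatch(struct nouveau_channel *chan,
                                 struct ttm_buffer_object *bo,
                                 struct ttm_mem_reg *old_mem,
                                 struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);

        /* NV_C0/NV_50 are assumed card_type thresholds */
        if (dev_priv->card_type >= NV_C0)
                return nvc0_bo_move_m2mf(chan, bo, old_mem, new_mem);
        if (dev_priv->card_type >= NV_50)
                return nv50_bo_move_m2mf(chan, bo, old_mem, new_mem);
        return nv04_bo_move_m2mf(chan, bo, old_mem, new_mem);
}
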
nouveau_mem.c
426 NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
765 struct ttm_buffer_object *bo,
772 struct nouveau_bo *nvbo = nouveau_bo(bo);
851 struct ttm_buffer_object *bo,
855 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
764 nouveau_vram_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem) argument
850 nouveau_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem) argument
nouveau_object.c
727 struct nouveau_bo *bo; local
729 bo = nvd0_display_crtc_sema(dev, i);
731 bo = nv50_display(dev)->crtc[i].sem.bo;
733 ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
804 u64 offset = dispc->sem.bo->bo.offset;
884 struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i); local
885 nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
892 nouveau_bo_vma_del(dispc->sem.bo,
[all...]
/drivers/gpu/drm/ttm/
ttm_bo_manager.c
51 struct ttm_buffer_object *bo,
50 ttm_bo_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem) argument
ttm_execbuf_util.c
40 struct ttm_buffer_object *bo = entry->bo; local
45 ttm_bo_add_to_lru(bo);
50 atomic_set(&bo->reserved, 0);
51 wake_up_all(&bo->event_queue);
60 struct ttm_buffer_object *bo = entry->bo; local
65 entry->put_count = ttm_bo_del_from_lru(bo);
76 struct ttm_buffer_object *bo = entry->bo; local
85 ttm_eu_wait_unreserved_locked(struct list_head *list, struct ttm_buffer_object *bo) argument
153 struct ttm_buffer_object *bo = entry->bo; local
206 struct ttm_buffer_object *bo; local
[all...]
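
The backoff path above returns each buffer to the LRU and wakes reservation waiters. A sketch of that loop (the 'head' list member of ttm_validate_buffer and the caller-held LRU lock are assumptions):

static void example_eu_backoff(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                ttm_bo_add_to_lru(bo);          /* back onto the LRU */
                atomic_set(&bo->reserved, 0);   /* drop the reservation */
                wake_up_all(&bo->event_queue);  /* wake waiters */
        }
}
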
ttm_bo_vm.c
47 struct ttm_buffer_object *bo; local
51 bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
52 cur_offset = bo->vm_node->start;
55 best_bo = bo;
74 struct ttm_buffer_object *bo = (struct ttm_buffer_object *) local
76 struct ttm_bo_device *bdev = bo->bdev;
87 &bdev->man[bo->mem.mem_type];
95 ret = ttm_bo_reserve(bo, true, true, false, 0);
103 ret = bdev->driver->fault_reserve_notify(bo);
124 if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo
230 struct ttm_buffer_object *bo = local
238 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; local
254 struct ttm_buffer_object *bo; local
295 ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) argument
312 struct ttm_buffer_object *bo; local
401 ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, char __user *rbuf, size_t count, loff_t *f_pos, bool write) argument
[all...]
ttm_bo_util.c
40 void ttm_bo_free_old_node(struct ttm_buffer_object *bo) argument
42 ttm_bo_mem_put(bo, &bo->mem);
45 int ttm_bo_move_ttm(struct ttm_buffer_object *bo, argument
49 struct ttm_tt *ttm = bo->ttm;
50 struct ttm_mem_reg *old_mem = &bo->mem;
55 ttm_bo_free_old_node(bo);
100 struct ttm_buffer_object *bo; local
105 bo = list_first_entry(&man->io_reserve_lru,
108 list_del_init(&bo
153 ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) argument
173 ttm_mem_io_free_vm(struct ttm_buffer_object *bo) argument
316 ttm_bo_move_memcpy(struct ttm_buffer_object *bo, bool evict, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
398 ttm_transfered_destroy(struct ttm_buffer_object *bo) argument
418 ttm_buffer_object_transfer(struct ttm_buffer_object *bo, struct ttm_buffer_object **new_obj) argument
483 ttm_bo_ioremap(struct ttm_buffer_object *bo, unsigned long offset, unsigned long size, struct ttm_bo_kmap_obj *map) argument
505 ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) argument
546 ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) argument
583 struct ttm_buffer_object *bo = map->bo; local
612 ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, void *sync_obj, void *sync_obj_arg, bool evict, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
[all...]
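
ttm_bo_kmap()/ttm_bo_kunmap() bracket temporary CPU mappings of a bo. A sketch mapping a single page (ttm_kmap_obj_virtual() is the usual TTM accessor and an assumption here, as is the example access itself):

static int example_clear_page(struct ttm_buffer_object *bo,
                              unsigned long page)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        void *virt;
        int ret;

        ret = ttm_bo_kmap(bo, page, 1, &map);
        if (ret)
                return ret;
        virt = ttm_kmap_obj_virtual(&map, &is_iomem);
        if (!is_iomem)          /* real code would use memset_io() otherwise */
                memset(virt, 0, PAGE_SIZE);
        ttm_bo_kunmap(&map);
        return 0;
}
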
/drivers/gpu/drm/vmwgfx/
vmwgfx_dmabuf.c
45 * Flushes and unpins the query bo to avoid failures.
56 struct ttm_buffer_object *bo = &buf->base; local
65 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
69 ret = ttm_bo_validate(bo, placement, interruptible, false, false);
71 ttm_bo_unreserve(bo);
84 * Flushes and unpins the query bo if @pin == true to avoid failures.
99 struct ttm_buffer_object *bo = &buf->base; local
110 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
126 ret = ttm_bo_validate(bo, placement, interruptible, false, false);
141 ret = ttm_bo_validate(bo, placemen
202 struct ttm_buffer_object *bo = &buf->base; local
279 vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, SVGAGuestPtr *ptr) argument
300 vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) argument
[all...]
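
The pin helpers above share one shape: reserve, validate into the requested placement, unreserve. Distilled (this is the pattern, not vmwgfx's exact function):

static int example_pin(struct ttm_buffer_object *bo,
                       struct ttm_placement *placement, bool interruptible)
{
        int ret;

        ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
        if (ret)
                return ret;
        ret = ttm_bo_validate(bo, placement, interruptible, false, false);
        ttm_bo_unreserve(bo);
        return ret;
}
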
vmwgfx_gmrid_manager.c
48 struct ttm_buffer_object *bo,
62 gman->used_gmr_pages += bo->num_pages;
86 mem->num_pages = bo->num_pages;
96 gman->used_gmr_pages -= bo->num_pages;
47 vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem) argument
vmwgfx_buffer.c
229 * one slot per bo. There is an upper limit of the number of
230 * slots as well as the bo size.
245 void vmw_evict_flags(struct ttm_buffer_object *bo, argument
255 static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) argument
291 static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) argument
vmwgfx_fifo.c
539 struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; local
557 if (bo->mem.mem_type == TTM_PL_VRAM) {
559 cmd->body.guestResult.offset = bo->offset;
561 cmd->body.guestResult.gmrId = bo->mem.start;
vmwgfx_scrn.c
204 struct ttm_buffer_object *bo; local
209 bo = &sou->buffer->base;
210 ttm_bo_unref(&bo);
/drivers/staging/omapdrm/
omap_fb.c
64 struct drm_gem_object *bo; member in struct:plane
84 omap_fb->planes[0].bo, handle);
98 if (plane->bo)
99 drm_gem_object_unreference_unlocked(plane->bo);
134 ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
144 omap_gem_put_paddr(plane->bo);
157 omap_gem_put_paddr(plane->bo);
195 return omap_fb->planes[p].bo;
338 plane->bo = bos[i];
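
omap_gem_get_paddr()/omap_gem_put_paddr() act as a pin/unpin pair for scanout buffers. A sketch against the struct plane members shown above (the meaning of the final boolean is an assumption):

static int example_plane_pin(struct plane *plane)
{
        /* pins the bo and returns its physical address via plane->paddr */
        return omap_gem_get_paddr(plane->bo, &plane->paddr, true);
}

static void example_plane_unpin(struct plane *plane)
{
        omap_gem_put_paddr(plane->bo);
}
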
omap_fbdev.c
38 struct drm_gem_object *bo; member in struct:omap_fbdev
93 omap_gem_roll(fbdev->bo, var->yoffset * npages);
162 /* allocate backing bo */
167 fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
168 if (!fbdev->bo) {
174 fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
178 * to unref the bo:
180 drm_gem_object_unreference(fbdev->bo);
185 /* note: this keeps the bo pinned.. which is perhaps not ideal,
193 ret = omap_gem_get_paddr(fbdev->bo,
[all...]
/drivers/usb/wusbcore/
crypto.c
128 u8 *bo = _bo; local
132 bo[itr] = bi1[itr] ^ bi2[itr];
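
The two lines shown are the core of a byte-wise XOR; completed as a self-contained helper (name and surrounding declarations reconstructed, not quoted from the source):

static void example_bytewise_xor(void *_bo, const void *_bi1,
                                 const void *_bi2, size_t size)
{
        u8 *bo = _bo;
        const u8 *bi1 = _bi1, *bi2 = _bi2;
        size_t itr;

        for (itr = 0; itr < size; itr++)
                bo[itr] = bi1[itr] ^ bi2[itr];
}
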

Completed in 270 milliseconds
