Searched refs:bo (Results 1 - 25 of 56) sorted by relevance

/drivers/gpu/drm/radeon/
radeon_semaphore.c
36 struct radeon_semaphore_bo *bo; local
43 bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
44 if (bo == NULL) {
47 INIT_LIST_HEAD(&bo->free);
48 INIT_LIST_HEAD(&bo->list);
49 bo->nused = 0;
51 r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
53 dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
54 kfree(bo);
58 gpu_addr += bo
75 radeon_semaphore_del_bo_locked(struct radeon_device *rdev, struct radeon_semaphore_bo *bo) argument
86 struct radeon_semaphore_bo *bo, *n; local
106 struct radeon_semaphore_bo *bo; local
166 struct radeon_semaphore_bo *bo, *n; local
[all...]
radeon_object.c
42 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
49 void radeon_bo_clear_va(struct radeon_bo *bo) argument
53 list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
65 struct radeon_bo *bo; local
67 bo = container_of(tbo, struct radeon_bo, tbo);
68 mutex_lock(&bo->rdev->gem.mutex);
69 list_del_init(&bo->list);
70 mutex_unlock(&bo->rdev->gem.mutex);
71 radeon_bo_clear_surface_reg(bo);
72 radeon_bo_clear_va(bo);
77 radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo) argument
109 struct radeon_bo *bo; local
179 radeon_bo_kmap(struct radeon_bo *bo, void **ptr) argument
202 radeon_bo_kunmap(struct radeon_bo *bo) argument
211 radeon_bo_unref(struct radeon_bo **bo) argument
227 radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, u64 *gpu_addr) argument
277 radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) argument
282 radeon_bo_unpin(struct radeon_bo *bo) argument
314 struct radeon_bo *bo, *n; local
365 struct radeon_bo *bo; local
396 radeon_bo_fbdev_mmap(struct radeon_bo *bo, struct vm_area_struct *vma) argument
402 radeon_bo_get_surface_reg(struct radeon_bo *bo) argument
457 radeon_bo_clear_surface_reg(struct radeon_bo *bo) argument
472 radeon_bo_set_tiling_flags(struct radeon_bo *bo, uint32_t tiling_flags, uint32_t pitch) argument
532 radeon_bo_get_tiling_flags(struct radeon_bo *bo, uint32_t *tiling_flags, uint32_t *pitch) argument
543 radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, bool force_drop) argument
571 radeon_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) argument
582 radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) argument
613 radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) argument
641 radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) argument
[all...]
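
The destroy path above (radeon_object.c line 67) recovers the driver's radeon_bo from the embedded ttm_buffer_object with container_of(). A minimal standalone sketch of that idiom follows; my_tbo and my_bo are stand-in types invented for illustration, not the real TTM structures:

#include <stddef.h>
#include <stdio.h>

/* Stub stand-ins for ttm_buffer_object / radeon_bo. */
struct my_tbo { int mem_type; };
struct my_bo  { int id; struct my_tbo tbo; };

/* Same idiom as the kernel's container_of(): recover the outer
 * struct from a pointer to one of its embedded members. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void destroy_cb(struct my_tbo *tbo)
{
    struct my_bo *bo = container_of(tbo, struct my_bo, tbo);
    printf("destroying bo %d\n", bo->id);
}

int main(void)
{
    struct my_bo bo = { .id = 42 };
    destroy_cb(&bo.tbo);   /* TTM hands the callback only the embedded object */
    return 0;
}
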
radeon_object.h
55 int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait);
57 static inline void radeon_bo_unreserve(struct radeon_bo *bo) argument
59 ttm_bo_unreserve(&bo->tbo);
63 * radeon_bo_gpu_offset - return GPU offset of bo
64 * @bo: radeon object for which we query the offset
71 static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo) argument
73 return bo->tbo.offset;
76 static inline unsigned long radeon_bo_size(struct radeon_bo *bo) argument
78 return bo->tbo.num_pages << PAGE_SHIFT;
81 static inline bool radeon_bo_is_reserved(struct radeon_bo *bo) argument
86 radeon_bo_ngpu_pages(struct radeon_bo *bo) argument
91 radeon_bo_gpu_page_alignment(struct radeon_bo *bo) argument
105 radeon_bo_mmap_offset(struct radeon_bo *bo) argument
[all...]
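
The inline helpers above convert TTM's page counts into byte sizes (radeon_bo_size: num_pages << PAGE_SHIFT) and, plausibly, into GPU-page counts. A self-contained sketch of that arithmetic, assuming 4 KiB CPU pages; stub_bo, GPU_PAGE_SIZE, and bo_ngpu_pages are placeholders, not the real radeon definitions:

#include <stdio.h>

#define PAGE_SHIFT 12          /* assume 4 KiB CPU pages */
#define GPU_PAGE_SIZE 4096     /* placeholder GPU page size */

struct stub_bo { unsigned long num_pages; };

/* Mirrors radeon_bo_size(): pages -> bytes. */
static unsigned long bo_size(const struct stub_bo *bo)
{
    return bo->num_pages << PAGE_SHIFT;
}

/* Plausible shape of radeon_bo_ngpu_pages(): bytes -> GPU pages. */
static unsigned long bo_ngpu_pages(const struct stub_bo *bo)
{
    return bo_size(bo) / GPU_PAGE_SIZE;
}

int main(void)
{
    struct stub_bo bo = { .num_pages = 16 };
    printf("%lu bytes, %lu GPU pages\n", bo_size(&bo), bo_ngpu_pages(&bo));
    return 0;   /* prints: 65536 bytes, 16 GPU pages */
}
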
radeon_sa.c
40 sa_manager->bo = NULL;
46 RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
48 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
66 radeon_bo_unref(&sa_manager->bo);
75 if (sa_manager->bo == NULL) {
76 dev_err(rdev->dev, "no bo for sa manager\n");
81 r = radeon_bo_reserve(sa_manager->bo, false);
83 dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
86 r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
88 radeon_bo_unreserve(sa_manager->bo);
[all...]
radeon_trace.h
16 TP_PROTO(struct radeon_bo *bo),
17 TP_ARGS(bo),
19 __field(struct radeon_bo *, bo)
24 __entry->bo = bo;
25 __entry->pages = bo->tbo.num_pages;
27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
radeon_gart.c
297 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
340 if (rdev->vm_manager.sa_manager.bo == NULL)
350 if (rdev->vm_manager.sa_manager.bo == NULL) {
438 return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
439 &rdev->ib_pool.sa_manager.bo->tbo.mem);
445 struct radeon_bo *bo,
451 uint64_t size = radeon_bo_size(bo), last_offset = 0;
459 bo_va->bo = bo;
499 /* bo ca
443 radeon_vm_bo_add(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo, uint64_t offset, uint32_t flags) argument
549 radeon_vm_bo_update_pte(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo, struct ttm_mem_reg *mem) argument
597 radeon_vm_bo_rmv(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo) argument
619 radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo *bo) argument
[all...]
radeon_ttm.c
173 static void radeon_evict_flags(struct ttm_buffer_object *bo, argument
179 if (!radeon_ttm_bo_is_radeon_bo(bo)) {
188 rbo = container_of(bo, struct radeon_bo, tbo);
189 switch (bo->mem.mem_type) {
203 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) argument
208 static void radeon_move_null(struct ttm_buffer_object *bo, argument
211 struct ttm_mem_reg *old_mem = &bo->mem;
218 static int radeon_move_blit(struct ttm_buffer_object *bo, argument
228 rdev = radeon_get_rdev(bo->bdev);
299 r = ttm_bo_move_accel_cleanup(bo, (voi
305 radeon_move_vram_ram(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
352 radeon_move_ram_vram(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
391 radeon_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
810 struct ttm_buffer_object *bo; local
[all...]
/drivers/gpu/drm/ttm/
ttm_bo.c
48 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
84 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, argument
90 bo, bo->mem.num_pages, bo->mem.size >> 10,
91 bo->mem.size >> 20);
99 ttm_mem_type_debug(bo->bdev, mem_type);
137 struct ttm_buffer_object *bo = local
139 struct ttm_bo_device *bdev = bo->bdev;
140 size_t acc_size = bo
161 ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) argument
173 ttm_bo_add_to_lru(struct ttm_buffer_object *bo) argument
195 ttm_bo_del_from_lru(struct ttm_buffer_object *bo) argument
216 ttm_bo_reserve_locked(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence) argument
276 ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, bool never_free) argument
283 ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence) argument
303 ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) argument
310 ttm_bo_unreserve(struct ttm_buffer_object *bo) argument
323 ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) argument
355 ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem, bool evict, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) argument
467 ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) argument
488 ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) argument
555 ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) argument
678 struct ttm_buffer_object *bo = local
699 struct ttm_buffer_object *bo = *p_bo; local
723 ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) argument
785 struct ttm_buffer_object *bo; local
842 ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) argument
855 ttm_bo_mem_force_space(struct ttm_buffer_object *bo, uint32_t mem_type, struct ttm_placement *placement, struct ttm_mem_reg *mem, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) argument
936 ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) argument
1043 ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) argument
1053 ttm_bo_move_buffer(struct ttm_buffer_object *bo, struct ttm_placement *placement, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) argument
1112 ttm_bo_validate(struct ttm_buffer_object *bo, struct ttm_placement *placement, bool interruptible, bool no_wait_reserve, bool no_wait_gpu) argument
1153 ttm_bo_check_placement(struct ttm_buffer_object *bo, struct ttm_placement *placement) argument
1162 ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, unsigned long size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, unsigned long buffer_start, bool interruptible, struct file *persistent_swap_storage, size_t acc_size, void (*destroy) (struct ttm_buffer_object *)) argument
1297 struct ttm_buffer_object *bo; local
1603 ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) argument
1615 ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) argument
1628 ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) argument
1664 ttm_bo_setup_vm(struct ttm_buffer_object *bo) argument
1701 ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait) argument
1759 ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) argument
1781 ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) argument
1797 struct ttm_buffer_object *bo; local
[all...]
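
ttm_bo_reserve() is the per-buffer lock at the heart of this file: a single winner flips the reserved flag, and the no_wait path returns -EBUSY instead of sleeping. The full version also juggles LRU lists and deadlock-avoidance sequence numbers; a userspace sketch of just the try-acquire step, with a stub type in place of ttm_buffer_object:

#include <stdatomic.h>
#include <errno.h>
#include <stdio.h>

struct stub_bo { atomic_int reserved; };

/* no_wait flavour of the reserve step: exactly one atomic 0 -> 1
 * transition wins; everyone else gets -EBUSY rather than blocking. */
static int try_reserve(struct stub_bo *bo)
{
    int expected = 0;
    if (!atomic_compare_exchange_strong(&bo->reserved, &expected, 1))
        return -EBUSY;
    return 0;
}

static void unreserve(struct stub_bo *bo)
{
    /* the real ttm_bo_unreserve() also re-adds the bo to the LRU
     * and wakes waiters on bo->event_queue; omitted here */
    atomic_store(&bo->reserved, 0);
}

int main(void)
{
    struct stub_bo bo = { 0 };
    printf("first:  %d\n", try_reserve(&bo));  /* 0 */
    printf("second: %d\n", try_reserve(&bo));  /* -EBUSY */
    unreserve(&bo);
    printf("third:  %d\n", try_reserve(&bo));  /* 0 again */
    return 0;
}
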
ttm_bo_vm.c
49 struct ttm_buffer_object *bo; local
53 bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
54 cur_offset = bo->vm_node->start;
57 best_bo = bo;
76 struct ttm_buffer_object *bo = (struct ttm_buffer_object *) local
78 struct ttm_bo_device *bdev = bo->bdev;
89 &bdev->man[bo->mem.mem_type];
97 ret = ttm_bo_reserve(bo, true, true, false, 0);
105 ret = bdev->driver->fault_reserve_notify(bo);
126 if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo
232 struct ttm_buffer_object *bo = local
240 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; local
256 struct ttm_buffer_object *bo; local
296 ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) argument
313 struct ttm_buffer_object *bo; local
402 ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf, char __user *rbuf, size_t count, loff_t *f_pos, bool write) argument
[all...]
ttm_execbuf_util.c
40 struct ttm_buffer_object *bo = entry->bo; local
45 ttm_bo_add_to_lru(bo);
50 atomic_set(&bo->reserved, 0);
51 wake_up_all(&bo->event_queue);
60 struct ttm_buffer_object *bo = entry->bo; local
65 entry->put_count = ttm_bo_del_from_lru(bo);
76 struct ttm_buffer_object *bo = entry->bo; local
85 ttm_eu_wait_unreserved_locked(struct list_head *list, struct ttm_buffer_object *bo) argument
153 struct ttm_buffer_object *bo = entry->bo; local
206 struct ttm_buffer_object *bo; local
[all...]
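
The execbuf helper must reserve every buffer on a validate list or none of them; on contention it backs off, releasing everything it already holds, so two submitters grabbing overlapping lists in different orders cannot deadlock. A stub sketch of that all-or-nothing loop (stub_bo and the function names are invented, no real TTM calls):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct stub_bo { bool reserved; };

static int try_reserve(struct stub_bo *bo)
{
    if (bo->reserved)
        return -EBUSY;
    bo->reserved = true;
    return 0;
}

/* All-or-nothing: on the first failure, release what we already
 * hold (the "backoff" in ttm_eu_backoff_reservation) and report
 * back so the caller can wait and retry. */
static int reserve_all(struct stub_bo **list, int n)
{
    for (int i = 0; i < n; i++) {
        if (try_reserve(list[i]) != 0) {
            while (--i >= 0)
                list[i]->reserved = false;
            return -EBUSY;
        }
    }
    return 0;
}

int main(void)
{
    struct stub_bo a = {0}, b = {0}, c = { .reserved = true };
    struct stub_bo *list[] = { &a, &b, &c };
    printf("ret=%d a=%d b=%d\n", reserve_all(list, 3), a.reserved, b.reserved);
    return 0;   /* ret=-16; a and b rolled back to unreserved */
}
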
ttm_bo_util.c
40 void ttm_bo_free_old_node(struct ttm_buffer_object *bo) argument
42 ttm_bo_mem_put(bo, &bo->mem);
45 int ttm_bo_move_ttm(struct ttm_buffer_object *bo, argument
49 struct ttm_tt *ttm = bo->ttm;
50 struct ttm_mem_reg *old_mem = &bo->mem;
55 ttm_bo_free_old_node(bo);
100 struct ttm_buffer_object *bo; local
105 bo = list_first_entry(&man->io_reserve_lru,
108 list_del_init(&bo
153 ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) argument
173 ttm_mem_io_free_vm(struct ttm_buffer_object *bo) argument
316 ttm_bo_move_memcpy(struct ttm_buffer_object *bo, bool evict, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
398 ttm_transfered_destroy(struct ttm_buffer_object *bo) argument
418 ttm_buffer_object_transfer(struct ttm_buffer_object *bo, struct ttm_buffer_object **new_obj) argument
483 ttm_bo_ioremap(struct ttm_buffer_object *bo, unsigned long offset, unsigned long size, struct ttm_bo_kmap_obj *map) argument
505 ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) argument
546 ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct ttm_bo_kmap_obj *map) argument
583 struct ttm_buffer_object *bo = map->bo; local
612 ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, void *sync_obj, void *sync_obj_arg, bool evict, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
[all...]
/drivers/gpu/drm/vmwgfx/
vmwgfx_dmabuf.c
45 * Flushes and unpins the query bo to avoid failures.
56 struct ttm_buffer_object *bo = &buf->base; local
65 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
69 ret = ttm_bo_validate(bo, placement, interruptible, false, false);
71 ttm_bo_unreserve(bo);
84 * Flushes and unpins the query bo if @pin == true to avoid failures.
99 struct ttm_buffer_object *bo = &buf->base; local
110 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
126 ret = ttm_bo_validate(bo, placement, interruptible, false, false);
141 ret = ttm_bo_validate(bo, placemen
202 struct ttm_buffer_object *bo = &buf->base; local
279 vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, SVGAGuestPtr *ptr) argument
300 vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) argument
[all...]
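
The pattern visible in the hits above is vmwgfx pinning a buffer by bracketing ttm_bo_validate() with a reserve/unreserve pair: reserve, validate into the requested placement, unreserve, with errors unwinding the reserve. A stub sketch of that bracket; the placement enum and all function names here are invented for illustration:

#include <stdio.h>

/* Stub "domains"; the real code passes a struct ttm_placement. */
enum placement { PLACE_SYSTEM, PLACE_GMR, PLACE_VRAM };

struct stub_bo { enum placement where; };

static int  reserve(struct stub_bo *bo)   { (void)bo; return 0; }
static void unreserve(struct stub_bo *bo) { (void)bo; }

static int validate(struct stub_bo *bo, enum placement p)
{
    bo->where = p;   /* the real ttm_bo_validate() may move the buffer */
    return 0;
}

/* Plausible shape of vmw_dmabuf_to_placement(): reserve, validate
 * into the requested placement, then unreserve in every case. */
static int pin_to(struct stub_bo *bo, enum placement p)
{
    int ret = reserve(bo);
    if (ret)
        return ret;
    ret = validate(bo, p);
    unreserve(bo);
    return ret;
}

int main(void)
{
    struct stub_bo buf = { PLACE_SYSTEM };
    printf("ret=%d where=%d\n", pin_to(&buf, PLACE_VRAM), buf.where);
    return 0;
}
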
vmwgfx_gmrid_manager.c
48 struct ttm_buffer_object *bo,
62 gman->used_gmr_pages += bo->num_pages;
86 mem->num_pages = bo->num_pages;
96 gman->used_gmr_pages -= bo->num_pages;
47 vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem) argument
vmwgfx_execbuf.c
60 * vmw_bo_to_validate_list - add a bo to a validate list
63 * @bo: The buffer object to add.
73 struct ttm_buffer_object *bo,
80 val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
91 val_buf->bo = ttm_bo_reference(bo);
446 struct ttm_buffer_object *bo; local
456 bo = &vmw_bo->base;
468 ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
553 struct ttm_buffer_object *bo; local
72 vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, struct ttm_buffer_object *bo, uint32_t fence_flags, uint32_t *p_val_node) argument
867 struct ttm_buffer_object *bo; local
910 vmw_validate_single_buffer(struct vmw_private *dev_priv, struct ttm_buffer_object *bo) argument
[all...]
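
vmw_bo_to_validate_list() deduplicates: if the bo already has a node on the software context's validate list, the existing node is reused; otherwise a node is appended (and the real code takes a ttm_bo_reference() on first insertion). A sketch with a fixed array standing in for the real list, reference counting noted only in a comment:

#include <stdio.h>

#define MAX_VAL 8

struct stub_bo { int id; };

static struct stub_bo *val_list[MAX_VAL];
static int val_count;

/* Return the slot index for bo, appending it only if it is not
 * already queued; the real helper also grabs a reference then. */
static int add_to_validate_list(struct stub_bo *bo)
{
    for (int i = 0; i < val_count; i++)
        if (val_list[i] == bo)
            return i;          /* duplicate: reuse the node */
    if (val_count == MAX_VAL)
        return -1;             /* list full */
    val_list[val_count] = bo;
    return val_count++;
}

int main(void)
{
    struct stub_bo a = {1}, b = {2};
    printf("%d %d %d\n",
           add_to_validate_list(&a),    /* 0 */
           add_to_validate_list(&b),    /* 1 */
           add_to_validate_list(&a));   /* 0 again, not re-added */
    return 0;
}
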
/drivers/gpu/drm/nouveau/
nouveau_bo.c
43 nouveau_bo_del_ttm(struct ttm_buffer_object *bo) argument
45 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
47 struct nouveau_bo *nvbo = nouveau_bo(bo);
50 DRM_ERROR("bo %p still attached to GEM object\n", bo);
60 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
107 nvbo->bo.bdev = &dev_priv->ttm.bdev;
116 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
122 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
151 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo
195 struct ttm_buffer_object *bo = &nvbo->bo; local
238 struct ttm_buffer_object *bo = &nvbo->bo; local
445 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) argument
487 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) argument
526 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) argument
616 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, struct nouveau_channel *chan, struct ttm_mem_reg *mem) argument
625 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) argument
691 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
738 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
772 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
805 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) argument
830 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, struct nouveau_tile_reg **new_tile) argument
852 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, struct nouveau_tile_reg *new_tile, struct nouveau_tile_reg **old_tile) argument
864 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_reserve, bool no_wait_gpu, struct ttm_mem_reg *new_mem) argument
920 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) argument
1012 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) argument
[all...]
nouveau_gem.c
45 struct ttm_buffer_object *bo = &nvbo->bo; local
56 ttm_bo_unref(&bo);
73 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
95 ttm_bo_unreserve(&nvbo->bo);
110 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
121 ttm_bo_unreserve(&nvbo->bo);
156 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
162 nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
175 if (nvbo->bo
236 struct ttm_buffer_object *bo = &nvbo->bo; local
541 nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev, struct drm_nouveau_gem_pushbuf *req, struct drm_nouveau_gem_pushbuf_bo *bo) argument
630 struct drm_nouveau_gem_pushbuf_bo *bo; local
[all...]
nouveau_fence.c
299 nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
328 nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
519 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
534 /* map fence bo into channel's vm */
535 ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
563 nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
576 0, 0, &dev_priv->fence.bo);
580 ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
584 ret = nouveau_bo_map(dev_priv->fence.bo);
[all...]
nv50_evo.c
157 u64 pushbuf = evo->pushbuf_bo->bo.offset;
228 if (disp->crtc[i].sem.bo) {
229 nouveau_bo_unmap(disp->crtc[i].sem.bo);
230 nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
336 0, 0x0000, &dispc->sem.bo);
338 ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
340 ret = nouveau_bo_map(dispc->sem.bo);
342 nouveau_bo_ref(NULL, &dispc->sem.bo);
343 offset = dispc->sem.bo->bo
[all...]
nouveau_dma.c
40 const int ib_size = pushbuf->bo.mem.size / 2;
42 chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
47 chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
49 chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
109 nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, argument
117 vma = nouveau_bo_vma_find(bo, chan->vm);
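
Lines 40-49 of nouveau_dma.c split the pushbuf in two: the upper half holds indirect-buffer (IB) entries, the lower half command data, with dma.max and dma.ib_base measured in 32-bit words. The arithmetic worked through for a hypothetical 64 KiB pushbuf (the real size comes from pushbuf->bo.mem.size):

#include <stdio.h>

int main(void)
{
    const int size = 64 * 1024;                    /* hypothetical pushbuf size */

    const int ib_size     = size / 2;              /* line 40: upper half = IB */
    const int ib_base     = (size - ib_size) >> 2; /* line 42: word offset */
    const int dma_max_ib  = (size - ib_size) >> 2; /* line 47: IB-mode limit */
    const int dma_max_old = (size >> 2) - 2;       /* line 49: no-IB fallback */

    printf("ib_size=%d ib_base=%d max(ib)=%d max(no-ib)=%d\n",
           ib_size, ib_base, dma_max_ib, dma_max_old);
    return 0;   /* prints: ib_size=32768 ib_base=8192 max(ib)=8192 max(no-ib)=16382 */
}
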
nouveau_display.c
390 ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
394 ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
401 ttm_bo_unreserve(&new_bo->bo);
413 ttm_bo_unreserve(&new_bo->bo);
416 ttm_bo_unreserve(&old_bo->bo);
439 ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
500 new_bo->bo.offset };
503 chan = nouveau_fence_channel(new_bo->bo.sync_obj);
582 struct nouveau_bo *bo; local
589 ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
614 struct nouveau_bo *bo = gem->driver_private; local
[all...]
nouveau_notifier.c
68 ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
131 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
135 offset = chan->notifier_bo->bo.offset;
/drivers/staging/rtl8192u/ieee80211/
aes.c
127 #define f_rn(bo, bi, n, k) \
128 bo[n] = ft_tab[0][byte(bi[n],0)] ^ \
133 #define i_rn(bo, bi, n, k) \
134 bo[n] = it_tab[0][byte(bi[n],0)] ^ \
145 #define f_rl(bo, bi, n, k) \
146 bo[n] = fl_tab[0][byte(bi[n],0)] ^ \
151 #define i_rl(bo, bi, n, k) \
152 bo[n] = il_tab[0][byte(bi[n],0)] ^ \
329 #define f_nround(bo, bi, k) \
330 f_rn(bo, b
[all...]
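
The listing truncates the AES round macros at the match. Judging from the visible first term, each f_rn() output word XORs four table lookups over diagonally rotated input bytes plus a round-key word; the sketch below reconstructs that classic shape as a guess, not verbatim driver code. The table contents here are placeholders, not the real AES forward tables, so the output is illustrative only:

#include <stdint.h>
#include <stdio.h>

/* byte(x, n): extract byte n of a 32-bit word, as in the driver. */
#define byte(x, n) ((uint8_t)((x) >> (8 * (n))))

/* Dummy stand-in; the real ft_tab[] holds the AES forward-round
 * lookup tables. */
static uint32_t ft_tab[4][256];

/* Assumed full shape of f_rn(): the visible first term is
 * ft_tab[0][byte(bi[n],0)]; the remaining columns are taken
 * diagonally, then the round key word is XORed in. */
#define f_rn(bo, bi, n, k)                                  \
    (bo)[n] = ft_tab[0][byte((bi)[((n) + 0) & 3], 0)] ^     \
              ft_tab[1][byte((bi)[((n) + 1) & 3], 1)] ^     \
              ft_tab[2][byte((bi)[((n) + 2) & 3], 2)] ^     \
              ft_tab[3][byte((bi)[((n) + 3) & 3], 3)] ^ *((k) + (n))

int main(void)
{
    uint32_t bi[4] = { 0x03020100, 0x07060504, 0x0b0a0908, 0x0f0e0d0c };
    uint32_t bo[4], rk[4] = { 0 };

    for (int i = 0; i < 256; i++)           /* fill the dummy tables */
        for (int t = 0; t < 4; t++)
            ft_tab[t][i] = (uint32_t)i * 0x01010101u ^ (uint32_t)t;

    for (int n = 0; n < 4; n++)
        f_rn(bo, bi, n, rk);

    printf("%08x %08x %08x %08x\n", bo[0], bo[1], bo[2], bo[3]);
    return 0;
}
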
/drivers/staging/omapdrm/
omap_fbdev.c
38 struct drm_gem_object *bo; member in struct:omap_fbdev
89 omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages);
178 /* allocate backing bo */
183 fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
184 if (!fbdev->bo) {
190 fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
194 * to unref the bo:
196 drm_gem_object_unreference(fbdev->bo);
201 /* note: this keeps the bo pinned.. which is perhaps not ideal,
209 ret = omap_gem_get_paddr(fbdev->bo,
[all...]
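
Lines 190-196 above show an ownership handoff: omap_framebuffer_init() takes its own reference to the backing bo, so the fbdev code drops the allocation reference immediately afterwards. A toy refcount sketch of that handoff; gem_new/fb_init are invented names standing in for the real GEM and framebuffer calls:

#include <stdio.h>
#include <stdlib.h>

struct stub_gem { int refcount; };

static struct stub_gem *gem_new(void)
{
    struct stub_gem *bo = calloc(1, sizeof(*bo));
    bo->refcount = 1;                 /* allocation reference */
    return bo;
}

static void gem_ref(struct stub_gem *bo) { bo->refcount++; }

static void gem_unref(struct stub_gem *bo)
{
    if (--bo->refcount == 0)
        free(bo);
}

/* Stands in for omap_framebuffer_init(): the framebuffer keeps its
 * own reference for as long as it exists. */
static void fb_init(struct stub_gem *bo) { gem_ref(bo); }

int main(void)
{
    struct stub_gem *bo = gem_new();  /* ref = 1 */
    fb_init(bo);                      /* ref = 2, fb now owns one */
    gem_unref(bo);                    /* drop the alloc ref, ref = 1 */
    printf("refs held by fb: %d\n", bo->refcount);
    gem_unref(bo);                    /* fb teardown frees the object */
    return 0;
}
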
omap_fb.c
78 struct drm_gem_object *bo; member in struct:plane
98 omap_fb->planes[0].bo, handle);
112 if (plane->bo)
113 drm_gem_object_unreference_unlocked(plane->bo);
180 void (*unpin)(void *arg, struct drm_gem_object *bo))
196 unpin(arg, pa->bo);
201 ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
209 unpin(arg, pb->bo);
223 return omap_fb->planes[p].bo;
293 omap_gem_describe(plane->bo,
178 omap_framebuffer_replace(struct drm_framebuffer *a, struct drm_framebuffer *b, void *arg, void (*unpin)(void *arg, struct drm_gem_object *bo)) argument
[all...]
omap_plane.c
60 /* set of bo's pending unpin until next END_WIN irq */
101 struct drm_gem_object *bo = NULL; local
102 int ret = kfifo_get(&omap_plane->unpin_fifo, &bo);
104 omap_gem_put_paddr(bo);
105 drm_gem_object_unreference_unlocked(bo);
240 static void unpin(void *arg, struct drm_gem_object *bo) argument
246 (const struct drm_gem_object **)&bo)) {
249 drm_gem_object_reference(bo);
252 omap_gem_put_paddr(bo);
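
omap_plane defers unpinning: a buffer leaving the screen gets an extra GEM reference and is pushed into unpin_fifo, and the next END_WIN interrupt drains the fifo once the hardware has finished scanning it out. A userspace sketch with a small power-of-two ring standing in for the kernel kfifo:

#include <stdio.h>

#define FIFO_SZ 8   /* must stay a power of two for the index math */

struct stub_bo { int id; int pinned; };

static struct stub_bo *fifo[FIFO_SZ];
static unsigned head, tail;

/* Queue a bo for deferred unpin (the real code also grabs a GEM
 * reference so the object outlives the scanout still using it). */
static int defer_unpin(struct stub_bo *bo)
{
    if (head - tail == FIFO_SZ)
        return -1;                   /* fifo full */
    fifo[head++ % FIFO_SZ] = bo;
    return 0;
}

/* Runs from the END_WIN irq in the driver: the hardware is done
 * with the old buffers, so they can finally be unpinned. */
static void end_win_irq(void)
{
    while (tail != head) {
        struct stub_bo *bo = fifo[tail++ % FIFO_SZ];
        bo->pinned = 0;
        printf("unpinned bo %d\n", bo->id);
    }
}

int main(void)
{
    struct stub_bo a = {1, 1}, b = {2, 1};
    defer_unpin(&a);
    defer_unpin(&b);
    end_win_irq();   /* both buffers unpinned after the "irq" */
    return 0;
}
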

Completed in 307 milliseconds
