Lines matching refs:bo

43 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
45 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
47 struct nouveau_bo *nvbo = nouveau_bo(bo);
50 DRM_ERROR("bo %p still attached to GEM object\n", bo);
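
All hits appear to come from nouveau's TTM buffer-object code. Lines 43-50 are the destructor nouveau registers with TTM; a live GEM handle at destruction time means a refcounting bug elsewhere. A minimal sketch of how these fragments fit together, assuming that era's driver (the nvbo->gem field and the final kfree() are assumptions not shown in the listing):

    static void
    nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
    {
            struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
            struct nouveau_bo *nvbo = nouveau_bo(bo);

            /* a GEM object still holding this bo means a leaked reference */
            if (unlikely(nvbo->gem))
                    DRM_ERROR("bo %p still attached to GEM object\n", bo);

            kfree(nvbo);
    }
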
60 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
107 nvbo->bo.bdev = &dev_priv->ttm.bdev;
116 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
122 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
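
Lines 60-122 belong to buffer creation: num_pages is derived from the byte size before the object is handed to ttm_bo_init(), whose destroy callback is how nouveau_bo_del_ttm above gets wired in. A hedged sketch; the exact ttm_bo_init() argument list varied across kernel releases, and flags/align here are assumed local variables:

    nvbo->bo.bdev = &dev_priv->ttm.bdev;
    nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
    nouveau_bo_placement_set(nvbo, flags, 0);

    /* on failure TTM calls the destroy callback, which frees nvbo */
    ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                      ttm_bo_type_device, &nvbo->placement,
                      align >> PAGE_SHIFT, 0, false, NULL, size,
                      nouveau_bo_del_ttm);
    if (ret)
            return ret;
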
151 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
156 nvbo->bo.mem.num_pages < vram_pages / 4) {
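
Line 156's vram_pages / 4 test is part of placement setup: small tiled buffers are steered so that color and depth buffers land in opposite halves of VRAM, and therefore on different memory controller partitions. Roughly, assuming the NOUVEAU_GEM_TILE_ZETA flag and the fpfn/lpfn placement-range fields of that era's driver, with `type` being the requested placement:

    if (nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
        nvbo->bo.mem.num_pages < vram_pages / 4) {
            if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                    nvbo->placement.fpfn = vram_pages / 2;  /* upper half */
                    nvbo->placement.lpfn = ~0;
            } else {
                    nvbo->placement.fpfn = 0;               /* lower half */
                    nvbo->placement.lpfn = vram_pages / 2;
            }
    }
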
194 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
195 struct ttm_buffer_object *bo = &nvbo->bo;
198 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
199 NV_ERROR(nouveau_bdev(bo->bdev)->dev,
200 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
201 1 << bo->mem.mem_type, memtype);
208 ret = ttm_bo_reserve(bo, false, false, false, 0);
216 switch (bo->mem.mem_type) {
218 dev_priv->fb_aper_free -= bo->mem.size;
221 dev_priv->gart_info.aper_free -= bo->mem.size;
227 ttm_bo_unreserve(bo);
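
Lines 194-227 are the pin path: refuse to pin a buffer that is already pinned into a different memory type (lines 198-201), then reserve, restrict the placement to the requested domain, validate, and debit the matching aperture-free counter. A condensed sketch, assuming nouveau_bo_validate() is the thin ttm_bo_validate() wrapper seen at line 295 and a pin_refcnt field on struct nouveau_bo:

    int
    nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
    {
            struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
            struct ttm_buffer_object *bo = &nvbo->bo;
            int ret;

            if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type)))
                    return -EINVAL;         /* pinned elsewhere */
            if (nvbo->pin_refcnt++)
                    return 0;               /* already pinned here */

            ret = ttm_bo_reserve(bo, false, false, false, 0);
            if (ret)
                    goto out;

            nouveau_bo_placement_set(nvbo, memtype, 0);
            ret = nouveau_bo_validate(nvbo, false, false, false);
            if (ret == 0) {
                    switch (bo->mem.mem_type) {
                    case TTM_PL_VRAM:
                            dev_priv->fb_aper_free -= bo->mem.size;
                            break;
                    case TTM_PL_TT:
                            dev_priv->gart_info.aper_free -= bo->mem.size;
                            break;
                    }
            }
            ttm_bo_unreserve(bo);
    out:
            if (ret)
                    nvbo->pin_refcnt--;
            return ret;
    }
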
237 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
238 struct ttm_buffer_object *bo = &nvbo->bo;
244 ret = ttm_bo_reserve(bo, false, false, false, 0);
248 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
252 switch (bo->mem.mem_type) {
254 dev_priv->fb_aper_free += bo->mem.size;
257 dev_priv->gart_info.aper_free += bo->mem.size;
264 ttm_bo_unreserve(bo);
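
Lines 237-264 are the mirror image, unpin: line 248 re-applies the buffer's current placement without the pin restriction, and the switch credits the aperture counters back. Sketch of the core:

    ret = ttm_bo_reserve(bo, false, false, false, 0);
    if (ret)
            return ret;

    /* drop the pinned placement restriction, then revalidate */
    nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
    ret = nouveau_bo_validate(nvbo, false, false, false);
    if (ret == 0) {
            switch (bo->mem.mem_type) {
            case TTM_PL_VRAM:
                    dev_priv->fb_aper_free += bo->mem.size;
                    break;
            case TTM_PL_TT:
                    dev_priv->gart_info.aper_free += bo->mem.size;
                    break;
            }
    }
    ttm_bo_unreserve(bo);
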
273 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
277 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
278 ttm_bo_unreserve(&nvbo->bo);
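
Lines 273-278 show the standard TTM kernel-mapping pattern: reserve the object, kmap every page into nvbo->kmap, and unreserve whether or not the kmap succeeded. These three lines are close to the whole function:

    int
    nouveau_bo_map(struct nouveau_bo *nvbo)
    {
            int ret;

            ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
            if (ret)
                    return ret;

            ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                              &nvbo->kmap);
            ttm_bo_unreserve(&nvbo->bo);
            return ret;
    }
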
295 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
445 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
447 struct nouveau_bo *nvbo = nouveau_bo(bo);
449 switch (bo->mem.mem_type) {
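
Lines 445-449 are the eviction-placement hook: when TTM must evict a buffer, VRAM contents are steered to GART with system memory as the busy fallback, and everything else goes straight to system memory. Sketch, assuming the usual TTM_PL_FLAG_* placement flags:

    static void
    nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
    {
            struct nouveau_bo *nvbo = nouveau_bo(bo);

            switch (bo->mem.mem_type) {
            case TTM_PL_VRAM:
                    nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                             TTM_PL_FLAG_SYSTEM);
                    break;
            default:
                    nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                    break;
            }
            *pl = nvbo->placement;
    }
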
480 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
487 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
526 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
530 struct nouveau_bo *nvbo = nouveau_bo(bo);
616 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
625 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
638 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
639 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
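
Lines 487-639 are the per-generation copy engines: nvc0, nv50 and nv04 variants of the M2MF blit, all with the same (chan, bo, old_mem, new_mem) shape. The nv04 path (lines 638-639) addresses source and destination through DMA context objects, and nouveau_bo_mem_ctxdma() at line 616 simply picks the channel's GART or VRAM handle by memory type. A sketch, assuming the vram_handle/gart_handle fields on struct nouveau_channel of that era:

    static inline uint32_t
    nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                          struct nouveau_channel *chan, struct ttm_mem_reg *mem)
    {
            if (mem->mem_type == TTM_PL_TT)
                    return chan->gart_handle;   /* bo backed by GART */
            return chan->vram_handle;           /* bo backed by VRAM */
    }
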
691 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
695 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
696 struct nouveau_bo *nvbo = nouveau_bo(bo);
697 struct ttm_mem_reg *old_mem = &bo->mem;
724 ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
727 ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
729 ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
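
Lines 691-729 are the dispatcher those hooks hang off: it selects the nv04/nv50/nvc0 implementation by card type, and on success hands the fence to ttm_bo_move_accel_cleanup() (line 480) so TTM can retire the old backing store once the copy completes. A condensed sketch with channel selection and locking omitted; the card_type constants are assumptions from that era's driver:

    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    int ret;

    if (dev_priv->card_type < NV_50)
            ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
    else if (dev_priv->card_type < NV_C0)
            ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
    else
            ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
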
743 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
758 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
762 ret = ttm_tt_bind(bo->ttm, &tmp_mem);
766 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
770 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
772 ttm_bo_mem_put(bo, &tmp_mem);
777 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
792 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
796 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
800 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
805 ttm_bo_mem_put(bo, &tmp_mem);
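
Lines 743-805 are the two bounce paths. The copy engine can only touch memory the GPU can address, so VRAM<->system moves go through a temporary GART placement: move_flipd (VRAM to system) copies into the bounce first and finishes with ttm_bo_move_ttm(), while move_flips (system to VRAM) performs the same two steps in the opposite order. A sketch of the flipd half, assuming the ~2.6.38 struct ttm_placement layout:

    u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
    struct ttm_placement placement;
    struct ttm_mem_reg tmp_mem;
    int ret;

    placement.fpfn = placement.lpfn = 0;
    placement.num_placement = placement.num_busy_placement = 1;
    placement.placement = placement.busy_placement = &placement_memtype;

    tmp_mem = *new_mem;
    tmp_mem.mm_node = NULL;
    ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
                           no_wait_reserve, no_wait_gpu);
    if (ret)
            return ret;

    ret = ttm_tt_bind(bo->ttm, &tmp_mem);   /* back the bounce buffer */
    if (ret)
            goto out;

    /* GPU copy VRAM -> GART bounce, then flip the bo over to system */
    ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve,
                               no_wait_gpu, &tmp_mem);
    if (ret)
            goto out;
    ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
    out:
    ttm_bo_mem_put(bo, &tmp_mem);
    return ret;
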
810 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
812 struct nouveau_bo *nvbo = nouveau_bo(bo);
816 if (bo->destroy != nouveau_bo_del_ttm)
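
Line 816's bo->destroy != nouveau_bo_del_ttm test is how the move-notify hook tells nouveau's own buffers apart from TTM-internal ones: only objects created with nouveau's destructor have a struct nouveau_bo wrapped around them, so anything else must be ignored before nvbo fields are touched. Roughly, with the vma_list walk assumed from the VM-aware driver this listing seems to match:

    static void
    nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
    {
            struct nouveau_bo *nvbo = nouveau_bo(bo);
            struct nouveau_vma *vma;

            /* not one of ours: nouveau_bo(bo) would be garbage */
            if (bo->destroy != nouveau_bo_del_ttm)
                    return;

            list_for_each_entry(vma, &nvbo->vma_list, head) {
                    if (new_mem && new_mem->mem_type == TTM_PL_VRAM)
                            nouveau_vm_map(vma, new_mem->mm_node);
                    else
                            nouveau_vm_unmap(vma);
            }
    }
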
835 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
838 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
840 struct nouveau_bo *nvbo = nouveau_bo(bo);
857 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
861 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
864 nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
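
Lines 835-864 manage tiling state across a move on pre-NV50 hardware: vm_bind allocates a tile region describing the new VRAM placement, and vm_cleanup releases the old one, passing bo->sync_obj (line 864) so the region cannot be reused until the GPU is done with the buffer. Sketch of the cleanup half:

    static void
    nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                          struct nouveau_tile_reg *new_tile,
                          struct nouveau_tile_reg **old_tile)
    {
            struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
            struct drm_device *dev = dev_priv->dev;

            /* old region stays allocated until bo->sync_obj signals */
            nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
            *old_tile = new_tile;
    }
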
869 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
873 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
874 struct nouveau_bo *nvbo = nouveau_bo(bo);
875 struct ttm_mem_reg *old_mem = &bo->mem;
880 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
885 /* Fake bo copy. */
886 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
887 BUG_ON(bo->mem.mm_node != NULL);
888 bo->mem = *new_mem;
895 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
901 ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
903 ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
905 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
911 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
916 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
918 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
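
Lines 869-918 are the top-level move hook, and the listing preserves its whole decision ladder: a "fake copy" when the buffer has no backing pages yet (lines 886-888, simply adopting new_mem), a memcpy fallback when no channel is up yet (line 895), the three accelerated paths (lines 901-905), another memcpy fallback if acceleration fails (line 911), and finally the tile bookkeeping (lines 916-918). Condensed:

    /* Fake bo copy: nothing allocated yet, just adopt the new placement */
    if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
            BUG_ON(bo->mem.mm_node != NULL);
            bo->mem = *new_mem;
            new_mem->mm_node = NULL;
            goto out;
    }

    if (!dev_priv->channel) {       /* card not running yet: CPU copy */
            ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
                                     no_wait_gpu, new_mem);
            goto out;
    }

    if (new_mem->mem_type == TTM_PL_SYSTEM)
            ret = nouveau_bo_move_flipd(bo, evict, intr,
                                        no_wait_reserve, no_wait_gpu, new_mem);
    else if (old_mem->mem_type == TTM_PL_SYSTEM)
            ret = nouveau_bo_move_flips(bo, evict, intr,
                                        no_wait_reserve, no_wait_gpu, new_mem);
    else
            ret = nouveau_bo_move_m2mf(bo, evict, intr,
                                       no_wait_reserve, no_wait_gpu, new_mem);
    if (ret)        /* acceleration failed: fall back to CPU copy */
            ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
                                     no_wait_gpu, new_mem);
    out:
    if (ret)
            nouveau_bo_vm_cleanup(bo, NULL, &new_tile);     /* drop new tile */
    else
            nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
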
925 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1017 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1019 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1020 struct nouveau_bo *nvbo = nouveau_bo(bo);
1022 /* as long as the bo isn't in vram, and isn't tiled, we've got
1025 if (bo->mem.mem_type != TTM_PL_VRAM) {
1031 /* make sure bo is in mappable vram */
1032 if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
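
Lines 1017-1032 run on a CPU page fault against the buffer's mmap: untiled non-VRAM buffers need nothing (the comment at line 1022), and a VRAM buffer already below fb_mappable_pages (line 1032) is fine as-is; otherwise the placement is clamped to the BAR-mappable range and the buffer revalidated into it. Sketch of the tail:

    /* make sure bo is in mappable vram */
    if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
            return 0;

    /* clamp placement to the CPU-visible aperture and migrate */
    nvbo->placement.fpfn = 0;
    nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
    nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
    return nouveau_bo_validate(nvbo, false, true, false);
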
1050 spin_lock(&nvbo->bo.bdev->fence_lock);
1051 old_fence = nvbo->bo.sync_obj;
1052 nvbo->bo.sync_obj = fence;
1053 spin_unlock(&nvbo->bo.bdev->fence_lock);
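
Lines 1050-1053 attach a new fence to the buffer: the swap of bo.sync_obj happens under the bdev-wide fence_lock, and the previous fence is released only after the lock is dropped. Sketch, assuming that era's nouveau_fence_ref()/nouveau_fence_unref() helpers:

    void
    nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
    {
            struct nouveau_fence *old_fence;

            if (fence)
                    nouveau_fence_ref(fence);

            spin_lock(&nvbo->bo.bdev->fence_lock);
            old_fence = nvbo->bo.sync_obj;
            nvbo->bo.sync_obj = fence;
            spin_unlock(&nvbo->bo.bdev->fence_lock);

            /* drop the old reference outside the lock */
            nouveau_fence_unref(&old_fence);
    }
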
1178 const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
1179 struct nouveau_mem *node = nvbo->bo.mem.mm_node;
1187 if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
1188 nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
1190 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
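
Lines 1178-1190 map a buffer into a per-client GPU virtual address space: the size is rederived from num_pages, and the mapping call differs by backing store, contiguous VRAM versus a page list for GART. Sketch; nouveau_vm_get(), nouveau_vm_map_sg() and the access flags are assumptions from the VM code of that era:

    const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
    struct nouveau_mem *node = nvbo->bo.mem.mm_node;
    int ret;

    ret = nouveau_vm_get(vm, size, nvbo->page_shift, NV_MEM_ACCESS_RW, vma);
    if (ret)
            return ret;

    if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
            nouveau_vm_map(vma, nvbo->bo.mem.mm_node);  /* contiguous VRAM */
    else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
            nouveau_vm_map_sg(vma, 0, size, node);      /* GART page list */
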
1202 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
1203 spin_lock(&nvbo->bo.bdev->fence_lock);
1204 ttm_bo_wait(&nvbo->bo, false, false, false);
1205 spin_unlock(&nvbo->bo.bdev->fence_lock);
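
Lines 1202-1205 are the teardown counterpart: before the GPU mapping is destroyed the driver waits out any fence on the buffer (ttm_bo_wait() under fence_lock, exactly the pattern shown), since the hardware may still be reading through the about-to-die page tables. Roughly:

    if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
            /* wait for outstanding GPU work before unmapping */
            spin_lock(&nvbo->bo.bdev->fence_lock);
            ttm_bo_wait(&nvbo->bo, false, false, false);
            spin_unlock(&nvbo->bo.bdev->fence_lock);
            nouveau_vm_unmap(vma);
    }
    nouveau_vm_put(vma);
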