Lines matching refs:bo — every source line in the TTM buffer-object code (ttm_bo.c) that references the identifier bo; the leading number on each line is its line number in that file.

48 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
84 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
90 bo, bo->mem.num_pages, bo->mem.size >> 10,
91 bo->mem.size >> 20);
99 ttm_mem_type_debug(bo->bdev, mem_type);
137 struct ttm_buffer_object *bo =
139 struct ttm_bo_device *bdev = bo->bdev;
140 size_t acc_size = bo->acc_size;
142 BUG_ON(atomic_read(&bo->list_kref.refcount));
143 BUG_ON(atomic_read(&bo->kref.refcount));
144 BUG_ON(atomic_read(&bo->cpu_writers));
145 BUG_ON(bo->sync_obj != NULL);
146 BUG_ON(bo->mem.mm_node != NULL);
147 BUG_ON(!list_empty(&bo->lru));
148 BUG_ON(!list_empty(&bo->ddestroy));
150 if (bo->ttm)
151 ttm_tt_destroy(bo->ttm);
152 atomic_dec(&bo->glob->bo_count);
153 if (bo->destroy)
154 bo->destroy(bo);
156 kfree(bo);
161 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
164 return wait_event_interruptible(bo->event_queue,
165 atomic_read(&bo->reserved) == 0);
167 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
173 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
175 struct ttm_bo_device *bdev = bo->bdev;
178 BUG_ON(!atomic_read(&bo->reserved));
180 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
182 BUG_ON(!list_empty(&bo->lru));
184 man = &bdev->man[bo->mem.mem_type];
185 list_add_tail(&bo->lru, &man->lru);
186 kref_get(&bo->list_kref);
188 if (bo->ttm != NULL) {
189 list_add_tail(&bo->swap, &bo->glob->swap_lru);
190 kref_get(&bo->list_kref);
195 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
199 if (!list_empty(&bo->swap)) {
200 list_del_init(&bo->swap);
203 if (!list_empty(&bo->lru)) {
204 list_del_init(&bo->lru);
216 int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
220 struct ttm_bo_global *glob = bo->glob;
223 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
225 * Deadlock avoidance for multi-bo reserving.
227 if (use_sequence && bo->seq_valid) {
231 if (unlikely(sequence == bo->val_seq))
237 if (unlikely(sequence - bo->val_seq < (1 << 31)))
245 ret = ttm_bo_wait_unreserved(bo, interruptible);
257 if (unlikely((bo->val_seq - sequence < (1 << 31))
258 || !bo->seq_valid))
259 wake_up_all(&bo->event_queue);
261 bo->val_seq = sequence;
262 bo->seq_valid = true;
264 bo->seq_valid = false;
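The fragments at 223-264 are the ticket-style deadlock avoidance for reserving several buffers at once: each attempt carries a 32-bit sequence number, and "sequence - bo->val_seq < (1 << 31)" is the wrap-safe test for "my ticket is newer than the holder's", in which case the newer contender backs off with -EAGAIN so the oldest ticket eventually wins. A minimal userspace sketch of that comparison; seq_newer() is a hypothetical helper, not part of TTM:

#include <stdint.h>
#include <stdio.h>

/*
 * Wrap-safe "is a newer than b" for 32-bit sequence numbers: true when a
 * is ahead of b by less than 2^31, even across wraparound.  This mirrors
 * the "sequence - bo->val_seq < (1 << 31)" test in the listing above
 * (the a == b case is filtered out earlier there as -EDEADLK).
 */
int seq_newer(uint32_t a, uint32_t b)
{
	return (uint32_t)(a - b) < (1u << 31);
}

int main(void)
{
	printf("%d\n", seq_newer(101, 100));		/* 1: ordinary case          */
	printf("%d\n", seq_newer(5, 0xfffffffeu));	/* 1: newer despite wrap     */
	printf("%d\n", seq_newer(100, 101));		/* 0: older ticket, may wait */
	return 0;
}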
276 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
279 kref_sub(&bo->list_kref, count,
283 int ttm_bo_reserve(struct ttm_buffer_object *bo,
287 struct ttm_bo_global *glob = bo->glob;
292 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
295 put_count = ttm_bo_del_from_lru(bo);
298 ttm_bo_list_ref_sub(bo, put_count, true);
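Lines 185-190, 295 and 298 show the list bookkeeping: each list the object joins (the memory-type LRU and the global swap LRU) pins one list_kref reference, ttm_bo_del_from_lru() only unlinks and reports how many references that released, and the caller drops them later via ttm_bo_list_ref_sub(), outside the spinlock, because the final drop can free the object. A purely illustrative userspace sketch of that pattern; plain counters stand in for the kref and list_heads:

#include <stdio.h>

/* Illustrative only: not TTM code. */
struct fake_bo {
	int list_refs;			/* cf. bo->list_kref      */
	int on_lru, on_swap;		/* cf. bo->lru / bo->swap */
};

/* Joining each list pins one reference (cf. ttm_bo_add_to_lru). */
void add_to_lists(struct fake_bo *bo)
{
	bo->on_lru = 1;  bo->list_refs++;
	bo->on_swap = 1; bo->list_refs++;
}

/* Unlink under the lock but defer the drops: return the count the caller
 * must release once the lock is gone (cf. put_count / ttm_bo_list_ref_sub). */
int del_from_lists(struct fake_bo *bo)
{
	int put_count = 0;

	if (bo->on_swap) { bo->on_swap = 0; put_count++; }
	if (bo->on_lru)  { bo->on_lru = 0;  put_count++; }
	return put_count;
}

int main(void)
{
	struct fake_bo bo = { 1, 0, 0 };	/* one reference from the creator */
	int put_count;

	add_to_lists(&bo);
	put_count = del_from_lists(&bo);
	bo.list_refs -= put_count;		/* done outside the "lock"        */
	printf("refs left: %d\n", bo.list_refs);	/* 1 */
	return 0;
}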
303 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
305 ttm_bo_add_to_lru(bo);
306 atomic_set(&bo->reserved, 0);
307 wake_up_all(&bo->event_queue);
310 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
312 struct ttm_bo_global *glob = bo->glob;
315 ttm_bo_unreserve_locked(bo);
321 * Call bo->mutex locked.
323 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
325 struct ttm_bo_device *bdev = bo->bdev;
326 struct ttm_bo_global *glob = bo->glob;
330 TTM_ASSERT_LOCKED(&bo->mutex);
331 bo->ttm = NULL;
336 switch (bo->type) {
341 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
343 if (unlikely(bo->ttm == NULL))
355 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
360 struct ttm_bo_device *bdev = bo->bdev;
361 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
363 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
368 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
372 ttm_bo_unmap_virtual_locked(bo);
381 if (bo->ttm == NULL) {
383 ret = ttm_bo_add_ttm(bo, zero);
388 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
393 ret = ttm_tt_bind(bo->ttm, mem);
398 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
400 bdev->driver->move_notify(bo, mem);
401 bo->mem = *mem;
408 bdev->driver->move_notify(bo, mem);
412 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
414 ret = bdev->driver->move(bo, evict, interruptible,
417 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
422 *mem = bo->mem;
423 bo->mem = tmp_mem;
424 bdev->driver->move_notify(bo, mem);
425 bo->mem = *mem;
432 if (bo->evicted) {
433 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
436 bo->evicted = false;
439 if (bo->mem.mm_node) {
440 bo->offset = (bo->mem.start << PAGE_SHIFT) +
441 bdev->man[bo->mem.mem_type].gpu_offset;
442 bo->cur_placement = bo->mem.placement;
444 bo->offset = 0;
449 new_man = &bdev->man[bo->mem.mem_type];
450 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
451 ttm_tt_unbind(bo->ttm);
452 ttm_tt_destroy(bo->ttm);
453 bo->ttm = NULL;
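Lines 398-401 short-circuit moves out of system memory (the new placement is simply adopted), and 412-417 pick between re-binding the TT, the driver's move hook, and a CPU copy. A small illustrative reconstruction of that dispatch order; the TTM_MEMTYPE_FLAG_FIXED checks are not visible in this listing and are recalled from the surrounding file, so treat them as an assumption:

enum move_path { MOVE_ASSIGN, MOVE_TTM, MOVE_DRIVER, MOVE_MEMCPY };

/* Illustrative only, not TTM code. */
enum move_path pick_move_path(int old_is_system,
			      int old_is_fixed, int new_is_fixed,
			      int driver_has_move)
{
	if (old_is_system)
		return MOVE_ASSIGN;	/* nothing resident to copy: adopt new mem */
	if (!old_is_fixed && !new_is_fixed)
		return MOVE_TTM;	/* both ends pageable: just re-bind the TT */
	if (driver_has_move)
		return MOVE_DRIVER;	/* driver hook, typically a GPU blit       */
	return MOVE_MEMCPY;		/* fallback CPU copy                       */
}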
460 * Call bo::reserved.
464 * Will release the bo::reserved lock.
467 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
469 if (bo->bdev->driver->move_notify)
470 bo->bdev->driver->move_notify(bo, NULL);
472 if (bo->ttm) {
473 ttm_tt_unbind(bo->ttm);
474 ttm_tt_destroy(bo->ttm);
475 bo->ttm = NULL;
477 ttm_bo_mem_put(bo, &bo->mem);
479 atomic_set(&bo->reserved, 0);
485 wake_up_all(&bo->event_queue);
488 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
490 struct ttm_bo_device *bdev = bo->bdev;
491 struct ttm_bo_global *glob = bo->glob;
499 (void) ttm_bo_wait(bo, false, false, true);
500 if (!bo->sync_obj) {
505 * Lock inversion between bo:reserve and bdev::fence_lock here,
509 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
515 put_count = ttm_bo_del_from_lru(bo);
518 ttm_bo_cleanup_memtype_use(bo);
520 ttm_bo_list_ref_sub(bo, put_count, true);
528 if (bo->sync_obj)
529 sync_obj = driver->sync_obj_ref(bo->sync_obj);
530 sync_obj_arg = bo->sync_obj_arg;
532 kref_get(&bo->list_kref);
533 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
547 * If bo idle, remove from delayed- and lru lists, and unref.
555 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
560 struct ttm_bo_device *bdev = bo->bdev;
561 struct ttm_bo_global *glob = bo->glob;
567 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
575 if (unlikely(list_empty(&bo->ddestroy))) {
580 ret = ttm_bo_reserve_locked(bo, interruptible,
590 * the bo::lock since setting the sync object requires
591 * also bo::reserved. A busy object at this point may
596 if (unlikely(bo->sync_obj)) {
597 atomic_set(&bo->reserved, 0);
598 wake_up_all(&bo->event_queue);
603 put_count = ttm_bo_del_from_lru(bo);
604 list_del_init(&bo->ddestroy);
608 ttm_bo_cleanup_memtype_use(bo);
610 ttm_bo_list_ref_sub(bo, put_count, true);
678 struct ttm_buffer_object *bo =
680 struct ttm_bo_device *bdev = bo->bdev;
681 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
683 if (likely(bo->vm_node != NULL)) {
684 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
685 drm_mm_put_block(bo->vm_node);
686 bo->vm_node = NULL;
690 ttm_mem_io_free_vm(bo);
692 ttm_bo_cleanup_refs_or_queue(bo);
693 kref_put(&bo->list_kref, ttm_bo_release_list);
699 struct ttm_buffer_object *bo = *p_bo;
700 struct ttm_bo_device *bdev = bo->bdev;
704 kref_put(&bo->kref, ttm_bo_release);
723 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
726 struct ttm_bo_device *bdev = bo->bdev;
732 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
742 BUG_ON(!atomic_read(&bo->reserved));
744 evict_mem = bo->mem;
753 bdev->driver->evict_flags(bo, &placement);
754 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
759 bo);
760 ttm_bo_mem_space_debug(bo, &placement);
765 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
770 ttm_bo_mem_put(bo, &evict_mem);
773 bo->evicted = true;
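In ttm_bo_evict the driver decides where an evicted buffer may go (753) before ttm_bo_mem_space looks for room there (754). A hedged sketch of what such an evict_flags hook typically looks like; the mydrv_ names are made up, and the ttm_placement layout assumed here (fpfn/lpfn plus arrays of placement flags) is the one this era of TTM used:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Evict everything to cached system memory. */
static uint32_t mydrv_evict_flag = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;

static void mydrv_evict_flags(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement)
{
	placement->fpfn = 0;
	placement->lpfn = 0;			/* no page-range restriction */
	placement->num_placement = 1;
	placement->placement = &mydrv_evict_flag;
	placement->num_busy_placement = 1;
	placement->busy_placement = &mydrv_evict_flag;
}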
785 struct ttm_buffer_object *bo;
795 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
796 kref_get(&bo->list_kref);
798 if (!list_empty(&bo->ddestroy)) {
800 ret = ttm_bo_cleanup_refs(bo, interruptible,
802 kref_put(&bo->list_kref, ttm_bo_release_list);
810 ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
815 ret = ttm_bo_wait_unreserved(bo, interruptible);
817 kref_put(&bo->list_kref, ttm_bo_release_list);
828 put_count = ttm_bo_del_from_lru(bo);
833 ttm_bo_list_ref_sub(bo, put_count, true);
835 ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
836 ttm_bo_unreserve(bo);
838 kref_put(&bo->list_kref, ttm_bo_release_list);
842 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
844 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
855 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
863 struct ttm_bo_device *bdev = bo->bdev;
868 ret = (*man->func->get_node)(man, bo, placement, mem);
936 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
942 struct ttm_bo_device *bdev = bo->bdev;
967 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
981 ret = (*man->func->get_node)(man, bo, placement, mem);
1012 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
1029 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
1043 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
1045 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
1048 return wait_event_interruptible(bo->event_queue,
1049 atomic_read(&bo->cpu_writers) == 0);
1053 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1060 struct ttm_bo_device *bdev = bo->bdev;
1062 BUG_ON(!atomic_read(&bo->reserved));
1070 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
1074 mem.num_pages = bo->num_pages;
1076 mem.page_alignment = bo->mem.page_alignment;
1082 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
1085 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1088 ttm_bo_mem_put(bo, &mem);
1112 int ttm_bo_validate(struct ttm_buffer_object *bo,
1119 BUG_ON(!atomic_read(&bo->reserved));
1123 (placement->lpfn - placement->fpfn) < bo->num_pages)
1128 ret = ttm_bo_mem_compat(placement, &bo->mem);
1130 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
1138 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1144 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1145 ret = ttm_bo_add_ttm(bo, true);
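ttm_bo_validate must be called with the buffer reserved (BUG_ON at 1119). A hedged caller-side sketch; the argument counts match the calls visible at 1243 and 1768, while move_to_vram(), the chosen flags and the single-entry placement are illustrative only:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical helper: migrate a buffer into write-combined VRAM. */
static int move_to_vram(struct ttm_buffer_object *bo)
{
	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
	struct ttm_placement placement = {
		.num_placement = 1,
		.placement = &flags,
		.num_busy_placement = 1,
		.busy_placement = &flags,
	};
	int ret;

	/* interruptible, wait if already reserved, no deadlock-avoidance seq */
	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (ret)
		return ret;

	/* interruptible; allow waiting for other reserves and for the GPU */
	ret = ttm_bo_validate(bo, &placement, true, false, false);

	ttm_bo_unreserve(bo);
	return ret;
}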
1153 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1157 (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1163 struct ttm_buffer_object *bo,
1182 (*destroy)(bo);
1184 kfree(bo);
1193 (*destroy)(bo);
1195 kfree(bo);
1199 bo->destroy = destroy;
1201 kref_init(&bo->kref);
1202 kref_init(&bo->list_kref);
1203 atomic_set(&bo->cpu_writers, 0);
1204 atomic_set(&bo->reserved, 1);
1205 init_waitqueue_head(&bo->event_queue);
1206 INIT_LIST_HEAD(&bo->lru);
1207 INIT_LIST_HEAD(&bo->ddestroy);
1208 INIT_LIST_HEAD(&bo->swap);
1209 INIT_LIST_HEAD(&bo->io_reserve_lru);
1210 bo->bdev = bdev;
1211 bo->glob = bdev->glob;
1212 bo->type = type;
1213 bo->num_pages = num_pages;
1214 bo->mem.size = num_pages << PAGE_SHIFT;
1215 bo->mem.mem_type = TTM_PL_SYSTEM;
1216 bo->mem.num_pages = bo->num_pages;
1217 bo->mem.mm_node = NULL;
1218 bo->mem.page_alignment = page_alignment;
1219 bo->mem.bus.io_reserved_vm = false;
1220 bo->mem.bus.io_reserved_count = 0;
1221 bo->buffer_start = buffer_start & PAGE_MASK;
1222 bo->priv_flags = 0;
1223 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1224 bo->seq_valid = false;
1225 bo->persistent_swap_storage = persistent_swap_storage;
1226 bo->acc_size = acc_size;
1227 atomic_inc(&bo->glob->bo_count);
1229 ret = ttm_bo_check_placement(bo, placement);
1237 if (bo->type == ttm_bo_type_device) {
1238 ret = ttm_bo_setup_vm(bo);
1243 ret = ttm_bo_validate(bo, placement, interruptible, false, false);
1247 ttm_bo_unreserve(bo);
1251 ttm_bo_unreserve(bo);
1252 ttm_bo_unref(&bo);
1297 struct ttm_buffer_object *bo;
1301 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1302 if (unlikely(bo == NULL))
1306 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1310 *p_bo = bo;
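Lines 1204-1227 show the state ttm_bo_init establishes: the object starts reserved, backed by num_pages of cached system memory, and is only unreserved (1247) after the caller's placement has been validated (1243). A small worked example of the size bookkeeping, assuming the usual round-up to whole pages (the rounding itself is not visible in this listing):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long size = 70 * 1024;				/* 70 KiB request */
	unsigned long num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long mem_size = num_pages << PAGE_SHIFT;	/* bo->mem.size   */

	/* 70 KiB -> 18 pages -> 72 KiB actually reserved */
	printf("%lu pages, %lu bytes\n", num_pages, mem_size);
	return 0;
}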
1603 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
1605 struct ttm_bo_device *bdev = bo->bdev;
1606 loff_t offset = (loff_t) bo->addr_space_offset;
1607 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1612 ttm_mem_io_free_vm(bo);
1615 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1617 struct ttm_bo_device *bdev = bo->bdev;
1618 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1621 ttm_bo_unmap_virtual_locked(bo);
1628 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1630 struct ttm_bo_device *bdev = bo->bdev;
1634 unsigned long offset = bo->vm_node->start;
1649 rb_link_node(&bo->vm_rb, parent, cur);
1650 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1656 * @bo: the buffer to allocate address space for
1664 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1666 struct ttm_bo_device *bdev = bo->bdev;
1675 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1676 bo->mem.num_pages, 0, 0);
1678 if (unlikely(bo->vm_node == NULL)) {
1683 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1684 bo->mem.num_pages, 0);
1686 if (unlikely(bo->vm_node == NULL)) {
1691 ttm_bo_vm_insert_rb(bo);
1693 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
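ttm_bo_setup_vm carves a num_pages-long range out of the device's address-space drm_mm (1675-1684), inserts the object into the rb tree used for fault lookup, and publishes the range's start in bytes as addr_space_offset (1693); the same offset and length are used at 1606-1607 to zap any existing CPU mappings. A tiny worked example with made-up numbers:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Assume drm_mm handed back a node starting at page 0x10000. */
	unsigned long vm_node_start = 0x10000;
	uint64_t addr_space_offset = (uint64_t)vm_node_start << PAGE_SHIFT;

	/* 0x10000000: the offset user space would pass to mmap() on the
	 * DRM device fd to map this buffer object. */
	printf("0x%llx\n", (unsigned long long)addr_space_offset);
	return 0;
}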
1701 int ttm_bo_wait(struct ttm_buffer_object *bo,
1704 struct ttm_bo_driver *driver = bo->bdev->driver;
1705 struct ttm_bo_device *bdev = bo->bdev;
1710 if (likely(bo->sync_obj == NULL))
1713 while (bo->sync_obj) {
1715 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1716 void *tmp_obj = bo->sync_obj;
1717 bo->sync_obj = NULL;
1718 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1728 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1729 sync_obj_arg = bo->sync_obj_arg;
1739 if (likely(bo->sync_obj == sync_obj &&
1740 bo->sync_obj_arg == sync_obj_arg)) {
1741 void *tmp_obj = bo->sync_obj;
1742 bo->sync_obj = NULL;
1744 &bo->priv_flags);
1759 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1761 struct ttm_bo_device *bdev = bo->bdev;
1768 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1772 ret = ttm_bo_wait(bo, false, true, no_wait);
1775 atomic_inc(&bo->cpu_writers);
1776 ttm_bo_unreserve(bo);
1781 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1783 if (atomic_dec_and_test(&bo->cpu_writers))
1784 wake_up_all(&bo->event_queue);
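ttm_bo_synccpu_write_grab reserves the buffer, waits for the GPU, bumps cpu_writers and unreserves again (1768-1776); the release side decrements the counter and wakes waiters (1783-1784). A hedged caller-side sketch bracketing a CPU write; fill_buffer_cpu() is a made-up stand-in for the driver's real mapping-and-copy path:

#include <drm/ttm/ttm_bo_api.h>

/* Stand-in for the driver's real CPU write path (kmap + memcpy etc.);
 * purely illustrative. */
static int fill_buffer_cpu(struct ttm_buffer_object *bo,
			   const void *data, size_t len)
{
	return 0;
}

/* Bracket a CPU write so it cannot race pending GPU work on bo. */
static int cpu_update(struct ttm_buffer_object *bo,
		      const void *data, size_t len)
{
	int ret;

	/* no_wait == false: block until the GPU is done rather than -EBUSY */
	ret = ttm_bo_synccpu_write_grab(bo, false);
	if (ret)
		return ret;

	ret = fill_buffer_cpu(bo, data, len);

	ttm_bo_synccpu_write_release(bo);	/* drop cpu_writers, wake waiters */
	return ret;
}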
1797 struct ttm_buffer_object *bo;
1809 bo = list_first_entry(&glob->swap_lru,
1811 kref_get(&bo->list_kref);
1813 if (!list_empty(&bo->ddestroy)) {
1815 (void) ttm_bo_cleanup_refs(bo, false, false, false);
1816 kref_put(&bo->list_kref, ttm_bo_release_list);
1827 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1830 ttm_bo_wait_unreserved(bo, false);
1831 kref_put(&bo->list_kref, ttm_bo_release_list);
1837 put_count = ttm_bo_del_from_lru(bo);
1840 ttm_bo_list_ref_sub(bo, put_count, true);
1846 spin_lock(&bo->bdev->fence_lock);
1847 ret = ttm_bo_wait(bo, false, false, false);
1848 spin_unlock(&bo->bdev->fence_lock);
1853 if ((bo->mem.placement & swap_placement) != swap_placement) {
1856 evict_mem = bo->mem;
1861 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1867 ttm_bo_unmap_virtual(bo);
1874 if (bo->bdev->driver->swap_notify)
1875 bo->bdev->driver->swap_notify(bo);
1877 ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
1886 atomic_set(&bo->reserved, 0);
1887 wake_up_all(&bo->event_queue);
1888 kref_put(&bo->list_kref, ttm_bo_release_list);