Lines Matching defs:bo — drivers/gpu/drm/ttm/ttm_bo_vm.c

47 struct ttm_buffer_object *bo;
51 bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
52 cur_offset = bo->vm_node->start;
55 best_bo = bo;
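
The fragments at 47-55 are the body of ttm_bo_vm_lookup_rb(), which walks the device's address-space red-black tree to find the buffer object backing a page range. The following is a reconstruction consistent with the matched lines, assuming the historical TTM layout (a vm_rb node and a drm_mm vm_node embedded in each bo, an addr_space_rb root in the device); treat it as a sketch, not the authoritative source:

static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
                                                     unsigned long page_start,
                                                     unsigned long num_pages)
{
        struct rb_node *cur = bdev->addr_space_rb.rb_node;
        unsigned long cur_offset;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *best_bo = NULL;

        while (likely(cur != NULL)) {
                bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
                cur_offset = bo->vm_node->start;
                if (page_start >= cur_offset) {
                        /* Candidate: rightmost node starting at or
                         * before page_start. */
                        cur = cur->rb_right;
                        best_bo = bo;
                        if (page_start == cur_offset)
                                break;
                } else
                        cur = cur->rb_left;
        }

        if (unlikely(best_bo == NULL))
                return NULL;

        /* Reject lookups that spill past the end of the object. */
        if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
                     (page_start + num_pages)))
                return NULL;

        return best_bo;
}

best_bo tracks the last node whose start offset is <= page_start, so the single bounds check at the end is enough to confirm the whole range lies inside one object.
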
74 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
76 struct ttm_bo_device *bdev = bo->bdev;
87 &bdev->man[bo->mem.mem_type];
95 ret = ttm_bo_reserve(bo, true, true, false, 0);
103 ret = bdev->driver->fault_reserve_notify(bo);
124 if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
125 ret = ttm_bo_wait(bo, false, true, false);
140 ret = ttm_mem_io_reserve_vm(bo);
147 bo->vm_node->start - vma->vm_pgoff;
149 bo->vm_node->start - vma->vm_pgoff;
151 if (unlikely(page_offset >= bo->num_pages)) {
161 * the bo->mutex, as we should be the only writers.
165 * TODO: Add a list of vmas to the bo, and change the
169 if (bo->mem.bus.is_iomem) {
170 vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
173 ttm = bo->ttm;
174 vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
176 ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
190 if (bo->mem.bus.is_iomem)
191 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
224 ttm_bo_unreserve(bo);
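
Lines 74-224 are from ttm_bo_vm_fault(). Below is a condensed sketch of the control flow implied by the matched lines, assuming the old struct vm_fault (virtual_address field) and the era's ttm_tt_get_page() helper; the prefault loop over neighbouring pages and the memory-type-manager io locking hinted at by line 87 (&bdev->man[bo->mem.mem_type]) are elided:

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long address = (unsigned long)vmf->virtual_address;
        unsigned long page_offset;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int retval = VM_FAULT_NOPAGE;
        int ret;

        /*
         * mmap_sem is held here, while reserve normally nests the
         * other way around: trylock and let the fault be retried on
         * contention.
         */
        ret = ttm_bo_reserve(bo, true, true, false, 0);
        if (unlikely(ret != 0))
                return VM_FAULT_NOPAGE;

        /* Give the driver a chance to move the bo somewhere mappable. */
        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                if (unlikely(ret != 0)) {
                        retval = (ret == -EBUSY || ret == -ERESTARTSYS) ?
                            VM_FAULT_NOPAGE : VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }

        /* Wait for a pipelined move to finish before exposing pages. */
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                ret = ttm_bo_wait(bo, false, true, false);
                if (unlikely(ret != 0)) {
                        retval = (ret != -ERESTARTSYS) ?
                            VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
                        goto out_unlock;
                }
        }

        /* io locking via ttm_mem_io_lock() elided in this sketch. */
        ret = ttm_mem_io_reserve_vm(bo);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;

        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        /*
         * Only the caching bits of vma->vm_page_prot change here,
         * which the comment at lines 161-165 argues is safe under the
         * bo reservation even though mmap_sem is only held for read.
         */
        if (bo->mem.bus.is_iomem) {
                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                vma->vm_page_prot);
        } else {
                ttm = bo->ttm;
                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    vm_get_page_prot(vma->vm_flags) :
                    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
        }

        if (bo->mem.bus.is_iomem)
                pfn = ((bo->mem.bus.base + bo->mem.bus.offset)
                       >> PAGE_SHIFT) + page_offset;
        else {
                page = ttm_tt_get_page(ttm, page_offset);
                if (unlikely(!page)) {
                        retval = VM_FAULT_OOM;
                        goto out_unlock;
                }
                pfn = page_to_pfn(page);
        }

        /* -EBUSY means the PTE is already present; retrying is fine. */
        ret = vm_insert_mixed(vma, address, pfn);
        if (unlikely(ret != 0 && ret != -EBUSY))
                retval = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;

out_unlock:
        ttm_bo_unreserve(bo);
        return retval;
}

The no-wait reserve plus VM_FAULT_NOPAGE return is the key trick: rather than sleeping on the reservation with mmap_sem held, the handler bounces the fault back to the kernel, which simply retries it.
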
230 struct ttm_buffer_object *bo =
233 (void)ttm_bo_reference(bo);
238 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
240 ttm_bo_unref(&bo);
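
Lines 230-240 are the VMA open/close hooks: they mirror the bo reference held in vma->vm_private_data across VMA duplication (fork, split) and teardown. Together with the fault handler they form the vm_operations table; a sketch consistent with the fragments:

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        /* A duplicated VMA shares the bo: take another reference. */
        (void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_unref(&bo);
        vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close
};
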
254 struct ttm_buffer_object *bo;
258 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
260 if (likely(bo != NULL))
261 ttm_bo_reference(bo);
264 if (unlikely(bo == NULL)) {
270 driver = bo->bdev->driver;
275 ret = driver->verify_access(bo, filp);
282 * Note: We're transferring the bo reference to
286 vma->vm_private_data = bo;
290 ttm_bo_unref(&bo);
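
Lines 254-290 belong to ttm_bo_mmap(): look up the bo under the device's vm_lock, take a reference, let the driver veto the mapping through verify_access(), then hand the reference over to the VMA. A reconstruction under the same era assumptions (rwlock-protected rb tree, VM_RESERVED still defined):

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
                                 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL)) {
                printk(KERN_ERR TTM_PFX
                       "Could not find buffer object to map.\n");
                return -EINVAL;
        }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here; ttm_bo_vm_close() will drop it.
         */
        vma->vm_private_data = bo;
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
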
295 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
301 vma->vm_private_data = ttm_bo_reference(bo);
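
ttm_fbdev_mmap() (lines 295-301) is the same wiring for an fbdev-style mapping where the caller already holds the bo, so no lookup is needed; a sketch:

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        /* fbdev mappings are bo-relative: offset zero only. */
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
}
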
312 struct ttm_buffer_object *bo;
327 bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
328 if (likely(bo != NULL))
329 ttm_bo_reference(bo);
332 if (unlikely(bo == NULL))
335 driver = bo->bdev->driver;
341 ret = driver->verify_access(bo, filp);
345 kmap_offset = dev_offset - bo->vm_node->start;
346 if (unlikely(kmap_offset >= bo->num_pages)) {
352 io_size = bo->num_pages - kmap_offset;
360 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
372 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
374 ttm_bo_unreserve(bo);
387 ttm_bo_unreserve(bo);
388 ttm_bo_unref(&bo);
397 ttm_bo_unref(&bo);
401 ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
417 if (unlikely(kmap_offset >= bo->num_pages))
421 io_size = bo->num_pages - kmap_offset;
429 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
440 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
442 ttm_bo_unreserve(bo);
455 ttm_bo_unreserve(bo);
456 ttm_bo_unref(&bo);
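
Lines 312-397 (ttm_bo_io) and 401-456 (ttm_bo_fbdev_io) share one shape: clamp the request to the object, reserve, kmap the touched page range, and copy through the kernel mapping. Below is a sketch of ttm_bo_fbdev_io() consistent with the fragments; ttm_bo_io() differs only in first resolving the bo from the device offset (the ttm_bo_vm_lookup_rb() call at line 327, plus verify_access) and rebasing f_pos by bo->vm_node->start (line 345). Error handling is condensed:

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
                        char __user *rbuf, size_t count, loff_t *f_pos,
                        bool write)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset, kmap_end, kmap_num;
        unsigned long page_offset;
        size_t io_size;
        char *virtual;
        bool no_wait = false;
        bool dummy;
        int ret;

        /* fbdev io is bo-relative, so f_pos indexes the bo directly. */
        kmap_offset = (*f_pos >> PAGE_SHIFT);
        if (unlikely(kmap_offset >= bo->num_pages))
                return -EFBIG;

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + io_size - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        if (unlikely(ret != 0))
                return (ret == -EBUSY) ? -EAGAIN : ret;

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                return ret;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        /* Both io paths consume a bo reference on exit (cf. lines 388,
         * 397 and 456 in the listing). */
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return -EFAULT;

        *f_pos += io_size;
        return io_size;
}
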