Lines Matching refs:vma (drivers/gpu/drm/ttm/ttm_bo_vm.c)

45 struct vm_area_struct *vma,
69 up_read(&vma->vm_mm->mmap_sem);
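
Lines 45 and 69 sit inside ttm_bo_vm_fault_idle(), the helper that waits for a still-moving buffer object before the fault proceeds. (The vma parameter in the fault signature at line 86 and the use of mmap_sem place this listing around the 4.x kernels; the fault callback lost its vma argument in 4.11, and mmap_sem became mmap_lock in 5.8.) A minimal sketch of the retry pattern follows; the fence wait is elided because its exact form varies across versions, and only up_read() and the reference helpers are taken from this file:

static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	/* Sketch, not the verbatim kernel function. */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;

		/* Pin the BO so it survives dropping the lock. */
		(void)ttm_bo_reference(bo);
		up_read(&vma->vm_mm->mmap_sem);	/* line 69 above */
		/* ... wait for the BO's move fence here ... */
		ttm_bo_unreference(&bo);
		return VM_FAULT_RETRY;
	}

	/* No retry allowed: wait synchronously, mmap_sem still held. */
	return 0;
}
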
86 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
89 vma->vm_private_data;
117 up_read(&vma->vm_mm->mmap_sem);
160 ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
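
Lines 86-160 are the top of ttm_bo_vm_fault() itself. The characteristic move, sketched below under the same caveats, is reserving the BO with a no-wait trylock: on contention the handler drops mmap_sem (line 117) and returns VM_FAULT_RETRY rather than sleeping with the whole address space locked. ttm_bo_reserve() has carried slightly different signatures over the years, so treat the call as illustrative:

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data; /* line 89 */
	int ret;

	/* Trylock the reservation; never sleep on it under mmap_sem. */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			(void)ttm_bo_reference(bo);
			up_read(&vma->vm_mm->mmap_sem);	/* line 117 */
			/* ... block on the reservation here ... */
			ttm_bo_unreference(&bo);
			return VM_FAULT_RETRY;
		}
		return VM_FAULT_NOPAGE;
	}

	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);	/* line 160 */
	/* ... */
}
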
177 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
178 vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
179 page_last = vma_pages(vma) + vma->vm_pgoff -
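
Lines 177-179 translate the faulting user address into a page index inside the buffer object: the page offset within the vma, plus the vma's file offset, minus where the BO starts in the DRM address space (drm_vma_node_start() returns that start, in pages). A standalone demonstration of the arithmetic, with all values made up:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long vm_start   = 0x7f0000000000UL; /* vma->vm_start */
	unsigned long vm_pgoff   = 0x100010UL;       /* vma->vm_pgoff */
	unsigned long node_start = 0x100000UL;       /* drm_vma_node_start() */
	unsigned long vma_npages = 16;               /* vma_pages(vma) */
	unsigned long address    = vm_start + 3 * (1UL << PAGE_SHIFT);

	/* Same formulas as lines 177-179. */
	unsigned long page_offset = ((address - vm_start) >> PAGE_SHIFT)
				  + vm_pgoff - node_start;
	unsigned long page_last   = vma_npages + vm_pgoff - node_start;

	printf("page_offset=%lu page_last=%lu\n", page_offset, page_last);
	/* Prints: page_offset=19 page_last=32 */
	return 0;
}

page_last bounds the handler's prefault loop, so neighbouring PTEs are only filled in while they still fall inside the mapping.
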
188 * Make a local vma copy to modify the page_prot member
189 * and vm_flags if necessary. The vma parameter is protected
192 cvma = *vma;
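
Lines 188-192 show the trick the quoted comment describes: the handler may need a different page protection (for example write-combined for I/O memory) but holds mmap_sem only for read, so it must not write to the shared vma. It therefore works on a stack copy. A minimal sketch, assuming the ttm_io_prot() helper of that era (its signature changed in later kernels):

struct vm_area_struct cvma;

cvma = *vma;				/* line 192 */
cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

if (bo->mem.bus.is_iomem) {
	/* I/O memory: honor the placement's caching flags. */
	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
					cvma.vm_page_prot);
}

Everything after this point inserts PTEs through &cvma, so the shared vma itself is never modified.
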
225 page->mapping = vma->vm_file->f_mapping;
231 if (vma->vm_flags & VM_MIXEDMAP)
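
Lines 225 and 231 come from the prefault loop. System-RAM pages get page->mapping pointed at the file's address_space so reverse mapping can find them, and the PTE is installed with the inserter matching how the vma was set up in ttm_bo_mmap(). A loop-body sketch reusing the names from the listing (page, pfn, address, cvma); the pfn argument types of these inserters changed across kernel versions, so this is illustrative only:

if (!bo->mem.bus.is_iomem) {
	/* System RAM: make the page discoverable via its mapping. */
	page->mapping = vma->vm_file->f_mapping;	/* line 225 */
}

if (vma->vm_flags & VM_MIXEDMAP)	/* line 231 */
	ret = vm_insert_mixed(&cvma, address, pfn);
else
	ret = vm_insert_pfn(&cvma, address, pfn);
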
260 static void ttm_bo_vm_open(struct vm_area_struct *vma)
263 (struct ttm_buffer_object *)vma->vm_private_data;
265 WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
270 static void ttm_bo_vm_close(struct vm_area_struct *vma)
272 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
275 vma->vm_private_data = NULL;
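
Lines 260-275 are the vm_operations open/close pair. The reference-count calls do not mention vma, so the refs:vma filter hides them; a reconstruction that should be close to the original shows what they do: open() takes an extra reference for each new mapping (including ones duplicated by fork()), and close() drops it, which is what keeps the BO alive as long as any mapping exists.

static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unreference(&bo);
	vma->vm_private_data = NULL;
}
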
308 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
315 bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
328 vma->vm_ops = &ttm_bo_vm_ops;
332 * vma->vm_private_data here.
335 vma->vm_private_data = bo;
339 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
344 vma->vm_flags |= VM_MIXEDMAP;
345 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
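
Lines 308-345 span ttm_bo_mmap(). The lines the search filtered out perform a driver access check and the error unwind; the following hedged reconstruction fills those gaps around the quoted lines:

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	/* Translate the mmap offset into a buffer object (line 315). */
	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	/* Hidden by the refs:vma filter: the driver may veto the
	 * mapping; DRM authentication checks hook in here. */
	driver = bo->bdev->driver;
	if (!driver->verify_access) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (ret)
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/* The lookup reference is handed over to the vma (line 335). */
	vma->vm_private_data = bo;

	/* MIXEDMAP rather than PFNMAP; see the comment at line 339. */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unreference(&bo);
	return ret;
}
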
353 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
355 if (vma->vm_pgoff != 0)
358 vma->vm_ops = &ttm_bo_vm_ops;
359 vma->vm_private_data = ttm_bo_reference(bo);
360 vma->vm_flags |= VM_MIXEDMAP;
361 vma->vm_flags |= VM_IO | VM_DONTEXPAND;
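
Lines 353-361 contain almost all of ttm_fbdev_mmap(); only its two return statements lack a vma reference and were filtered out. A hedged reconstruction, for completeness; note that unlike ttm_bo_mmap() it does not set VM_DONTDUMP:

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	/* fbdev maps the whole object, so no offset is allowed. */
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}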