Lines Matching refs:vma

47                struct vm_area_struct *vma,
162 struct vm_area_struct *vma;
170 struct vm_area_struct *vma = st->vma;
180 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
183 rc = xen_remap_domain_mfn_range(vma,
186 vma->vm_page_prot,
200 struct vm_area_struct *vma;
226 vma = find_vma(mm, msg->va);
229 if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
231 vma->vm_private_data = PRIV_VMA_LOCKED;
234 state.va = vma->vm_start;
235 state.vma = vma;
255 struct vm_area_struct *vma;
279 struct vm_area_struct *vma = st->vma;
280 struct page **pages = vma->vm_private_data;
287 ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
288 st->vma->vm_page_prot, st->domain,
343 * the vma with the page info to use later.
346 static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
362 BUG_ON(vma->vm_private_data != NULL);
363 vma->vm_private_data = pages;
375 struct vm_area_struct *vma;
423 vma = find_vma(mm, m.addr);
424 if (!vma ||
425 vma->vm_ops != &privcmd_vm_ops) {
441 if (vma->vm_private_data == NULL) {
442 if (m.addr != vma->vm_start ||
443 m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
448 ret = alloc_empty_pages(vma, m.num);
452 vma->vm_private_data = PRIV_VMA_LOCKED;
454 if (m.addr < vma->vm_start ||
455 m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
459 if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
466 state.vma = vma;
532 static void privcmd_close(struct vm_area_struct *vma)
534 struct page **pages = vma->vm_private_data;
535 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
541 rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
550 static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
552 printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
553 vma, vma->vm_start, vma->vm_end,
564 static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
568 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
570 vma->vm_ops = &privcmd_vm_ops;
571 vma->vm_private_data = NULL;
588 struct vm_area_struct *vma,
592 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
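All of the matches above appear to come from the Xen privcmd driver (drivers/xen/privcmd.c in a ~3.x-era kernel, judging by the privcmd_fault(vma, vmf) and xen_remap_domain_mfn_range signatures). Read together they trace the lifetime of the driver's VMA: privcmd_mmap marks the region as a non-copyable PFN mapping and leaves vm_private_data NULL; the MMAP/MMAPBATCH ioctls then look the VMA up with find_vma, either attach ballooned pages via alloc_empty_pages (auto-translated guests) or set the PRIV_VMA_LOCKED sentinel, and map foreign frames with xen_remap_domain_mfn_range; privcmd_fault only logs and fails the fault, since mappings are only ever installed through the ioctls; privcmd_close finally unmaps and frees whatever was attached. The sketch below is a reconstruction of the two ends of that lifecycle built around the matched lines, for orientation only; it is not a verbatim copy of the driver, and the guard conditions and error handling are assumptions from context.

/*
 * Sketch only: reconstructed from the matched lines above, assuming the
 * pre-4.x privcmd driver these signatures belong to.
 */
static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY: foreign-domain mappings cannot be recreated on fork */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;	/* filled in by the mmap-batch path */

	return 0;
}

static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	int rc;

	/* Nothing to release unless alloc_empty_pages() attached ballooned
	 * pages (auto-translated guests); PV guests only carry the
	 * PRIV_VMA_LOCKED sentinel in vm_private_data. */
	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	/* Undo the foreign mappings, then return the backing pages
	 * to the balloon. */
	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	kfree(pages);
}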